blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
490db4e8d1d756d73832a1634cf6a28177fd6c25 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/optimized_2040.py | 8a363172a1b50d5cfa0a341f9e7897d38cb2722f | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,580 | py | import _surface
import chimera
# chimera.runCommand is imported opportunistically; a failure is ignored
# because the rest of the script does not depend on it.
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
# Prefer the Marker_Set API when available; otherwise fall back to creating
# marker sets through the volume path dialog (older VolumePath versions).
try:
    from VolumePath import Marker_Set, Link
    new_marker_set=Marker_Set
except:
    from VolumePath import volume_path_dialog
    d= volume_path_dialog(True)
    new_marker_set= d.new_marker_set
# Registries for the marker sets and surface sets created below.
marker_sets={}
surf_sets={}
# Marker table: (marker-set name, (x, y, z) position, (r, g, b) color).
# This replaces ~100 copy-pasted if/place_marker stanzas with one loop;
# the same sets are created in the same order with identical arguments.
marker_radius = 21.9005
marker_data = [
    ('Cog1_Anch', (573.679, 413.934, 589.856), (0, 0, 1)),
    ('Cog2_GFPN', (510.524, 576.355, 443.039), (1, 0.5, 0)),
    ('Cog2_GFPC', (382.804, 624.377, 470.584), (1, 0.5, 0)),
    ('Cog2_Anch', (403.834, 550.392, 767.711), (1, 0.5, 0)),
    ('Cog3_GFPN', (491.978, 565.805, 484.115), (1, 0.87, 0)),
    ('Cog3_GFPC', (514.203, 574.851, 469.244), (1, 0.87, 0)),
    ('Cog3_Anch', (407.78, 393.952, 433.493), (1, 0.87, 0)),
    ('Cog4_GFPN', (397.715, 422.213, 633.49), (0.97, 0.51, 0.75)),
    ('Cog4_GFPC', (299.134, 345.348, 540.342), (0.97, 0.51, 0.75)),
    ('Cog4_Anch', (661.347, 715.119, 527.436), (0.97, 0.51, 0.75)),
    ('Cog5_GFPN', (487.987, 544.677, 575.148), (0.39, 0.31, 0.14)),
    ('Cog5_GFPC', (469.098, 619.63, 444.515), (0.39, 0.31, 0.14)),
    ('Cog5_Anch', (393.42, 571.365, 677.83), (0.39, 0.31, 0.14)),
    ('Cog6_GFPN', (473.712, 591.978, 490.691), (0.6, 0.31, 0.64)),
    ('Cog6_GFPC', (538.76, 519.949, 518.606), (0.6, 0.31, 0.64)),
    ('Cog6_Anch', (430.746, 442.112, 406.136), (0.6, 0.31, 0.64)),
    ('Cog7_GFPN', (539.245, 560.924, 539.566), (0.89, 0.1, 0.1)),
    ('Cog7_GFPC', (445.16, 667.254, 480.14), (0.89, 0.1, 0.1)),
    ('Cog7_Anch', (311.998, 611.308, 568.923), (0.89, 0.1, 0.1)),
    ('Cog8_GFPC', (499.633, 590.037, 527.988), (0.3, 0.69, 0.29)),
    ('Cog8_Anch', (449.243, 637.42, 675.147), (0.3, 0.69, 0.29)),
]
# Create each marker set once (reusing a pre-existing set of the same name,
# as the original code did) and drop one sphere marker at its position.
for set_name, position, color in marker_data:
    if set_name not in marker_sets:
        marker_sets[set_name] = new_marker_set(set_name)
    s = marker_sets[set_name]
    mark = s.place_marker(position, color, marker_radius)
# Register every collected surface set as an open model in Chimera.
for key in surf_sets:
    chimera.openModels.add([surf_sets[key]])
| [
"[email protected]"
] | |
1377caae35112646cf15378fcb1642bc351c221b | d82b879f41e906589a0a6ad5a6a09e0a0032aa3f | /ObservationScripts/observe_source_rtcor.py | bf3940f7e3082d056e6821d91546c3ffa169c64c | [] | no_license | SETIatHCRO/ATA-Utils | 66718eed669882792148fe0b7a2f977cd0f6ac2e | 59f4d21b086effaf41d5e11e338ce602c803cfd0 | refs/heads/master | 2023-08-16T20:41:44.233507 | 2023-08-10T20:39:13 | 2023-08-10T20:39:13 | 137,617,987 | 5 | 5 | null | 2023-08-10T20:39:14 | 2018-06-17T00:07:05 | Jupyter Notebook | UTF-8 | Python | false | false | 1,840 | py | #!/home/obsuser/miniconda3/envs/ATAobs/bin/python
import atexit
from ATATools import ata_control, logger_defaults
from SNAPobs import snap_dada, snap_if
import numpy as np
import os,sys
import time
import argparse
import logging
import os
def main():
    """Run one ATA observation of 3C84: reserve antennas, tune LOs and IFs,
    then drive the external correlator-recording scripts for ~600 s."""
    # Configures program-wide logging; the logger object itself is unused.
    logger = logger_defaults.getProgramLogger("observe",
            loglevel=logging.INFO)
    # Define antennas
    ant_list = ["1c", "1g", "1h", "1k", "1e", "2a", "2b", "2c",
            "2e", "2h", "2j", "2k", "2l", "2m", "3c", "3d",
            "3l", "4j", "5b", "4g"]
    ata_control.reserve_antennas(ant_list)
    # Ensure the antennas are released even if the observation aborts.
    atexit.register(ata_control.release_antennas, ant_list, False)
    # Every antenna is used with both LOs 'b' and 'c' (e.g. "1cB", "1cC").
    antlo_list = [ant+lo.upper() for ant in ant_list for lo in ['b','c']]
    # 3000 MHz on both LOs for all antennas.
    freqs = [3000]*len(ant_list)
    freqs_c = [3000]*len(ant_list)
    # set LO + focus frequencies
    ata_control.set_freq(freqs, ant_list, lo='b', nofocus=True)
    ata_control.set_freq(freqs_c, ant_list, lo='c')
    # Give the frequency/focus change time to settle.
    time.sleep(30)
    # Track source
    source = "3c84"
    ata_control.make_and_track_ephems(source, ant_list)
    # autotune + IF tuning
    ata_control.autotune(ant_list)
    snap_if.tune_if_antslo(antlo_list)
    print("Tuning complete")
    #time.sleep(20)
    xgpu_int_time = 6.5536 #seconds
    obs_time = 600 #seconds
    print("="*79)
    print("Setting correlator integration time")
    print("set_postproc_keys.py -s --prefix XTIMEINT=%f" %xgpu_int_time)
    os.system("set_postproc_keys.py -s --prefix XTIMEINT=%f" %xgpu_int_time)
    print("="*79)
    print("Starting new obs")
    print("start_record_in_x.py -H 1 2 3 4 5 6 7 8 -i 10 -n %i" %obs_time)
    os.system("start_record_in_x.py -H 1 2 3 4 5 6 7 8 -i 10 -n %i" %obs_time)
    print("Recording...")
    # Wait out the recording plus a 20 s safety margin.
    time.sleep(obs_time+20)
    print("="*79)
    print("Obs completed")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
68503476a63039e16c0973604a668ad6bdf2eec2 | 8d6f9a3d65a189d99eff10e30cfabb0b761b635f | /scripts/support_hypercube_measures.py | d9bcd6c5528f1e6e38aab8773754772429a2826d | [
"BSD-3-Clause"
] | permissive | arita37/mystic | db2ebbed139b163e3e5df49c2325b3de35dd8cd0 | 3dcdd4627eb759672091859e8334be075bfd25a5 | refs/heads/master | 2021-01-22T20:19:22.569893 | 2016-08-20T15:52:46 | 2016-08-20T15:52:46 | 66,545,670 | 1 | 0 | null | 2016-08-25T09:42:31 | 2016-08-25T09:42:31 | null | UTF-8 | Python | false | false | 466 | py | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
from mystic.support import hypercube_measures
# Re-export the implementation's docstring as this script's module docstring
# so pydoc/--help show the underlying usage text.
__doc__ = hypercube_measures.__doc__
if __name__ == '__main__':
    import sys
    # Forward all command-line arguments (minus the program name) unchanged.
    hypercube_measures(sys.argv[1:])
# EOF
| [
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] | mmckerns@968178ea-60bd-409e-af13-df8a517b6005 |
8e645a2eefb43001aacb88d512c374af92c2427d | df690ac0484ff04cb63f71f528a9d0a0e557d6a3 | /.history/ws_20210608101643.py | 7b6d31a7d4f3a30447282a1e853cf3f05e3824be | [] | no_license | khanhdk0000/Mqtt-Web-Socket | 437777c740c68d4197353e334f6fe6a629094afd | 4f9e49a3817baa9ebc4e4f8dcffc21b6ea9d0134 | refs/heads/master | 2023-06-20T17:08:09.447381 | 2021-06-08T17:42:37 | 2021-06-08T17:42:37 | 375,090,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | from flask import Flask
from flask_sock import Sock
import time
app = Flask(__name__)
sock = Sock(app)
import threading
BROKER = 'io.adafruit.com'
USER = 'khanhdk0000'
PASSWORD = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
TOPIC = 'khanhdk0000/feeds/'
LIGHT = 'light'
SOUND = 'sound'
TEMP = 'temp'
LCD = 'iot_led'
BUZZER = 'buzzer'
########
# USER = 'CSE_BBC'
# PASSWORD = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
# TOPIC = 'CSE_BBC/feeds/'
# USER1 = 'CSE_BBC1'
# PASSWORD1 = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
# TOPIC1 = 'CSE_BBC1/feeds/'
# LIGHT = 'bk-iot-light'
# SOUND = 'bk-iot-sound'
# TEMP = 'bk-iot-temp-humid'
# LCD = 'bk-iot-lcd'
# BUZZER = 'bk-iot-speaker'
# Latest payload received from each MQTT feed (res*) plus the last value
# already pushed to websocket clients (prev*); prev* != res* means "new
# data pending". Written by the MQTT subscriber threads, read by the
# websocket route handlers.
# NOTE(review): the default payloads look swapped -- resTemp carries
# name "SOUND" and resSound carries "TEMP-HUMID"; confirm the intended
# id/name mapping for each feed.
resLight = '"id":"12","name":"LIGHT","data":"0","unit":""'
prevLight = resLight
resTemp = '"id":"13","name":"SOUND","data":"0","unit":""'
prevTemp = resTemp
resSound = '"id":"7","name":"TEMP-HUMID","data":"0","unit":""'
prevSound = resSound
def mqttGet(user, password, topic, device):
    """Subscribe to one MQTT feed and mirror every payload into the matching
    module-level res* variable (read by the websocket route handlers).

    Blocks forever in loop_forever(); intended to run in a background thread.
    """
    # Imported lazily so the module can be loaded without paho installed.
    # The original line read "import paho.mqtt.client as mqtt, pu" -- the
    # stray ", pu" was a leftover edit that raised ModuleNotFoundError.
    import paho.mqtt.client as mqtt

    def on_connect(client, userdata, flags, rc):
        # rc == 0 means the broker accepted the connection.
        print("Connected with result code "+str(rc))
        if rc == 0:
            print('good')
        else:
            print('no good')

    def on_disconnect(client, userdata, flags, rc=0):
        print("Disconnected result code " + str(rc))

    # The original defined on_message twice; the first (debug-print) version
    # was dead code shadowed by this one, so only this handler is kept.
    def on_message(client, userdata, message):
        global resLight, resTemp, resSound
        payload = str(message.payload.decode("utf-8"))
        if device == LIGHT:
            resLight = payload
        elif device == TEMP:
            resTemp = payload
        elif device == SOUND:
            resSound = payload

    client = mqtt.Client()
    client.username_pw_set(username=user, password=password)
    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    client.on_message = on_message
    client.connect(BROKER, 1883, 60)
    client.subscribe(topic)
    client.loop_forever()
# One background MQTT subscriber thread per feed; each blocks forever in
# mqttGet's loop_forever(). NOTE(review): `name=mqttGet` passes the function
# object as the thread name -- presumably `name="mqttGet"` (a string) was
# intended; Thread stringifies it, so this is cosmetic only.
t1 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + LIGHT, LIGHT))
t1.start()
t2 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + TEMP, TEMP))
t2.start()
t3 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + SOUND, SOUND))
t3.start()
@sock.route('/light')
def reverse(ws):
    """Stream light-feed updates to a websocket client.

    Polls the module-level resLight and forwards it whenever it differs
    from the last value sent. A short sleep is added between polls: the
    original `continue`-only loop busy-spun at 100% CPU.
    (Note: the three route handlers in this file all share the name
    `reverse`; flask-sock keeps its own reference per route, so this works.)
    """
    global resLight, prevLight
    while True:
        if prevLight != resLight:
            ws.send(resLight)
            prevLight = resLight
        time.sleep(0.05)  # yield the CPU between polls instead of busy-waiting
@sock.route('/sound')
def reverse(ws):
    """Stream sound-feed updates to a websocket client.

    Same change-detection loop as the /light route, with a short sleep
    between polls so the loop does not busy-spin at 100% CPU (the original
    `continue`-only loop did).
    """
    global resSound, prevSound
    while True:
        if prevSound != resSound:
            ws.send(resSound)
            prevSound = resSound
        time.sleep(0.05)  # yield the CPU between polls instead of busy-waiting
@sock.route('/temp')
def reverse(ws):
    """Stream temperature-feed updates to a websocket client.

    Same change-detection loop as the /light route, with a short sleep
    between polls so the loop does not busy-spin at 100% CPU (the original
    `continue`-only loop did).
    """
    global resTemp, prevTemp
    while True:
        if prevTemp != resTemp:
            ws.send(resTemp)
            prevTemp = resTemp
        time.sleep(0.05)  # yield the CPU between polls instead of busy-waiting
if __name__ == '__main__':
app.run(debug=True) | [
"[email protected]"
] | |
58020d50d29701fd850fcf8ccf83bbf252227aba | 4ee2ed5479e34c11e78b98ec2428c623c0075772 | /bots/lrs/posedziai.py | 96ffa01f3441c06324d71503b6e240f289883fec | [] | no_license | sirex/databot-bots | 7c46ed7a7e5a4b7b5d7d7ab9cc7f17b1301e3e0b | c2bc4d4d5a3cfffe35eabf0660790f5e9b81ce41 | refs/heads/master | 2020-04-07T02:48:37.782069 | 2018-06-02T12:56:40 | 2018-06-02T12:56:40 | 44,805,410 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,286 | py | #!/usr/bin/env python3
import yaml
import botlib
from databot import define, task
def append_last_session(task):
    """Copy the source row with the latest 'pradžia' (start) value, if any,
    into the target pipe."""
    newest = None
    for row in task.source.rows():
        if newest is None or row.value['pradžia'] > newest.value['pradžia']:
            newest = row
    if newest:
        task.target.append(newest.key, newest.value)
# Load scraper credentials from the local settings file. safe_load replaces
# yaml.load: the file contains plain data only, and yaml.load without an
# explicit Loader can construct arbitrary Python objects and is deprecated
# since PyYAML 5.1.
with open('settings.yml') as f:
    settings = yaml.safe_load(f)
cookies = settings['cookies']['www.lrs.lt']
pipeline = {
'pipes': [
define('pradžios-puslapiai', compress=True),
define('sesijų-sąrašas'),
define('sesijų-puslapiai', compress=True),
define('posėdžių-sąrašas'),
define('posėdžių-puslapiai', compress=True),
define('klausimų-sąrašas'),
define('klausimų-puslapiai', compress=True),
],
'tasks': [
# Pirmas puslapis
task('pradžios-puslapiai').daily().download(
'http://www.lrs.lt/sip/portal.show?p_r=15275&p_k=1', cookies=cookies, check='#page-content h1.page-title'
),
# Sesijų sąrašas
task('pradžios-puslapiai', 'sesijų-sąrašas').select([
'#page-content .tbl-default xpath:tr[count(td)=3]', (
'td[1] > a.link@href', {
'url': 'td[1] > a.link@href',
'pavadinimas': 'td[1] > a.link:text',
'pradžia': 'td[2]:text',
'pabaiga': 'td[3]:text',
},
),
]).dedup(),
task('sesijų-sąrašas', 'sesijų-puslapiai').download(cookies=cookies, check='#page-content h1.page-title'),
# Paskutinė sesija
# Visada siunčiam paskutinę sisiją, kadangi ten gali būti naujų posėdžių.
task('sesijų-sąrašas', 'sesijų-sąrašas').daily().apply(append_last_session),
task('sesijų-sąrašas', 'sesijų-puslapiai').download(cookies=cookies, check='#page-content h1.page-title'),
# Posėdžių sąrašas
task('sesijų-puslapiai', 'posėdžių-sąrašas').select([
'#page-content .tbl-default xpath:tr[count(td)=4]/td[2]/a', (
'@href', {
'url': '@href',
'tipas': ':text',
'data': 'xpath:../../td[1]/a/text()',
'darbotvarkė': 'xpath:../../td[3]/a/@href',
'priimti projektai': 'xpath:../../td[4]/a/@href',
},
),
], check='#page-content h1.page-title').dedup(),
task('posėdžių-sąrašas', 'posėdžių-puslapiai').download(cookies=cookies, check='#page-content h1.page-title'),
# Svarstytų klausimų sąrašas
task('posėdžių-puslapiai', 'klausimų-sąrašas').select([
'#page-content .tbl-default xpath:tr[count(td)=3]', (
'td[3] > a@href', {
'url': 'td[3] > a@href',
'laikas': 'td[1]:text',
'numeris': 'td[2]:text',
'klausimas': 'td[3] > a:text',
'tipas': 'xpath:td[3]/text()?',
},
),
], check='.fakt_pos > .list.main li > a').dedup(),
task('klausimų-sąrašas', 'klausimų-puslapiai').download(cookies=cookies, check='#page-content h1.page-title'),
],
}
if __name__ == '__main__':
botlib.runbot(pipeline)
| [
"[email protected]"
] | |
6be72f888e1a08d62fc7d499a22b2a5afc8712d0 | 958c4e0cc47caf325bc0dfb54ad37d5e90ceb28b | /src/s17/taskmanager/interfaces.py | d2127491be017cbfa88368380140c538b55f26e2 | [] | no_license | simplesconsultoria/s17.taskmanager | 75cf0acfa9b1525f6b2849270edf0b780cbb1483 | 9ff31d4bf7cce4708956397f616900ca4d83d3ed | refs/heads/master | 2021-01-25T07:28:28.857133 | 2015-07-29T18:09:55 | 2015-07-29T18:09:55 | 5,602,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | # -*- coding: utf-8 -*-
from plone.app.textfield import RichText
from plone.directives import form
from s17.taskmanager import MessageFactory as _
from zope import schema
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
# Vocabulary for ITask.priority: numeric values with translatable titles.
# ITask defaults to 2 ('Normal').
priorities = SimpleVocabulary([
    SimpleTerm(value=1, title=_(u'High')),
    SimpleTerm(value=2, title=_(u'Normal')),
    SimpleTerm(value=3, title=_(u'Low')),
])
class ITaskPanel(form.Schema):
    """Dexterity schema for a task panel (container-level settings)."""

    # Default responsible user, chosen from the site's user vocabulary.
    responsible = schema.Choice(
        title=_(u'Responsible'),
        description=_(''),
        required=False,
        vocabulary='plone.app.vocabularies.Users',
    )
    # Groups whose members may add tasks to this panel.
    can_add_tasks = schema.List(
        title=_(u'Who can add tasks?'),
        description=_(''),
        required=False,
        value_type=schema.Choice(vocabulary='plone.app.vocabularies.Groups'),
    )
class ITask(form.Schema):
    """Dexterity schema for a single task item."""

    # Short title shown in listings; the only mandatory field.
    title = schema.TextLine(
        title=_(u'Title'),
        description=_(''),
        required=True,
    )
    # Optional assignee, chosen from the site's user vocabulary.
    responsible = schema.Choice(
        title=_(u'Responsible'),
        description=_(''),
        required=False,
        vocabulary='plone.app.vocabularies.Users',
    )
    # 1=High, 2=Normal, 3=Low (module-level `priorities` vocabulary);
    # defaults to Normal (2).
    priority = schema.Choice(
        title=_(u'Priority'),
        description=_(''),
        required=True,
        vocabulary=priorities,
        default=2,
    )
    # Optional rich-text body with the task details.
    text = RichText(
        title=_(u'Task Detail'),
        description=_(''),
        required=False,
    )
    # Read-only dates: not editable through forms, so presumably maintained
    # programmatically elsewhere -- TODO confirm against the event handlers.
    initial_date = schema.Date(
        title=_(u'Initial date'),
        description=_(''),
        required=False,
        readonly=True,
    )
    end_date = schema.Date(
        title=_(u'End date'),
        description=_(''),
        required=False,
        readonly=True,
    )
    # User-editable expected completion date.
    provided_date = schema.Date(
        title=_(u'Expected date'),
        description=_(''),
        required=False,
    )
| [
"[email protected]"
] | |
f4a15652109abf926330829d5155be89a22ea2db | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/purview/azure-purview-scanning/tests/test_smoke.py | cccf9d71ecc17eda6ab343d13d4a1c3bbbe34273 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 830 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from testcase import PurviewScanningTest, PurviewScanningPowerShellPreparer
from devtools_testutils import recorded_by_proxy
class TestPurviewScanningSmoke(PurviewScanningTest):
    """Recorded smoke test for the Purview Scanning client."""

    @PurviewScanningPowerShellPreparer()
    @recorded_by_proxy
    def test_basic_smoke_test(self, purviewscanning_endpoint):
        # Build a service client against the endpoint injected by the preparer.
        client = self.create_client(endpoint=purviewscanning_endpoint)
        # list_all returns an iterable of data sources; materialize it so the
        # underlying service calls actually execute.
        response = client.data_sources.list_all()
        result = [item for item in response]
        # The test account is expected to contain at least one data source.
        assert len(result) >= 1
| [
"[email protected]"
] | |
d9624f99e2fc4929785c6620e4acd57b50a94da3 | 29145db13229d311269f317bf2819af6cba7d356 | /july easy/gen.py | 0d77f3556f15fdc55fdcee6fb3e9b9aa945401dc | [] | no_license | rocket3989/hackerEarth2019 | 802d1ca6fd03e80657cbe07a3f123e087679af4d | 42c0a7005e52c3762496220136cc5c1ee93571bb | refs/heads/master | 2021-07-05T01:32:42.203964 | 2020-12-22T03:40:20 | 2020-12-22T03:40:20 | 211,607,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | from random import randint
# Draw 1000 pseudo-random integers uniformly from [0, 1000]
# (randint's bounds are inclusive on both ends).
out = [randint(0, 1000) for _ in range(1000)]
print(*out) | [
"[email protected]"
] | |
9d7ea0b3a6493a8f7d5db68f4107169567274d8f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03463/s773991951.py | 929d4cbd0719590df2e6984427d7f287fe852c8f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | N,A,B = map(int,input().split())
#print(A,B)
# Simulate up to N rounds of a two-token game on cells 1..N.
# Token A chases token B; token B evades. "Borys" wins if A is pushed
# below cell 1, "Alice" wins if B is pushed past cell N, otherwise the
# game is declared a draw after N rounds.
for i in range(N):
	# A's move: step up while B is at least two cells above, else step
	# down; dropping below cell 1 ends the game in Borys's favor.
	if A+1<B:
		A +=1
	else:
		A += -1
		if A <1:
			ans="Borys"
			break
	#print(i,A,B)
	# B's move: step down while strictly more than one cell above A, else
	# step up; going past cell N ends the game in Alice's favor.
	if A<B-1:
		B += -1
	else:
		B += 1
		if B >N:
			ans="Alice"
			break
	#print(i,A,B)
# for/else: the loop ran all N rounds without a break -> draw.
else:
	ans="Draw"
print(ans) | [
"[email protected]"
] | |
842ca3cbd506420ec21602475bff1e1984496e9f | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part010511.py | 937406fd5f610cca55b5bb4a2d6afec74f258fa1 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher145294(CommutativeMatcher):
    # Auto-generated many-to-one matcher for a commutative Mul pattern
    # (machine-produced Rubi integration-rule support code); do not edit
    # by hand.
    _instance = None
    # One pattern with two Mul-scoped pattern variables; the second carries
    # an optional default of S(1).
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i3.1.3.1.0', 1, 1, None), Mul),
            (VariableWithCount('i3.1.3.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    # Shared (class-level) matcher state, as emitted by the generator.
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily created process-wide singleton instance.
        if CommutativeMatcher145294._instance is None:
            CommutativeMatcher145294._instance = CommutativeMatcher145294()
        return CommutativeMatcher145294._instance
    @staticmethod
    def get_match_iter(subject):
        # The bare `return` before `yield` makes this an empty generator:
        # it yields no matches (the generated state machine has no states).
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 145293
        return
        yield
from collections import deque | [
"[email protected]"
] | |
fa17fafd30775f5c3d2aa23f4002d6aa9268cf9b | e06c7fd594c52425ab7fc5498c07ae14daf9578b | /api/common/encryption_service.py | af34eb1615955ea54f079a26447530fecaa2fb9d | [] | no_license | rwheeler-7864/simplenight-api | bc35560eca1e1c25092a1bcdc4af1633367413b8 | 602646911a0155df5b70991d1445c10cee18cd33 | refs/heads/master | 2023-03-12T03:10:51.516499 | 2021-02-25T20:40:44 | 2021-02-25T20:40:44 | 342,370,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | import os
import secrets
from base64 import b64encode, b64decode
from typing import Optional
# noinspection PyPackageRequirements
from Crypto.Cipher import AES
# noinspection PyPackageRequirements
from Crypto.Util import Padding
from django.core.exceptions import ImproperlyConfigured
try:
from api.models.models import Feature
from api.common.request_context import get_request_context
except ImproperlyConfigured:
pass # Ignore in tests
class EncryptionService:
    """AES-128-CBC helper that encrypts UTF-8 text to base64 and back.

    The key is taken from, in order: an explicit hex string passed to the
    constructor, the ENCRYPTION_KEY environment variable (hex), or a fixed
    test-mode key.
    """

    # Fallback key used when no ENCRYPTION_KEY is configured (tests/dev only).
    TEST_MODE_KEY = b"ABCDEFG012345678"

    def __init__(self, encryption_key=None):
        """Create a service from *encryption_key* (hex) or the env/test key."""
        if encryption_key is not None:
            self.encryption_key = bytes.fromhex(encryption_key)
        else:
            self.encryption_key = self._get_encryption_key()

    def encrypt(self, clear_text: str) -> Optional[str]:
        """Encrypt *clear_text* and return base64 ciphertext (None passes through)."""
        if clear_text is None:
            return None
        # Pad to a whole number of AES blocks before encrypting.
        padded_clear_text = Padding.pad(clear_text.encode("utf-8"), AES.block_size)
        return b64encode(self._get_cipher().encrypt(padded_clear_text)).decode("utf-8")

    def decrypt(self, crypt_text: str) -> Optional[str]:
        """Inverse of encrypt(): base64 ciphertext -> clear text (None passes through)."""
        if crypt_text is None:
            return None
        clear_text = Padding.unpad(self._get_cipher().decrypt(b64decode(crypt_text)), AES.block_size)
        return clear_text.decode("utf-8")

    @staticmethod
    def generate_encryption_key():
        """Return a fresh random 128-bit key as a hex string."""
        return secrets.token_bytes(16).hex()

    def _get_cipher(self):
        # NOTE(security): the key itself is reused as the CBC IV, so the IV
        # is constant and equal plaintexts produce equal ciphertexts -- CBC
        # requires an unpredictable per-message IV. Changing this would break
        # decryption of already-stored data, so it is flagged, not fixed, here.
        key = self.encryption_key
        return AES.new(key, AES.MODE_CBC, key)

    def _get_encryption_key(self):
        # Hex-decoded ENCRYPTION_KEY from the environment, else the test key.
        encoded_key = os.getenv("ENCRYPTION_KEY")
        if not encoded_key:
            return self.TEST_MODE_KEY
        return bytes.fromhex(encoded_key)
| [
"[email protected]"
] | |
d3c768b63e78f7556f0ff94a9873ea79d9bf2d33 | b16bc512603cbe3bdc5a56586cfc9147fe5fb3f6 | /venv/lib/python3.6/site-packages/watson_developer_cloud/assistant_v1.py | 5c15d5f237eb12e9bfb1f28e8e5f368cf756f8f2 | [] | no_license | hoang-ho/TechTogether | caa565b14165c7b0889bd4232098e16a0137ba67 | fa4ca8375ab00d1791d2fce02384503eff5df7e0 | refs/heads/master | 2020-05-01T08:24:22.561868 | 2019-05-13T06:55:46 | 2019-05-13T06:55:46 | 177,377,979 | 2 | 2 | null | 2019-05-13T06:55:47 | 2019-03-24T06:15:31 | Python | UTF-8 | Python | false | false | 334,778 | py | # coding: utf-8
# Copyright 2018 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The IBM Watson™ Assistant service combines machine learning, natural language
understanding, and integrated dialog tools to create conversation flows between your apps
and your users.
"""
from __future__ import absolute_import
import json
from .watson_service import datetime_to_string, string_to_datetime
from .watson_service import WatsonService
##############################################################################
# Service
##############################################################################
class AssistantV1(WatsonService):
"""The Assistant V1 service."""
default_url = 'https://gateway.watsonplatform.net/assistant/api'
    def __init__(
            self,
            version,
            url=default_url,
            username=None,
            password=None,
            iam_apikey=None,
            iam_access_token=None,
            iam_url=None,
    ):
        """
        Construct a new client for the Assistant service.

        :param str version: The API version date to use with the service, in
               "YYYY-MM-DD" format. The service uses the API version for the date
               you specify, or the most recent version before that date. Pin a
               date your application has been tested against rather than
               computing the current date at runtime.

        :param str url: The base url to use when contacting the service (e.g.
               "https://gateway.watsonplatform.net/assistant/api"). The base url
               may differ between Bluemix regions.

        :param str username: The username used to authenticate with the service.
               Username and password credentials are only required to run your
               application locally or outside of Bluemix; on Bluemix they are
               loaded automatically from the `VCAP_SERVICES` environment
               variable.

        :param str password: The password used to authenticate with the service
               (same rules as `username`).

        :param str iam_apikey: An API key that can be used to request IAM
               tokens. If this API key is provided, the SDK will manage the
               token and handle the refreshing.

        :param str iam_access_token: An IAM access token fully managed by the
               application. The application must refresh the token before it
               expires; requests made with an expired token fail with a 401.

        :param str iam_url: An optional URL for the IAM service API. Defaults
               to 'https://iam.bluemix.net/identity/token'.
        """
        WatsonService.__init__(
            self,
            vcap_services_name='conversation',
            url=url,
            username=username,
            password=password,
            iam_apikey=iam_apikey,
            iam_access_token=iam_access_token,
            iam_url=iam_url,
            use_vcap_services=True,
            display_name='Assistant')
        # The dated API version is sent as a query parameter on every request.
        self.version = version
#########################
# Message
#########################
    def message(self,
                workspace_id,
                input=None,
                alternate_intents=None,
                context=None,
                entities=None,
                intents=None,
                output=None,
                nodes_visited_details=None,
                **kwargs):
        """
        Get response to user input.

        Send user input to a workspace and receive a response.
        There is no rate limit for this operation.

        :param str workspace_id: Unique identifier of the workspace.
        :param InputData input: An input object that includes the input text.
        :param bool alternate_intents: Whether to return more than one intent.
        Set to `true` to return all matching intents.
        :param Context context: State information for the conversation. To
        maintain state, include the context from the previous response.
        :param list[RuntimeEntity] entities: Entities to use when evaluating the
        message. Include entities from the previous response to continue using
        those entities rather than detecting entities in the new input.
        :param list[RuntimeIntent] intents: Intents to use when evaluating the
        user input. Include intents from the previous response to continue using
        those intents rather than trying to recognize intents in the new input.
        :param OutputData output: An output object that includes the response to
        the user, the dialog nodes that were triggered, and messages from the log.
        :param bool nodes_visited_details: Whether to include additional
        diagnostic information about the dialog nodes that were visited during
        processing of the message.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse
        """
        if workspace_id is None:
            raise ValueError('workspace_id must be provided')
        # Normalize model objects (or plain dicts) into their wire format.
        if input is not None:
            input = self._convert_model(input, InputData)
        if context is not None:
            context = self._convert_model(context, Context)
        if entities is not None:
            entities = [self._convert_model(x, RuntimeEntity) for x in entities]
        if intents is not None:
            intents = [self._convert_model(x, RuntimeIntent) for x in intents]
        if output is not None:
            output = self._convert_model(output, OutputData)
        # Caller-supplied headers are merged in before the SDK analytics header.
        headers = {}
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        headers[
            'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=message'
        params = {
            'version': self.version,
            'nodes_visited_details': nodes_visited_details
        }
        data = {
            'input': input,
            'alternate_intents': alternate_intents,
            'context': context,
            'entities': entities,
            'intents': intents,
            'output': output
        }
        # The workspace id is URL-encoded into the request path.
        url = '/v1/workspaces/{0}/message'.format(
            *self._encode_path_vars(workspace_id))
        response = self.request(
            method='POST',
            url=url,
            headers=headers,
            params=params,
            json=data,
            accept_json=True)
        return response
#########################
# Workspaces
#########################
def create_workspace(self,
name=None,
description=None,
language=None,
intents=None,
entities=None,
dialog_nodes=None,
counterexamples=None,
metadata=None,
learning_opt_out=None,
system_settings=None,
**kwargs):
"""
Create workspace.
Create a workspace based on component objects. You must provide workspace
components defining the content of the new workspace.
This operation is limited to 30 requests per 30 minutes. For more information, see
**Rate limiting**.
:param str name: The name of the workspace. This string cannot contain carriage
return, newline, or tab characters, and it must be no longer than 64 characters.
:param str description: The description of the workspace. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param str language: The language of the workspace.
:param list[CreateIntent] intents: An array of objects defining the intents for
the workspace.
:param list[CreateEntity] entities: An array of objects defining the entities for
the workspace.
:param list[CreateDialogNode] dialog_nodes: An array of objects defining the nodes
in the dialog.
:param list[CreateCounterexample] counterexamples: An array of objects defining
input examples that have been marked as irrelevant input.
:param object metadata: Any metadata related to the workspace.
:param bool learning_opt_out: Whether training data from the workspace can be used
by IBM for general service improvements. `true` indicates that workspace training
data is not to be used.
:param WorkspaceSystemSettings system_settings: Global settings for the workspace.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if intents is not None:
intents = [self._convert_model(x, CreateIntent) for x in intents]
if entities is not None:
entities = [self._convert_model(x, CreateEntity) for x in entities]
if dialog_nodes is not None:
dialog_nodes = [
self._convert_model(x, CreateDialogNode) for x in dialog_nodes
]
if counterexamples is not None:
counterexamples = [
self._convert_model(x, CreateCounterexample)
for x in counterexamples
]
if system_settings is not None:
system_settings = self._convert_model(system_settings,
WorkspaceSystemSettings)
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=create_workspace'
params = {'version': self.version}
data = {
'name': name,
'description': description,
'language': language,
'intents': intents,
'entities': entities,
'dialog_nodes': dialog_nodes,
'counterexamples': counterexamples,
'metadata': metadata,
'learning_opt_out': learning_opt_out,
'system_settings': system_settings
}
url = '/v1/workspaces'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
def delete_workspace(self, workspace_id, **kwargs):
"""
Delete workspace.
Delete a workspace from the service instance.
This operation is limited to 30 requests per 30 minutes. For more information, see
**Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=delete_workspace'
params = {'version': self.version}
url = '/v1/workspaces/{0}'.format(*self._encode_path_vars(workspace_id))
response = self.request(
method='DELETE',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def get_workspace(self,
workspace_id,
export=None,
include_audit=None,
sort=None,
**kwargs):
"""
Get information about a workspace.
Get information about a workspace, optionally including all workspace content.
With **export**=`false`, this operation is limited to 6000 requests per 5 minutes.
With **export**=`true`, the limit is 20 requests per 30 minutes. For more
information, see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param bool export: Whether to include all element content in the returned data.
If **export**=`false`, the returned data includes only information about the
element itself. If **export**=`true`, all content, including subelements, is
included.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param str sort: Indicates how the returned workspace data will be sorted. This
parameter is valid only if **export**=`true`. Specify `sort=stable` to sort all
workspace objects by unique identifier, in ascending alphabetical order.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=get_workspace'
params = {
'version': self.version,
'export': export,
'include_audit': include_audit,
'sort': sort
}
url = '/v1/workspaces/{0}'.format(*self._encode_path_vars(workspace_id))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def list_workspaces(self,
page_limit=None,
include_count=None,
sort=None,
cursor=None,
include_audit=None,
**kwargs):
"""
List workspaces.
List the workspaces associated with a Watson Assistant service instance.
This operation is limited to 500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param int page_limit: The number of records to return in each page of results.
:param bool include_count: Whether to include information about the number of
records returned.
:param str sort: The attribute by which returned workspaces will be sorted. To
reverse the sort order, prefix the value with a minus sign (`-`).
:param str cursor: A token identifying the page of results to retrieve.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_workspaces'
params = {
'version': self.version,
'page_limit': page_limit,
'include_count': include_count,
'sort': sort,
'cursor': cursor,
'include_audit': include_audit
}
url = '/v1/workspaces'
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def update_workspace(self,
workspace_id,
name=None,
description=None,
language=None,
intents=None,
entities=None,
dialog_nodes=None,
counterexamples=None,
metadata=None,
learning_opt_out=None,
system_settings=None,
append=None,
**kwargs):
"""
Update workspace.
Update an existing workspace with new or modified data. You must provide component
objects defining the content of the updated workspace.
This operation is limited to 30 request per 30 minutes. For more information, see
**Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str name: The name of the workspace. This string cannot contain carriage
return, newline, or tab characters, and it must be no longer than 64 characters.
:param str description: The description of the workspace. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param str language: The language of the workspace.
:param list[CreateIntent] intents: An array of objects defining the intents for
the workspace.
:param list[CreateEntity] entities: An array of objects defining the entities for
the workspace.
:param list[CreateDialogNode] dialog_nodes: An array of objects defining the nodes
in the dialog.
:param list[CreateCounterexample] counterexamples: An array of objects defining
input examples that have been marked as irrelevant input.
:param object metadata: Any metadata related to the workspace.
:param bool learning_opt_out: Whether training data from the workspace can be used
by IBM for general service improvements. `true` indicates that workspace training
data is not to be used.
:param WorkspaceSystemSettings system_settings: Global settings for the workspace.
:param bool append: Whether the new data is to be appended to the existing data in
the workspace. If **append**=`false`, elements included in the new data completely
replace the corresponding existing elements, including all subelements. For
example, if the new data includes **entities** and **append**=`false`, all
existing entities in the workspace are discarded and replaced with the new
entities.
If **append**=`true`, existing elements are preserved, and the new elements are
added. If any elements in the new data collide with existing elements, the update
request fails.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intents is not None:
intents = [self._convert_model(x, CreateIntent) for x in intents]
if entities is not None:
entities = [self._convert_model(x, CreateEntity) for x in entities]
if dialog_nodes is not None:
dialog_nodes = [
self._convert_model(x, CreateDialogNode) for x in dialog_nodes
]
if counterexamples is not None:
counterexamples = [
self._convert_model(x, CreateCounterexample)
for x in counterexamples
]
if system_settings is not None:
system_settings = self._convert_model(system_settings,
WorkspaceSystemSettings)
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=update_workspace'
params = {'version': self.version, 'append': append}
data = {
'name': name,
'description': description,
'language': language,
'intents': intents,
'entities': entities,
'dialog_nodes': dialog_nodes,
'counterexamples': counterexamples,
'metadata': metadata,
'learning_opt_out': learning_opt_out,
'system_settings': system_settings
}
url = '/v1/workspaces/{0}'.format(*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
#########################
# Intents
#########################
def create_intent(self,
workspace_id,
intent,
description=None,
examples=None,
**kwargs):
"""
Create intent.
Create a new intent.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The name of the intent. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, hyphen, and dot
characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 128 characters.
:param str description: The description of the intent. This string cannot contain
carriage return, newline, or tab characters, and it must be no longer than 128
characters.
:param list[CreateExample] examples: An array of user input examples for the
intent.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if examples is not None:
examples = [self._convert_model(x, CreateExample) for x in examples]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=create_intent'
params = {'version': self.version}
data = {
'intent': intent,
'description': description,
'examples': examples
}
url = '/v1/workspaces/{0}/intents'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
def delete_intent(self, workspace_id, intent, **kwargs):
"""
Delete intent.
Delete an intent from a workspace.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=delete_intent'
params = {'version': self.version}
url = '/v1/workspaces/{0}/intents/{1}'.format(
*self._encode_path_vars(workspace_id, intent))
response = self.request(
method='DELETE',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def get_intent(self,
workspace_id,
intent,
export=None,
include_audit=None,
**kwargs):
"""
Get intent.
Get information about an intent, optionally including all intent content.
With **export**=`false`, this operation is limited to 6000 requests per 5 minutes.
With **export**=`true`, the limit is 400 requests per 30 minutes. For more
information, see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param bool export: Whether to include all element content in the returned data.
If **export**=`false`, the returned data includes only information about the
element itself. If **export**=`true`, all content, including subelements, is
included.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=get_intent'
params = {
'version': self.version,
'export': export,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/intents/{1}'.format(
*self._encode_path_vars(workspace_id, intent))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def list_intents(self,
workspace_id,
export=None,
page_limit=None,
include_count=None,
sort=None,
cursor=None,
include_audit=None,
**kwargs):
"""
List intents.
List the intents for a workspace.
With **export**=`false`, this operation is limited to 2000 requests per 30
minutes. With **export**=`true`, the limit is 400 requests per 30 minutes. For
more information, see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param bool export: Whether to include all element content in the returned data.
If **export**=`false`, the returned data includes only information about the
element itself. If **export**=`true`, all content, including subelements, is
included.
:param int page_limit: The number of records to return in each page of results.
:param bool include_count: Whether to include information about the number of
records returned.
:param str sort: The attribute by which returned intents will be sorted. To
reverse the sort order, prefix the value with a minus sign (`-`).
:param str cursor: A token identifying the page of results to retrieve.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_intents'
params = {
'version': self.version,
'export': export,
'page_limit': page_limit,
'include_count': include_count,
'sort': sort,
'cursor': cursor,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/intents'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def update_intent(self,
workspace_id,
intent,
new_intent=None,
new_description=None,
new_examples=None,
**kwargs):
"""
Update intent.
Update an existing intent with new or modified data. You must provide component
objects defining the content of the updated intent.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param str new_intent: The name of the intent. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, hyphen, and dot
characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 128 characters.
:param str new_description: The description of the intent.
:param list[CreateExample] new_examples: An array of user input examples for the
intent.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if new_examples is not None:
new_examples = [
self._convert_model(x, CreateExample) for x in new_examples
]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=update_intent'
params = {'version': self.version}
data = {
'intent': new_intent,
'description': new_description,
'examples': new_examples
}
url = '/v1/workspaces/{0}/intents/{1}'.format(
*self._encode_path_vars(workspace_id, intent))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
#########################
# Examples
#########################
def create_example(self,
workspace_id,
intent,
text,
mentions=None,
**kwargs):
"""
Create user input example.
Add a new user input example to an intent.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param str text: The text of a user input example. This string must conform to the
following restrictions:
- It cannot contain carriage return, newline, or tab characters.
- It cannot consist of only whitespace characters.
- It must be no longer than 1024 characters.
:param list[Mentions] mentions: An array of contextual entity mentions.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if text is None:
raise ValueError('text must be provided')
if mentions is not None:
mentions = [self._convert_model(x, Mentions) for x in mentions]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=create_example'
params = {'version': self.version}
data = {'text': text, 'mentions': mentions}
url = '/v1/workspaces/{0}/intents/{1}/examples'.format(
*self._encode_path_vars(workspace_id, intent))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
def delete_example(self, workspace_id, intent, text, **kwargs):
"""
Delete user input example.
Delete a user input example from an intent.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param str text: The text of the user input example.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=delete_example'
params = {'version': self.version}
url = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format(
*self._encode_path_vars(workspace_id, intent, text))
response = self.request(
method='DELETE',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def get_example(self,
workspace_id,
intent,
text,
include_audit=None,
**kwargs):
"""
Get user input example.
Get information about a user input example.
This operation is limited to 6000 requests per 5 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param str text: The text of the user input example.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=get_example'
params = {'version': self.version, 'include_audit': include_audit}
url = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format(
*self._encode_path_vars(workspace_id, intent, text))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def list_examples(self,
workspace_id,
intent,
page_limit=None,
include_count=None,
sort=None,
cursor=None,
include_audit=None,
**kwargs):
"""
List user input examples.
List the user input examples for an intent, optionally including contextual entity
mentions.
This operation is limited to 2500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param int page_limit: The number of records to return in each page of results.
:param bool include_count: Whether to include information about the number of
records returned.
:param str sort: The attribute by which returned examples will be sorted. To
reverse the sort order, prefix the value with a minus sign (`-`).
:param str cursor: A token identifying the page of results to retrieve.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_examples'
params = {
'version': self.version,
'page_limit': page_limit,
'include_count': include_count,
'sort': sort,
'cursor': cursor,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/intents/{1}/examples'.format(
*self._encode_path_vars(workspace_id, intent))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def update_example(self,
workspace_id,
intent,
text,
new_text=None,
new_mentions=None,
**kwargs):
"""
Update user input example.
Update the text of a user input example.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param str text: The text of the user input example.
:param str new_text: The text of the user input example. This string must conform
to the following restrictions:
- It cannot contain carriage return, newline, or tab characters.
- It cannot consist of only whitespace characters.
- It must be no longer than 1024 characters.
:param list[Mentions] new_mentions: An array of contextual entity mentions.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if text is None:
raise ValueError('text must be provided')
if new_mentions is not None:
new_mentions = [
self._convert_model(x, Mentions) for x in new_mentions
]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=update_example'
params = {'version': self.version}
data = {'text': new_text, 'mentions': new_mentions}
url = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format(
*self._encode_path_vars(workspace_id, intent, text))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
#########################
# Counterexamples
#########################
def create_counterexample(self, workspace_id, text, **kwargs):
"""
Create counterexample.
Add a new counterexample to a workspace. Counterexamples are examples that have
been marked as irrelevant input.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str text: The text of a user input marked as irrelevant input. This string
must conform to the following restrictions:
- It cannot contain carriage return, newline, or tab characters
- It cannot consist of only whitespace characters
- It must be no longer than 1024 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=create_counterexample'
params = {'version': self.version}
data = {'text': text}
url = '/v1/workspaces/{0}/counterexamples'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
def delete_counterexample(self, workspace_id, text, **kwargs):
"""
Delete counterexample.
Delete a counterexample from a workspace. Counterexamples are examples that have
been marked as irrelevant input.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str text: The text of a user input counterexample (for example, `What are
you wearing?`).
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=delete_counterexample'
params = {'version': self.version}
url = '/v1/workspaces/{0}/counterexamples/{1}'.format(
*self._encode_path_vars(workspace_id, text))
response = self.request(
method='DELETE',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def get_counterexample(self,
workspace_id,
text,
include_audit=None,
**kwargs):
"""
Get counterexample.
Get information about a counterexample. Counterexamples are examples that have
been marked as irrelevant input.
This operation is limited to 6000 requests per 5 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str text: The text of a user input counterexample (for example, `What are
you wearing?`).
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=get_counterexample'
params = {'version': self.version, 'include_audit': include_audit}
url = '/v1/workspaces/{0}/counterexamples/{1}'.format(
*self._encode_path_vars(workspace_id, text))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def list_counterexamples(self,
workspace_id,
page_limit=None,
include_count=None,
sort=None,
cursor=None,
include_audit=None,
**kwargs):
"""
List counterexamples.
List the counterexamples for a workspace. Counterexamples are examples that have
been marked as irrelevant input.
This operation is limited to 2500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param int page_limit: The number of records to return in each page of results.
:param bool include_count: Whether to include information about the number of
records returned.
:param str sort: The attribute by which returned counterexamples will be sorted.
To reverse the sort order, prefix the value with a minus sign (`-`).
:param str cursor: A token identifying the page of results to retrieve.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_counterexamples'
params = {
'version': self.version,
'page_limit': page_limit,
'include_count': include_count,
'sort': sort,
'cursor': cursor,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/counterexamples'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def update_counterexample(self, workspace_id, text, new_text=None,
**kwargs):
"""
Update counterexample.
Update the text of a counterexample. Counterexamples are examples that have been
marked as irrelevant input.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str text: The text of a user input counterexample (for example, `What are
you wearing?`).
:param str new_text: The text of a user input counterexample.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=update_counterexample'
params = {'version': self.version}
data = {'text': new_text}
url = '/v1/workspaces/{0}/counterexamples/{1}'.format(
*self._encode_path_vars(workspace_id, text))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
#########################
# Entities
#########################
def create_entity(self,
workspace_id,
entity,
description=None,
metadata=None,
values=None,
fuzzy_match=None,
**kwargs):
"""
Create entity.
Create a new entity, or enable a system entity.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, and hyphen characters.
- It must be no longer than 64 characters.
If you specify an entity name beginning with the reserved prefix `sys-`, it must
be the name of a system entity that you want to enable. (Any entity content
specified with the request is ignored.).
:param str description: The description of the entity. This string cannot contain
carriage return, newline, or tab characters, and it must be no longer than 128
characters.
:param object metadata: Any metadata related to the value.
:param list[CreateValue] values: An array of objects describing the entity values.
:param bool fuzzy_match: Whether to use fuzzy matching for the entity.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if values is not None:
values = [self._convert_model(x, CreateValue) for x in values]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=create_entity'
params = {'version': self.version}
data = {
'entity': entity,
'description': description,
'metadata': metadata,
'values': values,
'fuzzy_match': fuzzy_match
}
url = '/v1/workspaces/{0}/entities'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
def delete_entity(self, workspace_id, entity, **kwargs):
"""
Delete entity.
Delete an entity from a workspace, or disable a system entity.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=delete_entity'
params = {'version': self.version}
url = '/v1/workspaces/{0}/entities/{1}'.format(
*self._encode_path_vars(workspace_id, entity))
response = self.request(
method='DELETE',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def get_entity(self,
workspace_id,
entity,
export=None,
include_audit=None,
**kwargs):
"""
Get entity.
Get information about an entity, optionally including all entity content.
With **export**=`false`, this operation is limited to 6000 requests per 5 minutes.
With **export**=`true`, the limit is 200 requests per 30 minutes. For more
information, see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param bool export: Whether to include all element content in the returned data.
If **export**=`false`, the returned data includes only information about the
element itself. If **export**=`true`, all content, including subelements, is
included.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=get_entity'
params = {
'version': self.version,
'export': export,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/entities/{1}'.format(
*self._encode_path_vars(workspace_id, entity))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def list_entities(self,
workspace_id,
export=None,
page_limit=None,
include_count=None,
sort=None,
cursor=None,
include_audit=None,
**kwargs):
"""
List entities.
List the entities for a workspace.
With **export**=`false`, this operation is limited to 1000 requests per 30
minutes. With **export**=`true`, the limit is 200 requests per 30 minutes. For
more information, see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param bool export: Whether to include all element content in the returned data.
If **export**=`false`, the returned data includes only information about the
element itself. If **export**=`true`, all content, including subelements, is
included.
:param int page_limit: The number of records to return in each page of results.
:param bool include_count: Whether to include information about the number of
records returned.
:param str sort: The attribute by which returned entities will be sorted. To
reverse the sort order, prefix the value with a minus sign (`-`).
:param str cursor: A token identifying the page of results to retrieve.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_entities'
params = {
'version': self.version,
'export': export,
'page_limit': page_limit,
'include_count': include_count,
'sort': sort,
'cursor': cursor,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/entities'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def update_entity(self,
workspace_id,
entity,
new_entity=None,
new_description=None,
new_metadata=None,
new_fuzzy_match=None,
new_values=None,
**kwargs):
"""
Update entity.
Update an existing entity with new or modified data. You must provide component
objects defining the content of the updated entity.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param str new_entity: The name of the entity. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, and hyphen characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 64 characters.
:param str new_description: The description of the entity. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param object new_metadata: Any metadata related to the entity.
:param bool new_fuzzy_match: Whether to use fuzzy matching for the entity.
:param list[CreateValue] new_values: An array of entity values.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if new_values is not None:
new_values = [
self._convert_model(x, CreateValue) for x in new_values
]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=update_entity'
params = {'version': self.version}
data = {
'entity': new_entity,
'description': new_description,
'metadata': new_metadata,
'fuzzy_match': new_fuzzy_match,
'values': new_values
}
url = '/v1/workspaces/{0}/entities/{1}'.format(
*self._encode_path_vars(workspace_id, entity))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
#########################
# Mentions
#########################
def list_mentions(self,
workspace_id,
entity,
export=None,
include_audit=None,
**kwargs):
"""
List entity mentions.
List mentions for a contextual entity. An entity mention is an occurrence of a
contextual entity in the context of an intent user input example.
This operation is limited to 200 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param bool export: Whether to include all element content in the returned data.
If **export**=`false`, the returned data includes only information about the
element itself. If **export**=`true`, all content, including subelements, is
included.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_mentions'
params = {
'version': self.version,
'export': export,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/entities/{1}/mentions'.format(
*self._encode_path_vars(workspace_id, entity))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
#########################
# Values
#########################
def create_value(self,
workspace_id,
entity,
value,
metadata=None,
synonyms=None,
patterns=None,
value_type=None,
**kwargs):
"""
Add entity value.
Create a new value for an entity.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param str value: The text of the entity value. This string must conform to the
following restrictions:
- It cannot contain carriage return, newline, or tab characters.
- It cannot consist of only whitespace characters.
- It must be no longer than 64 characters.
:param object metadata: Any metadata related to the entity value.
:param list[str] synonyms: An array containing any synonyms for the entity value.
You can provide either synonyms or patterns (as indicated by **type**), but not
both. A synonym must conform to the following restrictions:
- It cannot contain carriage return, newline, or tab characters.
- It cannot consist of only whitespace characters.
- It must be no longer than 64 characters.
:param list[str] patterns: An array of patterns for the entity value. You can
provide either synonyms or patterns (as indicated by **type**), but not both. A
pattern is a regular expression no longer than 512 characters. For more
information about how to specify a pattern, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/entities.html#creating-entities).
:param str value_type: Specifies the type of value.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if value is None:
raise ValueError('value must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=create_value'
params = {'version': self.version}
data = {
'value': value,
'metadata': metadata,
'synonyms': synonyms,
'patterns': patterns,
'type': value_type
}
url = '/v1/workspaces/{0}/entities/{1}/values'.format(
*self._encode_path_vars(workspace_id, entity))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
def delete_value(self, workspace_id, entity, value, **kwargs):
"""
Delete entity value.
Delete a value from an entity.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param str value: The text of the entity value.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if value is None:
raise ValueError('value must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=delete_value'
params = {'version': self.version}
url = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format(
*self._encode_path_vars(workspace_id, entity, value))
response = self.request(
method='DELETE',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def get_value(self,
workspace_id,
entity,
value,
export=None,
include_audit=None,
**kwargs):
"""
Get entity value.
Get information about an entity value.
This operation is limited to 6000 requests per 5 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param str value: The text of the entity value.
:param bool export: Whether to include all element content in the returned data.
If **export**=`false`, the returned data includes only information about the
element itself. If **export**=`true`, all content, including subelements, is
included.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if value is None:
raise ValueError('value must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=get_value'
params = {
'version': self.version,
'export': export,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format(
*self._encode_path_vars(workspace_id, entity, value))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def list_values(self,
workspace_id,
entity,
export=None,
page_limit=None,
include_count=None,
sort=None,
cursor=None,
include_audit=None,
**kwargs):
"""
List entity values.
List the values for an entity.
This operation is limited to 2500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param bool export: Whether to include all element content in the returned data.
If **export**=`false`, the returned data includes only information about the
element itself. If **export**=`true`, all content, including subelements, is
included.
:param int page_limit: The number of records to return in each page of results.
:param bool include_count: Whether to include information about the number of
records returned.
:param str sort: The attribute by which returned entity values will be sorted. To
reverse the sort order, prefix the value with a minus sign (`-`).
:param str cursor: A token identifying the page of results to retrieve.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_values'
params = {
'version': self.version,
'export': export,
'page_limit': page_limit,
'include_count': include_count,
'sort': sort,
'cursor': cursor,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/entities/{1}/values'.format(
*self._encode_path_vars(workspace_id, entity))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def update_value(self,
workspace_id,
entity,
value,
new_value=None,
new_metadata=None,
new_type=None,
new_synonyms=None,
new_patterns=None,
**kwargs):
"""
Update entity value.
Update an existing entity value with new or modified data. You must provide
component objects defining the content of the updated entity value.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param str value: The text of the entity value.
:param str new_value: The text of the entity value. This string must conform to
the following restrictions:
- It cannot contain carriage return, newline, or tab characters.
- It cannot consist of only whitespace characters.
- It must be no longer than 64 characters.
:param object new_metadata: Any metadata related to the entity value.
:param str new_type: Specifies the type of value.
:param list[str] new_synonyms: An array of synonyms for the entity value. You can
provide either synonyms or patterns (as indicated by **type**), but not both. A
synonym must conform to the following resrictions:
- It cannot contain carriage return, newline, or tab characters.
- It cannot consist of only whitespace characters.
- It must be no longer than 64 characters.
:param list[str] new_patterns: An array of patterns for the entity value. You can
provide either synonyms or patterns (as indicated by **type**), but not both. A
pattern is a regular expression no longer than 512 characters. For more
information about how to specify a pattern, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/entities.html#creating-entities).
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if value is None:
raise ValueError('value must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=update_value'
params = {'version': self.version}
data = {
'value': new_value,
'metadata': new_metadata,
'type': new_type,
'synonyms': new_synonyms,
'patterns': new_patterns
}
url = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format(
*self._encode_path_vars(workspace_id, entity, value))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
#########################
# Synonyms
#########################
def create_synonym(self, workspace_id, entity, value, synonym, **kwargs):
"""
Add entity value synonym.
Add a new synonym to an entity value.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param str value: The text of the entity value.
:param str synonym: The text of the synonym. This string must conform to the
following restrictions:
- It cannot contain carriage return, newline, or tab characters.
- It cannot consist of only whitespace characters.
- It must be no longer than 64 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if value is None:
raise ValueError('value must be provided')
if synonym is None:
raise ValueError('synonym must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=create_synonym'
params = {'version': self.version}
data = {'synonym': synonym}
url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms'.format(
*self._encode_path_vars(workspace_id, entity, value))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
def delete_synonym(self, workspace_id, entity, value, synonym, **kwargs):
"""
Delete entity value synonym.
Delete a synonym from an entity value.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param str value: The text of the entity value.
:param str synonym: The text of the synonym.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if value is None:
raise ValueError('value must be provided')
if synonym is None:
raise ValueError('synonym must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=delete_synonym'
params = {'version': self.version}
url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format(
*self._encode_path_vars(workspace_id, entity, value, synonym))
response = self.request(
method='DELETE',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def get_synonym(self,
workspace_id,
entity,
value,
synonym,
include_audit=None,
**kwargs):
"""
Get entity value synonym.
Get information about a synonym of an entity value.
This operation is limited to 6000 requests per 5 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param str value: The text of the entity value.
:param str synonym: The text of the synonym.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if value is None:
raise ValueError('value must be provided')
if synonym is None:
raise ValueError('synonym must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=get_synonym'
params = {'version': self.version, 'include_audit': include_audit}
url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format(
*self._encode_path_vars(workspace_id, entity, value, synonym))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def list_synonyms(self,
workspace_id,
entity,
value,
page_limit=None,
include_count=None,
sort=None,
cursor=None,
include_audit=None,
**kwargs):
"""
List entity value synonyms.
List the synonyms for an entity value.
This operation is limited to 2500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param str value: The text of the entity value.
:param int page_limit: The number of records to return in each page of results.
:param bool include_count: Whether to include information about the number of
records returned.
:param str sort: The attribute by which returned entity value synonyms will be
sorted. To reverse the sort order, prefix the value with a minus sign (`-`).
:param str cursor: A token identifying the page of results to retrieve.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if value is None:
raise ValueError('value must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_synonyms'
params = {
'version': self.version,
'page_limit': page_limit,
'include_count': include_count,
'sort': sort,
'cursor': cursor,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms'.format(
*self._encode_path_vars(workspace_id, entity, value))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def update_synonym(self,
workspace_id,
entity,
value,
synonym,
new_synonym=None,
**kwargs):
"""
Update entity value synonym.
Update an existing entity value synonym with new text.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity.
:param str value: The text of the entity value.
:param str synonym: The text of the synonym.
:param str new_synonym: The text of the synonym. This string must conform to the
following restrictions:
- It cannot contain carriage return, newline, or tab characters.
- It cannot consist of only whitespace characters.
- It must be no longer than 64 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if value is None:
raise ValueError('value must be provided')
if synonym is None:
raise ValueError('synonym must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=update_synonym'
params = {'version': self.version}
data = {'synonym': new_synonym}
url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format(
*self._encode_path_vars(workspace_id, entity, value, synonym))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
#########################
# Dialog nodes
#########################
def create_dialog_node(self,
workspace_id,
dialog_node,
description=None,
conditions=None,
parent=None,
previous_sibling=None,
output=None,
context=None,
metadata=None,
next_step=None,
actions=None,
title=None,
node_type=None,
event_name=None,
variable=None,
digress_in=None,
digress_out=None,
digress_out_slots=None,
user_label=None,
**kwargs):
"""
Create dialog node.
Create a new dialog node.
This operation is limited to 500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 1024 characters.
:param str description: The description of the dialog node. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param str conditions: The condition that will trigger the dialog node. This
string cannot contain carriage return, newline, or tab characters, and it must be
no longer than 2048 characters.
:param str parent: The ID of the parent dialog node.
:param str previous_sibling: The ID of the previous dialog node.
:param DialogNodeOutput output: The output of the dialog node. For more
information about how to specify dialog node output, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#complex).
:param object context: The context for the dialog node.
:param object metadata: The metadata for the dialog node.
:param DialogNodeNextStep next_step: The next step to execute following this
dialog node.
:param list[DialogNodeAction] actions: An array of objects describing any actions
to be invoked by the dialog node.
:param str title: The alias used to identify the dialog node. This string must
conform to the following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 64 characters.
:param str node_type: How the dialog node is processed.
:param str event_name: How an `event_handler` node is processed.
:param str variable: The location in the dialog context where output is stored.
:param str digress_in: Whether this top-level dialog node can be digressed into.
:param str digress_out: Whether this dialog node can be returned to after a
digression.
:param str digress_out_slots: Whether the user can digress to top-level nodes
while filling out slots.
:param str user_label: A label that can be displayed externally to describe the
purpose of the node to users. This string must be no longer than 512 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if dialog_node is None:
raise ValueError('dialog_node must be provided')
if output is not None:
output = self._convert_model(output, DialogNodeOutput)
if next_step is not None:
next_step = self._convert_model(next_step, DialogNodeNextStep)
if actions is not None:
actions = [
self._convert_model(x, DialogNodeAction) for x in actions
]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=create_dialog_node'
params = {'version': self.version}
data = {
'dialog_node': dialog_node,
'description': description,
'conditions': conditions,
'parent': parent,
'previous_sibling': previous_sibling,
'output': output,
'context': context,
'metadata': metadata,
'next_step': next_step,
'actions': actions,
'title': title,
'type': node_type,
'event_name': event_name,
'variable': variable,
'digress_in': digress_in,
'digress_out': digress_out,
'digress_out_slots': digress_out_slots,
'user_label': user_label
}
url = '/v1/workspaces/{0}/dialog_nodes'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
def delete_dialog_node(self, workspace_id, dialog_node, **kwargs):
"""
Delete dialog node.
Delete a dialog node from a workspace.
This operation is limited to 500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID (for example, `get_order`).
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if dialog_node is None:
raise ValueError('dialog_node must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=delete_dialog_node'
params = {'version': self.version}
url = '/v1/workspaces/{0}/dialog_nodes/{1}'.format(
*self._encode_path_vars(workspace_id, dialog_node))
response = self.request(
method='DELETE',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def get_dialog_node(self,
workspace_id,
dialog_node,
include_audit=None,
**kwargs):
"""
Get dialog node.
Get information about a dialog node.
This operation is limited to 6000 requests per 5 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID (for example, `get_order`).
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if dialog_node is None:
raise ValueError('dialog_node must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=get_dialog_node'
params = {'version': self.version, 'include_audit': include_audit}
url = '/v1/workspaces/{0}/dialog_nodes/{1}'.format(
*self._encode_path_vars(workspace_id, dialog_node))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def list_dialog_nodes(self,
workspace_id,
page_limit=None,
include_count=None,
sort=None,
cursor=None,
include_audit=None,
**kwargs):
"""
List dialog nodes.
List the dialog nodes for a workspace.
This operation is limited to 2500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param int page_limit: The number of records to return in each page of results.
:param bool include_count: Whether to include information about the number of
records returned.
:param str sort: The attribute by which returned dialog nodes will be sorted. To
reverse the sort order, prefix the value with a minus sign (`-`).
:param str cursor: A token identifying the page of results to retrieve.
:param bool include_audit: Whether to include the audit properties (`created` and
`updated` timestamps) in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_dialog_nodes'
params = {
'version': self.version,
'page_limit': page_limit,
'include_count': include_count,
'sort': sort,
'cursor': cursor,
'include_audit': include_audit
}
url = '/v1/workspaces/{0}/dialog_nodes'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def update_dialog_node(self,
workspace_id,
dialog_node,
new_dialog_node=None,
new_description=None,
new_conditions=None,
new_parent=None,
new_previous_sibling=None,
new_output=None,
new_context=None,
new_metadata=None,
new_next_step=None,
new_title=None,
new_type=None,
new_event_name=None,
new_variable=None,
new_actions=None,
new_digress_in=None,
new_digress_out=None,
new_digress_out_slots=None,
new_user_label=None,
**kwargs):
"""
Update dialog node.
Update an existing dialog node with new or modified data.
This operation is limited to 500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID (for example, `get_order`).
:param str new_dialog_node: The dialog node ID. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 1024 characters.
:param str new_description: The description of the dialog node. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param str new_conditions: The condition that will trigger the dialog node. This
string cannot contain carriage return, newline, or tab characters, and it must be
no longer than 2048 characters.
:param str new_parent: The ID of the parent dialog node.
:param str new_previous_sibling: The ID of the previous sibling dialog node.
:param DialogNodeOutput new_output: The output of the dialog node. For more
information about how to specify dialog node output, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#complex).
:param object new_context: The context for the dialog node.
:param object new_metadata: The metadata for the dialog node.
:param DialogNodeNextStep new_next_step: The next step to execute following this
dialog node.
:param str new_title: The alias used to identify the dialog node. This string must
conform to the following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 64 characters.
:param str new_type: How the dialog node is processed.
:param str new_event_name: How an `event_handler` node is processed.
:param str new_variable: The location in the dialog context where output is
stored.
:param list[DialogNodeAction] new_actions: An array of objects describing any
actions to be invoked by the dialog node.
:param str new_digress_in: Whether this top-level dialog node can be digressed
into.
:param str new_digress_out: Whether this dialog node can be returned to after a
digression.
:param str new_digress_out_slots: Whether the user can digress to top-level nodes
while filling out slots.
:param str new_user_label: A label that can be displayed externally to describe
the purpose of the node to users. This string must be no longer than 512
characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if dialog_node is None:
raise ValueError('dialog_node must be provided')
if new_output is not None:
new_output = self._convert_model(new_output, DialogNodeOutput)
if new_next_step is not None:
new_next_step = self._convert_model(new_next_step,
DialogNodeNextStep)
if new_actions is not None:
new_actions = [
self._convert_model(x, DialogNodeAction) for x in new_actions
]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=update_dialog_node'
params = {'version': self.version}
data = {
'dialog_node': new_dialog_node,
'description': new_description,
'conditions': new_conditions,
'parent': new_parent,
'previous_sibling': new_previous_sibling,
'output': new_output,
'context': new_context,
'metadata': new_metadata,
'next_step': new_next_step,
'title': new_title,
'type': new_type,
'event_name': new_event_name,
'variable': new_variable,
'actions': new_actions,
'digress_in': new_digress_in,
'digress_out': new_digress_out,
'digress_out_slots': new_digress_out_slots,
'user_label': new_user_label
}
url = '/v1/workspaces/{0}/dialog_nodes/{1}'.format(
*self._encode_path_vars(workspace_id, dialog_node))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
#########################
# Logs
#########################
def list_all_logs(self,
filter,
sort=None,
page_limit=None,
cursor=None,
**kwargs):
"""
List log events in all workspaces.
List the events from the logs of all workspaces in the service instance.
If **cursor** is not specified, this operation is limited to 40 requests per 30
minutes. If **cursor** is specified, the limit is 120 requests per minute. For
more information, see **Rate limiting**.
:param str filter: A cacheable parameter that limits the results to those matching
the specified filter. You must specify a filter query that includes a value for
`language`, as well as a value for `workspace_id` or
`request.context.metadata.deployment`. For more information, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/filter-reference.html#filter-query-syntax).
:param str sort: How to sort the returned log events. You can sort by
**request_timestamp**. To reverse the sort order, prefix the parameter value with
a minus sign (`-`).
:param int page_limit: The number of records to return in each page of results.
:param str cursor: A token identifying the page of results to retrieve.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if filter is None:
raise ValueError('filter must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_all_logs'
params = {
'version': self.version,
'filter': filter,
'sort': sort,
'page_limit': page_limit,
'cursor': cursor
}
url = '/v1/logs'
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
def list_logs(self,
workspace_id,
sort=None,
filter=None,
page_limit=None,
cursor=None,
**kwargs):
"""
List log events in a workspace.
List the events from the log of a specific workspace.
If **cursor** is not specified, this operation is limited to 40 requests per 30
minutes. If **cursor** is specified, the limit is 120 requests per minute. For
more information, see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str sort: How to sort the returned log events. You can sort by
**request_timestamp**. To reverse the sort order, prefix the parameter value with
a minus sign (`-`).
:param str filter: A cacheable parameter that limits the results to those matching
the specified filter. For more information, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/filter-reference.html#filter-query-syntax).
:param int page_limit: The number of records to return in each page of results.
:param str cursor: A token identifying the page of results to retrieve.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=list_logs'
params = {
'version': self.version,
'sort': sort,
'filter': filter,
'page_limit': page_limit,
'cursor': cursor
}
url = '/v1/workspaces/{0}/logs'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
#########################
# User data
#########################
def delete_user_data(self, customer_id, **kwargs):
"""
Delete labeled data.
Deletes all data associated with a specified customer ID. The method has no effect
if no data is associated with the customer ID.
You associate a customer ID with data by passing the `X-Watson-Metadata` header
with a request that passes data. For more information about personal data and
customer IDs, see [Information
security](https://cloud.ibm.com/docs/services/assistant/information-security.html).
:param str customer_id: The customer ID for which all data is to be deleted.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if customer_id is None:
raise ValueError('customer_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=conversation;service_version=V1;operation_id=delete_user_data'
params = {'version': self.version, 'customer_id': customer_id}
url = '/v1/user_data'
response = self.request(
method='DELETE',
url=url,
headers=headers,
params=params,
accept_json=True)
return response
##############################################################################
# Models
##############################################################################
class CaptureGroup(object):
    """
    A capture group recognized for an entity within the input text.

    :attr str group: A recognized capture group for the entity.
    :attr list[int] location: (optional) Zero-based character offsets that
    indicate where the entity value begins and ends in the input text.
    """

    def __init__(self, group, location=None):
        """
        Initialize a CaptureGroup object.

        :param str group: A recognized capture group for the entity.
        :param list[int] location: (optional) Zero-based character offsets
        that indicate where the entity value begins and ends in the input
        text.
        """
        self.group = group
        self.location = location

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a CaptureGroup object from a json dictionary."""
        # 'group' is the only required property in the JSON representation.
        if 'group' not in _dict:
            raise ValueError(
                'Required property \'group\' not present in CaptureGroup JSON')
        args = {'group': _dict.get('group')}
        if 'location' in _dict:
            args['location'] = _dict.get('location')
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        result = {}
        # Only attributes that are present and non-None are serialized.
        for attr_name in ('group', 'location'):
            attr_value = getattr(self, attr_name, None)
            if attr_value is not None:
                result[attr_name] = attr_value
        return result

    def __str__(self):
        """Return a `str` version of this CaptureGroup object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Context(object):
    """
    State information for the conversation. To maintain state, include the context from
    the previous response.
    :attr str conversation_id: (optional) The unique identifier of the conversation.
    :attr SystemResponse system: (optional) For internal use only.
    :attr MessageContextMetadata metadata: (optional) Metadata related to the message.
    """
    def __init__(self,
                 conversation_id=None,
                 system=None,
                 metadata=None,
                 **kwargs):
        """
        Initialize a Context object.
        :param str conversation_id: (optional) The unique identifier of the conversation.
        :param SystemResponse system: (optional) For internal use only.
        :param MessageContextMetadata metadata: (optional) Metadata related to the
        message.
        :param **kwargs: (optional) Any additional properties.
        """
        self.conversation_id = conversation_id
        self.system = system
        self.metadata = metadata
        # Any extra keyword arguments become dynamic attributes; the custom
        # __setattr__ below records their names so _to_dict can serialize them.
        for _key, _value in kwargs.items():
            setattr(self, _key, _value)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Context object from a json dictionary."""
        args = {}
        # 'xtra' starts as a copy of the payload; each explicitly modeled key
        # is removed from it so only unmodeled properties remain at the end.
        xtra = _dict.copy()
        if 'conversation_id' in _dict:
            args['conversation_id'] = _dict.get('conversation_id')
            del xtra['conversation_id']
        if 'system' in _dict:
            args['system'] = SystemResponse._from_dict(_dict.get('system'))
            del xtra['system']
        if 'metadata' in _dict:
            args['metadata'] = MessageContextMetadata._from_dict(
                _dict.get('metadata'))
            del xtra['metadata']
        # Remaining (unmodeled) properties are forwarded to __init__ as
        # **kwargs and become dynamic attributes.
        args.update(xtra)
        return cls(**args)
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self,
                   'conversation_id') and self.conversation_id is not None:
            _dict['conversation_id'] = self.conversation_id
        if hasattr(self, 'system') and self.system is not None:
            _dict['system'] = self.system._to_dict()
        if hasattr(self, 'metadata') and self.metadata is not None:
            _dict['metadata'] = self.metadata._to_dict()
        # Serialize any dynamic attributes recorded by __setattr__; values of
        # None are omitted, matching the handling of the modeled properties.
        if hasattr(self, '_additionalProperties'):
            for _key in self._additionalProperties:
                _value = getattr(self, _key, None)
                if _value is not None:
                    _dict[_key] = _value
        return _dict
    def __setattr__(self, name, value):
        # Track every attribute that is not one of the three modeled
        # properties so _to_dict can round-trip arbitrary context data.
        properties = {'conversation_id', 'system', 'metadata'}
        if not hasattr(self, '_additionalProperties'):
            # object.__setattr__ (via super) avoids recursing into this
            # method while creating the tracking set itself.
            super(Context, self).__setattr__('_additionalProperties', set())
        if name not in properties:
            self._additionalProperties.add(name)
        super(Context, self).__setattr__(name, value)
    def __str__(self):
        """Return a `str` version of this Context object."""
        return json.dumps(self._to_dict(), indent=2)
    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Counterexample(object):
    """
    A user input example that has been marked as irrelevant input.

    :attr str text: The text of the counterexample.
    :attr datetime created: (optional) The timestamp for creation of the
    counterexample.
    :attr datetime updated: (optional) The timestamp for the last update to
    the counterexample.
    """

    def __init__(self, text, created=None, updated=None):
        """
        Initialize a Counterexample object.

        :param str text: The text of the counterexample.
        :param datetime created: (optional) The timestamp for creation of the
        counterexample.
        :param datetime updated: (optional) The timestamp for the last update
        to the counterexample.
        """
        self.text = text
        self.created = created
        self.updated = updated

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Counterexample object from a json dictionary."""
        if 'text' not in _dict:
            raise ValueError(
                'Required property \'text\' not present in Counterexample JSON')
        args = {'text': _dict.get('text')}
        # Timestamps arrive as ISO-8601 strings and are parsed to datetimes.
        if 'created' in _dict:
            args['created'] = string_to_datetime(_dict.get('created'))
        if 'updated' in _dict:
            args['updated'] = string_to_datetime(_dict.get('updated'))
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'text', None) is not None:
            result['text'] = self.text
        if getattr(self, 'created', None) is not None:
            result['created'] = datetime_to_string(self.created)
        if getattr(self, 'updated', None) is not None:
            result['updated'] = datetime_to_string(self.updated)
        return result

    def __str__(self):
        """Return a `str` version of this Counterexample object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class CounterexampleCollection(object):
    """
    A page of counterexamples together with pagination information.

    :attr list[Counterexample] counterexamples: An array of objects describing
    the examples marked as irrelevant input.
    :attr Pagination pagination: The pagination data for the returned objects.
    """

    def __init__(self, counterexamples, pagination):
        """
        Initialize a CounterexampleCollection object.

        :param list[Counterexample] counterexamples: An array of objects
        describing the examples marked as irrelevant input.
        :param Pagination pagination: The pagination data for the returned
        objects.
        """
        self.counterexamples = counterexamples
        self.pagination = pagination

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a CounterexampleCollection object from a json dictionary."""
        # Both properties are required; each is validated just before use to
        # preserve the original error ordering.
        if 'counterexamples' not in _dict:
            raise ValueError(
                'Required property \'counterexamples\' not present in CounterexampleCollection JSON'
            )
        examples = [
            Counterexample._from_dict(item)
            for item in _dict.get('counterexamples')
        ]
        if 'pagination' not in _dict:
            raise ValueError(
                'Required property \'pagination\' not present in CounterexampleCollection JSON'
            )
        pagination = Pagination._from_dict(_dict.get('pagination'))
        return cls(examples, pagination)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'counterexamples', None) is not None:
            result['counterexamples'] = [
                item._to_dict() for item in self.counterexamples
            ]
        if getattr(self, 'pagination', None) is not None:
            result['pagination'] = self.pagination._to_dict()
        return result

    def __str__(self):
        """Return a `str` version of this CounterexampleCollection object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class CreateCounterexample(object):
    """
    Request model for marking a user input as irrelevant.

    :attr str text: The text of a user input marked as irrelevant input. This
    string must conform to the following restrictions:
    - It cannot contain carriage return, newline, or tab characters
    - It cannot consist of only whitespace characters
    - It must be no longer than 1024 characters.
    """

    def __init__(self, text):
        """
        Initialize a CreateCounterexample object.

        :param str text: The text of a user input marked as irrelevant input.
        This string must conform to the following restrictions:
        - It cannot contain carriage return, newline, or tab characters
        - It cannot consist of only whitespace characters
        - It must be no longer than 1024 characters.
        """
        self.text = text

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a CreateCounterexample object from a json dictionary."""
        # 'text' is the only property and it is required.
        if 'text' not in _dict:
            raise ValueError(
                'Required property \'text\' not present in CreateCounterexample JSON'
            )
        return cls(text=_dict.get('text'))

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'text', None) is not None:
            result['text'] = self.text
        return result

    def __str__(self):
        """Return a `str` version of this CreateCounterexample object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class CreateDialogNode(object):
"""
CreateDialogNode.
:attr str dialog_node: The dialog node ID. This string must conform to the following
restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 1024 characters.
:attr str description: (optional) The description of the dialog node. This string
cannot contain carriage return, newline, or tab characters, and it must be no longer
than 128 characters.
:attr str conditions: (optional) The condition that will trigger the dialog node. This
string cannot contain carriage return, newline, or tab characters, and it must be no
longer than 2048 characters.
:attr str parent: (optional) The ID of the parent dialog node.
:attr str previous_sibling: (optional) The ID of the previous dialog node.
:attr DialogNodeOutput output: (optional) The output of the dialog node. For more
information about how to specify dialog node output, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#complex).
:attr object context: (optional) The context for the dialog node.
:attr object metadata: (optional) The metadata for the dialog node.
:attr DialogNodeNextStep next_step: (optional) The next step to execute following this
dialog node.
:attr list[DialogNodeAction] actions: (optional) An array of objects describing any
actions to be invoked by the dialog node.
:attr str title: (optional) The alias used to identify the dialog node. This string
must conform to the following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 64 characters.
:attr str node_type: (optional) How the dialog node is processed.
:attr str event_name: (optional) How an `event_handler` node is processed.
:attr str variable: (optional) The location in the dialog context where output is
stored.
:attr str digress_in: (optional) Whether this top-level dialog node can be digressed
into.
:attr str digress_out: (optional) Whether this dialog node can be returned to after a
digression.
:attr str digress_out_slots: (optional) Whether the user can digress to top-level
nodes while filling out slots.
:attr str user_label: (optional) A label that can be displayed externally to describe
the purpose of the node to users. This string must be no longer than 512 characters.
"""
def __init__(self,
dialog_node,
description=None,
conditions=None,
parent=None,
previous_sibling=None,
output=None,
context=None,
metadata=None,
next_step=None,
actions=None,
title=None,
node_type=None,
event_name=None,
variable=None,
digress_in=None,
digress_out=None,
digress_out_slots=None,
user_label=None):
"""
Initialize a CreateDialogNode object.
:param str dialog_node: The dialog node ID. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 1024 characters.
:param str description: (optional) The description of the dialog node. This string
cannot contain carriage return, newline, or tab characters, and it must be no
longer than 128 characters.
:param str conditions: (optional) The condition that will trigger the dialog node.
This string cannot contain carriage return, newline, or tab characters, and it
must be no longer than 2048 characters.
:param str parent: (optional) The ID of the parent dialog node.
:param str previous_sibling: (optional) The ID of the previous dialog node.
:param DialogNodeOutput output: (optional) The output of the dialog node. For more
information about how to specify dialog node output, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#complex).
:param object context: (optional) The context for the dialog node.
:param object metadata: (optional) The metadata for the dialog node.
:param DialogNodeNextStep next_step: (optional) The next step to execute following
this dialog node.
:param list[DialogNodeAction] actions: (optional) An array of objects describing
any actions to be invoked by the dialog node.
:param str title: (optional) The alias used to identify the dialog node. This
string must conform to the following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 64 characters.
:param str node_type: (optional) How the dialog node is processed.
:param str event_name: (optional) How an `event_handler` node is processed.
:param str variable: (optional) The location in the dialog context where output is
stored.
:param str digress_in: (optional) Whether this top-level dialog node can be
digressed into.
:param str digress_out: (optional) Whether this dialog node can be returned to
after a digression.
:param str digress_out_slots: (optional) Whether the user can digress to top-level
nodes while filling out slots.
:param str user_label: (optional) A label that can be displayed externally to
describe the purpose of the node to users. This string must be no longer than 512
characters.
"""
self.dialog_node = dialog_node
self.description = description
self.conditions = conditions
self.parent = parent
self.previous_sibling = previous_sibling
self.output = output
self.context = context
self.metadata = metadata
self.next_step = next_step
self.actions = actions
self.title = title
self.node_type = node_type
self.event_name = event_name
self.variable = variable
self.digress_in = digress_in
self.digress_out = digress_out
self.digress_out_slots = digress_out_slots
self.user_label = user_label
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CreateDialogNode object from a json dictionary."""
args = {}
if 'dialog_node' in _dict:
args['dialog_node'] = _dict.get('dialog_node')
else:
raise ValueError(
'Required property \'dialog_node\' not present in CreateDialogNode JSON'
)
if 'description' in _dict:
args['description'] = _dict.get('description')
if 'conditions' in _dict:
args['conditions'] = _dict.get('conditions')
if 'parent' in _dict:
args['parent'] = _dict.get('parent')
if 'previous_sibling' in _dict:
args['previous_sibling'] = _dict.get('previous_sibling')
if 'output' in _dict:
args['output'] = DialogNodeOutput._from_dict(_dict.get('output'))
if 'context' in _dict:
args['context'] = _dict.get('context')
if 'metadata' in _dict:
args['metadata'] = _dict.get('metadata')
if 'next_step' in _dict:
args['next_step'] = DialogNodeNextStep._from_dict(
_dict.get('next_step'))
if 'actions' in _dict:
args['actions'] = [
DialogNodeAction._from_dict(x) for x in (_dict.get('actions'))
]
if 'title' in _dict:
args['title'] = _dict.get('title')
if 'type' in _dict or 'node_type' in _dict:
args['node_type'] = _dict.get('type') or _dict.get('node_type')
if 'event_name' in _dict:
args['event_name'] = _dict.get('event_name')
if 'variable' in _dict:
args['variable'] = _dict.get('variable')
if 'digress_in' in _dict:
args['digress_in'] = _dict.get('digress_in')
if 'digress_out' in _dict:
args['digress_out'] = _dict.get('digress_out')
if 'digress_out_slots' in _dict:
args['digress_out_slots'] = _dict.get('digress_out_slots')
if 'user_label' in _dict:
args['user_label'] = _dict.get('user_label')
return cls(**args)
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'dialog_node') and self.dialog_node is not None:
_dict['dialog_node'] = self.dialog_node
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'conditions') and self.conditions is not None:
_dict['conditions'] = self.conditions
if hasattr(self, 'parent') and self.parent is not None:
_dict['parent'] = self.parent
if hasattr(self,
'previous_sibling') and self.previous_sibling is not None:
_dict['previous_sibling'] = self.previous_sibling
if hasattr(self, 'output') and self.output is not None:
_dict['output'] = self.output._to_dict()
if hasattr(self, 'context') and self.context is not None:
_dict['context'] = self.context
if hasattr(self, 'metadata') and self.metadata is not None:
_dict['metadata'] = self.metadata
if hasattr(self, 'next_step') and self.next_step is not None:
_dict['next_step'] = self.next_step._to_dict()
if hasattr(self, 'actions') and self.actions is not None:
_dict['actions'] = [x._to_dict() for x in self.actions]
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'node_type') and self.node_type is not None:
_dict['type'] = self.node_type
if hasattr(self, 'event_name') and self.event_name is not None:
_dict['event_name'] = self.event_name
if hasattr(self, 'variable') and self.variable is not None:
_dict['variable'] = self.variable
if hasattr(self, 'digress_in') and self.digress_in is not None:
_dict['digress_in'] = self.digress_in
if hasattr(self, 'digress_out') and self.digress_out is not None:
_dict['digress_out'] = self.digress_out
if hasattr(self,
'digress_out_slots') and self.digress_out_slots is not None:
_dict['digress_out_slots'] = self.digress_out_slots
if hasattr(self, 'user_label') and self.user_label is not None:
_dict['user_label'] = self.user_label
return _dict
def __str__(self):
"""Return a `str` version of this CreateDialogNode object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other):
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class CreateEntity(object):
    """
    CreateEntity.

    Request model for creating an entity within a workspace.

    :attr str entity: The name of the entity. It can contain only Unicode
          alphanumeric, underscore, and hyphen characters and must be no
          longer than 64 characters. A name beginning with the reserved prefix
          `sys-` must be the name of a system entity to enable (any entity
          content specified with the request is ignored).
    :attr str description: (optional) The description of the entity. It cannot
          contain carriage return, newline, or tab characters and must be no
          longer than 128 characters.
    :attr object metadata: (optional) Any metadata related to the value.
    :attr list[CreateValue] values: (optional) An array of objects describing
          the entity values.
    :attr bool fuzzy_match: (optional) Whether to use fuzzy matching for the
          entity.
    """

    def __init__(self,
                 entity,
                 description=None,
                 metadata=None,
                 values=None,
                 fuzzy_match=None):
        """
        Initialize a CreateEntity object.

        :param str entity: The name of the entity (required; see the class
               docstring for character and length restrictions).
        :param str description: (optional) The description of the entity.
        :param object metadata: (optional) Any metadata related to the value.
        :param list[CreateValue] values: (optional) The entity values.
        :param bool fuzzy_match: (optional) Whether to use fuzzy matching.
        """
        self.entity = entity
        self.description = description
        self.metadata = metadata
        self.values = values
        self.fuzzy_match = fuzzy_match

    @classmethod
    def _from_dict(cls, _dict):
        """Build a CreateEntity from its json dictionary representation.

        :raises ValueError: if the required ``entity`` key is absent.
        """
        if 'entity' not in _dict:
            raise ValueError(
                'Required property \'entity\' not present in CreateEntity JSON')
        args = {'entity': _dict.get('entity')}
        for key in ('description', 'metadata', 'fuzzy_match'):
            if key in _dict:
                args[key] = _dict.get(key)
        if 'values' in _dict:
            # Entity values are nested models with their own deserializer.
            args['values'] = [
                CreateValue._from_dict(x) for x in _dict.get('values')
            ]
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model.

        Only attributes that are present and not None are emitted.
        """
        _dict = {}
        for key in ('entity', 'description', 'metadata'):
            value = getattr(self, key, None)
            if value is not None:
                _dict[key] = value
        if getattr(self, 'values', None) is not None:
            _dict['values'] = [x._to_dict() for x in self.values]
        if getattr(self, 'fuzzy_match', None) is not None:
            _dict['fuzzy_match'] = self.fuzzy_match
        return _dict

    def __str__(self):
        """Return this CreateEntity rendered as pretty-printed JSON."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Two CreateEntity objects are equal when both are the same type and
        all attributes match."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__: True when the objects differ."""
        return not self == other
class CreateExample(object):
    """
    CreateExample.

    Request model for a user input example attached to an intent.

    :attr str text: The text of a user input example. It cannot contain
          carriage return, newline, or tab characters, cannot consist of only
          whitespace characters, and must be no longer than 1024 characters.
    :attr list[Mentions] mentions: (optional) An array of contextual entity
          mentions.
    """

    def __init__(self, text, mentions=None):
        """
        Initialize a CreateExample object.

        :param str text: The text of a user input example (required; see the
               class docstring for restrictions).
        :param list[Mentions] mentions: (optional) Contextual entity mentions.
        """
        self.text = text
        self.mentions = mentions

    @classmethod
    def _from_dict(cls, _dict):
        """Build a CreateExample from its json dictionary representation.

        :raises ValueError: if the required ``text`` key is absent.
        """
        if 'text' not in _dict:
            raise ValueError(
                'Required property \'text\' not present in CreateExample JSON')
        args = {'text': _dict.get('text')}
        if 'mentions' in _dict:
            # Mentions are nested models with their own deserializer.
            args['mentions'] = [
                Mentions._from_dict(x) for x in _dict.get('mentions')
            ]
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model.

        Only attributes that are present and not None are emitted.
        """
        _dict = {}
        if getattr(self, 'text', None) is not None:
            _dict['text'] = self.text
        if getattr(self, 'mentions', None) is not None:
            _dict['mentions'] = [x._to_dict() for x in self.mentions]
        return _dict

    def __str__(self):
        """Return this CreateExample rendered as pretty-printed JSON."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Two CreateExample objects are equal when both are the same type and
        all attributes match."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__: True when the objects differ."""
        return not self == other
class CreateIntent(object):
    """
    CreateIntent.

    Request model for creating an intent within a workspace.

    :attr str intent: The name of the intent. It can contain only Unicode
          alphanumeric, underscore, hyphen, and dot characters, cannot begin
          with the reserved prefix `sys-`, and must be no longer than 128
          characters.
    :attr str description: (optional) The description of the intent. It cannot
          contain carriage return, newline, or tab characters and must be no
          longer than 128 characters.
    :attr list[CreateExample] examples: (optional) An array of user input
          examples for the intent.
    """

    def __init__(self, intent, description=None, examples=None):
        """
        Initialize a CreateIntent object.

        :param str intent: The name of the intent (required; see the class
               docstring for restrictions).
        :param str description: (optional) The description of the intent.
        :param list[CreateExample] examples: (optional) User input examples
               for the intent.
        """
        self.intent = intent
        self.description = description
        self.examples = examples

    @classmethod
    def _from_dict(cls, _dict):
        """Build a CreateIntent from its json dictionary representation.

        :raises ValueError: if the required ``intent`` key is absent.
        """
        if 'intent' not in _dict:
            raise ValueError(
                'Required property \'intent\' not present in CreateIntent JSON')
        args = {'intent': _dict.get('intent')}
        if 'description' in _dict:
            args['description'] = _dict.get('description')
        if 'examples' in _dict:
            # Examples are nested models with their own deserializer.
            args['examples'] = [
                CreateExample._from_dict(x) for x in _dict.get('examples')
            ]
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model.

        Only attributes that are present and not None are emitted.
        """
        _dict = {}
        if getattr(self, 'intent', None) is not None:
            _dict['intent'] = self.intent
        if getattr(self, 'description', None) is not None:
            _dict['description'] = self.description
        if getattr(self, 'examples', None) is not None:
            _dict['examples'] = [x._to_dict() for x in self.examples]
        return _dict

    def __str__(self):
        """Return this CreateIntent rendered as pretty-printed JSON."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Two CreateIntent objects are equal when both are the same type and
        all attributes match."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__: True when the objects differ."""
        return not self == other
class CreateValue(object):
    """
    CreateValue.

    Request model for creating an entity value.

    :attr str value: The text of the entity value. It cannot contain carriage
          return, newline, or tab characters, cannot consist of only
          whitespace characters, and must be no longer than 64 characters.
    :attr object metadata: (optional) Any metadata related to the entity value.
    :attr list[str] synonyms: (optional) Synonyms for the entity value. Provide
          either synonyms or patterns (as indicated by **type**), but not
          both. Each synonym follows the same restrictions as ``value``.
    :attr list[str] patterns: (optional) Patterns for the entity value. Each is
          a regular expression no longer than 512 characters. For details, see
          the
          [documentation](https://cloud.ibm.com/docs/services/assistant/entities.html#creating-entities).
    :attr str value_type: (optional) Specifies the type of value.
    """

    def __init__(self,
                 value,
                 metadata=None,
                 synonyms=None,
                 patterns=None,
                 value_type=None):
        """
        Initialize a CreateValue object.

        :param str value: The text of the entity value (required; see the
               class docstring for restrictions).
        :param object metadata: (optional) Any metadata related to the value.
        :param list[str] synonyms: (optional) Synonyms for the entity value.
        :param list[str] patterns: (optional) Patterns for the entity value.
        :param str value_type: (optional) Specifies the type of value.
        """
        self.value = value
        self.metadata = metadata
        self.synonyms = synonyms
        self.patterns = patterns
        self.value_type = value_type

    @classmethod
    def _from_dict(cls, _dict):
        """Build a CreateValue from its json dictionary representation.

        :raises ValueError: if the required ``value`` key is absent.
        """
        if 'value' not in _dict:
            raise ValueError(
                'Required property \'value\' not present in CreateValue JSON')
        args = {'value': _dict.get('value')}
        for key in ('metadata', 'synonyms', 'patterns'):
            if key in _dict:
                args[key] = _dict.get(key)
        # 'type' on the wire maps to the 'value_type' attribute.
        if 'type' in _dict or 'value_type' in _dict:
            args['value_type'] = _dict.get('type') or _dict.get('value_type')
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model.

        Only attributes that are present and not None are emitted.
        """
        _dict = {}
        for key in ('value', 'metadata', 'synonyms', 'patterns'):
            attr_value = getattr(self, key, None)
            if attr_value is not None:
                _dict[key] = attr_value
        if getattr(self, 'value_type', None) is not None:
            # 'value_type' attribute maps back to 'type' on the wire.
            _dict['type'] = self.value_type
        return _dict

    def __str__(self):
        """Return this CreateValue rendered as pretty-printed JSON."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Two CreateValue objects are equal when both are the same type and
        all attributes match."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__: True when the objects differ."""
        return not self == other
class DialogNode(object):
    """
    DialogNode.

    Response model for a dialog node defined in a workspace.

    :attr str dialog_node_id: The dialog node ID.
    :attr str description: (optional) The description of the dialog node.
    :attr str conditions: (optional) The condition that triggers the node.
    :attr str parent: (optional) The ID of the parent dialog node; absent when
          the node has no parent.
    :attr str previous_sibling: (optional) The ID of the previous sibling
          dialog node; absent when the node has no previous sibling.
    :attr DialogNodeOutput output: (optional) The output of the dialog node.
    :attr object context: (optional) The context (if defined) for the node.
    :attr object metadata: (optional) Any metadata for the dialog node.
    :attr DialogNodeNextStep next_step: (optional) The next step to execute
          following this dialog node.
    :attr datetime created: (optional) The timestamp for creation of the node.
    :attr datetime updated: (optional) The timestamp for the most recent
          update to the node.
    :attr list[DialogNodeAction] actions: (optional) The actions for the node.
    :attr str title: (optional) The alias used to identify the dialog node.
    :attr bool disabled: (optional) For internal use only.
    :attr str node_type: (optional) How the dialog node is processed.
    :attr str event_name: (optional) How an `event_handler` node is processed.
    :attr str variable: (optional) The location in the dialog context where
          output is stored.
    :attr str digress_in: (optional) Whether this top-level dialog node can be
          digressed into.
    :attr str digress_out: (optional) Whether this dialog node can be returned
          to after a digression.
    :attr str digress_out_slots: (optional) Whether the user can digress to
          top-level nodes while filling out slots.
    :attr str user_label: (optional) A label displayed externally to describe
          the purpose of the node to users (at most 512 characters).
    """

    def __init__(self,
                 dialog_node_id,
                 description=None,
                 conditions=None,
                 parent=None,
                 previous_sibling=None,
                 output=None,
                 context=None,
                 metadata=None,
                 next_step=None,
                 created=None,
                 updated=None,
                 actions=None,
                 title=None,
                 disabled=None,
                 node_type=None,
                 event_name=None,
                 variable=None,
                 digress_in=None,
                 digress_out=None,
                 digress_out_slots=None,
                 user_label=None):
        """
        Initialize a DialogNode object.

        :param str dialog_node_id: The dialog node ID (the only required
               argument). All other parameters are optional and correspond
               one-to-one with the attributes documented on the class.
        """
        self.dialog_node_id = dialog_node_id
        self.description = description
        self.conditions = conditions
        self.parent = parent
        self.previous_sibling = previous_sibling
        self.output = output
        self.context = context
        self.metadata = metadata
        self.next_step = next_step
        self.created = created
        self.updated = updated
        self.actions = actions
        self.title = title
        self.disabled = disabled
        self.node_type = node_type
        self.event_name = event_name
        self.variable = variable
        self.digress_in = digress_in
        self.digress_out = digress_out
        self.digress_out_slots = digress_out_slots
        self.user_label = user_label

    @classmethod
    def _from_dict(cls, _dict):
        """Build a DialogNode from its json dictionary representation.

        :raises ValueError: if neither ``dialog_node`` nor ``dialog_node_id``
               is present.
        """
        # The wire format uses 'dialog_node'; the attribute name is accepted
        # as a fallback for round-tripping.
        if 'dialog_node' not in _dict and 'dialog_node_id' not in _dict:
            raise ValueError(
                'Required property \'dialog_node\' not present in DialogNode JSON'
            )
        args = {
            'dialog_node_id':
            _dict.get('dialog_node') or _dict.get('dialog_node_id')
        }
        # Keys whose values pass through without conversion.
        plain_keys = ('description', 'conditions', 'parent',
                      'previous_sibling', 'context', 'metadata', 'title',
                      'disabled', 'event_name', 'variable', 'digress_in',
                      'digress_out', 'digress_out_slots', 'user_label')
        for key in plain_keys:
            if key in _dict:
                args[key] = _dict.get(key)
        # Nested models and timestamps need their own deserializers.
        if 'output' in _dict:
            args['output'] = DialogNodeOutput._from_dict(_dict.get('output'))
        if 'next_step' in _dict:
            args['next_step'] = DialogNodeNextStep._from_dict(
                _dict.get('next_step'))
        if 'created' in _dict:
            args['created'] = string_to_datetime(_dict.get('created'))
        if 'updated' in _dict:
            args['updated'] = string_to_datetime(_dict.get('updated'))
        if 'actions' in _dict:
            args['actions'] = [
                DialogNodeAction._from_dict(x) for x in _dict.get('actions')
            ]
        # 'type' on the wire maps to the 'node_type' attribute.
        if 'type' in _dict or 'node_type' in _dict:
            args['node_type'] = _dict.get('type') or _dict.get('node_type')
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model.

        Only attributes that are present and not None are emitted.
        """
        as_is = lambda value: value
        # (attribute, json key, converter) triples in wire-format order.
        # Converters are lambdas so helper names resolve only when invoked.
        fields = [
            ('dialog_node_id', 'dialog_node', as_is),
            ('description', 'description', as_is),
            ('conditions', 'conditions', as_is),
            ('parent', 'parent', as_is),
            ('previous_sibling', 'previous_sibling', as_is),
            ('output', 'output', lambda v: v._to_dict()),
            ('context', 'context', as_is),
            ('metadata', 'metadata', as_is),
            ('next_step', 'next_step', lambda v: v._to_dict()),
            ('created', 'created', lambda v: datetime_to_string(v)),
            ('updated', 'updated', lambda v: datetime_to_string(v)),
            ('actions', 'actions', lambda v: [x._to_dict() for x in v]),
            ('title', 'title', as_is),
            ('disabled', 'disabled', as_is),
            ('node_type', 'type', as_is),
            ('event_name', 'event_name', as_is),
            ('variable', 'variable', as_is),
            ('digress_in', 'digress_in', as_is),
            ('digress_out', 'digress_out', as_is),
            ('digress_out_slots', 'digress_out_slots', as_is),
            ('user_label', 'user_label', as_is),
        ]
        _dict = {}
        for attr, key, convert in fields:
            value = getattr(self, attr, None)
            if value is not None:
                _dict[key] = convert(value)
        return _dict

    def __str__(self):
        """Return this DialogNode rendered as pretty-printed JSON."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Two DialogNode objects are equal when both are the same type and
        all attributes match."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__: True when the objects differ."""
        return not self == other
class DialogNodeAction(object):
    """
    DialogNodeAction.

    An action to be invoked by a dialog node.

    :attr str name: The name of the action.
    :attr str action_type: (optional) The type of action to invoke.
    :attr object parameters: (optional) A map of key/value pairs to be
          provided to the action.
    :attr str result_variable: The location in the dialog context where the
          result of the action is stored.
    :attr str credentials: (optional) The name of the context variable that
          the client application will use to pass in credentials for the
          action.
    """

    def __init__(self,
                 name,
                 result_variable,
                 action_type=None,
                 parameters=None,
                 credentials=None):
        """
        Initialize a DialogNodeAction object.

        :param str name: The name of the action.
        :param str result_variable: The location in the dialog context where
               the result of the action is stored.
        :param str action_type: (optional) The type of action to invoke.
        :param object parameters: (optional) Key/value pairs for the action.
        :param str credentials: (optional) The context variable the client
               application uses to pass in credentials for the action.
        """
        self.name = name
        self.action_type = action_type
        self.parameters = parameters
        self.result_variable = result_variable
        self.credentials = credentials

    @classmethod
    def _from_dict(cls, _dict):
        """Build a DialogNodeAction from its json dictionary representation.

        :raises ValueError: if ``name`` or ``result_variable`` is absent
               (``name`` is checked first).
        """
        if 'name' not in _dict:
            raise ValueError(
                'Required property \'name\' not present in DialogNodeAction JSON'
            )
        args = {'name': _dict.get('name')}
        # 'type' on the wire maps to the 'action_type' attribute.
        if 'type' in _dict or 'action_type' in _dict:
            args['action_type'] = _dict.get('type') or _dict.get('action_type')
        if 'parameters' in _dict:
            args['parameters'] = _dict.get('parameters')
        if 'result_variable' not in _dict:
            raise ValueError(
                'Required property \'result_variable\' not present in DialogNodeAction JSON'
            )
        args['result_variable'] = _dict.get('result_variable')
        if 'credentials' in _dict:
            args['credentials'] = _dict.get('credentials')
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model.

        Only attributes that are present and not None are emitted.
        """
        # (attribute, json key) pairs in wire-format order; 'action_type'
        # maps back to 'type' on the wire.
        key_by_attr = [('name', 'name'), ('action_type', 'type'),
                       ('parameters', 'parameters'),
                       ('result_variable', 'result_variable'),
                       ('credentials', 'credentials')]
        _dict = {}
        for attr, key in key_by_attr:
            value = getattr(self, attr, None)
            if value is not None:
                _dict[key] = value
        return _dict

    def __str__(self):
        """Return this DialogNodeAction rendered as pretty-printed JSON."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Two DialogNodeAction objects are equal when both are the same type
        and all attributes match."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__: True when the objects differ."""
        return not self == other
class DialogNodeCollection(object):
    """
    An array of dialog nodes.

    :attr list[DialogNode] dialog_nodes: An array of objects describing the
          dialog nodes defined for the workspace.
    :attr Pagination pagination: The pagination data for the returned objects.
    """

    def __init__(self, dialog_nodes, pagination):
        """
        Initialize a DialogNodeCollection object.

        :param list[DialogNode] dialog_nodes: The dialog nodes defined for
               the workspace.
        :param Pagination pagination: The pagination data for the returned
               objects.
        """
        self.dialog_nodes = dialog_nodes
        self.pagination = pagination

    @classmethod
    def _from_dict(cls, _dict):
        """Build a DialogNodeCollection from its json dictionary.

        :raises ValueError: if ``dialog_nodes`` or ``pagination`` is absent
               (``dialog_nodes`` is checked first).
        """
        if 'dialog_nodes' not in _dict:
            raise ValueError(
                'Required property \'dialog_nodes\' not present in DialogNodeCollection JSON'
            )
        args = {}
        # Parse the nodes before checking pagination, matching the original
        # evaluation order (node parse errors surface first).
        args['dialog_nodes'] = [
            DialogNode._from_dict(x) for x in _dict.get('dialog_nodes')
        ]
        if 'pagination' not in _dict:
            raise ValueError(
                'Required property \'pagination\' not present in DialogNodeCollection JSON'
            )
        args['pagination'] = Pagination._from_dict(_dict.get('pagination'))
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model.

        Only attributes that are present and not None are emitted.
        """
        _dict = {}
        if getattr(self, 'dialog_nodes', None) is not None:
            _dict['dialog_nodes'] = [x._to_dict() for x in self.dialog_nodes]
        if getattr(self, 'pagination', None) is not None:
            _dict['pagination'] = self.pagination._to_dict()
        return _dict

    def __str__(self):
        """Return this DialogNodeCollection rendered as pretty-printed JSON."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Two DialogNodeCollection objects are equal when both are the same
        type and all attributes match."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__: True when the objects differ."""
        return not self == other
class DialogNodeNextStep(object):
    """
    The next step to execute following this dialog node.

    :attr str behavior: What happens after the dialog node completes. The
          following values are valid for any node: `get_user_input`,
          `skip_user_input`, `jump_to`. If the node is of type `event_handler`
          and its parent node is of type `slot` or `frame`, additional values
          are also valid depending on **event_name**:
          - `filled` (parent of type `slot`): `reprompt`, `skip_all_slots`
          - `nomatch` (parent of type `slot`): `reprompt`, `skip_slot`,
            `skip_all_slots`
          - `generic` (parent of type `frame`): `reprompt`, `skip_slot`,
            `skip_all_slots`
          If you specify `jump_to`, you must also set the `dialog_node`
          property.
    :attr str dialog_node: (optional) The ID of the dialog node to process
          next. Required if **behavior**=`jump_to`.
    :attr str selector: (optional) Which part of the dialog node to process
          next.
    """

    def __init__(self, behavior, dialog_node=None, selector=None):
        """
        Initialize a DialogNodeNextStep object.

        :param str behavior: What happens after the dialog node completes
              (see the class docstring for the full list of valid values).
        :param str dialog_node: (optional) The ID of the dialog node to
              process next. Required if **behavior**=`jump_to`.
        :param str selector: (optional) Which part of the dialog node to
              process next.
        """
        self.behavior = behavior
        self.dialog_node = dialog_node
        self.selector = selector

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogNodeNextStep object from a json dictionary."""
        if 'behavior' not in _dict:
            raise ValueError(
                'Required property \'behavior\' not present in DialogNodeNextStep JSON'
            )
        args = {'behavior': _dict.get('behavior')}
        # Optional properties are copied only when present in the JSON.
        for key in ('dialog_node', 'selector'):
            if key in _dict:
                args[key] = _dict.get(key)
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Serialize only attributes that are present and non-None.
        return {
            key: getattr(self, key)
            for key in ('behavior', 'dialog_node', 'selector')
            if getattr(self, key, None) is not None
        }

    def __str__(self):
        """Return a `str` version of this DialogNodeNextStep object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class DialogNodeOutput(object):
    """
    The output of the dialog node. For more information about how to specify dialog node
    output, see the
    [documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#complex).

    This model accepts arbitrary additional properties beyond ``generic`` and
    ``modifiers``: their names are tracked by ``__setattr__`` in
    ``_additionalProperties`` so that ``_from_dict``/``_to_dict`` can
    round-trip them.

    :attr list[DialogNodeOutputGeneric] generic: (optional) An array of objects describing
    the output defined for the dialog node.
    :attr DialogNodeOutputModifiers modifiers: (optional) Options that modify how
    specified output is handled.
    """
    def __init__(self, generic=None, modifiers=None, **kwargs):
        """
        Initialize a DialogNodeOutput object.

        :param list[DialogNodeOutputGeneric] generic: (optional) An array of objects
        describing the output defined for the dialog node.
        :param DialogNodeOutputModifiers modifiers: (optional) Options that modify how
        specified output is handled.
        :param **kwargs: (optional) Any additional properties.
        """
        self.generic = generic
        self.modifiers = modifiers
        # Route extra keyword arguments through __setattr__ so their names are
        # recorded as additional properties (see __setattr__ below).
        for _key, _value in kwargs.items():
            setattr(self, _key, _value)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogNodeOutput object from a json dictionary."""
        args = {}
        # Work on a copy: known keys are deleted from `xtra` as they are
        # consumed, and whatever remains is forwarded to the constructor as
        # additional (dynamic) properties.
        xtra = _dict.copy()
        if 'generic' in _dict:
            args['generic'] = [
                DialogNodeOutputGeneric._from_dict(x)
                for x in (_dict.get('generic'))
            ]
            del xtra['generic']
        if 'modifiers' in _dict:
            args['modifiers'] = DialogNodeOutputModifiers._from_dict(
                _dict.get('modifiers'))
            del xtra['modifiers']
        args.update(xtra)
        return cls(**args)
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'generic') and self.generic is not None:
            _dict['generic'] = [x._to_dict() for x in self.generic]
        if hasattr(self, 'modifiers') and self.modifiers is not None:
            _dict['modifiers'] = self.modifiers._to_dict()
        # Serialize any dynamic properties that were recorded by __setattr__.
        if hasattr(self, '_additionalProperties'):
            for _key in self._additionalProperties:
                _value = getattr(self, _key, None)
                if _value is not None:
                    _dict[_key] = _value
        return _dict
    def __setattr__(self, name, value):
        # Names of the modelled attributes; any other name being assigned is
        # remembered in _additionalProperties so _to_dict can serialize it.
        properties = {'generic', 'modifiers'}
        if not hasattr(self, '_additionalProperties'):
            # Lazily create the tracking set on first assignment (this also
            # covers assignments made during __init__).
            super(DialogNodeOutput, self).__setattr__('_additionalProperties',
                                                      set())
        if name not in properties:
            self._additionalProperties.add(name)
        super(DialogNodeOutput, self).__setattr__(name, value)
    def __str__(self):
        """Return a `str` version of this DialogNodeOutput object."""
        return json.dumps(self._to_dict(), indent=2)
    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class DialogNodeOutputGeneric(object):
    """
    DialogNodeOutputGeneric.

    :attr str response_type: The type of response returned by the dialog node.
          The specified response type must be supported by the client
          application or channel.
    :attr list[DialogNodeOutputTextValuesElement] values: (optional) One or
          more objects defining text responses. Required when
          **response_type**=`text`.
    :attr str selection_policy: (optional) How a response is selected from the
          list when more than one response is specified. Valid only when
          **response_type**=`text`.
    :attr str delimiter: (optional) The delimiter used as a separator between
          responses when `selection_policy`=`multiline`.
    :attr int time: (optional) How long to pause, in milliseconds (0 to
          10000). Valid only when **response_type**=`pause`.
    :attr bool typing: (optional) Whether to send a "user is typing" event
          during the pause. Ignored if the channel does not support this
          event. Valid only when **response_type**=`pause`.
    :attr str source: (optional) The URL of the image. Required when
          **response_type**=`image`.
    :attr str title: (optional) An optional title to show before the response
          (no longer than 512 characters). Valid only when
          **response_type**=`image` or `option`.
    :attr str description: (optional) An optional description to show with the
          response (no longer than 256 characters). Valid only when
          **response_type**=`image` or `option`.
    :attr str preference: (optional) The preferred type of control to display,
          if supported by the channel. Valid only when
          **response_type**=`option`.
    :attr list[DialogNodeOutputOptionsElement] options: (optional) Up to 20
          objects describing the options from which the user can choose.
          Required when **response_type**=`option`.
    :attr str message_to_human_agent: (optional) An optional message to be
          sent to the human agent who will be taking over the conversation
          (no longer than 256 characters). Valid only when
          **response_type**=`connect_to_agent`.
    """

    def __init__(self, response_type, values=None, selection_policy=None,
                 delimiter=None, time=None, typing=None, source=None,
                 title=None, description=None, preference=None, options=None,
                 message_to_human_agent=None):
        """
        Initialize a DialogNodeOutputGeneric object.

        :param str response_type: The type of response returned by the dialog
              node. The remaining (all optional) parameters mirror the class
              attributes of the same name; see the class docstring for their
              meaning and the response types they apply to.
        """
        self.response_type = response_type
        self.values = values
        self.selection_policy = selection_policy
        self.delimiter = delimiter
        self.time = time
        self.typing = typing
        self.source = source
        self.title = title
        self.description = description
        self.preference = preference
        self.options = options
        self.message_to_human_agent = message_to_human_agent

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogNodeOutputGeneric object from a json dictionary."""
        if 'response_type' not in _dict:
            raise ValueError(
                'Required property \'response_type\' not present in DialogNodeOutputGeneric JSON'
            )
        args = {'response_type': _dict.get('response_type')}
        # Nested models are converted element by element.
        if 'values' in _dict:
            args['values'] = [
                DialogNodeOutputTextValuesElement._from_dict(item)
                for item in _dict.get('values')
            ]
        if 'options' in _dict:
            args['options'] = [
                DialogNodeOutputOptionsElement._from_dict(item)
                for item in _dict.get('options')
            ]
        # Scalar properties are copied verbatim when present.
        for key in ('selection_policy', 'delimiter', 'time', 'typing',
                    'source', 'title', 'description', 'preference',
                    'message_to_human_agent'):
            if key in _dict:
                args[key] = _dict.get(key)
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        _dict = {}
        # Emit keys in the same order the service documents them; a value is
        # included whenever it is present and non-None (False/0 are kept).
        if getattr(self, 'response_type', None) is not None:
            _dict['response_type'] = self.response_type
        if getattr(self, 'values', None) is not None:
            _dict['values'] = [item._to_dict() for item in self.values]
        for key in ('selection_policy', 'delimiter', 'time', 'typing',
                    'source', 'title', 'description', 'preference'):
            value = getattr(self, key, None)
            if value is not None:
                _dict[key] = value
        if getattr(self, 'options', None) is not None:
            _dict['options'] = [item._to_dict() for item in self.options]
        if getattr(self, 'message_to_human_agent', None) is not None:
            _dict['message_to_human_agent'] = self.message_to_human_agent
        return _dict

    def __str__(self):
        """Return a `str` version of this DialogNodeOutputGeneric object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class DialogNodeOutputModifiers(object):
    """
    Options that modify how specified output is handled.

    :attr bool overwrite: (optional) Whether values in the output will
          overwrite output values in an array specified by previously executed
          dialog nodes. If this option is set to **false**, new values will be
          appended to previously specified values.
    """

    def __init__(self, overwrite=None):
        """
        Initialize a DialogNodeOutputModifiers object.

        :param bool overwrite: (optional) Whether output values overwrite
              values produced by previously executed dialog nodes; when
              **false**, new values are appended instead.
        """
        self.overwrite = overwrite

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogNodeOutputModifiers object from a json dictionary."""
        kwargs = {}
        if 'overwrite' in _dict:
            kwargs['overwrite'] = _dict.get('overwrite')
        return cls(**kwargs)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        overwrite = getattr(self, 'overwrite', None)
        # Keep the flag even when it is False; only None means "unset".
        return {} if overwrite is None else {'overwrite': overwrite}

    def __str__(self):
        """Return a `str` version of this DialogNodeOutputModifiers object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class DialogNodeOutputOptionsElement(object):
    """
    DialogNodeOutputOptionsElement.

    :attr str label: The user-facing label for the option.
    :attr DialogNodeOutputOptionsElementValue value: An object defining the
          message input to be sent to the Watson Assistant service if the user
          selects the corresponding option.
    """

    def __init__(self, label, value):
        """
        Initialize a DialogNodeOutputOptionsElement object.

        :param str label: The user-facing label for the option.
        :param DialogNodeOutputOptionsElementValue value: The message input to
              be sent to the Watson Assistant service if the user selects this
              option.
        """
        self.label = label
        self.value = value

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogNodeOutputOptionsElement object from a json dictionary."""
        # Both properties are required; `label` is validated first to match
        # the service's error ordering.
        if 'label' not in _dict:
            raise ValueError(
                'Required property \'label\' not present in DialogNodeOutputOptionsElement JSON'
            )
        if 'value' not in _dict:
            raise ValueError(
                'Required property \'value\' not present in DialogNodeOutputOptionsElement JSON'
            )
        return cls(
            label=_dict.get('label'),
            value=DialogNodeOutputOptionsElementValue._from_dict(
                _dict.get('value')))

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        out = {}
        if getattr(self, 'label', None) is not None:
            out['label'] = self.label
        if getattr(self, 'value', None) is not None:
            out['value'] = self.value._to_dict()
        return out

    def __str__(self):
        """Return a `str` version of this DialogNodeOutputOptionsElement object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class DialogNodeOutputOptionsElementValue(object):
    """
    An object defining the message input to be sent to the Watson Assistant
    service if the user selects the corresponding option.

    :attr InputData input: (optional) An input object that includes the input
          text.
    """

    def __init__(self, input=None):
        """
        Initialize a DialogNodeOutputOptionsElementValue object.

        :param InputData input: (optional) An input object that includes the
              input text.
        """
        self.input = input

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogNodeOutputOptionsElementValue object from a json dictionary."""
        if 'input' in _dict:
            return cls(input=InputData._from_dict(_dict.get('input')))
        return cls()

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        value = getattr(self, 'input', None)
        return {} if value is None else {'input': value._to_dict()}

    def __str__(self):
        """Return a `str` version of this DialogNodeOutputOptionsElementValue object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class DialogNodeOutputTextValuesElement(object):
    """
    DialogNodeOutputTextValuesElement.

    :attr str text: (optional) The text of a response. This string can include
          newline characters, Markdown tagging, or other special characters,
          if supported by the channel. It must be no longer than 4096
          characters.
    """

    def __init__(self, text=None):
        """
        Initialize a DialogNodeOutputTextValuesElement object.

        :param str text: (optional) The text of a response (no longer than
              4096 characters); may contain newlines, Markdown tagging, or
              other special characters if the channel supports them.
        """
        self.text = text

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogNodeOutputTextValuesElement object from a json dictionary."""
        if 'text' in _dict:
            return cls(text=_dict.get('text'))
        return cls()

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        text = getattr(self, 'text', None)
        return {} if text is None else {'text': text}

    def __str__(self):
        """Return a `str` version of this DialogNodeOutputTextValuesElement object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class DialogNodeVisitedDetails(object):
    """
    DialogNodeVisitedDetails.

    :attr str dialog_node: (optional) A dialog node that was triggered during
          processing of the input message.
    :attr str title: (optional) The title of the dialog node.
    :attr str conditions: (optional) The conditions that trigger the dialog
          node.
    """

    def __init__(self, dialog_node=None, title=None, conditions=None):
        """
        Initialize a DialogNodeVisitedDetails object.

        :param str dialog_node: (optional) A dialog node that was triggered
              during processing of the input message.
        :param str title: (optional) The title of the dialog node.
        :param str conditions: (optional) The conditions that trigger the
              dialog node.
        """
        self.dialog_node = dialog_node
        self.title = title
        self.conditions = conditions

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogNodeVisitedDetails object from a json dictionary."""
        # All properties are optional scalars; copy those present verbatim.
        args = {
            key: _dict.get(key)
            for key in ('dialog_node', 'title', 'conditions')
            if key in _dict
        }
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return {
            key: getattr(self, key)
            for key in ('dialog_node', 'title', 'conditions')
            if getattr(self, key, None) is not None
        }

    def __str__(self):
        """Return a `str` version of this DialogNodeVisitedDetails object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class DialogRuntimeResponseGeneric(object):
    """
    DialogRuntimeResponseGeneric.

    :attr str response_type: The type of response returned by the dialog node.
          The specified response type must be supported by the client
          application or channel.
          **Note:** The **suggestion** response type is part of the
          disambiguation feature, which is only available for Premium users.
    :attr str text: (optional) The text of the response.
    :attr int time: (optional) How long to pause, in milliseconds.
    :attr bool typing: (optional) Whether to send a "user is typing" event
          during the pause.
    :attr str source: (optional) The URL of the image.
    :attr str title: (optional) The title or introductory text to show before
          the response.
    :attr str description: (optional) The description to show with the
          response.
    :attr str preference: (optional) The preferred type of control to display.
    :attr list[DialogNodeOutputOptionsElement] options: (optional) An array of
          objects describing the options from which the user can choose.
    :attr str message_to_human_agent: (optional) A message to be sent to the
          human agent who will be taking over the conversation.
    :attr str topic: (optional) A label identifying the topic of the
          conversation, derived from the **user_label** property of the
          relevant node.
    :attr list[DialogSuggestion] suggestions: (optional) An array of objects
          describing the possible matching dialog nodes from which the user
          can choose.
          **Note:** The **suggestions** property is part of the disambiguation
          feature, which is only available for Premium users.
    """

    def __init__(self, response_type, text=None, time=None, typing=None,
                 source=None, title=None, description=None, preference=None,
                 options=None, message_to_human_agent=None, topic=None,
                 suggestions=None):
        """
        Initialize a DialogRuntimeResponseGeneric object.

        :param str response_type: The type of response returned by the dialog
              node. The remaining (all optional) parameters mirror the class
              attributes of the same name; see the class docstring for their
              meaning.
        """
        self.response_type = response_type
        self.text = text
        self.time = time
        self.typing = typing
        self.source = source
        self.title = title
        self.description = description
        self.preference = preference
        self.options = options
        self.message_to_human_agent = message_to_human_agent
        self.topic = topic
        self.suggestions = suggestions

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogRuntimeResponseGeneric object from a json dictionary."""
        if 'response_type' not in _dict:
            raise ValueError(
                'Required property \'response_type\' not present in DialogRuntimeResponseGeneric JSON'
            )
        args = {'response_type': _dict.get('response_type')}
        # Scalar properties are copied verbatim when present.
        for key in ('text', 'time', 'typing', 'source', 'title',
                    'description', 'preference', 'message_to_human_agent',
                    'topic'):
            if key in _dict:
                args[key] = _dict.get(key)
        # Nested models are converted element by element.
        if 'options' in _dict:
            args['options'] = [
                DialogNodeOutputOptionsElement._from_dict(item)
                for item in _dict.get('options')
            ]
        if 'suggestions' in _dict:
            args['suggestions'] = [
                DialogSuggestion._from_dict(item)
                for item in _dict.get('suggestions')
            ]
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        _dict = {}
        # Emit keys in the documented order; a value is included whenever it
        # is present and non-None (False/0 are kept).
        for key in ('response_type', 'text', 'time', 'typing', 'source',
                    'title', 'description', 'preference'):
            value = getattr(self, key, None)
            if value is not None:
                _dict[key] = value
        if getattr(self, 'options', None) is not None:
            _dict['options'] = [item._to_dict() for item in self.options]
        for key in ('message_to_human_agent', 'topic'):
            value = getattr(self, key, None)
            if value is not None:
                _dict[key] = value
        if getattr(self, 'suggestions', None) is not None:
            _dict['suggestions'] = [
                item._to_dict() for item in self.suggestions
            ]
        return _dict

    def __str__(self):
        """Return a `str` version of this DialogRuntimeResponseGeneric object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class DialogSuggestion(object):
    """
    DialogSuggestion.

    :attr str label: The user-facing label for the disambiguation option. This
          label is taken from the **user_label** property of the corresponding
          dialog node.
    :attr DialogSuggestionValue value: An object defining the message input,
          intents, and entities to be sent to the Watson Assistant service if
          the user selects the corresponding disambiguation option.
    :attr object output: (optional) The dialog output that will be returned
          from the Watson Assistant service if the user selects the
          corresponding option.
    """

    def __init__(self, label, value, output=None):
        """
        Initialize a DialogSuggestion object.

        :param str label: The user-facing label for the disambiguation option
              (taken from the corresponding dialog node's **user_label**).
        :param DialogSuggestionValue value: The message input, intents, and
              entities to send if the user selects this option.
        :param object output: (optional) The dialog output returned if the
              user selects this option.
        """
        self.label = label
        self.value = value
        self.output = output

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogSuggestion object from a json dictionary."""
        # `label` and `value` are required; `label` is validated first to
        # match the service's error ordering.
        if 'label' not in _dict:
            raise ValueError(
                'Required property \'label\' not present in DialogSuggestion JSON'
            )
        if 'value' not in _dict:
            raise ValueError(
                'Required property \'value\' not present in DialogSuggestion JSON'
            )
        args = {
            'label': _dict.get('label'),
            'value': DialogSuggestionValue._from_dict(_dict.get('value')),
        }
        if 'output' in _dict:
            args['output'] = _dict.get('output')
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        out = {}
        if getattr(self, 'label', None) is not None:
            out['label'] = self.label
        if getattr(self, 'value', None) is not None:
            out['value'] = self.value._to_dict()
        if getattr(self, 'output', None) is not None:
            out['output'] = self.output
        return out

    def __str__(self):
        """Return a `str` version of this DialogSuggestion object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class DialogSuggestionValue(object):
    """
    An object defining the message input, intents, and entities to be sent to
    the Watson Assistant service if the user selects the corresponding
    disambiguation option.

    :attr InputData input: (optional) An input object that includes the input
          text.
    :attr list[RuntimeIntent] intents: (optional) An array of intents to be
          sent along with the user input.
    :attr list[RuntimeEntity] entities: (optional) An array of entities to be
          sent along with the user input.
    """

    def __init__(self, input=None, intents=None, entities=None):
        """
        Initialize a DialogSuggestionValue object.

        :param InputData input: (optional) An input object that includes the
              input text.
        :param list[RuntimeIntent] intents: (optional) Intents to be sent
              along with the user input.
        :param list[RuntimeEntity] entities: (optional) Entities to be sent
              along with the user input.
        """
        self.input = input
        self.intents = intents
        self.entities = entities

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DialogSuggestionValue object from a json dictionary."""
        args = {}
        if 'input' in _dict:
            args['input'] = InputData._from_dict(_dict.get('input'))
        if 'intents' in _dict:
            args['intents'] = [
                RuntimeIntent._from_dict(item)
                for item in _dict.get('intents')
            ]
        if 'entities' in _dict:
            args['entities'] = [
                RuntimeEntity._from_dict(item)
                for item in _dict.get('entities')
            ]
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        out = {}
        if getattr(self, 'input', None) is not None:
            out['input'] = self.input._to_dict()
        if getattr(self, 'intents', None) is not None:
            out['intents'] = [item._to_dict() for item in self.intents]
        if getattr(self, 'entities', None) is not None:
            out['entities'] = [item._to_dict() for item in self.entities]
        return out

    def __str__(self):
        """Return a `str` version of this DialogSuggestionValue object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class Entity(object):
    """
    Entity.

    :attr str entity_name: The name of the entity.
    :attr datetime created: (optional) The timestamp for creation of the entity.
    :attr datetime updated: (optional) The timestamp for the last update to the
    entity.
    :attr str description: (optional) The description of the entity.
    :attr object metadata: (optional) Any metadata related to the entity.
    :attr bool fuzzy_match: (optional) Whether fuzzy matching is used for the
    entity.
    """

    def __init__(self, entity_name, created=None, updated=None,
                 description=None, metadata=None, fuzzy_match=None):
        """
        Initialize a Entity object.

        :param str entity_name: The name of the entity.
        :param datetime created: (optional) The timestamp for creation of the
        entity.
        :param datetime updated: (optional) The timestamp for the last update
        to the entity.
        :param str description: (optional) The description of the entity.
        :param object metadata: (optional) Any metadata related to the entity.
        :param bool fuzzy_match: (optional) Whether fuzzy matching is used for
        the entity.
        """
        self.entity_name = entity_name
        self.created = created
        self.updated = updated
        self.description = description
        self.metadata = metadata
        self.fuzzy_match = fuzzy_match

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Entity object from a json dictionary."""
        # The service uses 'entity' on the wire; 'entity_name' is accepted as
        # an alias for round-tripping the constructor argument name.
        if 'entity' not in _dict and 'entity_name' not in _dict:
            raise ValueError(
                'Required property \'entity\' not present in Entity JSON')
        args = {'entity_name': _dict.get('entity') or _dict.get('entity_name')}
        for key in ('created', 'updated'):
            if key in _dict:
                args[key] = string_to_datetime(_dict.get(key))
        for key in ('description', 'metadata', 'fuzzy_match'):
            if key in _dict:
                args[key] = _dict.get(key)
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'entity_name', None) is not None:
            serialized['entity'] = self.entity_name
        if getattr(self, 'created', None) is not None:
            serialized['created'] = datetime_to_string(self.created)
        if getattr(self, 'updated', None) is not None:
            serialized['updated'] = datetime_to_string(self.updated)
        for key in ('description', 'metadata', 'fuzzy_match'):
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        return serialized

    def __str__(self):
        """Return a `str` version of this Entity object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class EntityCollection(object):
    """
    A paginated list of entities.

    :attr list[EntityExport] entities: Objects describing the entities defined
    for the workspace.
    :attr Pagination pagination: The pagination data for the returned objects.
    """

    def __init__(self, entities, pagination):
        """
        Initialize a EntityCollection object.

        :param list[EntityExport] entities: Objects describing the entities
        defined for the workspace.
        :param Pagination pagination: The pagination data for the returned
        objects.
        """
        self.entities = entities
        self.pagination = pagination

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a EntityCollection object from a json dictionary."""
        if 'entities' not in _dict:
            raise ValueError(
                'Required property \'entities\' not present in EntityCollection JSON'
            )
        entities = [EntityExport._from_dict(x) for x in _dict.get('entities')]
        if 'pagination' not in _dict:
            raise ValueError(
                'Required property \'pagination\' not present in EntityCollection JSON'
            )
        pagination = Pagination._from_dict(_dict.get('pagination'))
        return cls(entities=entities, pagination=pagination)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'entities', None) is not None:
            serialized['entities'] = [x._to_dict() for x in self.entities]
        if getattr(self, 'pagination', None) is not None:
            serialized['pagination'] = self.pagination._to_dict()
        return serialized

    def __str__(self):
        """Return a `str` version of this EntityCollection object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class EntityExport(object):
    """
    An entity together with its defined values.

    :attr str entity_name: The name of the entity.
    :attr datetime created: (optional) The timestamp for creation of the entity.
    :attr datetime updated: (optional) The timestamp for the last update to the
    entity.
    :attr str description: (optional) The description of the entity.
    :attr object metadata: (optional) Any metadata related to the entity.
    :attr bool fuzzy_match: (optional) Whether fuzzy matching is used for the
    entity.
    :attr list[ValueExport] values: (optional) Objects describing the entity
    values.
    """

    def __init__(self, entity_name, created=None, updated=None,
                 description=None, metadata=None, fuzzy_match=None,
                 values=None):
        """
        Initialize a EntityExport object.

        :param str entity_name: The name of the entity.
        :param datetime created: (optional) The timestamp for creation of the
        entity.
        :param datetime updated: (optional) The timestamp for the last update
        to the entity.
        :param str description: (optional) The description of the entity.
        :param object metadata: (optional) Any metadata related to the entity.
        :param bool fuzzy_match: (optional) Whether fuzzy matching is used for
        the entity.
        :param list[ValueExport] values: (optional) Objects describing the
        entity values.
        """
        self.entity_name = entity_name
        self.created = created
        self.updated = updated
        self.description = description
        self.metadata = metadata
        self.fuzzy_match = fuzzy_match
        self.values = values

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a EntityExport object from a json dictionary."""
        # 'entity' is the wire name; 'entity_name' is accepted as an alias.
        if 'entity' not in _dict and 'entity_name' not in _dict:
            raise ValueError(
                'Required property \'entity\' not present in EntityExport JSON')
        args = {'entity_name': _dict.get('entity') or _dict.get('entity_name')}
        for key in ('created', 'updated'):
            if key in _dict:
                args[key] = string_to_datetime(_dict.get(key))
        for key in ('description', 'metadata', 'fuzzy_match'):
            if key in _dict:
                args[key] = _dict.get(key)
        if 'values' in _dict:
            args['values'] = [
                ValueExport._from_dict(x) for x in _dict['values']
            ]
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'entity_name', None) is not None:
            serialized['entity'] = self.entity_name
        if getattr(self, 'created', None) is not None:
            serialized['created'] = datetime_to_string(self.created)
        if getattr(self, 'updated', None) is not None:
            serialized['updated'] = datetime_to_string(self.updated)
        for key in ('description', 'metadata', 'fuzzy_match'):
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        if getattr(self, 'values', None) is not None:
            serialized['values'] = [x._to_dict() for x in self.values]
        return serialized

    def __str__(self):
        """Return a `str` version of this EntityExport object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class EntityMention(object):
    """
    A contextual entity mention.

    :attr str example_text: The text of the user input example.
    :attr str intent_name: The name of the intent.
    :attr list[int] location: Zero-based character offsets indicating where the
    entity mentions begin and end in the input text.
    """

    def __init__(self, example_text, intent_name, location):
        """
        Initialize a EntityMention object.

        :param str example_text: The text of the user input example.
        :param str intent_name: The name of the intent.
        :param list[int] location: Zero-based character offsets indicating
        where the entity mentions begin and end in the input text.
        """
        self.example_text = example_text
        self.intent_name = intent_name
        self.location = location

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a EntityMention object from a json dictionary."""
        # 'text' and 'intent' are the wire names; the *_name keys are aliases.
        if 'text' not in _dict and 'example_text' not in _dict:
            raise ValueError(
                'Required property \'text\' not present in EntityMention JSON')
        if 'intent' not in _dict and 'intent_name' not in _dict:
            raise ValueError(
                'Required property \'intent\' not present in EntityMention JSON'
            )
        if 'location' not in _dict:
            raise ValueError(
                'Required property \'location\' not present in EntityMention JSON'
            )
        return cls(
            example_text=_dict.get('text') or _dict.get('example_text'),
            intent_name=_dict.get('intent') or _dict.get('intent_name'),
            location=_dict.get('location'))

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'example_text', None) is not None:
            serialized['text'] = self.example_text
        if getattr(self, 'intent_name', None) is not None:
            serialized['intent'] = self.intent_name
        if getattr(self, 'location', None) is not None:
            serialized['location'] = self.location
        return serialized

    def __str__(self):
        """Return a `str` version of this EntityMention object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class EntityMentionCollection(object):
    """
    A paginated list of entity mentions.

    :attr list[EntityMention] examples: Objects describing the entity mentions
    defined for an entity.
    :attr Pagination pagination: The pagination data for the returned objects.
    """

    def __init__(self, examples, pagination):
        """
        Initialize a EntityMentionCollection object.

        :param list[EntityMention] examples: Objects describing the entity
        mentions defined for an entity.
        :param Pagination pagination: The pagination data for the returned
        objects.
        """
        self.examples = examples
        self.pagination = pagination

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a EntityMentionCollection object from a json dictionary."""
        if 'examples' not in _dict:
            raise ValueError(
                'Required property \'examples\' not present in EntityMentionCollection JSON'
            )
        examples = [EntityMention._from_dict(x) for x in _dict.get('examples')]
        if 'pagination' not in _dict:
            raise ValueError(
                'Required property \'pagination\' not present in EntityMentionCollection JSON'
            )
        pagination = Pagination._from_dict(_dict.get('pagination'))
        return cls(examples=examples, pagination=pagination)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'examples', None) is not None:
            serialized['examples'] = [x._to_dict() for x in self.examples]
        if getattr(self, 'pagination', None) is not None:
            serialized['pagination'] = self.pagination._to_dict()
        return serialized

    def __str__(self):
        """Return a `str` version of this EntityMentionCollection object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Example(object):
    """
    Example.

    :attr str example_text: The text of the user input example.
    :attr datetime created: (optional) The timestamp for creation of the
    example.
    :attr datetime updated: (optional) The timestamp for the last update to the
    example.
    :attr list[Mentions] mentions: (optional) An array of contextual entity
    mentions.
    """

    def __init__(self, example_text, created=None, updated=None, mentions=None):
        """
        Initialize a Example object.

        :param str example_text: The text of the user input example.
        :param datetime created: (optional) The timestamp for creation of the
        example.
        :param datetime updated: (optional) The timestamp for the last update
        to the example.
        :param list[Mentions] mentions: (optional) An array of contextual
        entity mentions.
        """
        self.example_text = example_text
        self.created = created
        self.updated = updated
        self.mentions = mentions

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Example object from a json dictionary."""
        # 'text' is the wire name; 'example_text' is accepted as an alias.
        if 'text' not in _dict and 'example_text' not in _dict:
            raise ValueError(
                'Required property \'text\' not present in Example JSON')
        args = {'example_text': _dict.get('text') or _dict.get('example_text')}
        for key in ('created', 'updated'):
            if key in _dict:
                args[key] = string_to_datetime(_dict.get(key))
        if 'mentions' in _dict:
            args['mentions'] = [
                Mentions._from_dict(x) for x in _dict['mentions']
            ]
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'example_text', None) is not None:
            serialized['text'] = self.example_text
        if getattr(self, 'created', None) is not None:
            serialized['created'] = datetime_to_string(self.created)
        if getattr(self, 'updated', None) is not None:
            serialized['updated'] = datetime_to_string(self.updated)
        if getattr(self, 'mentions', None) is not None:
            serialized['mentions'] = [x._to_dict() for x in self.mentions]
        return serialized

    def __str__(self):
        """Return a `str` version of this Example object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ExampleCollection(object):
    """
    A paginated list of user input examples.

    :attr list[Example] examples: Objects describing the examples defined for
    the intent.
    :attr Pagination pagination: The pagination data for the returned objects.
    """

    def __init__(self, examples, pagination):
        """
        Initialize a ExampleCollection object.

        :param list[Example] examples: Objects describing the examples defined
        for the intent.
        :param Pagination pagination: The pagination data for the returned
        objects.
        """
        self.examples = examples
        self.pagination = pagination

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ExampleCollection object from a json dictionary."""
        if 'examples' not in _dict:
            raise ValueError(
                'Required property \'examples\' not present in ExampleCollection JSON'
            )
        examples = [Example._from_dict(x) for x in _dict.get('examples')]
        if 'pagination' not in _dict:
            raise ValueError(
                'Required property \'pagination\' not present in ExampleCollection JSON'
            )
        pagination = Pagination._from_dict(_dict.get('pagination'))
        return cls(examples=examples, pagination=pagination)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'examples', None) is not None:
            serialized['examples'] = [x._to_dict() for x in self.examples]
        if getattr(self, 'pagination', None) is not None:
            serialized['pagination'] = self.pagination._to_dict()
        return serialized

    def __str__(self):
        """Return a `str` version of this ExampleCollection object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class InputData(object):
    """
    An input object that includes the input text.

    Unknown keyword arguments are kept as additional properties and are
    round-tripped through `_to_dict`.

    :attr str text: The text of the user input. This string cannot contain
    carriage return, newline, or tab characters, and it must be no longer than
    2048 characters.
    """

    def __init__(self, text, **kwargs):
        """
        Initialize a InputData object.

        :param str text: The text of the user input. This string cannot
        contain carriage return, newline, or tab characters, and it must be no
        longer than 2048 characters.
        :param **kwargs: (optional) Any additional properties.
        """
        self.text = text
        for extra_name, extra_value in kwargs.items():
            setattr(self, extra_name, extra_value)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a InputData object from a json dictionary."""
        if 'text' not in _dict:
            raise ValueError(
                'Required property \'text\' not present in InputData JSON')
        # Every key other than 'text' becomes an additional property.
        remainder = {k: v for k, v in _dict.items() if k != 'text'}
        return cls(text=_dict.get('text'), **remainder)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'text', None) is not None:
            serialized['text'] = self.text
        for extra_name in getattr(self, '_additionalProperties', ()):
            extra_value = getattr(self, extra_name, None)
            if extra_value is not None:
                serialized[extra_name] = extra_value
        return serialized

    def __setattr__(self, name, value):
        # Track every non-declared attribute so _to_dict can serialize it.
        if not hasattr(self, '_additionalProperties'):
            super(InputData, self).__setattr__('_additionalProperties', set())
        if name != 'text':
            self._additionalProperties.add(name)
        super(InputData, self).__setattr__(name, value)

    def __str__(self):
        """Return a `str` version of this InputData object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Intent(object):
    """
    Intent.

    :attr str intent_name: The name of the intent.
    :attr datetime created: (optional) The timestamp for creation of the
    intent.
    :attr datetime updated: (optional) The timestamp for the last update to the
    intent.
    :attr str description: (optional) The description of the intent.
    """

    def __init__(self, intent_name, created=None, updated=None,
                 description=None):
        """
        Initialize a Intent object.

        :param str intent_name: The name of the intent.
        :param datetime created: (optional) The timestamp for creation of the
        intent.
        :param datetime updated: (optional) The timestamp for the last update
        to the intent.
        :param str description: (optional) The description of the intent.
        """
        self.intent_name = intent_name
        self.created = created
        self.updated = updated
        self.description = description

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Intent object from a json dictionary."""
        # 'intent' is the wire name; 'intent_name' is accepted as an alias.
        if 'intent' not in _dict and 'intent_name' not in _dict:
            raise ValueError(
                'Required property \'intent\' not present in Intent JSON')
        args = {'intent_name': _dict.get('intent') or _dict.get('intent_name')}
        for key in ('created', 'updated'):
            if key in _dict:
                args[key] = string_to_datetime(_dict.get(key))
        if 'description' in _dict:
            args['description'] = _dict.get('description')
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'intent_name', None) is not None:
            serialized['intent'] = self.intent_name
        if getattr(self, 'created', None) is not None:
            serialized['created'] = datetime_to_string(self.created)
        if getattr(self, 'updated', None) is not None:
            serialized['updated'] = datetime_to_string(self.updated)
        if getattr(self, 'description', None) is not None:
            serialized['description'] = self.description
        return serialized

    def __str__(self):
        """Return a `str` version of this Intent object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class IntentCollection(object):
    """
    A paginated list of intents.

    :attr list[IntentExport] intents: Objects describing the intents defined
    for the workspace.
    :attr Pagination pagination: The pagination data for the returned objects.
    """

    def __init__(self, intents, pagination):
        """
        Initialize a IntentCollection object.

        :param list[IntentExport] intents: Objects describing the intents
        defined for the workspace.
        :param Pagination pagination: The pagination data for the returned
        objects.
        """
        self.intents = intents
        self.pagination = pagination

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a IntentCollection object from a json dictionary."""
        if 'intents' not in _dict:
            raise ValueError(
                'Required property \'intents\' not present in IntentCollection JSON'
            )
        intents = [IntentExport._from_dict(x) for x in _dict.get('intents')]
        if 'pagination' not in _dict:
            raise ValueError(
                'Required property \'pagination\' not present in IntentCollection JSON'
            )
        pagination = Pagination._from_dict(_dict.get('pagination'))
        return cls(intents=intents, pagination=pagination)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'intents', None) is not None:
            serialized['intents'] = [x._to_dict() for x in self.intents]
        if getattr(self, 'pagination', None) is not None:
            serialized['pagination'] = self.pagination._to_dict()
        return serialized

    def __str__(self):
        """Return a `str` version of this IntentCollection object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class IntentExport(object):
    """
    An intent together with its user input examples.

    :attr str intent_name: The name of the intent.
    :attr datetime created: (optional) The timestamp for creation of the
    intent.
    :attr datetime updated: (optional) The timestamp for the last update to the
    intent.
    :attr str description: (optional) The description of the intent.
    :attr list[Example] examples: (optional) Objects describing the user input
    examples for the intent.
    """

    def __init__(self, intent_name, created=None, updated=None,
                 description=None, examples=None):
        """
        Initialize a IntentExport object.

        :param str intent_name: The name of the intent.
        :param datetime created: (optional) The timestamp for creation of the
        intent.
        :param datetime updated: (optional) The timestamp for the last update
        to the intent.
        :param str description: (optional) The description of the intent.
        :param list[Example] examples: (optional) Objects describing the user
        input examples for the intent.
        """
        self.intent_name = intent_name
        self.created = created
        self.updated = updated
        self.description = description
        self.examples = examples

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a IntentExport object from a json dictionary."""
        # 'intent' is the wire name; 'intent_name' is accepted as an alias.
        if 'intent' not in _dict and 'intent_name' not in _dict:
            raise ValueError(
                'Required property \'intent\' not present in IntentExport JSON')
        args = {'intent_name': _dict.get('intent') or _dict.get('intent_name')}
        for key in ('created', 'updated'):
            if key in _dict:
                args[key] = string_to_datetime(_dict.get(key))
        if 'description' in _dict:
            args['description'] = _dict.get('description')
        if 'examples' in _dict:
            args['examples'] = [
                Example._from_dict(x) for x in _dict['examples']
            ]
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'intent_name', None) is not None:
            serialized['intent'] = self.intent_name
        if getattr(self, 'created', None) is not None:
            serialized['created'] = datetime_to_string(self.created)
        if getattr(self, 'updated', None) is not None:
            serialized['updated'] = datetime_to_string(self.updated)
        if getattr(self, 'description', None) is not None:
            serialized['description'] = self.description
        if getattr(self, 'examples', None) is not None:
            serialized['examples'] = [x._to_dict() for x in self.examples]
        return serialized

    def __str__(self):
        """Return a `str` version of this IntentExport object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class LogCollection(object):
    """
    A paginated list of log events.

    :attr list[LogExport] logs: Objects describing log events.
    :attr LogPagination pagination: The pagination data for the returned
    objects.
    """

    def __init__(self, logs, pagination):
        """
        Initialize a LogCollection object.

        :param list[LogExport] logs: Objects describing log events.
        :param LogPagination pagination: The pagination data for the returned
        objects.
        """
        self.logs = logs
        self.pagination = pagination

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a LogCollection object from a json dictionary."""
        if 'logs' not in _dict:
            raise ValueError(
                'Required property \'logs\' not present in LogCollection JSON')
        logs = [LogExport._from_dict(x) for x in _dict.get('logs')]
        if 'pagination' not in _dict:
            raise ValueError(
                'Required property \'pagination\' not present in LogCollection JSON'
            )
        pagination = LogPagination._from_dict(_dict.get('pagination'))
        return cls(logs=logs, pagination=pagination)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'logs', None) is not None:
            serialized['logs'] = [x._to_dict() for x in self.logs]
        if getattr(self, 'pagination', None) is not None:
            serialized['pagination'] = self.pagination._to_dict()
        return serialized

    def __str__(self):
        """Return a `str` version of this LogCollection object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class LogExport(object):
    """
    A single logged request/response exchange with a workspace.

    :attr MessageRequest request: A request sent to the workspace, including
    the user input and context.
    :attr MessageResponse response: The response sent by the workspace,
    including the output text, detected intents and entities, and context.
    :attr str log_id: A unique identifier for the logged event.
    :attr str request_timestamp: The timestamp for receipt of the message.
    :attr str response_timestamp: The timestamp for the system response to the
    message.
    :attr str workspace_id: The unique identifier of the workspace where the
    request was made.
    :attr str language: The language of the workspace where the message request
    was made.
    """

    def __init__(self, request, response, log_id, request_timestamp,
                 response_timestamp, workspace_id, language):
        """
        Initialize a LogExport object.

        :param MessageRequest request: A request sent to the workspace,
        including the user input and context.
        :param MessageResponse response: The response sent by the workspace,
        including the output text, detected intents and entities, and context.
        :param str log_id: A unique identifier for the logged event.
        :param str request_timestamp: The timestamp for receipt of the message.
        :param str response_timestamp: The timestamp for the system response to
        the message.
        :param str workspace_id: The unique identifier of the workspace where
        the request was made.
        :param str language: The language of the workspace where the message
        request was made.
        """
        self.request = request
        self.response = response
        self.log_id = log_id
        self.request_timestamp = request_timestamp
        self.response_timestamp = response_timestamp
        self.workspace_id = workspace_id
        self.language = language

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a LogExport object from a json dictionary."""
        if 'request' not in _dict:
            raise ValueError(
                'Required property \'request\' not present in LogExport JSON')
        args = {'request': MessageRequest._from_dict(_dict.get('request'))}
        if 'response' not in _dict:
            raise ValueError(
                'Required property \'response\' not present in LogExport JSON')
        args['response'] = MessageResponse._from_dict(_dict.get('response'))
        # The remaining required properties are plain values copied verbatim.
        for key in ('log_id', 'request_timestamp', 'response_timestamp',
                    'workspace_id', 'language'):
            if key not in _dict:
                raise ValueError(
                    'Required property \'%s\' not present in LogExport JSON' %
                    key)
            args[key] = _dict.get(key)
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'request', None) is not None:
            serialized['request'] = self.request._to_dict()
        if getattr(self, 'response', None) is not None:
            serialized['response'] = self.response._to_dict()
        for key in ('log_id', 'request_timestamp', 'response_timestamp',
                    'workspace_id', 'language'):
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        return serialized

    def __str__(self):
        """Return a `str` version of this LogExport object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class LogMessage(object):
    """
    Log message details.

    :attr str level: The severity of the log message.
    :attr str msg: The text of the log message.

    Unknown keys from the service are preserved as dynamic attributes and
    round-tripped by _to_dict().
    """

    def __init__(self, level, msg, **kwargs):
        """
        Initialize a LogMessage object.

        :param str level: The severity of the log message.
        :param str msg: The text of the log message.
        :param **kwargs: (optional) Any additional properties.
        """
        self.level = level
        self.msg = msg
        # Dynamic attributes route through __setattr__, which records their
        # names in _additionalProperties so _to_dict can serialize them.
        for extra_name, extra_value in kwargs.items():
            setattr(self, extra_name, extra_value)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a LogMessage object from a json dictionary."""
        remaining = dict(_dict)
        if 'level' not in remaining:
            raise ValueError(
                'Required property \'level\' not present in LogMessage JSON')
        if 'msg' not in remaining:
            raise ValueError(
                'Required property \'msg\' not present in LogMessage JSON')
        args = {'level': remaining.pop('level'), 'msg': remaining.pop('msg')}
        # Whatever is left over becomes additional properties.
        args.update(remaining)
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        result = {}
        for prop in ('level', 'msg'):
            value = getattr(self, prop, None)
            if value is not None:
                result[prop] = value
        for extra in getattr(self, '_additionalProperties', ()):
            extra_value = getattr(self, extra, None)
            if extra_value is not None:
                result[extra] = extra_value
        return result

    def __setattr__(self, name, value):
        """Record names outside the declared schema in _additionalProperties."""
        known_properties = {'level', 'msg'}
        if not hasattr(self, '_additionalProperties'):
            super(LogMessage, self).__setattr__('_additionalProperties', set())
        if name not in known_properties:
            self._additionalProperties.add(name)
        super(LogMessage, self).__setattr__(name, value)

    def __str__(self):
        """Return a `str` version of this LogMessage object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class LogPagination(object):
    """
    The pagination data for the returned objects.

    :attr str next_url: (optional) The URL that will return the next page of
    results, if any.
    :attr int matched: (optional) Reserved for future use.
    :attr str next_cursor: (optional) A token identifying the next page of
    results.
    """

    def __init__(self, next_url=None, matched=None, next_cursor=None):
        """
        Initialize a LogPagination object.

        :param str next_url: (optional) The URL that will return the next page
        of results, if any.
        :param int matched: (optional) Reserved for future use.
        :param str next_cursor: (optional) A token identifying the next page
        of results.
        """
        self.next_url = next_url
        self.matched = matched
        self.next_cursor = next_cursor

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a LogPagination object from a json dictionary."""
        # All fields are optional: forward only the keys actually present.
        kwargs = {
            key: _dict.get(key)
            for key in ('next_url', 'matched', 'next_cursor') if key in _dict
        }
        return cls(**kwargs)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        result = {}
        for key in ('next_url', 'matched', 'next_cursor'):
            value = getattr(self, key, None)
            if value is not None:
                result[key] = value
        return result

    def __str__(self):
        """Return a `str` version of this LogPagination object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Mentions(object):
    """
    A mention of a contextual entity.

    :attr str entity: The name of the entity.
    :attr list[int] location: An array of zero-based character offsets that
    indicate where the entity mentions begin and end in the input text.
    """

    def __init__(self, entity, location):
        """
        Initialize a Mentions object.

        :param str entity: The name of the entity.
        :param list[int] location: An array of zero-based character offsets
        that indicate where the entity mentions begin and end in the input
        text.
        """
        self.entity = entity
        self.location = location

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Mentions object from a json dictionary."""
        args = {}
        # Both properties are required by the schema.
        for key in ('entity', 'location'):
            if key not in _dict:
                raise ValueError(
                    'Required property \'{0}\' not present in Mentions JSON'.
                    format(key))
            args[key] = _dict.get(key)
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return {
            key: getattr(self, key)
            for key in ('entity', 'location')
            if getattr(self, key, None) is not None
        }

    def __str__(self):
        """Return a `str` version of this Mentions object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class MessageContextMetadata(object):
    """
    Metadata related to the message.

    :attr str deployment: (optional) A label identifying the deployment
    environment, used for filtering log data. This string cannot contain
    carriage return, newline, or tab characters.
    :attr str user_id: (optional) A string value that identifies the user who
    is interacting with the workspace. The client must provide a unique
    identifier for each individual end user who accesses the application. For
    Plus and Premium plans, this user ID is used to identify unique users for
    billing purposes. This string cannot contain carriage return, newline, or
    tab characters.
    """

    def __init__(self, deployment=None, user_id=None):
        """
        Initialize a MessageContextMetadata object.

        :param str deployment: (optional) A label identifying the deployment
        environment, used for filtering log data.
        :param str user_id: (optional) A string value that identifies the user
        who is interacting with the workspace; must be unique per end user.
        """
        self.deployment = deployment
        self.user_id = user_id

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a MessageContextMetadata object from a json dictionary."""
        # Both fields are optional; pass through only what is present.
        return cls(**{
            key: _dict[key]
            for key in ('deployment', 'user_id') if key in _dict
        })

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return {
            key: getattr(self, key)
            for key in ('deployment', 'user_id')
            if getattr(self, key, None) is not None
        }

    def __str__(self):
        """Return a `str` version of this MessageContextMetadata object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class MessageInput(object):
    """
    The text of the user input.

    :attr str text: (optional) The user's input.
    """

    def __init__(self, text=None):
        """
        Initialize a MessageInput object.

        :param str text: (optional) The user's input.
        """
        self.text = text

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a MessageInput object from a json dictionary."""
        # 'text' is the only (optional) property.
        return cls(text=_dict['text']) if 'text' in _dict else cls()

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        if getattr(self, 'text', None) is not None:
            return {'text': self.text}
        return {}

    def __str__(self):
        """Return a `str` version of this MessageInput object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class MessageRequest(object):
    """
    A request sent to the workspace, including the user input and context.

    :attr InputData input: (optional) An input object that includes the input
    text.
    :attr bool alternate_intents: (optional) Whether to return more than one
    intent. Set to `true` to return all matching intents.
    :attr Context context: (optional) State information for the conversation.
    To maintain state, include the context from the previous response.
    :attr list[RuntimeEntity] entities: (optional) Entities to use when
    evaluating the message. Include entities from the previous response to
    continue using those entities rather than detecting entities in the new
    input.
    :attr list[RuntimeIntent] intents: (optional) Intents to use when
    evaluating the user input. Include intents from the previous response to
    continue using those intents rather than trying to recognize intents in
    the new input.
    :attr OutputData output: (optional) An output object that includes the
    response to the user, the dialog nodes that were triggered, and messages
    from the log.
    """

    def __init__(self,
                 input=None,
                 alternate_intents=None,
                 context=None,
                 entities=None,
                 intents=None,
                 output=None):
        """
        Initialize a MessageRequest object.

        :param InputData input: (optional) An input object that includes the
        input text.
        :param bool alternate_intents: (optional) Whether to return more than
        one intent. Set to `true` to return all matching intents.
        :param Context context: (optional) State information for the
        conversation, carried over from the previous response.
        :param list[RuntimeEntity] entities: (optional) Entities to use when
        evaluating the message.
        :param list[RuntimeIntent] intents: (optional) Intents to use when
        evaluating the user input.
        :param OutputData output: (optional) An output object with the
        response to the user, triggered dialog nodes, and log messages.
        """
        self.input = input
        self.alternate_intents = alternate_intents
        self.context = context
        self.entities = entities
        self.intents = intents
        self.output = output

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a MessageRequest object from a json dictionary."""
        args = {}
        # Every property is optional; nested models are deserialized through
        # their own _from_dict classmethods.
        if 'input' in _dict:
            args['input'] = InputData._from_dict(_dict['input'])
        if 'alternate_intents' in _dict:
            args['alternate_intents'] = _dict['alternate_intents']
        if 'context' in _dict:
            args['context'] = Context._from_dict(_dict['context'])
        if 'entities' in _dict:
            args['entities'] = [
                RuntimeEntity._from_dict(item) for item in _dict['entities']
            ]
        if 'intents' in _dict:
            args['intents'] = [
                RuntimeIntent._from_dict(item) for item in _dict['intents']
            ]
        if 'output' in _dict:
            args['output'] = OutputData._from_dict(_dict['output'])
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'input', None) is not None:
            result['input'] = self.input._to_dict()
        if getattr(self, 'alternate_intents', None) is not None:
            result['alternate_intents'] = self.alternate_intents
        if getattr(self, 'context', None) is not None:
            result['context'] = self.context._to_dict()
        if getattr(self, 'entities', None) is not None:
            result['entities'] = [item._to_dict() for item in self.entities]
        if getattr(self, 'intents', None) is not None:
            result['intents'] = [item._to_dict() for item in self.intents]
        if getattr(self, 'output', None) is not None:
            result['output'] = self.output._to_dict()
        return result

    def __str__(self):
        """Return a `str` version of this MessageRequest object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class MessageResponse(object):
    """
    The response sent by the workspace, including the output text, detected
    intents and entities, and context.

    :attr MessageInput input: (optional) The text of the user input.
    :attr list[RuntimeIntent] intents: An array of intents recognized in the
    user input, sorted in descending order of confidence.
    :attr list[RuntimeEntity] entities: An array of entities identified in the
    user input.
    :attr bool alternate_intents: (optional) Whether to return more than one
    intent. A value of `true` indicates that all matching intents are
    returned.
    :attr Context context: State information for the conversation. To maintain
    state, include the context from the previous response.
    :attr OutputData output: An output object that includes the response to
    the user, the dialog nodes that were triggered, and messages from the log.
    :attr list[DialogNodeAction] actions: (optional) An array of objects
    describing any actions requested by the dialog node.
    """

    def __init__(self,
                 intents,
                 entities,
                 context,
                 output,
                 input=None,
                 alternate_intents=None,
                 actions=None):
        """
        Initialize a MessageResponse object.

        :param list[RuntimeIntent] intents: Intents recognized in the user
        input, sorted in descending order of confidence.
        :param list[RuntimeEntity] entities: Entities identified in the user
        input.
        :param Context context: State information for the conversation.
        :param OutputData output: The response to the user, the dialog nodes
        that were triggered, and messages from the log.
        :param MessageInput input: (optional) The text of the user input.
        :param bool alternate_intents: (optional) Whether more than one intent
        is returned.
        :param list[DialogNodeAction] actions: (optional) Actions requested by
        the dialog node.
        """
        self.input = input
        self.intents = intents
        self.entities = entities
        self.alternate_intents = alternate_intents
        self.context = context
        self.output = output
        self.actions = actions

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a MessageResponse object from a json dictionary."""
        # Validate required keys up front, in schema order, so missing-field
        # errors surface before any nested deserialization work is done.
        for required in ('intents', 'entities', 'context', 'output'):
            if required not in _dict:
                raise ValueError('Required property \'' + required +
                                 '\' not present in MessageResponse JSON')
        args = {
            'intents': [
                RuntimeIntent._from_dict(item) for item in _dict['intents']
            ],
            'entities': [
                RuntimeEntity._from_dict(item) for item in _dict['entities']
            ],
            'context': Context._from_dict(_dict['context']),
            'output': OutputData._from_dict(_dict['output']),
        }
        if 'input' in _dict:
            args['input'] = MessageInput._from_dict(_dict['input'])
        if 'alternate_intents' in _dict:
            args['alternate_intents'] = _dict['alternate_intents']
        if 'actions' in _dict:
            args['actions'] = [
                DialogNodeAction._from_dict(item) for item in _dict['actions']
            ]
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'input', None) is not None:
            result['input'] = self.input._to_dict()
        if getattr(self, 'intents', None) is not None:
            result['intents'] = [item._to_dict() for item in self.intents]
        if getattr(self, 'entities', None) is not None:
            result['entities'] = [item._to_dict() for item in self.entities]
        if getattr(self, 'alternate_intents', None) is not None:
            result['alternate_intents'] = self.alternate_intents
        if getattr(self, 'context', None) is not None:
            result['context'] = self.context._to_dict()
        if getattr(self, 'output', None) is not None:
            result['output'] = self.output._to_dict()
        if getattr(self, 'actions', None) is not None:
            result['actions'] = [item._to_dict() for item in self.actions]
        return result

    def __str__(self):
        """Return a `str` version of this MessageResponse object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class OutputData(object):
    """
    An output object that includes the response to the user, the dialog nodes that were
    triggered, and messages from the log.
    :attr list[LogMessage] log_messages: An array of up to 50 messages logged with the
    request.
    :attr list[str] text: An array of responses to the user.
    :attr list[DialogRuntimeResponseGeneric] generic: (optional) Output intended for any
    channel. It is the responsibility of the client application to implement the supported
    response types.
    :attr list[str] nodes_visited: (optional) An array of the nodes that were triggered to
    create the response, in the order in which they were visited. This information is
    useful for debugging and for tracing the path taken through the node tree.
    :attr list[DialogNodeVisitedDetails] nodes_visited_details: (optional) An array of
    objects containing detailed diagnostic information about the nodes that were triggered
    during processing of the input message. Included only if **nodes_visited_details** is
    set to `true` in the message request.
    Unknown keys received from the service are kept as dynamic attributes (tracked
    via the custom __setattr__ below) and round-tripped by _to_dict().
    """
    def __init__(self,
                 log_messages,
                 text,
                 generic=None,
                 nodes_visited=None,
                 nodes_visited_details=None,
                 **kwargs):
        """
        Initialize a OutputData object.
        :param list[LogMessage] log_messages: An array of up to 50 messages logged with
        the request.
        :param list[str] text: An array of responses to the user.
        :param list[DialogRuntimeResponseGeneric] generic: (optional) Output intended for
        any channel. It is the responsibility of the client application to implement the
        supported response types.
        :param list[str] nodes_visited: (optional) An array of the nodes that were
        triggered to create the response, in the order in which they were visited. This
        information is useful for debugging and for tracing the path taken through the
        node tree.
        :param list[DialogNodeVisitedDetails] nodes_visited_details: (optional) An array
        of objects containing detailed diagnostic information about the nodes that were
        triggered during processing of the input message. Included only if
        **nodes_visited_details** is set to `true` in the message request.
        :param **kwargs: (optional) Any additional properties.
        """
        self.log_messages = log_messages
        self.text = text
        self.generic = generic
        self.nodes_visited = nodes_visited
        self.nodes_visited_details = nodes_visited_details
        # Extra keyword arguments become dynamic attributes; assignment goes
        # through __setattr__, which records their names in
        # _additionalProperties so _to_dict can serialize them later.
        for _key, _value in kwargs.items():
            setattr(self, _key, _value)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a OutputData object from a json dictionary.

        :raises ValueError: if 'log_messages' or 'text' is missing.
        """
        args = {}
        # xtra starts as a copy of the input; each recognized key is deleted
        # below, so the leftovers are exactly the unrecognized properties and
        # are forwarded to __init__ as **kwargs.
        xtra = _dict.copy()
        if 'log_messages' in _dict:
            args['log_messages'] = [
                LogMessage._from_dict(x) for x in (_dict.get('log_messages'))
            ]
            del xtra['log_messages']
        else:
            raise ValueError(
                'Required property \'log_messages\' not present in OutputData JSON'
            )
        if 'text' in _dict:
            args['text'] = _dict.get('text')
            del xtra['text']
        else:
            raise ValueError(
                'Required property \'text\' not present in OutputData JSON')
        if 'generic' in _dict:
            args['generic'] = [
                DialogRuntimeResponseGeneric._from_dict(x)
                for x in (_dict.get('generic'))
            ]
            del xtra['generic']
        if 'nodes_visited' in _dict:
            args['nodes_visited'] = _dict.get('nodes_visited')
            del xtra['nodes_visited']
        if 'nodes_visited_details' in _dict:
            args['nodes_visited_details'] = [
                DialogNodeVisitedDetails._from_dict(x)
                for x in (_dict.get('nodes_visited_details'))
            ]
            del xtra['nodes_visited_details']
        args.update(xtra)
        return cls(**args)
    def _to_dict(self):
        """Return a json dictionary representing this model.

        Only attributes that are set and non-None are emitted, including any
        dynamic attributes recorded in _additionalProperties.
        """
        _dict = {}
        if hasattr(self, 'log_messages') and self.log_messages is not None:
            _dict['log_messages'] = [x._to_dict() for x in self.log_messages]
        if hasattr(self, 'text') and self.text is not None:
            _dict['text'] = self.text
        if hasattr(self, 'generic') and self.generic is not None:
            _dict['generic'] = [x._to_dict() for x in self.generic]
        if hasattr(self, 'nodes_visited') and self.nodes_visited is not None:
            _dict['nodes_visited'] = self.nodes_visited
        if hasattr(self, 'nodes_visited_details'
                  ) and self.nodes_visited_details is not None:
            _dict['nodes_visited_details'] = [
                x._to_dict() for x in self.nodes_visited_details
            ]
        if hasattr(self, '_additionalProperties'):
            for _key in self._additionalProperties:
                _value = getattr(self, _key, None)
                if _value is not None:
                    _dict[_key] = _value
        return _dict
    def __setattr__(self, name, value):
        # Any attribute outside the declared schema is remembered in
        # _additionalProperties (created lazily on first assignment) so it
        # can be serialized by _to_dict.
        properties = {
            'log_messages', 'text', 'generic', 'nodes_visited',
            'nodes_visited_details'
        }
        if not hasattr(self, '_additionalProperties'):
            super(OutputData, self).__setattr__('_additionalProperties', set())
        if name not in properties:
            self._additionalProperties.add(name)
        super(OutputData, self).__setattr__(name, value)
    def __str__(self):
        """Return a `str` version of this OutputData object."""
        return json.dumps(self._to_dict(), indent=2)
    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Pagination(object):
    """
    The pagination data for the returned objects.

    :attr str refresh_url: The URL that will return the same page of results.
    :attr str next_url: (optional) The URL that will return the next page of
    results.
    :attr int total: (optional) Reserved for future use.
    :attr int matched: (optional) Reserved for future use.
    :attr str refresh_cursor: (optional) A token identifying the current page
    of results.
    :attr str next_cursor: (optional) A token identifying the next page of
    results.
    """

    def __init__(self,
                 refresh_url,
                 next_url=None,
                 total=None,
                 matched=None,
                 refresh_cursor=None,
                 next_cursor=None):
        """
        Initialize a Pagination object.

        :param str refresh_url: The URL that will return the same page of
        results.
        :param str next_url: (optional) The URL that will return the next page
        of results.
        :param int total: (optional) Reserved for future use.
        :param int matched: (optional) Reserved for future use.
        :param str refresh_cursor: (optional) A token identifying the current
        page of results.
        :param str next_cursor: (optional) A token identifying the next page
        of results.
        """
        self.refresh_url = refresh_url
        self.next_url = next_url
        self.total = total
        self.matched = matched
        self.refresh_cursor = refresh_cursor
        self.next_cursor = next_cursor

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Pagination object from a json dictionary."""
        # 'refresh_url' is the only required property.
        if 'refresh_url' not in _dict:
            raise ValueError(
                'Required property \'refresh_url\' not present in Pagination JSON'
            )
        args = {'refresh_url': _dict.get('refresh_url')}
        for key in ('next_url', 'total', 'matched', 'refresh_cursor',
                    'next_cursor'):
            if key in _dict:
                args[key] = _dict[key]
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        result = {}
        for key in ('refresh_url', 'next_url', 'total', 'matched',
                    'refresh_cursor', 'next_cursor'):
            value = getattr(self, key, None)
            if value is not None:
                result[key] = value
        return result

    def __str__(self):
        """Return a `str` version of this Pagination object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class RuntimeEntity(object):
    """
    A term from the request that was identified as an entity.
    :attr str entity: An entity detected in the input.
    :attr list[int] location: An array of zero-based character offsets that indicate where
    the detected entity values begin and end in the input text.
    :attr str value: The term in the input text that was recognized as an entity value.
    :attr float confidence: (optional) A decimal percentage that represents Watson's
    confidence in the entity.
    :attr object metadata: (optional) Any metadata for the entity.
    :attr list[CaptureGroup] groups: (optional) The recognized capture groups for the
    entity, as defined by the entity pattern.
    Unknown keys received from the service are kept as dynamic attributes (tracked
    via the custom __setattr__ below) and round-tripped by _to_dict().
    """
    def __init__(self,
                 entity,
                 location,
                 value,
                 confidence=None,
                 metadata=None,
                 groups=None,
                 **kwargs):
        """
        Initialize a RuntimeEntity object.
        :param str entity: An entity detected in the input.
        :param list[int] location: An array of zero-based character offsets that indicate
        where the detected entity values begin and end in the input text.
        :param str value: The term in the input text that was recognized as an entity
        value.
        :param float confidence: (optional) A decimal percentage that represents Watson's
        confidence in the entity.
        :param object metadata: (optional) Any metadata for the entity.
        :param list[CaptureGroup] groups: (optional) The recognized capture groups for the
        entity, as defined by the entity pattern.
        :param **kwargs: (optional) Any additional properties.
        """
        self.entity = entity
        self.location = location
        self.value = value
        self.confidence = confidence
        self.metadata = metadata
        self.groups = groups
        # Extra keyword arguments become dynamic attributes; assignment goes
        # through __setattr__, which records their names in
        # _additionalProperties so _to_dict can serialize them later.
        for _key, _value in kwargs.items():
            setattr(self, _key, _value)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RuntimeEntity object from a json dictionary.

        :raises ValueError: if 'entity', 'location' or 'value' is missing.
        """
        args = {}
        # xtra starts as a copy of the input; each recognized key is deleted
        # below, so the leftovers are exactly the unrecognized properties and
        # are forwarded to __init__ as **kwargs.
        xtra = _dict.copy()
        if 'entity' in _dict:
            args['entity'] = _dict.get('entity')
            del xtra['entity']
        else:
            raise ValueError(
                'Required property \'entity\' not present in RuntimeEntity JSON'
            )
        if 'location' in _dict:
            args['location'] = _dict.get('location')
            del xtra['location']
        else:
            raise ValueError(
                'Required property \'location\' not present in RuntimeEntity JSON'
            )
        if 'value' in _dict:
            args['value'] = _dict.get('value')
            del xtra['value']
        else:
            raise ValueError(
                'Required property \'value\' not present in RuntimeEntity JSON')
        if 'confidence' in _dict:
            args['confidence'] = _dict.get('confidence')
            del xtra['confidence']
        if 'metadata' in _dict:
            args['metadata'] = _dict.get('metadata')
            del xtra['metadata']
        if 'groups' in _dict:
            args['groups'] = [
                CaptureGroup._from_dict(x) for x in (_dict.get('groups'))
            ]
            del xtra['groups']
        args.update(xtra)
        return cls(**args)
    def _to_dict(self):
        """Return a json dictionary representing this model.

        Only attributes that are set and non-None are emitted, including any
        dynamic attributes recorded in _additionalProperties.
        """
        _dict = {}
        if hasattr(self, 'entity') and self.entity is not None:
            _dict['entity'] = self.entity
        if hasattr(self, 'location') and self.location is not None:
            _dict['location'] = self.location
        if hasattr(self, 'value') and self.value is not None:
            _dict['value'] = self.value
        if hasattr(self, 'confidence') and self.confidence is not None:
            _dict['confidence'] = self.confidence
        if hasattr(self, 'metadata') and self.metadata is not None:
            _dict['metadata'] = self.metadata
        if hasattr(self, 'groups') and self.groups is not None:
            _dict['groups'] = [x._to_dict() for x in self.groups]
        if hasattr(self, '_additionalProperties'):
            for _key in self._additionalProperties:
                _value = getattr(self, _key, None)
                if _value is not None:
                    _dict[_key] = _value
        return _dict
    def __setattr__(self, name, value):
        # Any attribute outside the declared schema is remembered in
        # _additionalProperties (created lazily on first assignment) so it
        # can be serialized by _to_dict.
        properties = {
            'entity', 'location', 'value', 'confidence', 'metadata', 'groups'
        }
        if not hasattr(self, '_additionalProperties'):
            super(RuntimeEntity, self).__setattr__('_additionalProperties',
                                                   set())
        if name not in properties:
            self._additionalProperties.add(name)
        super(RuntimeEntity, self).__setattr__(name, value)
    def __str__(self):
        """Return a `str` version of this RuntimeEntity object."""
        return json.dumps(self._to_dict(), indent=2)
    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class RuntimeIntent(object):
    """
    An intent identified in the user input.

    :attr str intent: The name of the recognized intent.
    :attr float confidence: A decimal percentage that represents Watson's
    confidence in the intent.

    Unknown keys from the service are preserved as dynamic attributes and
    round-tripped by _to_dict().
    """

    def __init__(self, intent, confidence, **kwargs):
        """
        Initialize a RuntimeIntent object.

        :param str intent: The name of the recognized intent.
        :param float confidence: A decimal percentage that represents Watson's
        confidence in the intent.
        :param **kwargs: (optional) Any additional properties.
        """
        self.intent = intent
        self.confidence = confidence
        # Dynamic attributes route through __setattr__, which records their
        # names in _additionalProperties so _to_dict can serialize them.
        for extra_name, extra_value in kwargs.items():
            setattr(self, extra_name, extra_value)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RuntimeIntent object from a json dictionary."""
        remaining = dict(_dict)
        if 'intent' not in remaining:
            raise ValueError(
                'Required property \'intent\' not present in RuntimeIntent JSON'
            )
        if 'confidence' not in remaining:
            raise ValueError(
                'Required property \'confidence\' not present in RuntimeIntent JSON'
            )
        args = {
            'intent': remaining.pop('intent'),
            'confidence': remaining.pop('confidence'),
        }
        # Whatever is left over becomes additional properties.
        args.update(remaining)
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        result = {}
        for prop in ('intent', 'confidence'):
            value = getattr(self, prop, None)
            if value is not None:
                result[prop] = value
        for extra in getattr(self, '_additionalProperties', ()):
            extra_value = getattr(self, extra, None)
            if extra_value is not None:
                result[extra] = extra_value
        return result

    def __setattr__(self, name, value):
        """Record names outside the declared schema in _additionalProperties."""
        known_properties = {'intent', 'confidence'}
        if not hasattr(self, '_additionalProperties'):
            super(RuntimeIntent, self).__setattr__('_additionalProperties',
                                                   set())
        if name not in known_properties:
            self._additionalProperties.add(name)
        super(RuntimeIntent, self).__setattr__(name, value)

    def __str__(self):
        """Return a `str` version of this RuntimeIntent object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Synonym(object):
    """
    Synonym.

    :attr str synonym_text: The text of the synonym.
    :attr datetime created: (optional) The timestamp for creation of the synonym.
    :attr datetime updated: (optional) The timestamp for the most recent update to the
    synonym.
    """

    def __init__(self, synonym_text, created=None, updated=None):
        """
        Initialize a Synonym object.

        :param str synonym_text: The text of the synonym.
        :param datetime created: (optional) The timestamp for creation of the synonym.
        :param datetime updated: (optional) The timestamp for the most recent update to
        the synonym.
        """
        self.synonym_text = synonym_text
        self.created = created
        self.updated = updated

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Synonym object from a json dictionary.

        Accepts either the wire key ('synonym') or the attribute name
        ('synonym_text') for the required text property.
        """
        args = {}
        if 'synonym' in _dict or 'synonym_text' in _dict:
            # Prefer the wire key when present. A `dict.get` default (rather
            # than the previous `a or b`) keeps falsy-but-valid text such as
            # '' from silently collapsing to the fallback/None.
            args['synonym_text'] = _dict.get('synonym',
                                             _dict.get('synonym_text'))
        else:
            raise ValueError(
                'Required property \'synonym\' not present in Synonym JSON')
        if 'created' in _dict:
            args['created'] = string_to_datetime(_dict.get('created'))
        if 'updated' in _dict:
            args['updated'] = string_to_datetime(_dict.get('updated'))
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'synonym_text') and self.synonym_text is not None:
            _dict['synonym'] = self.synonym_text
        if hasattr(self, 'created') and self.created is not None:
            _dict['created'] = datetime_to_string(self.created)
        if hasattr(self, 'updated') and self.updated is not None:
            _dict['updated'] = datetime_to_string(self.updated)
        return _dict

    def __str__(self):
        """Return a `str` version of this Synonym object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class SynonymCollection(object):
    """
    A page of synonyms together with its pagination data.

    :attr list[Synonym] synonyms: An array of synonyms.
    :attr Pagination pagination: The pagination data for the returned objects.
    """

    def __init__(self, synonyms, pagination):
        """
        Initialize a SynonymCollection object.

        :param list[Synonym] synonyms: An array of synonyms.
        :param Pagination pagination: The pagination data for the returned objects.
        """
        self.synonyms = synonyms
        self.pagination = pagination

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a SynonymCollection object from a json dictionary."""
        if 'synonyms' not in _dict:
            raise ValueError(
                'Required property \'synonyms\' not present in SynonymCollection JSON'
            )
        synonyms = [
            Synonym._from_dict(item) for item in _dict.get('synonyms')
        ]
        if 'pagination' not in _dict:
            raise ValueError(
                'Required property \'pagination\' not present in SynonymCollection JSON'
            )
        pagination = Pagination._from_dict(_dict.get('pagination'))
        return cls(synonyms=synonyms, pagination=pagination)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        output = {}
        if getattr(self, 'synonyms', None) is not None:
            output['synonyms'] = [item._to_dict() for item in self.synonyms]
        if getattr(self, 'pagination', None) is not None:
            output['pagination'] = self.pagination._to_dict()
        return output

    def __str__(self):
        """Return a `str` version of this SynonymCollection object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class SystemResponse(object):
    """
    For internal use only.

    A free-form property bag: every keyword argument is stored on the
    instance and round-tripped by `_to_dict`.
    """

    def __init__(self, **kwargs):
        """
        Initialize a SystemResponse object.

        :param **kwargs: (optional) Any additional properties.
        """
        for _key, _value in kwargs.items():
            setattr(self, _key, _value)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a SystemResponse object from a json dictionary."""
        args = {}
        xtra = _dict.copy()
        args.update(xtra)
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, '_additionalProperties'):
            for _key in self._additionalProperties:
                _value = getattr(self, _key, None)
                if _value is not None:
                    _dict[_key] = _value
        return _dict

    def __setattr__(self, name, value):
        # This model declares no fixed properties, so every attribute is
        # tracked as an additional property. Fix: `properties` was `{}` (an
        # empty dict) where the sibling models use a set; the membership test
        # behaved the same, but the type was wrong and inconsistent.
        properties = set()
        if not hasattr(self, '_additionalProperties'):
            super(SystemResponse, self).__setattr__('_additionalProperties',
                                                    set())
        if name not in properties:
            self._additionalProperties.add(name)
        super(SystemResponse, self).__setattr__(name, value)

    def __str__(self):
        """Return a `str` version of this SystemResponse object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Value(object):
    """
    Value.

    :attr str value_text: The text of the entity value.
    :attr object metadata: (optional) Any metadata related to the entity value.
    :attr datetime created: (optional) The timestamp for creation of the entity value.
    :attr datetime updated: (optional) The timestamp for the last update to the entity
    value.
    :attr list[str] synonyms: (optional) An array containing any synonyms for the entity
    value.
    :attr list[str] patterns: (optional) An array containing any patterns for the entity
    value.
    :attr str value_type: Specifies the type of value.
    """

    def __init__(self,
                 value_text,
                 value_type,
                 metadata=None,
                 created=None,
                 updated=None,
                 synonyms=None,
                 patterns=None):
        """
        Initialize a Value object.

        :param str value_text: The text of the entity value.
        :param str value_type: Specifies the type of value.
        :param object metadata: (optional) Any metadata related to the entity value.
        :param datetime created: (optional) The timestamp for creation of the entity
        value.
        :param datetime updated: (optional) The timestamp for the last update to the
        entity value.
        :param list[str] synonyms: (optional) An array containing any synonyms for the
        entity value.
        :param list[str] patterns: (optional) An array containing any patterns for the
        entity value.
        """
        self.value_text = value_text
        self.metadata = metadata
        self.created = created
        self.updated = updated
        self.synonyms = synonyms
        self.patterns = patterns
        self.value_type = value_type

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Value object from a json dictionary.

        Accepts either the wire keys ('value', 'type') or the attribute names
        ('value_text', 'value_type') for the two required properties.
        """
        args = {}
        if 'value' in _dict or 'value_text' in _dict:
            # Prefer the wire key when present. A `dict.get` default (rather
            # than the previous `a or b`) keeps falsy-but-valid values such
            # as '' from silently collapsing to the fallback/None.
            args['value_text'] = _dict.get('value', _dict.get('value_text'))
        else:
            raise ValueError(
                'Required property \'value\' not present in Value JSON')
        if 'metadata' in _dict:
            args['metadata'] = _dict.get('metadata')
        if 'created' in _dict:
            args['created'] = string_to_datetime(_dict.get('created'))
        if 'updated' in _dict:
            args['updated'] = string_to_datetime(_dict.get('updated'))
        if 'synonyms' in _dict:
            args['synonyms'] = _dict.get('synonyms')
        if 'patterns' in _dict:
            args['patterns'] = _dict.get('patterns')
        if 'type' in _dict or 'value_type' in _dict:
            # Same falsy-safe lookup as for 'value' above.
            args['value_type'] = _dict.get('type', _dict.get('value_type'))
        else:
            raise ValueError(
                'Required property \'type\' not present in Value JSON')
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'value_text') and self.value_text is not None:
            _dict['value'] = self.value_text
        if hasattr(self, 'metadata') and self.metadata is not None:
            _dict['metadata'] = self.metadata
        if hasattr(self, 'created') and self.created is not None:
            _dict['created'] = datetime_to_string(self.created)
        if hasattr(self, 'updated') and self.updated is not None:
            _dict['updated'] = datetime_to_string(self.updated)
        if hasattr(self, 'synonyms') and self.synonyms is not None:
            _dict['synonyms'] = self.synonyms
        if hasattr(self, 'patterns') and self.patterns is not None:
            _dict['patterns'] = self.patterns
        if hasattr(self, 'value_type') and self.value_type is not None:
            _dict['type'] = self.value_type
        return _dict

    def __str__(self):
        """Return a `str` version of this Value object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ValueCollection(object):
    """
    A page of entity values together with its pagination data.

    :attr list[ValueExport] values: An array of entity values.
    :attr Pagination pagination: The pagination data for the returned objects.
    """

    def __init__(self, values, pagination):
        """
        Initialize a ValueCollection object.

        :param list[ValueExport] values: An array of entity values.
        :param Pagination pagination: The pagination data for the returned objects.
        """
        self.values = values
        self.pagination = pagination

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ValueCollection object from a json dictionary."""
        if 'values' not in _dict:
            raise ValueError(
                'Required property \'values\' not present in ValueCollection JSON'
            )
        values = [
            ValueExport._from_dict(item) for item in _dict.get('values')
        ]
        if 'pagination' not in _dict:
            raise ValueError(
                'Required property \'pagination\' not present in ValueCollection JSON'
            )
        pagination = Pagination._from_dict(_dict.get('pagination'))
        return cls(values=values, pagination=pagination)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        output = {}
        if getattr(self, 'values', None) is not None:
            output['values'] = [item._to_dict() for item in self.values]
        if getattr(self, 'pagination', None) is not None:
            output['pagination'] = self.pagination._to_dict()
        return output

    def __str__(self):
        """Return a `str` version of this ValueCollection object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ValueExport(object):
    """
    ValueExport.

    :attr str value_text: The text of the entity value.
    :attr object metadata: (optional) Any metadata related to the entity value.
    :attr datetime created: (optional) The timestamp for creation of the entity value.
    :attr datetime updated: (optional) The timestamp for the last update to the entity
    value.
    :attr list[str] synonyms: (optional) An array containing any synonyms for the entity
    value.
    :attr list[str] patterns: (optional) An array containing any patterns for the entity
    value.
    :attr str value_type: Specifies the type of value.
    """

    def __init__(self,
                 value_text,
                 value_type,
                 metadata=None,
                 created=None,
                 updated=None,
                 synonyms=None,
                 patterns=None):
        """
        Initialize a ValueExport object.

        :param str value_text: The text of the entity value.
        :param str value_type: Specifies the type of value.
        :param object metadata: (optional) Any metadata related to the entity value.
        :param datetime created: (optional) The timestamp for creation of the entity
        value.
        :param datetime updated: (optional) The timestamp for the last update to the
        entity value.
        :param list[str] synonyms: (optional) An array containing any synonyms for the
        entity value.
        :param list[str] patterns: (optional) An array containing any patterns for the
        entity value.
        """
        self.value_text = value_text
        self.metadata = metadata
        self.created = created
        self.updated = updated
        self.synonyms = synonyms
        self.patterns = patterns
        self.value_type = value_type

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ValueExport object from a json dictionary.

        Accepts either the wire keys ('value', 'type') or the attribute names
        ('value_text', 'value_type') for the two required properties.
        """
        args = {}
        if 'value' in _dict or 'value_text' in _dict:
            # Prefer the wire key when present. A `dict.get` default (rather
            # than the previous `a or b`) keeps falsy-but-valid values such
            # as '' from silently collapsing to the fallback/None.
            args['value_text'] = _dict.get('value', _dict.get('value_text'))
        else:
            raise ValueError(
                'Required property \'value\' not present in ValueExport JSON')
        if 'metadata' in _dict:
            args['metadata'] = _dict.get('metadata')
        if 'created' in _dict:
            args['created'] = string_to_datetime(_dict.get('created'))
        if 'updated' in _dict:
            args['updated'] = string_to_datetime(_dict.get('updated'))
        if 'synonyms' in _dict:
            args['synonyms'] = _dict.get('synonyms')
        if 'patterns' in _dict:
            args['patterns'] = _dict.get('patterns')
        if 'type' in _dict or 'value_type' in _dict:
            # Same falsy-safe lookup as for 'value' above.
            args['value_type'] = _dict.get('type', _dict.get('value_type'))
        else:
            raise ValueError(
                'Required property \'type\' not present in ValueExport JSON')
        return cls(**args)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'value_text') and self.value_text is not None:
            _dict['value'] = self.value_text
        if hasattr(self, 'metadata') and self.metadata is not None:
            _dict['metadata'] = self.metadata
        if hasattr(self, 'created') and self.created is not None:
            _dict['created'] = datetime_to_string(self.created)
        if hasattr(self, 'updated') and self.updated is not None:
            _dict['updated'] = datetime_to_string(self.updated)
        if hasattr(self, 'synonyms') and self.synonyms is not None:
            _dict['synonyms'] = self.synonyms
        if hasattr(self, 'patterns') and self.patterns is not None:
            _dict['patterns'] = self.patterns
        if hasattr(self, 'value_type') and self.value_type is not None:
            _dict['type'] = self.value_type
        return _dict

    def __str__(self):
        """Return a `str` version of this ValueExport object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Workspace(object):
    """
    Workspace.

    :attr str name: The name of the workspace.
    :attr str language: The language of the workspace.
    :attr datetime created: (optional) The timestamp for creation of the workspace.
    :attr datetime updated: (optional) The timestamp for the last update to the workspace.
    :attr str workspace_id: The workspace ID of the workspace.
    :attr str description: (optional) The description of the workspace.
    :attr object metadata: (optional) Any metadata related to the workspace.
    :attr bool learning_opt_out: (optional) Whether training data from the workspace
    (including artifacts such as intents and entities) can be used by IBM for general
    service improvements. `true` indicates that workspace training data is not to be used.
    :attr WorkspaceSystemSettings system_settings: (optional) Global settings for the
    workspace.
    """

    def __init__(self,
                 name,
                 language,
                 workspace_id,
                 created=None,
                 updated=None,
                 description=None,
                 metadata=None,
                 learning_opt_out=None,
                 system_settings=None):
        """
        Initialize a Workspace object.

        :param str name: The name of the workspace.
        :param str language: The language of the workspace.
        :param str workspace_id: The workspace ID of the workspace.
        :param datetime created: (optional) The timestamp for creation of the workspace.
        :param datetime updated: (optional) The timestamp for the last update to the
        workspace.
        :param str description: (optional) The description of the workspace.
        :param object metadata: (optional) Any metadata related to the workspace.
        :param bool learning_opt_out: (optional) Whether training data from the
        workspace can be used by IBM for general service improvements. `true`
        indicates that workspace training data is not to be used.
        :param WorkspaceSystemSettings system_settings: (optional) Global settings for
        the workspace.
        """
        self.name = name
        self.language = language
        self.created = created
        self.updated = updated
        self.workspace_id = workspace_id
        self.description = description
        self.metadata = metadata
        self.learning_opt_out = learning_opt_out
        self.system_settings = system_settings

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Workspace object from a json dictionary."""
        props = {}
        for required in ('name', 'language'):
            if required not in _dict:
                raise ValueError(
                    'Required property \'%s\' not present in Workspace JSON' %
                    required)
            props[required] = _dict.get(required)
        if 'created' in _dict:
            props['created'] = string_to_datetime(_dict.get('created'))
        if 'updated' in _dict:
            props['updated'] = string_to_datetime(_dict.get('updated'))
        if 'workspace_id' not in _dict:
            raise ValueError(
                'Required property \'workspace_id\' not present in Workspace JSON'
            )
        props['workspace_id'] = _dict.get('workspace_id')
        for optional in ('description', 'metadata', 'learning_opt_out'):
            if optional in _dict:
                props[optional] = _dict.get(optional)
        if 'system_settings' in _dict:
            props['system_settings'] = WorkspaceSystemSettings._from_dict(
                _dict.get('system_settings'))
        return cls(**props)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        output = {}
        for attr in ('name', 'language'):
            value = getattr(self, attr, None)
            if value is not None:
                output[attr] = value
        if getattr(self, 'created', None) is not None:
            output['created'] = datetime_to_string(self.created)
        if getattr(self, 'updated', None) is not None:
            output['updated'] = datetime_to_string(self.updated)
        for attr in ('workspace_id', 'description', 'metadata',
                     'learning_opt_out'):
            value = getattr(self, attr, None)
            if value is not None:
                output[attr] = value
        if getattr(self, 'system_settings', None) is not None:
            output['system_settings'] = self.system_settings._to_dict()
        return output

    def __str__(self):
        """Return a `str` version of this Workspace object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class WorkspaceCollection(object):
    """
    A page of workspaces together with its pagination data.

    :attr list[Workspace] workspaces: An array of objects describing the workspaces
    associated with the service instance.
    :attr Pagination pagination: The pagination data for the returned objects.
    """

    def __init__(self, workspaces, pagination):
        """
        Initialize a WorkspaceCollection object.

        :param list[Workspace] workspaces: An array of objects describing the
        workspaces associated with the service instance.
        :param Pagination pagination: The pagination data for the returned objects.
        """
        self.workspaces = workspaces
        self.pagination = pagination

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a WorkspaceCollection object from a json dictionary."""
        if 'workspaces' not in _dict:
            raise ValueError(
                'Required property \'workspaces\' not present in WorkspaceCollection JSON'
            )
        workspaces = [
            Workspace._from_dict(item) for item in _dict.get('workspaces')
        ]
        if 'pagination' not in _dict:
            raise ValueError(
                'Required property \'pagination\' not present in WorkspaceCollection JSON'
            )
        pagination = Pagination._from_dict(_dict.get('pagination'))
        return cls(workspaces=workspaces, pagination=pagination)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        output = {}
        if getattr(self, 'workspaces', None) is not None:
            output['workspaces'] = [
                item._to_dict() for item in self.workspaces
            ]
        if getattr(self, 'pagination', None) is not None:
            output['pagination'] = self.pagination._to_dict()
        return output

    def __str__(self):
        """Return a `str` version of this WorkspaceCollection object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class WorkspaceExport(object):
    """
    WorkspaceExport.

    :attr str name: The name of the workspace.
    :attr str description: The description of the workspace.
    :attr str language: The language of the workspace.
    :attr object metadata: Any metadata that is required by the workspace.
    :attr datetime created: (optional) The timestamp for creation of the workspace.
    :attr datetime updated: (optional) The timestamp for the last update to the workspace.
    :attr str workspace_id: The workspace ID of the workspace.
    :attr str status: The current status of the workspace.
    :attr bool learning_opt_out: Whether training data from the workspace can be used by
    IBM for general service improvements. `true` indicates that workspace training data is
    not to be used.
    :attr WorkspaceSystemSettings system_settings: (optional) Global settings for the
    workspace.
    :attr list[IntentExport] intents: (optional) An array of intents.
    :attr list[EntityExport] entities: (optional) An array of entities.
    :attr list[Counterexample] counterexamples: (optional) An array of counterexamples.
    :attr list[DialogNode] dialog_nodes: (optional) An array of objects describing the
    dialog nodes in the workspace.
    """

    def __init__(self,
                 name,
                 description,
                 language,
                 metadata,
                 workspace_id,
                 status,
                 learning_opt_out,
                 created=None,
                 updated=None,
                 system_settings=None,
                 intents=None,
                 entities=None,
                 counterexamples=None,
                 dialog_nodes=None):
        """
        Initialize a WorkspaceExport object.

        :param str name: The name of the workspace.
        :param str description: The description of the workspace.
        :param str language: The language of the workspace.
        :param object metadata: Any metadata that is required by the workspace.
        :param str workspace_id: The workspace ID of the workspace.
        :param str status: The current status of the workspace.
        :param bool learning_opt_out: Whether training data from the workspace can be
        used by IBM for general service improvements. `true` indicates that workspace
        training data is not to be used.
        :param datetime created: (optional) The timestamp for creation of the workspace.
        :param datetime updated: (optional) The timestamp for the last update to the
        workspace.
        :param WorkspaceSystemSettings system_settings: (optional) Global settings for
        the workspace.
        :param list[IntentExport] intents: (optional) An array of intents.
        :param list[EntityExport] entities: (optional) An array of entities.
        :param list[Counterexample] counterexamples: (optional) An array of
        counterexamples.
        :param list[DialogNode] dialog_nodes: (optional) An array of objects describing
        the dialog nodes in the workspace.
        """
        self.name = name
        self.description = description
        self.language = language
        self.metadata = metadata
        self.created = created
        self.updated = updated
        self.workspace_id = workspace_id
        self.status = status
        self.learning_opt_out = learning_opt_out
        self.system_settings = system_settings
        self.intents = intents
        self.entities = entities
        self.counterexamples = counterexamples
        self.dialog_nodes = dialog_nodes

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a WorkspaceExport object from a json dictionary."""
        props = {}
        # Required scalar properties, validated in the same order as before.
        for required in ('name', 'description', 'language', 'metadata'):
            if required not in _dict:
                raise ValueError(
                    'Required property \'%s\' not present in WorkspaceExport JSON'
                    % required)
            props[required] = _dict.get(required)
        if 'created' in _dict:
            props['created'] = string_to_datetime(_dict.get('created'))
        if 'updated' in _dict:
            props['updated'] = string_to_datetime(_dict.get('updated'))
        for required in ('workspace_id', 'status', 'learning_opt_out'):
            if required not in _dict:
                raise ValueError(
                    'Required property \'%s\' not present in WorkspaceExport JSON'
                    % required)
            props[required] = _dict.get(required)
        if 'system_settings' in _dict:
            props['system_settings'] = WorkspaceSystemSettings._from_dict(
                _dict.get('system_settings'))
        if 'intents' in _dict:
            props['intents'] = [
                IntentExport._from_dict(item)
                for item in _dict.get('intents')
            ]
        if 'entities' in _dict:
            props['entities'] = [
                EntityExport._from_dict(item)
                for item in _dict.get('entities')
            ]
        if 'counterexamples' in _dict:
            props['counterexamples'] = [
                Counterexample._from_dict(item)
                for item in _dict.get('counterexamples')
            ]
        if 'dialog_nodes' in _dict:
            props['dialog_nodes'] = [
                DialogNode._from_dict(item)
                for item in _dict.get('dialog_nodes')
            ]
        return cls(**props)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        output = {}
        for attr in ('name', 'description', 'language', 'metadata'):
            value = getattr(self, attr, None)
            if value is not None:
                output[attr] = value
        if getattr(self, 'created', None) is not None:
            output['created'] = datetime_to_string(self.created)
        if getattr(self, 'updated', None) is not None:
            output['updated'] = datetime_to_string(self.updated)
        for attr in ('workspace_id', 'status', 'learning_opt_out'):
            value = getattr(self, attr, None)
            if value is not None:
                output[attr] = value
        if getattr(self, 'system_settings', None) is not None:
            output['system_settings'] = self.system_settings._to_dict()
        # Model-list properties are serialized element-by-element.
        for attr in ('intents', 'entities', 'counterexamples',
                     'dialog_nodes'):
            value = getattr(self, attr, None)
            if value is not None:
                output[attr] = [item._to_dict() for item in value]
        return output

    def __str__(self):
        """Return a `str` version of this WorkspaceExport object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class WorkspaceSystemSettings(object):
    """
    Global settings for the workspace.

    :attr WorkspaceSystemSettingsTooling tooling: (optional) Workspace settings related
    to the Watson Assistant tool.
    :attr WorkspaceSystemSettingsDisambiguation disambiguation: (optional) Workspace
    settings related to the disambiguation feature.
    **Note:** This feature is available only to Premium users.
    :attr object human_agent_assist: (optional) For internal use only.
    """

    def __init__(self,
                 tooling=None,
                 disambiguation=None,
                 human_agent_assist=None):
        """
        Initialize a WorkspaceSystemSettings object.

        :param WorkspaceSystemSettingsTooling tooling: (optional) Workspace settings
        related to the Watson Assistant tool.
        :param WorkspaceSystemSettingsDisambiguation disambiguation: (optional)
        Workspace settings related to the disambiguation feature.
        **Note:** This feature is available only to Premium users.
        :param object human_agent_assist: (optional) For internal use only.
        """
        self.tooling = tooling
        self.disambiguation = disambiguation
        self.human_agent_assist = human_agent_assist

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a WorkspaceSystemSettings object from a json dictionary."""
        props = {}
        if 'tooling' in _dict:
            props['tooling'] = WorkspaceSystemSettingsTooling._from_dict(
                _dict.get('tooling'))
        if 'disambiguation' in _dict:
            props['disambiguation'] = (
                WorkspaceSystemSettingsDisambiguation._from_dict(
                    _dict.get('disambiguation')))
        if 'human_agent_assist' in _dict:
            props['human_agent_assist'] = _dict.get('human_agent_assist')
        return cls(**props)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        output = {}
        if getattr(self, 'tooling', None) is not None:
            output['tooling'] = self.tooling._to_dict()
        if getattr(self, 'disambiguation', None) is not None:
            output['disambiguation'] = self.disambiguation._to_dict()
        if getattr(self, 'human_agent_assist', None) is not None:
            output['human_agent_assist'] = self.human_agent_assist
        return output

    def __str__(self):
        """Return a `str` version of this WorkspaceSystemSettings object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class WorkspaceSystemSettingsDisambiguation(object):
    """Workspace settings related to the disambiguation feature.

    **Note:** This feature is available only to Premium users.

    :attr str prompt: (optional) The text of the introductory prompt that
          accompanies disambiguation options presented to the user.
    :attr str none_of_the_above_prompt: (optional) The user-facing label for
          the option users can select if none of the suggested options is
          correct. If no value is specified for this property, this option
          does not appear.
    :attr bool enabled: (optional) Whether the disambiguation feature is
          enabled for the workspace.
    :attr str sensitivity: (optional) The sensitivity of the disambiguation
          feature to intent detection conflicts. Set to **high** if you want
          the disambiguation feature to be triggered more often. This can be
          useful for testing or demonstration purposes.
    """

    def __init__(self,
                 prompt=None,
                 none_of_the_above_prompt=None,
                 enabled=None,
                 sensitivity=None):
        """Initialize a WorkspaceSystemSettingsDisambiguation object.

        :param str prompt: (optional) Introductory prompt accompanying
               disambiguation options presented to the user.
        :param str none_of_the_above_prompt: (optional) User-facing label for
               the "none of the above" option; omitted when unspecified.
        :param bool enabled: (optional) Whether disambiguation is enabled
               for the workspace.
        :param str sensitivity: (optional) Sensitivity to intent detection
               conflicts; **high** triggers disambiguation more often.
        """
        self.prompt = prompt
        self.none_of_the_above_prompt = none_of_the_above_prompt
        self.enabled = enabled
        self.sensitivity = sensitivity

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a WorkspaceSystemSettingsDisambiguation object from a json dictionary."""
        # Forward only the keys actually present; absent keys fall back to
        # the constructor defaults (None).
        kwargs = {
            key: _dict.get(key)
            for key in ('prompt', 'none_of_the_above_prompt', 'enabled',
                        'sensitivity')
            if key in _dict
        }
        return cls(**kwargs)

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Attributes left at None are omitted from the serialized form.
        return {
            key: getattr(self, key)
            for key in ('prompt', 'none_of_the_above_prompt', 'enabled',
                        'sensitivity')
            if getattr(self, key, None) is not None
        }

    def __str__(self):
        """Return a `str` version of this WorkspaceSystemSettingsDisambiguation object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and (
            self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class WorkspaceSystemSettingsTooling(object):
    """Workspace settings related to the Watson Assistant tool.

    :attr bool store_generic_responses: (optional) Whether the dialog JSON
          editor displays text responses within the `output.generic` object.
    """

    def __init__(self, store_generic_responses=None):
        """Initialize a WorkspaceSystemSettingsTooling object.

        :param bool store_generic_responses: (optional) Whether the dialog
               JSON editor displays text responses within the
               `output.generic` object.
        """
        self.store_generic_responses = store_generic_responses

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a WorkspaceSystemSettingsTooling object from a json dictionary."""
        if 'store_generic_responses' in _dict:
            return cls(
                store_generic_responses=_dict.get('store_generic_responses'))
        return cls()

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        value = getattr(self, 'store_generic_responses', None)
        # A None value means "unset" and is omitted from the serialization.
        return {} if value is None else {'store_generic_responses': value}

    def __str__(self):
        """Return a `str` version of this WorkspaceSystemSettingsTooling object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other):
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and (
            self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
| [
"[email protected]"
] | |
40b67939e03b48cfcbe364e11cb77e642e791485 | 747135fab93554fac11d6c2184470d4bf2701d31 | /style_guide/source/conf.py | cc3774d7c44179c0bf106e4354b8ccf3ad181cc7 | [
"CC-BY-3.0"
] | permissive | dhutty/chef-docs | 5985249fce8a8b0fbaaf256830fbdf43a5ec9d6e | 661c72f0e0405b4cec223bc0def67cd598035070 | refs/heads/master | 2021-01-18T00:11:51.224491 | 2014-05-16T21:36:35 | 2014-05-16T21:36:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,537 | py | # -*- coding: utf-8 -*-
#
# Chef documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 22 13:50:49 2012.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# -- Path setup ---------------------------------------------------------------
# PEP 8: one import per statement (was "import sys, os"). Both modules are
# only needed by the optional sys.path manipulation below, but the imports
# are kept for parity with the other conf.py files in this repo.
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# Sphinx extension module names; 'sphinx.ext.todo' enables todo directives.
extensions = ['sphinx.ext.todo']

# Paths that contain templates, relative to this directory.
templates_path = ['_templates', '../../_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'style_guide'

# General information about the project. Note: "copyright" here is the
# Sphinx setting (shown in the page footer), not the Python builtin.
project = u'Style Guide'
copyright = u'This work is licensed under a Creative Commons Attribution 3.0 Unported License.'

# Patterns, relative to the source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'emacs'

# reStructuredText prepended to every source file that is read; pulls in the
# shared substitution ("swap") definitions.
rst_prolog = """
.. include:: ../../swaps/swap_descriptions.txt
.. include:: ../../swaps/swap_names.txt
.. include:: ../../swaps/swap_notes.txt
"""

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages (custom theme, see
# html_theme_path below).
html_theme = 'chef'

# Paths that contain custom themes, relative to this directory.
html_theme_path = ['../../_themes/']

# The name for this set of Sphinx documents.
html_title = "Style Guide"

# Paths that contain custom static files (such as style sheets); copied
# after the builtin static files, so "default.css" would override the
# builtin "default.css".
html_static_path = ['_static']

# Repurposed by Chef to stamp a product/version string in the page footer
# instead of a last-updated datetime.
html_last_updated_fmt = 'Style Guide, version 1.0.0'

# Custom sidebar templates, mapping document names to template names.
html_sidebars = {
    '**': ['localtoc.html', 'relations.html'],
}

# Additional templates rendered to pages, mapping page names to templates.
html_additional_pages = {
    'search': 'chef_search.html',
}

# Index generation switches: no module index, no general index, and
# therefore no split index either.
html_domain_indices = False
html_use_index = False
html_split_index = False

# Show the custom copyright string defined above (the "copyright" setting),
# rather than suppressing the footer copyright line entirely.
html_show_copyright = True

# Output file base name for HTML help builder.
htmlhelp_basename = 'StyleGuide'
| [
"[email protected]"
] | |
5ca73a546253120cacc9fd813c21f62fbe1d8698 | 34cb685d3340cb59c2f3639b3b5ca42ff3812338 | /pptx/slide.py | 9c7c6cdf2c1ed62ed0f47f3a99a458510fdb79d8 | [
"MIT"
] | permissive | handwriter/python-pptx | 6b435b6c9c95fcc00cd2aa0923ca15e211228a8b | 22351c6f9fe637cadddca3461c4899af7d439711 | refs/heads/master | 2021-04-05T00:27:20.870352 | 2020-03-19T13:20:28 | 2020-03-19T13:20:28 | 248,506,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,844 | py | # encoding: utf-8
"""Slide-related objects, including masters, layouts, and notes."""
from __future__ import absolute_import, division, print_function, unicode_literals
from pptx.dml.fill import FillFormat
from pptx.enum.shapes import PP_PLACEHOLDER
from pptx.shapes.shapetree import (
LayoutPlaceholders,
LayoutShapes,
MasterPlaceholders,
MasterShapes,
NotesSlidePlaceholders,
NotesSlideShapes,
SlidePlaceholders,
SlideShapes,
)
from pptx.shared import ElementProxy, ParentedElementProxy, PartElementProxy
from pptx.util import lazyproperty
class _BaseSlide(PartElementProxy):
    """Base class for slide objects, including masters, layouts and notes."""

    __slots__ = ("_background",)

    @lazyproperty
    def background(self):
        """|_Background| object providing slide background properties.

        Returned whether or not the slide, master, or layout has an
        explicitly defined background. Repeated access on the same slide
        object always yields the same |_Background| instance.
        """
        return _Background(self._element.cSld)

    @property
    def name(self):
        """Internal name of this slide as a string.

        An unnamed slide reads as the empty string (`''`). Assigning an
        empty string or |None| removes any existing name.
        """
        return self._element.cSld.name

    @name.setter
    def name(self, value):
        self._element.cSld.name = "" if value is None else value
class _BaseMaster(_BaseSlide):
    """Base class for master objects such as |SlideMaster| and |NotesMaster|.

    Provides access to placeholders and regular shapes.
    """

    __slots__ = ("_placeholders", "_shapes")

    @lazyproperty
    def placeholders(self):
        """|MasterPlaceholders| sequence of placeholder shapes in this master.

        Sorted in *idx* order.
        """
        return MasterPlaceholders(self._element.spTree, self)

    @lazyproperty
    def shapes(self):
        """|MasterShapes| sequence of shape objects appearing on this slide."""
        return MasterShapes(self._element.spTree, self)
class NotesMaster(_BaseMaster):
    """Proxy for the notes master XML document.

    Provides access to shapes, the most commonly used of which are
    placeholders.
    """

    __slots__ = ()
class NotesSlide(_BaseSlide):
    """Notes slide object.

    Provides access to the slide notes placeholder and to the other shapes
    on the notes handout page.
    """

    __slots__ = ("_placeholders", "_shapes")

    def clone_master_placeholders(self, notes_master):
        """Selectively add placeholder shape elements from *notes_master*.

        Cloned placeholders are appended to the shapes collection of this
        notes slide with Z-order preserved. Certain placeholders (header,
        date, footer) are not cloned.
        """
        # -- only these placeholder types are propagated to a new notes
        # -- slide; header/date/footer placeholders are intentionally
        # -- excluded
        cloneable_types = (
            PP_PLACEHOLDER.SLIDE_IMAGE,
            PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.SLIDE_NUMBER,
        )
        shapes = self.shapes
        for placeholder in notes_master.placeholders:
            if placeholder.element.ph_type in cloneable_types:
                shapes.clone_placeholder(placeholder)

    @property
    def notes_placeholder(self):
        """The placeholder on this notes slide containing the actual notes text.

        |None| if no notes placeholder is present; while this is probably
        uncommon, it can happen if the notes master does not have a body
        placeholder, or if the notes placeholder has been deleted from the
        notes slide.
        """
        return next(
            (
                placeholder
                for placeholder in self.placeholders
                if placeholder.placeholder_format.type == PP_PLACEHOLDER.BODY
            ),
            None,
        )

    @property
    def notes_text_frame(self):
        """Text frame of the notes placeholder, or |None| if there is none.

        A shortcut accommodating the common case of simply adding "notes"
        text to the notes "page".
        """
        placeholder = self.notes_placeholder
        return None if placeholder is None else placeholder.text_frame

    @lazyproperty
    def placeholders(self):
        """|NotesSlidePlaceholders| sequence of placeholders in this notes slide."""
        return NotesSlidePlaceholders(self.element.spTree, self)

    @lazyproperty
    def shapes(self):
        """|NotesSlideShapes| sequence of shape objects on this notes slide."""
        return NotesSlideShapes(self._element.spTree, self)
class Slide(_BaseSlide):
    """Slide object. Provides access to shapes and slide-level properties."""

    __slots__ = ("_placeholders", "_shapes")

    @property
    def background(self):
        """|_Background| object providing slide background properties.

        Returned whether or not the slide overrides the default background
        or inherits it; use :attr:`follow_master_background` to determine
        which of those conditions applies. The same |_Background| object is
        returned on every call for the same slide object.
        """
        return super(Slide, self).background

    @property
    def follow_master_background(self):
        """|True| if this slide inherits the slide master background.

        Assigning |False| causes background inheritance from the master to
        be interrupted; if there is no custom background for this slide,
        a default background is added. If a custom background already
        exists for this slide, assigning |False| has no effect. Assigning
        |True| causes any custom background for this slide to be deleted
        and inheritance from the master restored.
        """
        # -- an absent p:bg element means the master background shows --
        return self._element.bg is None

    @property
    def has_notes_slide(self):
        """True if this slide has a notes slide, False otherwise.

        A notes slide is created by :attr:`.notes_slide` when one doesn't
        exist; use this property to test for a notes slide without the
        possible side-effect of creating one.
        """
        return self.part.has_notes_slide

    @property
    def notes_slide(self):
        """The |NotesSlide| instance for this slide.

        If the slide does not have a notes slide, one is created. The same
        single instance is returned on each call.
        """
        return self.part.notes_slide

    @lazyproperty
    def placeholders(self):
        """|SlidePlaceholders| sequence of placeholder shapes in this slide."""
        return SlidePlaceholders(self._element.spTree, self)

    @lazyproperty
    def shapes(self):
        """|SlideShapes| sequence of shape objects appearing on this slide."""
        return SlideShapes(self._element.spTree, self)

    @property
    def slide_id(self):
        """Integer uniquely identifying this slide within this presentation.

        The slide id does not change if the position of this slide in the
        slide sequence is changed by adding, rearranging, or deleting
        slides.
        """
        return self.part.slide_id

    @property
    def slide_layout(self):
        """|SlideLayout| object this slide inherits appearance from."""
        return self.part.slide_layout
class Slides(ParentedElementProxy):
    """Sequence of slides belonging to an instance of |Presentation|.

    Has list semantics for access to individual slides: supports indexed
    access, len(), and iteration.
    """

    def __init__(self, sldIdLst, prs):
        super(Slides, self).__init__(sldIdLst, prs)
        self._sldIdLst = sldIdLst

    def __getitem__(self, idx):
        """Provide indexed access, (e.g. 'slides[0]')."""
        try:
            rId = self._sldIdLst[idx].rId
        except IndexError:
            raise IndexError("slide index out of range")
        return self.part.related_slide(rId)

    def __iter__(self):
        """Support iteration (e.g. 'for slide in slides:')."""
        for sldId_elm in self._sldIdLst:
            yield self.part.related_slide(sldId_elm.rId)

    def __len__(self):
        """Support len() built-in function (e.g. 'len(slides) == 4')."""
        return len(self._sldIdLst)

    def add_slide(self, slide_layout):
        """Return a newly added slide that inherits layout from *slide_layout*."""
        rId, new_slide = self.part.add_slide(slide_layout)
        new_slide.shapes.clone_layout_placeholders(slide_layout)
        self._sldIdLst.add_sldId(rId)
        return new_slide

    def get(self, slide_id, default=None):
        """Return the slide identified by integer *slide_id*.

        Returns *default* when no slide with that id is present.
        """
        found = self.part.get_slide(slide_id)
        return default if found is None else found

    def index(self, slide):
        """Map *slide* to its zero-based position in this slide collection.

        Raises |ValueError| on *slide* not present.
        """
        for position, candidate in enumerate(self):
            if candidate == slide:
                return position
        raise ValueError("%s is not in slide collection" % slide)
class SlideLayout(_BaseSlide):
    """Slide layout object.

    Provides access to placeholders, regular shapes, and slide
    layout-level properties.
    """

    __slots__ = ("_placeholders", "_shapes")

    def iter_cloneable_placeholders(self):
        """Generate layout placeholders that should be cloned to a new slide.

        Yields each layout placeholder on this slide layout that should be
        cloned to a slide when the layout is applied to that slide.
        """
        # -- these types remain latent on the layout and are not copied --
        latent_types = (
            PP_PLACEHOLDER.DATE,
            PP_PLACEHOLDER.FOOTER,
            PP_PLACEHOLDER.SLIDE_NUMBER,
        )
        for placeholder in self.placeholders:
            if placeholder.element.ph_type not in latent_types:
                yield placeholder

    @lazyproperty
    def placeholders(self):
        """|LayoutPlaceholders| sequence of placeholders, sorted in *idx* order."""
        return LayoutPlaceholders(self._element.spTree, self)

    @lazyproperty
    def shapes(self):
        """|LayoutShapes| sequence of shapes appearing on this slide layout."""
        return LayoutShapes(self._element.spTree, self)

    @property
    def slide_master(self):
        """Slide master from which this slide layout inherits properties."""
        return self.part.slide_master

    @property
    def used_by_slides(self):
        """Tuple of slide objects based on this slide layout."""
        # -- getting the Slides collection requires going around the horn a
        # -- bit, via the package's presentation part --
        slides = self.part.package.presentation_part.presentation.slides
        return tuple(s for s in slides if s.slide_layout == self)
class SlideLayouts(ParentedElementProxy):
    """Sequence of slide layouts belonging to a slide-master.

    Supports indexed access, len(), iteration, index() and remove().
    """

    __slots__ = ("_sldLayoutIdLst",)

    def __init__(self, sldLayoutIdLst, parent):
        super(SlideLayouts, self).__init__(sldLayoutIdLst, parent)
        self._sldLayoutIdLst = sldLayoutIdLst

    def __getitem__(self, idx):
        """Provide indexed access, (e.g. ``slide_layouts[2]``)."""
        try:
            rId = self._sldLayoutIdLst[idx].rId
        except IndexError:
            raise IndexError("slide layout index out of range")
        return self.part.related_slide_layout(rId)

    def __iter__(self):
        """Generate each |SlideLayout| in the collection, in sequence."""
        for sldLayoutId_elm in self._sldLayoutIdLst:
            yield self.part.related_slide_layout(sldLayoutId_elm.rId)

    def __len__(self):
        """Support len() built-in function (e.g. 'len(slides) == 4')."""
        return len(self._sldLayoutIdLst)

    def get_by_name(self, name, default=None):
        """Return SlideLayout object having *name* or *default* if not found."""
        return next(
            (layout for layout in self if layout.name == name), default
        )

    def index(self, slide_layout):
        """Return zero-based index of *slide_layout* in this collection.

        Raises ValueError if *slide_layout* is not present in this
        collection.
        """
        for position, candidate in enumerate(self):
            if slide_layout == candidate:
                return position
        raise ValueError("layout not in this SlideLayouts collection")

    def remove(self, slide_layout):
        """Remove *slide_layout* from the collection.

        Raises ValueError when *slide_layout* is in use; a slide layout
        which is the basis for one or more slides cannot be removed.
        """
        # -- a layout that any slide is based on may not be removed --
        if slide_layout.used_by_slides:
            raise ValueError("cannot remove slide-layout in use by one or more slides")
        # -- the target layout is identified by its index in this collection --
        target_idx = self.index(slide_layout)
        # -- removing its entry from the master's p:sldLayoutIdLst hides the
        # -- layout but does not yet remove its part from the package --
        target_sldLayoutId = self._sldLayoutIdLst.sldLayoutId_lst[target_idx]
        self._sldLayoutIdLst.remove(target_sldLayoutId)
        # -- dropping the master->layout relationship removes the layout from
        # -- the package, along with everything (only) it refers to,
        # -- including images (not used elsewhere) and hyperlinks --
        slide_layout.slide_master.part.drop_rel(target_sldLayoutId.rId)
class SlideMaster(_BaseMaster):
    """Slide master object.

    Provides access to slide layouts. Access to placeholders, regular
    shapes, and slide master-level properties is inherited from
    |_BaseMaster|.
    """

    __slots__ = ("_slide_layouts",)

    @lazyproperty
    def slide_layouts(self):
        """|SlideLayouts| object providing access to this slide-master's layouts."""
        return SlideLayouts(self._element.get_or_add_sldLayoutIdLst(), self)
class SlideMasters(ParentedElementProxy):
    """Sequence of |SlideMaster| objects belonging to a presentation.

    Has list access semantics, supporting indexed access, len(), and
    iteration.
    """

    __slots__ = ("_sldMasterIdLst",)

    def __init__(self, sldMasterIdLst, parent):
        super(SlideMasters, self).__init__(sldMasterIdLst, parent)
        self._sldMasterIdLst = sldMasterIdLst

    def __getitem__(self, idx):
        """Provide indexed access, (e.g. ``slide_masters[2]``)."""
        try:
            rId = self._sldMasterIdLst[idx].rId
        except IndexError:
            raise IndexError("slide master index out of range")
        return self.part.related_slide_master(rId)

    def __iter__(self):
        """Generate each |SlideMaster| instance in the collection, in sequence."""
        for sldMasterId_elm in self._sldMasterIdLst:
            yield self.part.related_slide_master(sldMasterId_elm.rId)

    def __len__(self):
        """Support len() built-in function (e.g. 'len(slide_masters) == 4')."""
        return len(self._sldMasterIdLst)
class _Background(ElementProxy):
    """Provides access to slide background properties.

    Note that the presence of this object does not by itself imply an
    explicitly-defined background; a slide with an inherited background
    still has a |_Background| object.
    """

    __slots__ = ("_cSld", "_fill")

    def __init__(self, cSld):
        super(_Background, self).__init__(cSld)
        self._cSld = cSld

    @lazyproperty
    def fill(self):
        """|FillFormat| instance for this background.

        Used to interrogate or specify the fill of the slide background.

        Note that accessing this property is potentially destructive. A
        slide background can also be specified by a background style
        reference, and accessing this property will remove that reference,
        if present, and replace it with NoFill. This is frequently the case
        for a slide master background. It is likewise the case when there
        is no explicitly-defined background (background is inherited);
        merely accessing this property sets the background to NoFill and
        interrupts the inheritance link. This is frequently the case for a
        slide background.

        Of course, if you are accessing this property in order to set the
        fill, these changes are of no consequence, but the existing
        background cannot be reliably interrogated using this property
        unless you have already established it is an explicit fill. If the
        background is already a fill, accessing this property makes no
        changes to the current background.
        """
        bgPr = self._cSld.get_or_add_bgPr()
        return FillFormat.from_fill_parent(bgPr)
| [
"[email protected]"
] | |
8a7bac217e3dfa9c44fa5647150501862b97aa9b | cbda89443b351bb2047180dad4e300c13dc3df7f | /Crystals/Morpurgo_sp_outer/Jobs/TIPS_Pc/TIPS_Pc_anion_neut_inner2_outer1/TIPS_Pc_anion_neut_inner2_outer1.py | 6fd3f3b888a68c5738ebb4a4af6dfc566150231f | [] | no_license | sheridanfew/pythonpolarisation | 080f52979f98d26360a46412a10c8e3f51ee4549 | 178e2684e9a239a8e60af5f7b1eb414ac5f31e92 | refs/heads/master | 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null | UTF-8 | Python | false | false | 6,691 | py | import sys
# Python 2 driver script: builds a TIPS-pentacene crystal (charged central
# molecule, neutral inner shell, single-point outer shell), computes induced
# dipoles and electrostatic/polarization energies, then scans reorganisation
# energies over the inner region. NOTE(review): indentation of the original
# file appears to have been flattened in this copy; loop nesting below is as
# recovered from the source dump.
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
# Timestamps are printed throughout to track progress of long-running stages.
print strftime("%a, %d %b %Y %X +0000", gmtime())
name='TIPS_Pc_anion_neut_inner2_outer1'
#For crystals here, all cubic and centred at centre
insize=2
#number of TVs in each dir central mol is from edge of inner region
outsize=1
# .xyz files: charged (anion) central molecule, polarizable neutral
# surroundings, and single-point-charge neutral molecules for the outer shell.
mols_cen=['TIPS_Pc_anion_aniso_cifstruct_chelpg.xyz']
mols_sur=['TIPS_Pc_neut_aniso_cifstruct_chelpg.xyz']
mols_outer=['sp_TIPS_Pc_neut.xyz']
#From cif:
'''
TIPS
data_k01029
_cell_length_a 7.5650(15)
_cell_length_b 7.7500(15)
_cell_length_c 16.835(3)
_cell_angle_alpha 89.15(3)
_cell_angle_beta 78.42(3)
_cell_angle_gamma 83.63(3)
_cell_volume 960.9(3)
'''
#Get translation vectors:
# Lattice constants converted from Angstrom to Bohr (0.52917... = Bohr radius).
# NOTE(review): the cif above reports a = 7.5650(15) A, but the code uses
# 7.565015 — it looks like the "(15)" uncertainty digits were folded into the
# value. The same pattern appears for b, c, alpha, beta and gamma below
# (e.g. 89.15(3) -> 89.153). Confirm the intended lattice parameters.
a=7.565015/0.5291772109217
b=7.750015/0.5291772109217
c=16.8353/0.5291772109217
alpha=89.153*(pi/180)
beta=78.423*(pi/180)
gamma=83.633*(pi/180)
# Ratio of the cif-reported cell volume to a*b*c (dimensionless check value).
cif_unit_cell_volume=960.9/(a*b*c*(0.5291772109217**3))
# Standard triclinic cell-volume factor V/(a*b*c) from the cell angles.
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
# Interaction cutoff (Bohr, presumably — confirm units against JMatrix).
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
prot_neut_cry().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
# Zero external field: only intra-crystal charges polarize the system.
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
# J-matrix: dipole-dipole interaction matrix within the cutoff.
jm = JMatrix(cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
# Energy terms converted from Hartree to eV (27.211 eV/Hartree).
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
# Same total back in Hartree (atomic units).
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
dstr=str(dd)
f.write(dstr)
f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
# NOTE(review): "time" shadows any time module import; harmless here since
# only gmtime/strftime were imported, but worth renaming.
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
# print header for reorgs
f = open('reorg_energies_%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\ta\tb\tc\tmolincell\tReorg(eV)')
f.flush()
f.close()
# REORGANISATION ENERGIES
#Note that this assumes a cube, and values for which
# Expanding shells: dist is the Chebyshev distance from the central cell;
# a/b/c sweep the cube of cells at that distance (inner cells revisited).
for dist in range(0,(length[0]/2)+1,1):
print '\n\nDIST: ', dist, '\n'
for a in range(prot_neut_cry()._cenpos[0]-dist,prot_neut_cry()._cenpos[0]+dist+1,1):
for b in range(prot_neut_cry()._cenpos[1]-dist,prot_neut_cry()._cenpos[1]+dist+1,1):
for c in range(prot_neut_cry()._cenpos[2]-dist,prot_neut_cry()._cenpos[2]+dist+1,1):
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'a,b,c',a,b,c
for molincell in range(0,len(prot_neut_cry()._mols),1):
# Reorganisation energy for moving the charge from the central
# molecule to molecule `molincell` of cell (a,b,c); results are
# appended to the csv immediately so partial runs retain data.
prot_neut_cry().calc_reorg(a1=prot_neut_cry()._cenpos[0],b1=prot_neut_cry()._cenpos[1],c1=prot_neut_cry()._cenpos[2],molincell1=0,a2=a,b2=b,c2=c,molincell2=molincell,dips=d,oldUqd=Uqd)
print 'Reorg: ', prot_neut_cry()._reorgs[molincell][a][b][c]
f = open('reorg_energies_%s_properties.csv' % name, 'a')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,a,b,c,molincell,prot_neut_cry()._reorgs[molincell][a][b][c]))
f.flush()
f.close()
# Redo this and overwrite after each set to ensure we have some even if not all reorgs complete
prot_neut_cry().print_reorgs()
print 'Job Completed Successfully.'
| [
"[email protected]"
] | |
c4903fe6cc73ed9888fa791de56a6e121c6445d0 | 37fa222d2ce4b227dfeeae0053b5110c24f0c595 | /17/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisM4000_R_0-3.py | bce53377e69348a7eb6d837b7dc54ee875322bfe | [] | no_license | colizz/fullRunII_ntuple | 8fffe7893ad80804c25444534b80edf3f1a09f97 | ec8c014e9502f12d060bf8198894f915adcee267 | refs/heads/master | 2020-08-04T07:02:35.210954 | 2019-09-30T00:47:37 | 2019-09-30T00:47:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,251 | py | from WMCore.Configuration import Configuration
name = 'WWW'
steam_dir = 'xulyu'
config = Configuration()
config.section_("General")
config.General.requestName = 'M4000_R0-3_off'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Fall17_17Nov2017_V8_MC_L1FastJet_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK4PFPuppi.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK4PFPuppi.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M4000-R0-3_TuneCP5_13TeV-madgraph/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v2/MINIAODSIM'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
#config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M4000_R0-3_off'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"[email protected]"
] | |
c4f6e56d2f5ffaf117a6a9a0dd55aca57da41e03 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02860/s539945526.py | 643abb9bdfe746d3b188f64eb579085746398507 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | a = int(input())
b = input()
if b[:int(a/2)] == b[int(a/2):]:
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
71ecc43cea2d8c1db7735a61332ee127a7689db7 | e81fabdd6988c787524755fac73aa9d3631fc64c | /polyaxon_schemas/specs/base.py | 34c10bb68168b352582267c8ec524cf227aa4122 | [
"MIT"
] | permissive | granularai/polyaxon-schemas | 0aa06f15b7353ceb6d31f1e5cf63c269ab0e2ce4 | 017ae74701f21f12f0b25e75379681ea5d8baa9e | refs/heads/master | 2022-08-30T00:05:40.888476 | 2020-05-19T17:22:46 | 2020-05-19T17:22:46 | 265,312,701 | 0 | 0 | MIT | 2020-05-19T17:16:38 | 2020-05-19T17:16:37 | null | UTF-8 | Python | false | false | 12,266 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import abc
import copy
import json
import six
from collections import Mapping
import rhea
from hestia.cached_property import cached_property
from hestia.list_utils import to_list
from marshmallow import EXCLUDE, ValidationError
from polyaxon_schemas.exceptions import PolyaxonConfigurationError, PolyaxonfileError
from polyaxon_schemas.ops import params as ops_params
from polyaxon_schemas.ops.environments.pods import EnvironmentConfig
from polyaxon_schemas.ops.operators import ForConfig, IfConfig
from polyaxon_schemas.specs import kinds
from polyaxon_schemas.specs.libs import validator
from polyaxon_schemas.specs.libs.parser import Parser
@six.add_metaclass(abc.ABCMeta)
class BaseSpecification(object):
"""Base abstract specification for plyaxonfiles and configurations."""
_SPEC_KIND = None
MAX_VERSION = 1 # Max Polyaxonfile specification version this CLI supports
MIN_VERSION = 1 # Min Polyaxonfile specification version this CLI supports
VERSION = 'version'
KIND = 'kind'
LOGGING = 'logging'
NAME = 'name'
DESCRIPTION = 'description'
TAGS = 'tags'
INPUTS = 'inputs'
OUTPUTS = 'outputs'
BACKEND = 'backend'
FRAMEWORK = 'framework'
HP_TUNING = 'hptuning'
DECLARATIONS = 'declarations'
PARAMS = 'params'
ENVIRONMENT = 'environment'
RUN = 'run'
BUILD = 'build'
SECTIONS = (
VERSION, KIND, NAME, DESCRIPTION, LOGGING, TAGS,
INPUTS, OUTPUTS, DECLARATIONS, PARAMS,
BACKEND, FRAMEWORK, ENVIRONMENT,
HP_TUNING, BUILD, RUN
)
STD_PARSING_SECTIONS = (BACKEND, FRAMEWORK, ENVIRONMENT, LOGGING, TAGS)
OP_PARSING_SECTIONS = (BUILD, RUN, )
HEADER_SECTIONS = (
VERSION, KIND, NAME, DESCRIPTION, LOGGING, TAGS,
)
GRAPH_SECTIONS = []
REQUIRED_SECTIONS = (
VERSION, KIND
)
POSSIBLE_SECTIONS = (
VERSION, KIND, LOGGING, TAGS, NAME, DESCRIPTION, INPUTS, OUTPUTS
)
OPERATORS = {
ForConfig.IDENTIFIER: ForConfig,
IfConfig.IDENTIFIER: IfConfig,
}
ENVIRONMENT_CONFIG = EnvironmentConfig
CONFIG = None
def __init__(self, values):
self._values = to_list(values)
try:
self._data = rhea.read(self._values)
except rhea.RheaError as e:
raise PolyaxonConfigurationError(e)
try:
self._config_data = self._get_config(self._data)
except ValidationError as e:
raise PolyaxonfileError(e)
self.check_data()
headers = Parser.get_headers(spec=self, data=self._data)
try:
self._headers = validator.validate_headers(spec=self, data=headers)
except ValidationError as e:
raise PolyaxonConfigurationError(e)
self._parsed_data = None
self._validated_data = None
self._config = None
self._extra_validation()
def _extra_validation(self):
pass
@cached_property
def config(self):
return self._config
@cached_property
def raw_config(self):
return self._config_data
def _get_config(self, data):
config = self.CONFIG.from_dict(copy.deepcopy(data))
ops_params.validate_params(params=config.params,
inputs=config.inputs,
outputs=config.outputs,
is_template=False,
is_run=True)
return config
def parse_data(self, context=None):
return self.apply_context(context=context)
def apply_context(self, context=None):
context = context or {}
params = self._config_data.get_params(context=context)
parsed_data = Parser.parse(self, self._config_data, params, None)
self._config = self._get_config(parsed_data)
self._parsed_data = parsed_data
return parsed_data
@classmethod
def check_version(cls, data):
if cls.VERSION not in data:
raise PolyaxonfileError("The Polyaxonfile `version` must be specified.")
if not cls.MIN_VERSION <= data[cls.VERSION] <= cls.MAX_VERSION:
raise PolyaxonfileError(
"The Polyaxonfile's version specified is not supported by your current CLI."
"Your CLI support Polyaxonfile versions between: {} <= v <= {}."
"You can run `polyaxon upgrade` and "
"check documentation for the specification.".format(
cls.MIN_VERSION, cls.MAX_VERSION))
@classmethod
def check_kind(cls, data):
if cls.KIND not in data:
raise PolyaxonfileError("The Polyaxonfile `kind` must be specified.")
if data[cls.KIND] not in kinds.KINDS:
raise PolyaxonfileError(
"The Polyaxonfile with kind `{}` is not a supported value.".format(data[cls.KIND]))
def check_data(self, data=None):
data = data or self._data
self.check_version(data)
self.check_kind(data)
if data[self.KIND] != self._SPEC_KIND:
raise PolyaxonfileError(
"The specification used `{}` is incompatible with the kind `{}`.".format(
self.__class__.__name__, data[self.KIND]))
for key in set(six.iterkeys(data)) - set(self.SECTIONS):
raise PolyaxonfileError(
"Unexpected section `{}` in Polyaxonfile version `{}`. "
"Please check the Polyaxonfile specification "
"for this version.".format(key, data[self.VERSION]))
for key in set(six.iterkeys(data)) - set(self.POSSIBLE_SECTIONS):
raise PolyaxonfileError(
"Unexpected section `{}` for specification kind `{}` version `{}`. "
"Please check the Polyaxonfile specification "
"for this version.".format(key, self._SPEC_KIND, data[self.VERSION]))
for key in self.REQUIRED_SECTIONS:
if key not in data:
raise PolyaxonfileError("{} is a required section for a valid Polyaxonfile".format(
key))
def patch(self, values):
values = [self._parsed_data] + to_list(values)
spec = self.read(values=values)
spec.apply_context()
return spec
@classmethod
def get_kind(cls, data):
cls.check_kind(data=data)
return data[cls.KIND]
@staticmethod
def check_kind_experiment(kind):
return kind == kinds.EXPERIMENT
@staticmethod
def check_kind_group(kind):
return kind == kinds.GROUP
@staticmethod
def check_kind_job(kind):
return kind == kinds.JOB
@staticmethod
def check_kind_notebook(kind):
return kind == kinds.NOTEBOOK
@staticmethod
def check_kind_tensorboard(kind):
return kind == kinds.TENSORBOARD
@staticmethod
def check_kind_build(kind):
return kind == kinds.BUILD
@staticmethod
def check_kind_pipeline(kind):
return kind == kinds.PIPELINE
@classmethod
def read(cls, values):
if isinstance(values, cls):
return values
return cls(values)
@cached_property
def is_experiment(self):
return self.check_kind_experiment(self.kind)
@cached_property
def is_group(self):
return self.check_kind_group(self.kind)
@cached_property
def is_job(self):
return self.check_kind_job(self.kind)
@cached_property
def is_notebook(self):
return self.check_kind_notebook(self.kind)
@cached_property
def is_tensorboard(self):
return self.check_kind_tensorboard(self.kind)
@cached_property
def is_build(self):
return self.check_kind_build(self.kind)
@cached_property
def is_pipeline(self):
return self.check_kind_pipeline(self.kind)
@property
def values(self):
return self._values
@cached_property
def data(self):
return self._data
@cached_property
def headers(self):
return self._headers
@cached_property
def parsed_data(self):
return self._parsed_data
@cached_property
def raw_data(self):
return json.dumps(self._data)
@cached_property
def version(self):
return self.headers[self.VERSION]
@cached_property
def kind(self):
return self.headers[self.KIND]
@cached_property
def logging(self):
return self.headers.get(self.LOGGING, None)
@cached_property
def log_level(self):
if self.logging:
return self.logging.level
return 'INFO'
@cached_property
def tags(self):
tags = self.headers.get(self.TAGS, None)
return list(set(tags)) if tags else None
class EnvironmentSpecificationMixin(object):
@cached_property
def environment(self):
return self._config_data.environment
@cached_property
def resources(self):
return self.environment.resources if self.environment else None
@cached_property
def labels(self):
return self.environment.labels if self.environment else None
@cached_property
def annotations(self):
return self.environment.annotations if self.environment else None
@cached_property
def artifact_refs(self):
return self.environment.artifact_refs if self.environment else None
@cached_property
def data_refs(self):
return self.environment.data_refs if self.environment else None
@cached_property
def secret_refs(self):
return self.environment.secret_refs if self.environment else None
@cached_property
def config_map_refs(self):
return self.environment.config_map_refs if self.environment else None
@cached_property
def node_selector(self):
return self.environment.node_selector if self.environment else None
@cached_property
def affinity(self):
return self.environment.affinity if self.environment else None
@cached_property
def tolerations(self):
return self.environment.tolerations if self.environment else None
@cached_property
def outputs(self):
return self.environment.outputs if self.environment else None
@cached_property
def max_restarts(self):
return self.environment.max_restarts if self.environment else None
class BaseRunSpecification(BaseSpecification, EnvironmentSpecificationMixin):
"""The polyaxonfile specification for build jobs.
SECTIONS:
VERSION: defines the version of the file to be parsed and validated.
LOGGING: defines the logging
TAGS: defines the tags
ENVIRONMENT: defines the run environment for experiment.
BUILD: defines the build step where the user can set a docker image definition
"""
_SPEC_KIND = kinds.BUILD
HEADER_SECTIONS = BaseSpecification.HEADER_SECTIONS + (BaseSpecification.BACKEND, )
POSSIBLE_SECTIONS = BaseSpecification.POSSIBLE_SECTIONS + (
BaseSpecification.ENVIRONMENT, BaseSpecification.BUILD, BaseSpecification.BACKEND,
)
@cached_property
def build(self):
return self.config.build
@classmethod
def create_specification(cls, # pylint:disable=arguments-differ
build_config,
to_dict=True):
from polyaxon_schemas.ops.build_job import BuildConfig
if isinstance(build_config, BuildConfig):
b_config = build_config.to_light_dict()
elif isinstance(build_config, Mapping):
# Since the objective is to create the build spec from other specs
# we drop any extra attrs
b_config = BuildConfig.from_dict(build_config, unknown=EXCLUDE)
b_config = b_config.to_light_dict()
else:
raise PolyaxonConfigurationError(
'Create specification expects a dict or an instance of BuildConfig.')
specification = {
cls.VERSION: 1,
cls.KIND: cls._SPEC_KIND,
cls.BUILD: b_config,
}
if to_dict:
return specification
return cls.read(specification)
| [
"[email protected]"
] | |
c29e43cc150ebaddaacdafbc5af0227a5f1666e4 | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/lib/googlecloudsdk/api_lib/cloudkms/cryptokeyversions.py | fb554167e913c732d12bab736a12c9d70072871b | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 1,597 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for CryptoKeyVersions."""
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
def SetState(version_ref, state):
"""Update the state of a CryptoKeyVersion.
Args:
version_ref: A resources.Resource for the CryptoKeyVersion.
state: an apitools enum for ENABLED or DISABLED state.
Returns:
The updated CryptoKeyVersion.
"""
client = cloudkms_base.GetClientInstance()
messages = cloudkms_base.GetMessagesModule()
req = messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchRequest( # pylint: disable=line-too-long
projectsId=version_ref.projectsId,
locationsId=version_ref.locationsId,
keyRingsId=version_ref.keyRingsId,
cryptoKeysId=version_ref.cryptoKeysId,
cryptoKeyVersionsId=version_ref.cryptoKeyVersionsId,
updateMask='state',
cryptoKeyVersion=messages.CryptoKeyVersion(state=state))
return client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions.Patch(
req)
| [
"[email protected]"
] | |
3c5e0cf2977035d9ba3c53d6d0a367c274d8c0f1 | 9320f83e6006a7879df2fe9f3a16620b66becf65 | /src/n8scripts/n8pushover.py | 956a3a822de251e2166d398117b5f925639a112a | [
"MIT"
] | permissive | n8henrie/n8scripts | e34a8d06252e30044815af401560322278ef23b2 | 7b79b2c4b7c5e6ef23aad4c2181f3b3886cdd7a8 | refs/heads/master | 2021-01-25T11:02:55.251973 | 2019-03-12T20:51:10 | 2019-03-12T20:51:10 | 93,908,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,325 | py | """n8pushover.py
A quick implementation of the Pushover API in Python, using an envvar or the
keyring module (desktop) or Pythonista's keychain module (iOS) to store
credentials.
Usage:
from n8scripts.n8pushover import push
push("It's alive!")
push("This one has a title.", title="My awesome title.")
"""
import __main__
import argparse
import http
import os.path
import subprocess
import sys
import typing
import urllib.parse
import urllib.request
class OSXSecurity:
"""Uses the MacOS `security` command following the keyring library API."""
def __init__(self):
"""Ensure platform is `darwin`."""
if sys.platform != "darwin":
raise OSError(f"{self.__class__ } can only run on MacOS (darwin)")
def get_password(self, service: str, account: str) -> str:
"""Use keychain API to get password for service / account."""
cmd = f"security find-generic-password -s {service} -a {account} -w"
process = subprocess.run(cmd.split(), stdout=subprocess.PIPE)
data = process.stdout
return data.decode("utf8").strip()
try:
import keychain
except ImportError:
try:
import keychain
except ImportError:
keychain = OSXSecurity()
def get_credentials() -> typing.Tuple[str, str]:
"""Get Pushover user and api_token."""
try:
user = os.environ["PUSHOVER_USER"]
api_token = os.environ["PUSHOVER_API_TOKEN"]
except KeyError:
user = keychain.get_password("pushover", "user")
api_token = keychain.get_password("pushover", "api_token")
return user, api_token
def push(
message,
user: str = None,
api_token: str = None,
device: str = None,
title: str = None,
url: str = None,
url_title: str = None,
priority: str = None,
timestamp: str = None,
sound: str = None,
) -> typing.Union[http.client.HTTPResponse, typing.BinaryIO]:
"""Pushes the notification.
API Reference: https://pushover.net/api
Args:
message: Your message
user: The user/group key (not e-mail address) of your user (or you),
viewable when logged into our dashboard (often referred to as
USER_KEY in our documentation and code examples)
api_token: Your application's API token
device: Your user's device name to send the message directly to that
device, rather than all of the user's devices
title: Your message's title, otherwise your app's name is used
url: A supplementary URL to show with your message
url_title: A title for your supplementary URL, otherwise just the URL
is shown
priority: Send as:1 to always send as a quiet notification, 1 to
display as high--priority and bypass the user's quiet
hours, or 2 to also require confirmation from the user
timestamp: A Unix timestamp of your message's date and time to
display to the user, rather than the time your message is
received by our API
sound: The name of one of the sounds supported by device clients to
override the user's default sound choice
Returns:
HTTP response from API call
"""
if user is None or api_token is None:
user, api_token = get_credentials()
api_url = "https://api.pushover.net/1/messages.json"
if title is None:
if getattr(__main__, "__file__", None):
title = os.path.basename(__main__.__file__)
else:
title = "n8scripts"
payload_dict = {
"token": api_token,
"user": user,
"message": message,
"device": device,
"title": title,
"url": url,
"url_title": url_title,
"priority": priority,
"timestamp": timestamp,
"sound": sound,
}
payload = urllib.parse.urlencode({k: v for k, v in payload_dict.items() if v})
with urllib.request.urlopen(api_url, data=payload.encode()) as resp:
return resp
def cli() -> None:
"""Collect command line args and run push."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
argument_default=argparse.SUPPRESS,
)
parser.add_argument("message", help="Your message")
parser.add_argument(
"-u",
"--user",
help=("The user/group key (not e-mail address) of " "your user (or you)"),
)
parser.add_argument("-a", "--api-token", help="Your application's API token")
parser.add_argument(
"-d",
"--device",
help=(
"Your user's device name to send the message "
"directly to that device, rather than all of "
"the user's devices (multiple devices may be "
"separated by a comma)"
),
)
parser.add_argument(
"-t",
"--title",
help=("Your message's title, otherwise your app's " "name is used"),
)
parser.add_argument(
"-k", "--url", help="A supplementary URL to show with your message"
)
parser.add_argument(
"-l",
"--url_title",
help=("A title for your supplementary URL, otherwise " "just the URL is shown"),
)
parser.add_argument(
"-p",
"--priority",
help=(
"Send as -2 to generate no notification/alert, "
"-1 to always send as a quiet notification, 1 "
"to display as high-priority and bypass the "
"user's quiet hours, or 2 to also require "
"confirmation from the user"
),
)
parser.add_argument(
"-m",
"--timestamp",
help=(
"A Unix timestamp of your message's date and "
"time to display to the user, rather than the "
"time your message is received by our API"
),
)
parser.add_argument(
"-s",
"--sound",
help=(
"The name of one of the sounds supported by "
"device clients to override the user's default "
"sound choice"
),
)
namespace = parser.parse_args()
args = {k: v for k, v in vars(namespace).items() if v}
push(**args)
if __name__ == "__main__":
cli()
| [
"[email protected]"
] | |
9e5d840e0bcb9bf3ab4aa6ccd3172973d8c3ce34 | 902e0bcd7abd0eafb1daf820f5009e632bfe9141 | /courses/migrations/0001_initial.py | 3d65d04b748eb39bcd4d755cb7a4d05f62aacabd | [] | no_license | ihfazhillah/educa-lms | 1ba4aebcfc7b68b6b80c3cacff0eeabb3024344b | e0c4ef46a147cc187297291db5adf78cc7da617d | refs/heads/master | 2020-03-28T09:38:34.998747 | 2018-09-22T16:03:49 | 2018-09-22T16:03:49 | 148,048,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,027 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('title', models.CharField(max_length=200)),
('slug', models.SlugField(max_length=200, unique=True)),
('overview', models.TextField(blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('owner', models.ForeignKey(related_name='courses_created', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='Module',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('title', models.CharField(max_length=200)),
('description', models.TextField(blank=True)),
('course', models.ForeignKey(related_name='courses', to='courses.Course')),
],
),
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('title', models.CharField(max_length=200)),
('slug', models.SlugField(max_length=200, unique=True)),
],
options={
'ordering': ('title',),
},
),
migrations.AddField(
model_name='course',
name='subject',
field=models.ForeignKey(related_name='courses', to='courses.Subject'),
),
]
| [
"[email protected]"
] | |
5f9bd9bb49499a97c6bc43ca09d6cbf41a34a357 | 68c49c51d04aa8c87e673784659088c1a5e4aeea | /database_reader/physionet_databases/capslpdb.py | 736f9190ce6652beb0c6940ec11e2e8869438217 | [
"MIT"
] | permissive | wenh06/database_reader | 9b1773c824ab62213e27f9e8c4144c098a13e410 | 784ea882e78791979ab020da403b97ea50b9d075 | refs/heads/master | 2023-06-05T09:32:48.164050 | 2021-06-24T09:45:33 | 2021-06-24T09:45:33 | 370,729,791 | 0 | 0 | MIT | 2021-05-25T14:52:58 | 2021-05-25T14:52:57 | null | UTF-8 | Python | false | false | 4,410 | py | # -*- coding: utf-8 -*-
"""
"""
import os
from datetime import datetime
from typing import Union, Optional, Any, List, NoReturn
from numbers import Real
import wfdb
import numpy as np
np.set_printoptions(precision=5, suppress=True)
import pandas as pd
from ..utils.common import (
ArrayLike,
get_record_list_recursive,
)
from ..base import PhysioNetDataBase
__all__ = [
"CAPSLPDB",
]
class CAPSLPDB(PhysioNetDataBase):
""" NOT finished,
CAP Sleep Database
ABOUT capslpdb
--------------
1. contains 108 polysomnographic (PSG) recordings, including 16 healthy subjects and 92 pathological recordings, in EDF format, NOT the usual wfdb .dat format
2. The 92 pathological recordings include 40 recordings of patients diagnosed with nocturnal frontal lobe epilepsy (NFLE), 22 affected by REM behavior disorder (RBD), 10 with periodic leg movements (PLM), 9 insomniac, 5 narcoleptic, 4 affected by sleep-disordered breathing (SDB) and 2 by bruxism
3.
NOTE
----
1. background knowledge aboute CAP:
The Cyclic Alternating Pattern (CAP) is a periodic EEG activity occurring during NREM sleep. It is characterized by cyclic sequences of cerebral activation (phase A) followed by periods of deactivation (phase B) which separate two successive phase A periods with an interval <1 min. A phase A period and the following phase B period define a CAP cycle, and at least two CAP cycles are required to form a CAP sequence
ISSUES
------
Usage
-----
1. sleep stage
1. sleep cyclic alternating pattern
References
----------
[1] https://physionet.org/content/capslpdb/1.0.0/
"""
def __init__(self, db_dir:Optional[str]=None, working_dir:Optional[str]=None, verbose:int=2, **kwargs:Any) -> NoReturn:
"""
Parameters
----------
db_dir: str, optional,
storage path of the database
if not specified, data will be fetched from Physionet
working_dir: str, optional,
working directory, to store intermediate files and log file
verbose: int, default 2,
log verbosity
kwargs: auxilliary key word arguments
"""
super().__init__(db_name="capslpdb", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)
self.data_ext = "edf"
self.ann_ext = "st"
self.alias_ann_ext = "txt"
self.fs = None # psg data with different frequencies for each signal
self._ls_rec()
def _ls_rec(self, local:bool=True) -> NoReturn:
""" finished, checked,
find all records (relative path without file extension),
and save into `self._all_records` for further use
Parameters
----------
local: bool, default True,
if True, read from local storage, prior to using `wfdb.get_record_list`
"""
try:
super()._ls_rec(local=local)
except:
self._all_records = [
"brux1", "brux2",
"ins1", "ins2", "ins3", "ins4", "ins5", "ins6", "ins7", "ins8", "ins9",
"n10", "n11", "n12", "n13", "n14", "n15", "n16",
"n1", "n2", "n3", "n4", "n5", "n6", "n7", "n8", "n9",
"narco1", "narco2", "narco3", "narco4", "narco5",
"nfle10", "nfle11", "nfle12", "nfle13", "nfle14", "nfle15", "nfle16",
"nfle17", "nfle18", "nfle19", "nfle1", "nfle20", "nfle21", "nfle22",
"nfle23", "nfle24", "nfle25", "nfle26", "nfle27", "nfle28", "nfle29",
"nfle2", "nfle30", "nfle31", "nfle32", "nfle33", "nfle34", "nfle35",
"nfle36", "nfle37", "nfle38", "nfle39", "nfle3", "nfle40", "nfle4",
"nfle5", "nfle6", "nfle7", "nfle8", "nfle9",
"plm10", "plm1", "plm2", "plm3", "plm4", "plm5", "plm6", "plm7", "plm8", "plm9",
"rbd10", "rbd11", "rbd12", "rbd13", "rbd14", "rbd15", "rbd16", "rbd17",
"rbd18", "rbd19", "rbd1", "rbd20", "rbd21", "rbd22", "rbd2", "rbd3", "rbd4",
"rbd5", "rbd6", "rbd7", "rbd8", "rbd9",
"sdb1", "sdb2", "sdb3", "sdb4",
]
def get_subject_id(self, rec) -> int:
"""
"""
raise NotImplementedError
def database_info(self) -> NoReturn:
"""
"""
print(self.__doc__)
| [
"[email protected]"
] | |
389460e272923131109704dd69233dfb92abaa37 | a4a01e251b194f6d3c6654a2947a33fec2c03e80 | /PythonWeb/Flask/1809Flask/Flaskday02/flaskdemo02/run.py | dbf71b23902098d9f244460512522d83331dd8f8 | [] | no_license | demo112/1809 | 033019043e2e95ebc637b40eaf11c76bfd089626 | e22972229e5e7831dce2aae0b53ce19a6e3bb106 | refs/heads/master | 2020-04-09T07:10:49.906231 | 2019-02-27T13:08:45 | 2019-02-27T13:08:45 | 160,143,869 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route("/01-selftemp")
def selftemp():
html = "<!doctype html>"
html += "<html>"
html += "<head>"
html += "<title>"
html += "我自己的模版"
html += "</title>"
html += "</head>"
html += "<body>"
html += "<h1 color=red>"
html += "这是我第一个模版"
html += "</h1>"
html += "</body>"
html += "</html>"
return html
@app.route("/02-temp")
def template_views():
# html = render_template('index.html')
html = render_template(
'02-temp.html',
name="wangwc",
age=35,
gender="male")
return html
@app.route("/03-temp")
def template_views2():
html = render_template('03-temp.html', name1='歌名:《绿光》', name2='作词:宝强', name3='作词:奶亮', name4='演唱:羽凡')
return html
@app.route("/04-temp")
def template_views3():
name1 = '歌名:《绿光》'
name2 = '作词:宝强'
name3 = '作词:奶亮'
name4 = '演唱:羽凡'
html = render_template('04-temp.html', params=locals())
return html
@app.route("/04-var")
def var():
pass
uname = '他爸爸'
delay = 880
lis = ['阿珂', '兰陵王', ' 孙悟空']
tup = ('阿珂', '兰陵王', ' 孙悟空')
dic = {
'AK': '阿珂', 'LLW': '兰陵王', 'WZJ': ' 孙悟空'
}
game = Game()
print(locals())
return render_template('04-var.html', params=locals())
@app.route("/05-filter")
def filter1():
ustr = "this is a test string"
return render_template("05-filter.html", params=locals())
@app.route("/05-macro")
def marco():
lis = ["孙悟空", "西门庆", "刘姥姥", "小乔"]
return render_template("05-macro.html", list=lis)
@app.route("/image")
def image():
return render_template("image.html")
class Game(object):
group = '深渊'
def prt(self):
return "测试内容" + self.group
if __name__ == "__main__":
app.run(debug=True)
| [
"[email protected]"
] | |
aa7534f4669baf9ee235e5cc5d793cae77d48129 | 4a4352800a7d9f26c4f2cd6c7e00a54e4fdc6517 | /Filters/BaseFilter.py | d5566bee291fef7664eb1e5b088c7e1c8561d69e | [] | no_license | nag92/ExoServer | 914e9b8b03a0c29211d1c1b6f22113cbf8924ad0 | d9006db8cf821fe0c552df13958797456d7ff0e2 | refs/heads/master | 2023-01-23T17:59:16.080404 | 2020-10-29T17:24:07 | 2020-10-29T17:24:07 | 261,233,186 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | import abc
from Sensors import Sensor
class BaseFilter(object):
    """Common base for signal filters.

    Keeps a reference to the sensor being filtered plus a (initially
    empty) history buffer; concrete filters override :meth:`update`.
    """

    def __init__(self, sensor):
        """Remember the source sensor and start with an empty history.

        :param sensor: sensor whose readings will be filtered
        :type sensor: Sensor.Sensor
        """
        self.values = []
        self.sensor = sensor

    @abc.abstractmethod
    def update(self, value):
        """Feed a new reading through the filter.

        The base implementation is a pass-through: the reading is
        returned unchanged.

        :param value: new reading
        :return: filtered reading
        """
        return value
| [
"[email protected]"
] | |
0cbbf3590949fb9230b951dfa529e4a582a7587d | 176839e6f94e593fb957f0af1bd5682c95e44f8f | /exoplanet/theano_ops/celerite/factor_rev.py | 552d24cd4b7691d849b6b32a0ecd977c8a84b86e | [
"MIT"
] | permissive | Junjun1guo/exoplanet | 8a0a9d4deb351744a78db54801c4a9d9834e7f7a | 5df07b16cf7f8770f02fa53598ae3961021cfd0f | refs/heads/master | 2020-05-17T17:51:14.836055 | 2019-04-26T20:10:28 | 2019-04-26T20:10:28 | 183,867,012 | 2 | 0 | null | 2019-04-28T06:38:30 | 2019-04-28T06:38:30 | null | UTF-8 | Python | false | false | 381 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["FactorRevOp"]
from .base_op import CeleriteBaseOp
class FactorRevOp(CeleriteBaseOp):
    """Reverse-mode (gradient) companion of the celerite factorization op."""
    # C++ source file and kernel entry point compiled by the base op.
    func_file = "./factor_rev.cc"
    func_name = "APPLY_SPECIFIC(factor_rev)"
    # Consumes 7 inputs and emits 4 outputs: one vector (ndim 1) and three
    # matrices (ndim 2).
    num_input = 7
    output_ndim = (1, 2, 2, 2)
    def __init__(self, J=-1):
        # J: celerite width parameter forwarded to the base op; -1 appears to
        # mean "unspecified" — confirm in CeleriteBaseOp.
        super(FactorRevOp, self).__init__(J=J)
| [
"[email protected]"
] | |
c00a910073398520cb97fc2609c3d5f4d8934baa | 3af8dfb5bc0a759f7237f10504dd28dfc2489d7e | /api/allennlp_demo/roberta_sentiment_analysis/test_api.py | 680638c082a6da1b492e39d0f2cf0f1dd9e3e925 | [
"Apache-2.0"
] | permissive | allenai/allennlp-demo | a710fca880b8de9d829790b7161fe8465deb15cc | afa862f1b473331f1157c1ee158ea202425fb10d | refs/heads/main | 2023-08-31T22:20:03.464642 | 2023-01-20T20:01:13 | 2023-01-20T20:01:13 | 136,056,285 | 200 | 93 | Apache-2.0 | 2023-01-16T17:14:38 | 2018-06-04T16:53:08 | TypeScript | UTF-8 | Python | false | false | 373 | py | from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.roberta_sentiment_analysis.api import RobertaSentimentAnalysisModelEndpoint
class TestRobertaSentimentAnalysisModelEndpoint(ModelEndpointTestCase):
    """Exercises the RoBERTa sentiment endpoint through the shared
    ModelEndpointTestCase machinery."""
    # Endpoint under test, instantiated once at class-definition time.
    endpoint = RobertaSentimentAnalysisModelEndpoint()
    # Sample payload; presumably consumed by the base class's predict
    # test — confirm in ModelEndpointTestCase.
    predict_input = {"sentence": "a very well-made, funny and entertaining picture."}
| [
"[email protected]"
] | |
b8d5dad94992b55574b60c2e52fc6f49923f7f1c | a458e773d46ad88725f07bdf9ac07d9608ddfb47 | /pages/views.py | a9240ed5511ee80c9ad873e5611146db5a21ccaa | [] | no_license | SonerArslan2019/egitim_sitesi | 82fd8dbe2bad593094a9caf85f06c7f86c96b064 | 8f5cabd5190334f47ef5beda10f8513be6ff9672 | refs/heads/master | 2023-03-15T09:18:38.771211 | 2021-03-15T18:13:46 | 2021-03-15T18:13:46 | 346,806,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | from django.shortcuts import render
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from courses.models import Course
from . forms import ContactForm
from django.urls import reverse_lazy
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.models import User
from teachers.models import Teacher
class IndexView(TemplateView):
    """Landing page: the two most recent available courses plus site-wide
    totals (courses, students, teachers)."""
    template_name = 'index.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Two newest available courses (descending date) for the highlight section.
        context['courses'] = Course.objects.filter(available=True).order_by('-date')[:2]
        # NOTE(review): the available-course filter is built twice (listing +
        # count); a shared queryset would avoid the duplication.
        context['total_course'] = Course.objects.filter(available=True).count()
        # Counts every User row as a student — confirm staff accounts should count.
        context['total_students'] = User.objects.count()
        context['total_teachers'] = Teacher.objects.count()
        return context
#def index(request):
# return render(request, 'index.html')
class AboutView(TemplateView):
    """Static 'about' page."""
    template_name = 'about.html'
#def about(request):
# return render(request, 'about.html')
class ContactView(SuccessMessageMixin, FormView):
    """Contact form page; saves each valid submission and flashes a
    confirmation message."""
    template_name = 'contact.html'
    form_class = ContactForm
    # Redirect back to this same page after a successful POST.
    success_url = reverse_lazy('contact')
    success_message = 'We received your request'
    def form_valid(self, form):
        # Persist the submission before FormView handles the redirect.
        form.save()
        return super().form_valid(form)
"[email protected]"
] | |
1198b591a26c7bda00f1a072d5be35b687aba6e0 | 5a3547772b61f7d1b3a81f76dd1397eb92c68e7b | /slbo/envs/mujoco/ant_task_env.py | e819bb593f60567ec02d372d834e8d5847400f92 | [
"MIT"
] | permissive | suen049/AdMRL | 483440f0ded14e471d879b300da9afbab68fbe66 | 50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | refs/heads/master | 2023-03-12T23:15:05.154003 | 2021-03-06T15:31:21 | 2021-03-06T15:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,636 | py | import math
import numpy as np
from rllab.envs.mujoco import ant_task_env
from rllab.envs.base import Step
from slbo.envs import BaseModelBasedEnv
AntTaskConfig = ant_task_env.AntTaskConfig
class AntTaskEnv(ant_task_env.AntTaskEnv, BaseModelBasedEnv):
    """Ant locomotion task with a configurable goal velocity, extended with a
    batched, model-based reward recomputation (`mb_step`)."""

    def get_current_obs(self):
        # Observation: joint positions/velocities plus torso rotation matrix,
        # torso COM position and COM velocity, flattened to 1-D.  The element
        # counts in the trailing comments are the original author's; they
        # depend on the MuJoCo model — confirm before relying on them.
        return np.concatenate([
            self.model.data.qpos.flat,  # 15
            self.model.data.qvel.flat,  # 14
            # np.clip(self.model.data.cfrc_ext, -1, 1).flat,  # 84
            self.get_body_xmat("torso").flat,  # 9
            self.get_body_com("torso"),  # 9
            self.get_body_comvel("torso"),  # 3
        ]).reshape(-1)

    def step(self, action):
        """Advance the simulation one step and return (obs, reward, done)."""
        self.forward_dynamics(action)
        comvel = self.get_body_comvel("torso")
        # Goal of -inf / +inf rewards running as fast as possible in the
        # -x / +x direction; a finite goal rewards tracking that velocity.
        if self._task_config.goal_velocity == -math.inf:
            forward_reward = -1 * comvel[0]
        elif self._task_config.goal_velocity == math.inf:
            forward_reward = comvel[0]
        else:
            forward_reward = -np.abs(comvel[0] - self._task_config.goal_velocity) + 1.0
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        # Quadratic control penalty on the (normalized) action.
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        contact_cost = 0.
        # contact_cost = 0.5 * 1e-3 * np.sum(
        #     np.square(np.clip(self.model.data.cfrc_ext, -1, 1))),
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self._state
        # Episode ends if the state blows up or the torso height leaves [0.2, 1.0].
        notdone = np.isfinite(state).all() and state[2] >= 0.2 and state[2] <= 1.0
        done = not notdone
        ob = self.get_current_obs()
        return Step(ob, float(reward), done)

    def mb_step(self, states: np.ndarray, actions: np.ndarray, next_states: np.ndarray):
        """Recompute rewards/done flags for batches of (s, a, s') transitions.

        Mirrors `step` but reads COM velocity from the last 3 observation
        components and works element-wise over a leading batch dimension.
        Returns (reward, done) where done is 1.0 for terminated transitions.
        """
        comvel = next_states[..., -3:]
        if self._task_config.goal_velocity == -math.inf:
            forward_reward = -1 * comvel[..., 0]
        elif self._task_config.goal_velocity == math.inf:
            forward_reward = comvel[..., 0]
        else:
            forward_reward = -np.abs(comvel[..., 0] - self._task_config.goal_velocity) + 1.0
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(actions / scaling), axis=-1)
        contact_cost = 0.
        # contact_cost = 0.5 * 1e-3 * np.sum(
        #     np.square(np.clip(self.model.data.cfrc_ext, -1, 1))),
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        # Same height-window termination as step(); observation index 2 is
        # assumed to be torso height — confirm against get_current_obs layout.
        notdone = np.all([next_states[..., 2] >= 0.2, next_states[..., 2] <= 1.0], axis=0)
        return reward, 1. - notdone
| [
"[email protected]"
] | |
96ed4243504b965ffe7bb44a77193977ffd463fd | 96cb01cdbef51a9da25e1de68c7318572b69510f | /test/test_coupling.py | 2e2904e528f87e46a22a7a510cc250e258921084 | [] | no_license | pobot-pybot/pybot-youpi2 | e269efffb98083fc51b6d947dc8278bf644d4092 | a93a9acf40814583ba6816d265cc18a1bb61a72f | refs/heads/master | 2021-01-18T19:45:55.018721 | 2016-10-13T21:24:30 | 2016-10-13T21:24:30 | 69,095,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,639 | py | import unittest
from pybot.youpi2.model import YoupiArm
class JointToMotorTestCase(unittest.TestCase):
    """Checks YoupiArm.joint_to_motor, which rewrites joint-space angles
    into motor angles in place."""
    def test_01(self):
        """A base-only rotation passes through unchanged."""
        angles = {
            YoupiArm.MOTOR_BASE: 10
        }
        angles_orig = angles.copy()
        YoupiArm.joint_to_motor(angles)
        self.assertDictEqual(angles, angles_orig)
    def test_02(self):
        """Shoulder motion propagates to elbow and wrist, with the hand
        rotation compensating in the opposite direction — presumably the
        arm's mechanical coupling; confirm in YoupiArm."""
        angles = {
            YoupiArm.MOTOR_BASE: 0,
            YoupiArm.MOTOR_SHOULDER: 10,
            YoupiArm.MOTOR_ELBOW: 0,
            YoupiArm.MOTOR_WRIST: 0,
            YoupiArm.MOTOR_HAND_ROT: 0,
        }
        YoupiArm.joint_to_motor(angles)
        self.assertDictEqual(angles, {
            YoupiArm.MOTOR_BASE: 0,
            YoupiArm.MOTOR_SHOULDER: 10,
            YoupiArm.MOTOR_ELBOW: 10,
            YoupiArm.MOTOR_WRIST: 10,
            YoupiArm.MOTOR_HAND_ROT: -10,
        })
class MotorToJointTestCase(unittest.TestCase):
    """Checks YoupiArm.motor_to_joint, the inverse of joint_to_motor:
    it removes the propagated coupling from motor angles, in place."""
    def test_01(self):
        """A base-only rotation passes through unchanged."""
        angles = {
            YoupiArm.MOTOR_BASE: 10
        }
        angles_orig = angles.copy()
        YoupiArm.motor_to_joint(angles)
        self.assertDictEqual(angles, angles_orig)
    def test_02(self):
        """Equal shoulder/elbow/wrist angles with a compensating hand
        rotation collapse to a pure shoulder movement."""
        angles = {
            YoupiArm.MOTOR_SHOULDER: 10,
            YoupiArm.MOTOR_ELBOW: 10,
            YoupiArm.MOTOR_WRIST: 10,
            YoupiArm.MOTOR_HAND_ROT: -10,
        }
        YoupiArm.motor_to_joint(angles)
        self.assertDictEqual(angles, {
            YoupiArm.MOTOR_SHOULDER: 10,
            YoupiArm.MOTOR_ELBOW: 0,
            YoupiArm.MOTOR_WRIST: 0,
            YoupiArm.MOTOR_HAND_ROT: 0,
        })
    def test_03(self):
        """Mixed angles: each joint angle is the motor angle minus the
        accumulated contribution of the joints before it."""
        angles = {
            YoupiArm.MOTOR_SHOULDER: 10,
            YoupiArm.MOTOR_ELBOW: 20,
            YoupiArm.MOTOR_WRIST: 30,
            YoupiArm.MOTOR_HAND_ROT: -50,
        }
        YoupiArm.motor_to_joint(angles)
        self.assertDictEqual(angles, {
            YoupiArm.MOTOR_SHOULDER: 10,
            YoupiArm.MOTOR_ELBOW: 10,
            YoupiArm.MOTOR_WRIST: 10,
            YoupiArm.MOTOR_HAND_ROT: -20,
        })
class GlobalToLocalTestCase(unittest.TestCase):
    """Checks YoupiArm.global_to_local, the list-based (6-element) variant
    of the motor→joint conversion; unlike motor_to_joint it returns a new
    list instead of mutating its argument."""
    def test_01(self):
        """Base-only rotation is unchanged."""
        _global = [10, 0, 0, 0, 0, 0]
        _local = YoupiArm.global_to_local(_global)
        self.assertEqual(_local, _global)
    def test_02(self):
        """Uniform coupled motion collapses to a pure shoulder movement."""
        _global = [0, 10, 10, 10, -10, 0]
        _local = YoupiArm.global_to_local(_global)
        self.assertEqual(_local, [0, 10, 0, 0, 0, 0])
    def test_03(self):
        """Mixed angles decouple the same way motor_to_joint does."""
        _global = [0, 10, 20, 30, -50, 0]
        _local = YoupiArm.global_to_local(_global)
        self.assertEqual(_local, [0, 10, 10, 10, -20, 0])
if __name__ == '__main__':
    # Standard unittest entry point: discovers and runs the cases above.
    unittest.main()
| [
"[email protected]"
] | |
7fd91a257c9d2ed17a8aef21c95ccf7cf487178c | 62b2a1a9cea1662a86fa410fe91448ad6805d7b5 | /variability/plot_agn_tau_distribution.py | 5fd1d86f1494be77ada773b1f7a2dbe325fecb92 | [] | no_license | danielsf/CatSimMaintenance | f15dd74486f48c740bce2f4e3b6fdb60ab5d8c6f | 6f17f96b189aa0f860d316ffbe58483926123f4c | refs/heads/master | 2018-07-18T22:12:23.005153 | 2018-06-01T21:47:39 | 2018-06-01T21:47:39 | 105,593,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,578 | py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def make_histogram(xx_in, dmag, cut_off, min_val=None, cumulative=True):
    """Bin ``xx_in`` into a histogram with bin width ``dmag``.

    Values above ``cut_off + dmag`` are discarded before binning.  When
    ``min_val`` is None the grid is anchored one bin below the smallest
    surviving value.

    Parameters
    ----------
    xx_in : numpy.ndarray
        Input sample values.
    dmag : float
        Bin width.
    cut_off : float
        Upper limit; only values <= cut_off + dmag are counted.
    min_val : float, optional
        Left anchor used to index the bins; defaults to ``min(kept) - dmag``.
    cumulative : bool
        If True, return counts as fractions of ``len(xx_in)`` — note this
        normalizes by the *full* input length, including discarded values —
        otherwise raw integer counts.

    Returns
    -------
    (grid, counts) : tuple of numpy.ndarray
        Occupied bin positions (``index * dmag + min_val``) and their counts.
    """
    # Boolean masking replaces the redundant np.where() wrapper; a dead,
    # Python-2-style commented print was dropped.
    xx = xx_in[xx_in <= cut_off + dmag]
    if min_val is None:
        min_val = xx.min() - dmag
    bin_idx = np.round((xx - min_val) / dmag).astype(int)
    unique_idx, counts = np.unique(bin_idx, return_counts=True)
    grid = unique_idx * dmag + min_val
    if cumulative:
        return grid, counts.astype(float) / float(len(xx_in))
    return grid, counts.astype(int)
if __name__ == "__main__":
    # Expects two whitespace-separated float columns: redshift z and AGN
    # variability timescale tau.
    dtype = np.dtype([('z', float), ('tau', float)])
    data = np.genfromtxt('agn_tau_distribution.txt', dtype=dtype)
    # NOTE(review): this only sets an unused attribute on the pyplot module;
    # sizing a figure requires plt.figure(figsize=...).
    plt.figsize=(30,30)
    # Compare log10(tau) against the rest-frame log10(tau / (1 + z)).
    tau_renorm = np.log10(data['tau']/(1.0+data['z']))
    tau = np.log10(data['tau'])
    tau_min = tau.min()
    tau_max = tau.max()
    tau_renorm_min = tau_renorm.min()
    tau_renorm_max = tau_renorm.max()
    # Combined range over both distributions (tau_min ends up unused below).
    tau_min = min(tau_min, tau_renorm_min)
    tau_max = max(tau_max, tau_renorm_max)
    dtau = 0.1
    tau_grid, tau_hist = make_histogram(tau, dtau, tau_max+dtau,
                                        cumulative=False)
    (tau_renorm_grid,
     tau_renorm_hist) = make_histogram(tau_renorm, dtau, tau_max+dtau,
                                       cumulative=False)
    # Overlay both histograms and save to disk (Agg backend: no window).
    t_l, = plt.plot(tau_grid, tau_hist)
    t_r_l, = plt.plot(tau_renorm_grid, tau_renorm_hist)
    plt.legend([t_l, t_r_l],['$\\tau$', '$\\tau/(1+z)$'], loc=0)
    plt.xlim(0, 5)
    plt.xlabel('$\log(\\tau)$')
    plt.savefig('agn_tau_dist_fig.png')
| [
"[email protected]"
] | |
554de90b84acb3be0a4506092e5f1e2af577bec8 | c13261f07803218ff29238b3a455650316506e05 | /light8/configconvert.py | d5b0f0a32452f3cf3ccb555ca4ae4e08d33d842f | [] | no_license | shakaran/light9 | 3456427f718f43b829d34794bafc22d74305d30a | 91c86b030475e65f92c90adb0a0920f1fb9996eb | refs/heads/master | 2021-01-20T23:37:04.881942 | 2012-06-13T05:35:04 | 2012-06-13T05:35:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py |
from Config import subs
import Patch
# Import-time side effect: load the patch data before any conversion runs.
Patch.reload_data(0)
def print_tsv(filename,allchans,subs):
    """Write the subs dict as a TSV matrix (Python 2 code).

    The first row is a tab-separated channel header; each following row is
    a sub name followed by that sub's level for every channel (blank when
    the sub does not set the channel).
    """
    f=open(filename,"w")
    print >>f,"\t"+"\t".join(allchans)
    for name,levels in subs.items():
        normd={}
        # normalize the channel names in the sub via the patch table
        for k,v in levels.items():
            normd[Patch.resolve_name(k)]=v
        print >>f,"%s\t%s" % (name, "\t".join([str(normd.get(c,"")) for c in allchans]))
def read_tsv(filename,outname):
    """converts from tsv filename to a config file (python) named outname

    Python 2 code.  Each data row becomes a `subs['name']={...}` entry
    containing only the channels whose level parses as an int > 0
    (non-numeric cells are silently skipped via the ValueError handler).
    """
    f=open(filename,'r')
    out=open(outname,'w')
    # NOTE(review): readline() keeps the trailing newline, so the last
    # channel name in allchans carries a "\n" — confirm downstream use.
    allchans=f.readline().split("\t")[1:]
    for line in f.xreadlines():
        spl=line.split("\t")
        subname=spl[0]
        # Trailing commas on the py2 prints keep each sub on one line.
        print >>out,"subs['%s']={" % subname,
        for channame,level in zip(allchans,spl[1:]):
            try:
                if level!="" and int(level)>0:
                    print >>out,"'%s': %s," %(channame,level),
            except ValueError:
                pass
        print >>out,"}\n"
read_tsv(filename="sublevs-fixed",outname="Configsubs-fixed.py")
| [
"none"
] | none |
194d96f6626df79c9dd21202fcc29fe2b79e3d3b | f66e6a3bc5f6eae570afa2013325d462f530cff6 | /core/seller/migrations/0054_auto_20210514_1705.py | d8258c0702eeeea09cfae4fd507edfce45254a9f | [] | no_license | Mahe07/vyavaharback | 3cb30e227d9e0c25c86ba4e20f9cafce054c4a2a | 4e35cac3b643197a78e420d34ea3f45cce368e46 | refs/heads/main | 2023-08-10T17:21:56.538518 | 2021-09-17T03:53:44 | 2021-09-17T03:53:44 | 407,386,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # Generated by Django 3.1.4 on 2021-05-14 11:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: restricts `seller_status` to a fixed set of workflow
    states, defaulting new rows to 'Register'."""
    dependencies = [
        ('seller', '0053_auto_20210503_1805'),
    ]
    operations = [
        migrations.AlterField(
            model_name='seller',
            name='seller_status',
            field=models.CharField(choices=[('Register', 'Register'), ('Approved', 'Approved'), ('Verified', 'Verified'), ('Draft', 'Draft'), ('Drop', 'Drop')], default='Register', max_length=100, null=True),
        ),
    ]
| [
"[email protected]"
] | |
128cc0d6f08d2665af3af8ff3948a1e1ab1f15ef | 897802abf4ee5c7267de3eb5e321cc931898e2f6 | /python/python/eric/part2_project/project01_game_aliens/bullet.py | 94d931467ed36bd4cf2e943964aa9f74215ee800 | [] | no_license | aojie654/codes_store | 0527c7a7729b472e8fd2fd67af462cf857970633 | ed71b6266b2d2b5ddefadcb958f17695fb9db6cf | refs/heads/master | 2021-07-15T17:04:33.591673 | 2021-07-03T14:42:30 | 2021-07-03T14:42:30 | 132,343,733 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | # coding=utf-8
# @File : bullet
# @Author: aojie654
# @Date : 18-6-10 下午4:15
# @Desc : Bullet
import pygame as pg
from pygame.sprite import Sprite
class Bullet(Sprite):
    """A sprite for one bullet, spawned at the ship's current position."""
    def __init__(self, ai_settings, screen, ship):
        """Create a bullet object at position where the ship is."""
        super(Bullet, self).__init__()
        self.screen = screen
        # Build the rect at (0, 0) first, then move it onto the ship's nose.
        self.rect = pg.Rect(0, 0, ai_settings.bullet_width, ai_settings.bullet_height)
        self.rect.centerx = ship.rect.centerx
        self.rect.top = ship.rect.top
        # Keep a float copy of y so sub-pixel speeds accumulate precisely
        # (rect coordinates are integers).
        self.y = float(self.rect.y)
        # Appearance and per-frame speed, both taken from the settings object.
        self.color = ai_settings.bullet_color
        self.speed_factor = ai_settings.bullet_speed_factor
    def update(self):
        """Move the bullet up the screen (pygame's y axis grows downward)."""
        # update the precise float position, then sync the drawing rect
        self.y -= self.speed_factor
        self.rect.y = self.y
    def draw_bullet(self):
        """Draw the bullet as a filled rectangle on the stored screen."""
        pg.draw.rect(self.screen, self.color, self.rect)
| [
"[email protected]"
] | |
15705a89c31afbb086c3f166ae551352f6725885 | 34652a47355a8dbe9200db229a1bbc62619de364 | /Algorithms/Recursion/tst01.py | c5a4b031f4ff2b599d55cd84ccb09657de44c1a8 | [] | no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | # Created by Bogdan Trif on 30-06-2018 , 3:16 PM.
import turtle

# Module-level turtle and window shared by the drawing helpers below.
myTurtle = turtle.Turtle()
myWin = turtle.Screen()
myTurtle.speed(10)
def drawTriangle(points, color, myTurtle):
    """Draw a filled triangle whose vertices are the three (x, y) pairs in
    ``points``, filled with ``color``, using the given turtle."""
    myTurtle.fillcolor(color)
    # Jump (pen up) to the first vertex before filling starts.
    myTurtle.up()
    myTurtle.goto(*points[0])
    myTurtle.down()
    myTurtle.begin_fill()
    # Trace the remaining vertices, closing the shape at the start point.
    for vertex in (points[1], points[2], points[0]):
        myTurtle.goto(*vertex)
    myTurtle.end_fill()
def triangle(line_len):
    """Walk the module-level turtle around an equilateral triangle of side
    ``line_len``, then move half a side forward and turn 60° left."""
    for _ in range(3):
        myTurtle.forward(line_len)
        myTurtle.left(120)
    # Reposition after closing the triangle: half a side forward, 60° left.
    myTurtle.forward(line_len // 2)
    myTurtle.left(60)
# Vertices of a large triangle roughly centered on the origin.
side_len = 400
myPoints = [ [-side_len , -side_len//2 ] , [ 0, side_len ],[ side_len,-side_len//2 ] ]
# triangle(300)
drawTriangle(myPoints, 'olive', myTurtle)
# Keep the window open until the user clicks it.
myWin.exitonclick()
"[email protected]"
] | |
81707d27a4d72edd3e5bcd7db29b753d65389996 | 36bc2f2b52201ccc7ca11035fd0c66a8fe64d3f5 | /lint_domain_packages/interfaces.py | 1467db18f4b330fd2f46f364a420ddc7e4cea1d9 | [
"MIT"
] | permissive | imankulov/lint-domain-packages | fc46ba36ca739e31e152be79d3f609abd0af074a | cc6b7a33bdb0a5e022feee9d22d7f93c9f069680 | refs/heads/main | 2023-05-14T18:42:11.530567 | 2021-05-27T08:55:01 | 2021-05-27T08:55:55 | 370,928,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,796 | py | import abc
from dataclasses import dataclass
from grimp.application.ports.graph import AbstractImportGraph
@dataclass
class LinterSettings:
    """Configuration driving the domain-package import checks.

    ``root`` is the top-level package under analysis; the remaining
    fields describe which modules may be imported from where.
    """

    # Top-level package every analyzed module must live under.
    root: str

    # Domain packages whose *entire* contents are importable from outside
    # (typically utility packages).  Being public does not automatically
    # make a package a declared dependency.
    public_packages: list[str]

    # Module names (e.g. services, interfaces) that are importable from
    # outside in *every* domain package; importing them still requires the
    # enclosing package to be listed as a dependency.
    public_modules: list[str]

    # Self-imposed dependency map: domain package -> packages it may import.
    # E.g. {"payments": ["users", "projects"]} means "payments" may import
    # "users" and "projects".
    dependencies: dict[str, list[str]]

    def is_public(self, module_import_path: str) -> bool:
        """Tell whether the module may be imported from outside its package.

        A module counts as public when its domain package appears in
        ``public_packages`` (e.g. "myproject.utils") or when its top-level
        module name appears in ``public_modules``
        (e.g. "myproject.foo.services").
        """
        parts = self._get_module_chunks(module_import_path)
        _root, package = parts[:2]
        toplevel = parts[2] if len(parts) > 2 else None
        if package in self.public_packages:
            return True
        return bool(toplevel and toplevel in self.public_modules)

    def listed_in_dependencies(
        self, module_import_path: str, imported_module_import_path: str
    ) -> bool:
        """Tell whether the imported module's package is a declared
        dependency of the importing module's package."""
        importer_pkg = self._get_module_chunks(module_import_path)[1]
        imported_pkg = self._get_module_chunks(imported_module_import_path)[1]
        try:
            allowed = self.dependencies[importer_pkg]
        except KeyError:
            return False
        return imported_pkg in allowed

    def _get_module_chunks(self, module_import_path):
        """Split a dotted path and verify it lives under ``self.root``."""
        parts = module_import_path.split(".")
        if parts[0] == self.root:
            return parts
        raise RuntimeError(f"{module_import_path} doesn't belong to {self.root}")
@dataclass
class ImportDetails:
    """Location of a single import statement inside the importer's file."""
    line_number: int
    line_contents: str
@dataclass
class ImportViolationGroup:
    """A set of violations that share the same group key and error text,
    used to report related offenders together."""
    group_key: str
    error_message: str
    violations: list["ImportViolation"]
@dataclass
class ImportViolation:
    """Generic class for an import violation."""
    # grimp import graph the violation was found in.
    graph: AbstractImportGraph
    # Dotted module paths of the importing and imported modules.
    importer: str
    imported: str
    def get_import_details(self) -> ImportDetails:
        # Only the first occurrence of the import is reported (index [0]).
        details = self.graph.get_import_details(
            importer=self.importer, imported=self.imported
        )[0]
        return ImportDetails(details["line_number"], details["line_contents"])
    def get_location(self) -> str:
        """Human-readable "file:line import-statement" string."""
        details = self.get_import_details()
        return (
            f"{self.importer_filename}:{details.line_number} "
            f"{details.line_contents}"
        )
    @property
    def importer_filename(self) -> str:
        # Dotted path mapped to a source path, e.g. "a.b.c" -> "a/b/c.py".
        return self.importer.replace(".", "/") + ".py"
    @property
    def imported_filename(self) -> str:
        return self.imported.replace(".", "/") + ".py"
    def error_message(self) -> str:
        raise NotImplementedError("Must be implemented in subclasses.")
    def group_key(self) -> str:
        raise NotImplementedError("Must be implemented in subclasses.")
@dataclass
class NonPublicImportViolation(ImportViolation):
    """Raised when a module imported across package boundaries is not
    public; violations are grouped by the imported module."""
    def error_message(self) -> str:
        return "A module imported outside of the package is not public."
    def group_key(self) -> str:
        return self.imported
@dataclass
class NotDependentImportViolation(ImportViolation):
    """Raised when one domain package imports another without declaring it
    in the dependency map; grouped by the importer:imported package pair."""
    def error_message(self) -> str:
        return (
            f"Package {domain_package(self.importer)} implicitly depends on "
            f"{domain_package(self.imported)}."
        )
    def group_key(self) -> str:
        return f"{domain_package(self.importer)}:{domain_package(self.imported)}"
def domain_package(import_path: str):
    """Return the domain-package segment — the dotted component right
    after the root — of an import path."""
    parts = import_path.split(".")
    return parts[1]
| [
"[email protected]"
] | |
7d9ceb951227980f54371141d23133314a006bc8 | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /home/pi/GrovePi/Software/Python/others/temboo/Library/Zoho/CRM/GenerateAuthToken.py | 6564e3744ec9a54d1fc5d48c38797f2b50e0244d | [
"Apache-2.0",
"MIT"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# GenerateAuthToken
# Generates an authentication token.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GenerateAuthToken(Choreography):
    """Temboo SDK wrapper for the Zoho CRM GenerateAuthToken Choreo
    (appears to be Temboo-generated code)."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the GenerateAuthToken Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GenerateAuthToken, self).__init__(temboo_session, '/Library/Zoho/CRM/GenerateAuthToken')
    def new_input_set(self):
        # Factory for the typed input set used by this Choreo.
        return GenerateAuthTokenInputSet()
    def _make_result_set(self, result, path):
        # Factory hooks called by the Choreography base class.
        return GenerateAuthTokenResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        return GenerateAuthTokenChoreographyExecution(session, exec_id, path)
class GenerateAuthTokenInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GenerateAuthToken
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Both inputs are required by the remote Choreo.
    def set_Password(self, value):
        """
        Set the value of the Password input for this Choreo. ((required, string) Your Zoho password.)
        """
        super(GenerateAuthTokenInputSet, self)._set_input('Password', value)
    def set_Username(self, value):
        """
        Set the value of the Username input for this Choreo. ((required, string) Your Zoho CRM username.)
        """
        super(GenerateAuthTokenInputSet, self)._set_input('Username', value)
class GenerateAuthTokenResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GenerateAuthToken Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    # Generated helper; the parameter name shadows the builtin `str`,
    # left as-is to keep the generated interface unchanged.
    def getJSONFromString(self, str):
        return json.loads(str)
    def get_AuthenticationToken(self):
        """
        Retrieve the value for the "AuthenticationToken" output from this Choreo execution. ((string) The authentication token returned from Zoho.)
        """
        return self._output.get('AuthenticationToken', None)
class GenerateAuthTokenChoreographyExecution(ChoreographyExecution):
    """Execution handle for an asynchronous run of this Choreo."""
    def _make_result_set(self, response, path):
        return GenerateAuthTokenResultSet(response, path)
| [
"[email protected]"
] | |
abb34c25cbdcc7ced69a926540585e6977ff820e | b6fc54cff7037f5e4ef26cb4a645d5ea5a6fecdf | /000880DataCampPython01Intro/dataCamp000880ch01p02ex01.py | effc6ed2c8aeb882398c095f23b7b7584daaeca4 | [
"Apache-2.0"
] | permissive | SafonovMikhail/python_000577 | 5483eaf2f7c73bc619ce1f5de67d8d689d2e7dd4 | f2dccac82a37df430c4eb7425b5d084d83520409 | refs/heads/master | 2022-12-08T10:53:57.202746 | 2022-12-07T09:09:51 | 2022-12-07T09:09:51 | 204,713,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | '''
# Example, do not modify!
print(5 / 8)
# Print the sum of 7 and 10
'''
# Example, do not modify! ("/" is true division in Python 3, so this prints 0.625)
print(5 / 8)
# Print the sum of 7 and 10
print(7 + 10)
| [
"[email protected]"
] | |
47ce9e9f24be7f2de7cb55d7edf10d2ce08b6d6f | 7a77bade054683f7c36c59c6e0640958960efeea | /Komodo-Edit-8/lib/mozilla/python/komodo/Crypto/SelfTest/Random/Fortuna/__init__.py | e7e3adef44e6dd6018da853dc806e39887326fd2 | [] | no_license | amaurial/mininet | 4e8fd62ec1f0547d21fcbb60a3fde64d8855920b | d2761c075130c0f447a69bbb40b0e3fddc052eb6 | refs/heads/master | 2016-09-06T12:03:47.808851 | 2013-07-16T10:55:01 | 2013-07-16T10:55:01 | 11,447,348 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,763 | py | # -*- coding: utf-8 -*-
#
# SelfTest/Random/Fortuna/__init__.py: Self-test for Fortuna modules
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for the Crypto.Random.Fortuna package"""
__revision__ = "$Id: __init__.py 4769 2010-04-09 17:53:50Z toddw $"
import os
def get_tests(config={}):
    """Collect the Fortuna self-tests from the sibling test modules.

    Python 2 code: the bare imports below are implicit relative imports.
    The mutable default ``config={}`` is safe here because it is only
    passed through, never mutated.
    """
    tests = []
    import test_FortunaAccumulator; tests += test_FortunaAccumulator.get_tests(config=config)
    import test_FortunaGenerator; tests += test_FortunaGenerator.get_tests(config=config)
    import test_SHAd256; tests += test_SHAd256.get_tests(config=config)
    return tests
if __name__ == '__main__':
    import unittest
    # unittest.main resolves 'suite' by name, so expose it as a callable.
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| [
"[email protected]"
] | |
b320db840727aa5ecd16ba22b569f203a7c222e4 | 4201bc1c12ef6edaaf8a201ef3a1cc279889b768 | /tests/recogners/visual/test_plot.py | cc6dd289324cc59a8f6299c1de6d953fb7899611 | [
"MIT"
] | permissive | BachiLi/recogners | 30358df0d9b866ef8c298ff804689709a9e16638 | 945eb6119182d3b3f2d77c189b5b1c4f5306a9e3 | refs/heads/master | 2020-09-29T01:29:37.914280 | 2019-09-19T18:40:21 | 2019-09-19T18:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | import pytest
import torch
from recogners.visual import plot
def test_show_tensor():
    """Smoke test: rendering a 28x28 zero tensor should not raise."""
    t = torch.zeros(28, 28)
    plot.show_tensor(t)
| [
"[email protected]"
] | |
e1a924aec30e81ab56964e1ad7f9bb4247ddb7ab | 4522fc52bc43654aadd30421a75bae00a09044f0 | /share/haley/validations.py | 44287c17e5198ac05fa0bbd79a2cdfc4130a1a00 | [] | no_license | qesoalpe/anelys | 1edb8201aa80fedf0316db973da3a58b67070fca | cfccaa1bf5175827794da451a9408a26cd97599d | refs/heads/master | 2020-04-07T22:39:35.344954 | 2018-11-25T05:23:21 | 2018-11-25T05:23:21 | 158,779,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,789 | py | from neo4j.v1 import GraphDatabase, basic_auth
from pprint import pprint
from decimal import Decimal
d8757_5 = GraphDatabase.driver('bolt://comercialpicazo.com', auth=basic_auth('alejandro', '47exI4'))
d5_session = d8757_5.session()
cfdis = list()
tps = list()
txs = list()
def get_txs(_month):
    """Fetch all check-backed bank transactions whose date string starts
    with ``_month`` (date format is whatever the graph stores — presumably
    an ISO 'YYYY-MM' prefix; confirm against the data), as plain dicts."""
    rr = d5_session.run('match (tx:bailey_transaction)-[:check]->() where tx.date starts with {month} '
                        'return tx.id as id, tx.date as date, tx.description as description, tx.value as value, '
                        'tx.mov_num as mov_num;',
                        {'month': _month})
    _txs = list()
    for rc in rr:
        _txs.append({'id': rc['id'], 'date': rc['date'], 'description': rc['description'], 'value': rc['value'],
                     'mov_num': rc['mov_num']})
    return _txs
def get_tx(tx):
    """Intern ``tx`` into the module-level ``txs`` cache, keyed by 'id'.

    Returns the cached dict when one with the same id exists; otherwise
    appends ``tx`` and returns it.
    """
    for _tx in txs:
        if _tx['id'] == tx['id']:
            return _tx
    # for/else: this branch runs only when the loop found no match
    # (no early return), i.e. tx is new.
    else:
        txs.append(tx)
        return tx
def ensure_cfdi(cfdi):
    """Intern ``cfdi`` into the module-level ``cfdis`` cache, keyed by
    'uuid'; returns the cached dict or appends and returns the new one."""
    for _cfdi in cfdis:
        if _cfdi['uuid'] == cfdi['uuid']:
            return _cfdi
    # for/else: only reached when no cached CFDI matched.
    else:
        cfdis.append(cfdi)
        return cfdi
def get_taxpayer(rfc):
    """Intern a taxpayer record in the module-level ``tps`` cache, keyed by
    RFC (Mexican tax id); creates a minimal {'rfc': ...} dict when new."""
    for tp in tps:
        if tp['rfc'] == rfc:
            return tp
    # for/else: only reached when the RFC was not cached yet.
    else:
        tp = {'rfc': rfc}
        tps.append(tp)
        return tp
def inflate_cfdis(_txs):
    """Attach the CFDI invoices linked to each transaction in ``_txs``.

    Stores a single invoice under ``tx['cfdi']`` and multiple ones under
    ``tx['cfdis']``; transactions with no linked invoice get neither key.
    """
    for _tx in _txs:
        rr = d5_session.run('match ({id:{id}})-[:cfdi]->(cfdi:haley_cfdi)-[:emitter]->(emitter) return cfdi.uuid as uuid, '
                            'cfdi.total as total, cfdi.datetime as datetime, cfdi.folio as folio, cfdi.voucher_effect as voucher_effect, '
                            'emitter.rfc as emitter_rfc;',
                            {'id': _tx['id']})
        # NOTE: this local list shadows the module-level `cfdis` cache;
        # ensure_cfdi/get_taxpayer still update the module-level caches.
        cfdis = list()
        for rc in rr:
            cfdi = {'uuid': rc['uuid'], 'datetime': rc['datetime'], 'voucher_effect': rc['voucher_effect'], 'total': rc['total']}
            if rc['folio'] is not None:
                cfdi['folio'] = rc['folio']
            # Deduplicate against the global cache before decorating further.
            cfdi = ensure_cfdi(cfdi)
            if 'emitter' not in cfdi and rc['emitter_rfc'] is not None:
                cfdi['emitter'] = get_taxpayer(rc['emitter_rfc'])
            cfdis.append(cfdi)
        if len(cfdis) == 1:
            _tx['cfdi'] = cfdis[0]
        elif len(cfdis) > 1:
            _tx['cfdis'] = cfdis
def inflate_txs(cfdi):
    """Attach the transactions linked to ``cfdi`` (inverse of
    inflate_cfdis): one under ``cfdi['tx']``, several under ``cfdi['txs']``."""
    rr = d5_session.run('match (cfdi:haley_cfdi{uuid:{uuid}})<-[:cfdi]-(tx:bailey_transaction) return tx.id as id, tx.value as value, tx.date as date, tx.description as description, tx.mov_num as mov_num;',
                        {'uuid': cfdi['uuid']})
    # NOTE: this local list shadows the module-level `txs` cache, which
    # get_tx still updates.  Also the query returns tx.date but the dict
    # below drops it — confirm whether that is intentional.
    txs = list()
    for rc in rr:
        tx = get_tx({'id': rc['id'], 'value': rc['value'], 'description': rc['description'], 'mov_num': rc['mov_num']})
        txs.append(tx)
    if len(txs) == 1:
        cfdi['tx'] = txs[0]
    elif len(txs) > 1:
        cfdi['txs'] = txs
def validation_1():
    """Each CFDI must be linked to at most one transaction.

    Rule: (cfdi)<-[:cfdi]-(tx) count 1.  Returns the offending CFDIs.
    """
    no_pass = list()
    for cfdi in cfdis:
        # inflate_txs only sets 'txs' when more than one tx was linked.
        if 'txs' in cfdi and len(cfdi['txs']) > 1:
            no_pass.append(cfdi)
    return no_pass
def validation_2():
    """All CFDIs attached to one transaction must share the same emitter.

    Rule: (tx)-[:cfdi]->(cfdi)-[:emitter]->(emitter), emitter unique per tx.
    Returns the offending transactions.
    """
    no_pass = list()
    for tx in txs:
        emitter = None
        # Only multi-invoice transactions can conflict.
        if 'cfdis' in tx:
            for cfdi in tx['cfdis']:
                if 'emitter' in cfdi:
                    if emitter is not None:
                        # Second distinct emitter RFC -> violation.
                        if cfdi['emitter']['rfc'] != emitter['rfc']:
                            no_pass.append(tx)
                            break
                    else:
                        emitter = cfdi['emitter']
    return no_pass
def validation_3():
    """Each transaction's value must match the signed sum of its CFDIs.

    Rule: -tx.value == sum(ingress totals) - sum(egress totals); the
    negation reflects that outgoing payments are stored as negative
    transaction values.  Returns dicts of shape
    {'tx': tx, 'difference': -tx.value - signed_total}.
    """
    no_pass = list()
    # {'tx': tx, 'diference': diference}
    # Deferred import and in-place Decimal conversion of all float values,
    # so the comparison below is exact rather than float-approximate.
    from sarah.acp_bson import dictutils
    dictutils.list_float_to_dec(txs)
    for tx in txs:
        total_cfdis = Decimal()
        if 'cfdi' in tx:
            if tx['cfdi']['voucher_effect'] == 'ingress':
                total_cfdis = tx['cfdi']['total']
            elif tx['cfdi']['voucher_effect'] == 'egress':
                total_cfdis = -tx['cfdi']['total']
        elif 'cfdis' in tx:
            for cfdi in tx['cfdis']:
                if cfdi['voucher_effect'] == 'ingress':
                    total_cfdis += cfdi['total']
                elif cfdi['voucher_effect'] == 'egress':
                    total_cfdis -= cfdi['total']
        if total_cfdis != -tx['value']:
            no_pass.append({'tx': tx, 'difference': -tx['value'] - total_cfdis})
    return no_pass
def validation_4():
    """Placeholder (not implemented yet — always passes).

    Intended rule:
    (tx)-[:cfdi]->(cfdi)-[:emitter]->(emitter)<-[:beneficiary]-(check)<-[:check]-(tx)
    """
    no_pass = list()
    return no_pass
def validate():
    """Run every validation and collect the failures.

    Returns a dict mapping ``'validation_N'`` to ``{'no_pass': [...]}``
    for each validation that reported at least one offender; validations
    with no offenders are omitted.
    """
    checks = (
        ('validation_1', validation_1),
        ('validation_2', validation_2),
        ('validation_3', validation_3),
        ('validation_4', validation_4),
    )
    validation = {}
    for name, check in checks:
        no_pass = check()
        if no_pass:
            validation[name] = {'no_pass': no_pass}
    return validation
def populate_from_txs_in_month(month):
    """Rebuild the module-level working sets for *month*.

    Clears the shared ``txs``/``tps``/``cfdis`` lists, loads the month's
    bank transactions, attaches their CFDIs, links CFDIs back to their
    transactions, and resolves taxpayer names from the graph database.
    """
    txs.clear()
    tps.clear()
    cfdis.clear()
    txs.extend(get_txs(month))
    inflate_cfdis(txs)
    # Link each CFDI back to the transaction(s) that reference it.
    for cfdi in cfdis:
        inflate_txs(cfdi)
    # Fill in taxpayer names where the graph knows them.
    for tp in tps:
        rr = d5_session.run('match (tp{rfc:{rfc}}) return tp.name as name limit 1;', {'rfc': tp['rfc']})
        rc = rr.single()
        if rc is not None and rc['name'] is not None:
            tp['name'] = rc['name']
if __name__ == '__main__':
    # Debug entry point: dump the module-level working sets.  They are
    # presumably populated via populate_from_txs_in_month() — otherwise
    # this prints whatever initial values the module defined above.
    pprint(txs)
    pprint(tps)
    pprint(cfdis)
| [
"[email protected]"
] | |
64de11311bc599c3c5bed8ace84665103a3e169d | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/13164070.py | 860a9755b3d757aebdb485b23f443e9c6f4283dc | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/13164070.py generated: Wed, 25 Jan 2017 15:25:33
#
# Event Type: 13164070
#
# ASCII decay Descriptor: {[[B_s0]nos => K+ K- (D~0 -> K+ pi-)]cc, [[B_s0]os => K- K+ (D0 -> K- pi+)]cc}
#
from Configurables import Generation
# Event type 13164070: Bs -> D0 K K with D0 -> K pi (see the ASCII decay
# descriptor in the header comment above).
Generation().EventType = 13164070
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# EvtGen user decay file describing the forced signal decay chain.
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_D0KK,Kpi=PHSP.dec"
Generation().SignalRepeatedHadronization.CutTool = "LHCbAcceptance"
# Signal PDG IDs: B_s0 (531) and its antiparticle.
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "LHCbAcceptance"
from Configurables import LHCbAcceptance
pgun.addTool( LHCbAcceptance )
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
# Gun momenta are sampled from a measured pT-eta histogram for PDG 531
# at 4000 GeV beam energy.
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13164070
| [
"[email protected]"
] | |
60732bc1697dfe6cd246eedd27bb9a19434be6fe | edbb183560323cae50927334cc001ba2ac74aed8 | /pyPdf/__init__.py | af02553da69cfeadc36e17ab0fb0e14298dd42c6 | [
"BSD-3-Clause"
] | permissive | mfenniak/pyPdf | aee06721304c1c696c9f5004140cf1bb0104e9ee | 4abdca42a7d8a47a12f1e35ab86fc80157a2fc16 | refs/heads/trunk | 2022-07-28T14:45:55.587320 | 2010-07-18T14:21:14 | 2010-07-18T14:21:14 | 331,547 | 145 | 66 | NOASSERTION | 2020-08-24T08:10:25 | 2009-10-08T23:52:41 | Python | UTF-8 | Python | false | false | 63 | py | from pdf import PdfFileReader, PdfFileWriter
__all__ = ["pdf"]
| [
"devnull@localhost"
] | devnull@localhost |
da64b8e3612c3f2a8acde931b596730b72a067c4 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_available_private_endpoint_types_operations.py | b86a9f49860e73367c905f73ef6b2660161bc9ae | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 9,286 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AvailablePrivateEndpointTypesOperations(object):
    """AvailablePrivateEndpointTypesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Alias to the versioned model namespace (auto-generated SDK convention).
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        location, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.AvailablePrivateEndpointTypesResult"]
        """Returns all of the resource types that can be linked to a Private Endpoint in this subscription
        in this region.
        :param location: The location of the domain name.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailablePrivateEndpointTypesResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.AvailablePrivateEndpointTypesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailablePrivateEndpointTypesResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Builds the first page's request from the operation URL template,
        # or a request that follows the server-supplied next_link.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already contains the full URL including query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserializes one page: returns (next_link, iterator over the items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetches one page, mapping non-200 statuses to typed errors.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # Lazily pages through results as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    # URL template consumed by prepare_request via self.list.metadata['url'].
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'} # type: ignore
    def list_by_resource_group(
        self,
        location, # type: str
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.AvailablePrivateEndpointTypesResult"]
        """Returns all of the resource types that can be linked to a Private Endpoint in this subscription
        in this region.
        :param location: The location of the domain name.
        :type location: str
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailablePrivateEndpointTypesResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.AvailablePrivateEndpointTypesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailablePrivateEndpointTypesResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Same pagination plumbing as list(), with the resource group in the path.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url'] # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'} # type: ignore
| [
"[email protected]"
] | |
ad9fadfe929fbd742d5d3194a338c36c0e766e06 | 1498148e5d0af365cd7fd16197174174a7fa9800 | /leetcode/t001629_2.py | fbcfa1dde1361cd3e1d9c422cfd37ca1ee62acc6 | [] | no_license | feiyanshiren/myAcm | 59a2b80fe7e02787defcb152eee3eae26135322a | 00c7082d5143ddf87aeeafbdb6ce29da46dc8a12 | refs/heads/master | 2023-09-01T12:12:19.866447 | 2023-09-01T09:09:56 | 2023-09-01T09:09:56 | 148,560,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | from typing import List
class Solution:
def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:
max_k = ""
max_v = 0
releaseTimes.insert(0, 0)
for i in range(1, len(releaseTimes)):
s = releaseTimes[i] - releaseTimes[i - 1]
if s > max_v:
max_v = s
max_k = keysPressed[i - 1]
elif s == max_v:
if keysPressed[i - 1] > max_k:
max_k = keysPressed[i - 1]
print(max_v)
return max_k
s = Solution()
print(s.slowestKey([9, 29, 49, 50], "cbcd"))
print(s.slowestKey([12, 23, 36, 46, 62], "spuda"))
| [
"[email protected]"
] | |
0929bfb61575f4a6c8fa86c1f6755ab29427c22a | 81c010cee2eeddec6737ed585cadd6caddecfbcd | /docs/conf.py | a0524846f7b7fcf58341f6c8864d02f0918db030 | [
"MIT"
] | permissive | nickdelgrosso/wavefront_reader | b27a5aaeea3c38f578e5cdac7d9ab7019c03ae13 | c515164a3952d6b85f8044f429406fddd862bfd0 | refs/heads/master | 2020-03-19T10:09:54.165210 | 2018-04-27T14:41:34 | 2018-04-27T14:41:34 | 79,463,748 | 1 | 1 | MIT | 2018-04-27T14:41:55 | 2017-01-19T14:55:23 | Python | UTF-8 | Python | false | false | 8,772 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# wavefront_reader documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from mock import Mock as MagicMock
class Mock(MagicMock):
    # Any attribute access fabricates another Mock, so sphinx autodoc can
    # import project modules even when the heavy dependencies listed in
    # MOCK_MODULES (below) are absent on the docs build host.
    @classmethod
    def __getattr__(cls, name):
        return Mock()
# Modules unavailable on the docs build host: imports of these names
# resolve to Mock instances instead of failing.
MOCK_MODULES = ['numpy']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
# Imported for its __version__ (used below for `version`/`release`).
import wavefront_reader
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wavefront_reader'
copyright = u"2017, Nicholas A. Del Grosso"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = wavefront_reader.__version__
# The full version, including alpha/beta/rc tags.
release = wavefront_reader.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wavefront_readerdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'wavefront_reader.tex',
u'wavefront_reader Documentation',
u'Nicholas A. Del Grosso', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wavefront_reader',
u'wavefront_reader Documentation',
[u'Nicholas A. Del Grosso'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wavefront_reader',
u'wavefront_reader Documentation',
u'Nicholas A. Del Grosso',
'wavefront_reader',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"[email protected]"
] | |
c75429e425489c4fb78a4bfc6a4c93b281d9a415 | 0062ceae0071aaa3e4e8ecd9025e8cc9443bcb3b | /solved/17070.py | e14c99fcb45eac54e24f1a06821ec0c612691411 | [] | no_license | developyoun/AlgorithmSolve | 8c7479082528f67be9de33f0a337ac6cc3bfc093 | 5926924c7c44ffab2eb8fd43290dc6aa029f818d | refs/heads/master | 2023-03-28T12:02:37.260233 | 2021-03-24T05:05:48 | 2021-03-24T05:05:48 | 323,359,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | N = int(input())
board = [list(map(int, input().split())) for _ in range(N)]
dp = [[[0, 0, 0] for _ in range(N)] for _ in range(N)]
dp[0][1][0] = 1
for i in range(2, N):
if board[0][i]: break
dp[0][i][0] = 1
for i in range(1, N):
for j in range(1, N):
if not board[i][j]:
dp[i][j][0] = dp[i][j-1][0] + dp[i][j-1][2]
dp[i][j][1] = dp[i-1][j][1] + dp[i-1][j][2]
if not (board[i-1][j] or board[i][j-1]):
dp[i][j][2] = dp[i-1][j-1][0] + dp[i-1][j-1][1] + dp[i-1][j-1][2]
print(sum(dp[N-1][N-1])) | [
"[email protected]"
] | |
8b4f63c6a55804ce7d84505027839b601afd61d2 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/formula_fragment_py3.py | 0f212fb85237d011fc8a711d59206b6152c43a00 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,131 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .update_resource_py3 import UpdateResource
class FormulaFragment(UpdateResource):
    """A formula for creating a VM, specifying an image base and other parameters.

    :param tags: The tags of the resource.
    :type tags: dict[str, str]
    :param description: The description of the formula.
    :type description: str
    :param author: The author of the formula.
    :type author: str
    :param os_type: The OS type of the formula.
    :type os_type: str
    :param formula_content: The content of the formula.
    :type formula_content:
     ~azure.mgmt.devtestlabs.models.LabVirtualMachineCreationParameterFragment
    :param vm: Information about a VM from which a formula is to be created.
    :type vm: ~azure.mgmt.devtestlabs.models.FormulaPropertiesFromVmFragment
    """
    # Serialization map: Python attribute -> REST payload key and msrest type.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'author': {'key': 'properties.author', 'type': 'str'},
        'os_type': {'key': 'properties.osType', 'type': 'str'},
        'formula_content': {'key': 'properties.formulaContent', 'type': 'LabVirtualMachineCreationParameterFragment'},
        'vm': {'key': 'properties.vm', 'type': 'FormulaPropertiesFromVmFragment'},
    }
    def __init__(self, *, tags=None, description: str=None, author: str=None, os_type: str=None, formula_content=None, vm=None, **kwargs) -> None:
        super(FormulaFragment, self).__init__(tags=tags, **kwargs)
        self.description = description
        self.author = author
        self.os_type = os_type
        self.formula_content = formula_content
        self.vm = vm
| [
"[email protected]"
] | |
f12dbc29ef6f39cbd86252c9fb94858a00f35d61 | 377e3a552fb807febc18ce036af77edbce93ca19 | /binary trees/deepest_node_binary_tree.py | 891638b927afd3d373b32ddde50d44b356bce55f | [] | no_license | souravs17031999/100dayscodingchallenge | 940eb9b6d6037be4fc0dd5605f9f808614085bd9 | d05966f3e6875a5ec5a8870b9d2627be570d18d9 | refs/heads/master | 2022-10-29T11:05:46.762554 | 2022-09-28T13:04:32 | 2022-09-28T13:04:32 | 215,993,823 | 44 | 12 | null | 2022-08-18T14:58:50 | 2019-10-18T09:55:03 | Python | UTF-8 | Python | false | false | 1,656 | py | # Program for finding the deepest node in the tree.
# Input : Root of below tree
# 1
# / \
# 2 3
# / \ / \
# 4 5 6 7
# \
# 8
# Output : 8
#
# Input : Root of below tree
# 1
# / \
# 2 3
# /
# 6
# Output : 6
# ----------------------------------------------------------------------------------------------------
# As we know that height of the binary tree is the maximum depth of the binary tree.
# From calculating height, we know that max. depth which is the level of the deepest node
# and then we can print the node data at the level as found above.
# TIME : 0(N), N IS NODES OF BINARY TREE.
class new_Node:
    """A binary-tree node holding a payload and two child links."""
    def __init__(self, x):
        self.data = x       # node payload
        self.left = None    # left child (new_Node or None)
        self.right = None   # right child (new_Node or None)
def height(root):
    """Return the number of levels in the tree rooted at *root* (0 if empty)."""
    if root is None:
        return 0
    return 1 + max(height(root.left), height(root.right))
def deepest_node(root, levels):
    """Print the data of every node *levels* levels below *root* (1 = root).

    Called with ``levels == height(root)`` this prints the deepest node(s),
    left to right.
    """
    if root is None:
        return
    if levels == 1:
        print(root.data)
    elif levels > 1:
        for child in (root.left, root.right):
            deepest_node(child, levels - 1)
if __name__ == '__main__':
    # Build the sample tree from the module header; its single deepest
    # node is 9, sitting on level 5 (path 1 -> 3 -> 5 -> 7 -> 9).
    root = new_Node(1)
    root.left = new_Node(2)
    root.right = new_Node(3)
    root.left.left = new_Node(4)
    root.right.left = new_Node(5)
    root.right.right = new_Node(6)
    root.right.left.right = new_Node(7)
    root.right.right.right = new_Node(8)
    root.right.left.right.left = new_Node(9)
    levels = height(root)  # depth of the deepest level
    deepest_node(root, levels)  # prints every node on that level
| [
"[email protected]"
] | |
42ff61b7b1532947d2f6707b192005c61325f2de | 644b019a4792b6c7d9e5352e6330069850cc07e7 | /dentexchange/apps/libs/tests/test_login_required_for.py | 88b4f1aa0075a77293dc5f58dee1d6c6a608927f | [
"BSD-3-Clause"
] | permissive | jpchauvel/dentexchange | db0611c8c45365db30bdc15e3005c6eeac104c73 | 58ae303e842404fc9e1860f294ec8044a332bef3 | refs/heads/master | 2021-10-10T12:19:00.985034 | 2014-09-24T03:42:20 | 2014-09-24T03:42:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,072 | py | # -*- coding:utf-8 -*-
import unittest
import mock
from ..decorators import login_required_for, EMPLOYER, EMPLOYEE
class LoginRequiredForTestCase(unittest.TestCase):
    """Unit tests for ``libs.decorators.login_required_for``.

    ``user_passes_test`` is patched so each test can grab the predicate
    the decorator builds (``call_args[0][0]``), exercise it against a
    fake user, and verify the decorator returns ``user_passes_test``'s
    result unchanged.
    """
    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_employer_return_user_passes_test_with_check_returning_true(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = True
        # action
        returned_value = login_required_for(EMPLOYER)
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertTrue(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))
    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_employee_return_user_passes_test_with_check_returning_true(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = False
        # action
        returned_value = login_required_for(EMPLOYEE)
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertTrue(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))
    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_employer_return_user_passes_test_with_check_returning_false(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = False
        # action
        returned_value = login_required_for(EMPLOYER)
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertFalse(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))
    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_employee_return_user_passes_test_with_check_returning_false(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = True
        # action
        returned_value = login_required_for(EMPLOYEE)
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertFalse(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))
    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_should_return_false_if_user_doesnt_have_userregistration_attr(
            self, user_passes_test):
        # setup
        # A bare object() has no `userregistration` attribute at all.
        user = object()
        # action
        returned_value = login_required_for(EMPLOYER)
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertFalse(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))
    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_should_return_user_passes_test_with_check_returning_false_for_login_types_list(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = False
        # action
        returned_value = login_required_for((EMPLOYER, EMPLOYER,))
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertFalse(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))
    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_should_return_user_passes_test_with_check_returning_true_for_login_types_list(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = False
        # action
        returned_value = login_required_for((EMPLOYER, EMPLOYEE,))
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertTrue(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))
| [
"[email protected]"
] | |
ad2fd5634726e6272446d55a182d613f898857a5 | 2d0a0a1b6dad8657eaf844edbffe198bb1ff5c3e | /uvoyeur/daemon/mcast_proto.py | 2ecd3e5698e24d8324e5f20bda4e388e3bc41267 | [
"Apache-2.0"
] | permissive | egustafson/uvoyeur | 93ed7a4d795c74de477da39162285fdc7959f873 | ed7a9c60933a898964a111c0e5311bab3172b21a | refs/heads/master | 2021-01-19T22:33:43.403293 | 2015-08-29T19:06:56 | 2015-08-29T19:06:56 | 33,937,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,378 | py | """PROTOTYPE - Multicast Listener
REPLACE with full Mcast Listener in time
"""
import socket
import struct
import threading
MCAST_GRP = '224.1.1.1'
MCAST_PORT = 5007
class McastListener(threading.Thread):
    """Daemon thread that joins the MCAST_GRP:MCAST_PORT multicast group
    and prints every datagram received until ``stop()`` is called."""

    def __init__(self, bus):
        """:param bus: event bus offering ``subscribe(event_name, callback)``."""
        super(McastListener, self).__init__()
        # Assigning 'daemon' replaces the deprecated setDaemon() call.
        self.daemon = True
        self.bus = bus
        # Cooperative shutdown flag, re-checked once per socket timeout.
        self.shutdown = False

    def subscribe(self):
        """Wire this listener to the bus 'start' and 'stop' events."""
        self.bus.subscribe('start', self.do_start)
        self.bus.subscribe('stop', self.stop)

    def do_start(self):
        """Bus callback: launch the listener thread."""
        print("McastListener - start")
        self.start()

    def stop(self):
        """Bus callback: request shutdown; run() exits within ~1 second."""
        self.shutdown = True
        print("McastListener - shutdown")

    def run(self):
        """Receive loop: join the multicast group and print datagrams."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', MCAST_PORT))
            mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
            # Short timeout so the shutdown flag is polled regularly.
            sock.settimeout(1)
            while not self.shutdown:
                try:
                    msg = sock.recv(10240)
                    print("received: {0}".format(msg))
                except socket.timeout:
                    # Idle tick: nothing received, just re-check the flag.
                    pass
        finally:
            # Always release the socket (it was leaked before this fix).
            sock.close()
        print("shutting down.")
## Local Variables:
## mode: python
## End:
| [
"[email protected]"
] | |
170439db6c8bc3f75bb70bbcfd21eb6a6a194663 | 6d967da5fd95aa5e66ddbb211da40041006ca5ec | /myvenv/Lib/site-packages/pip/_vendor/pep517/build.py | 1c6ecbe79e7f8c4778ecb5872ddf1b4a9c1f59cd | [] | no_license | gevorkyannaira/my-first-blog | 96e4458045a1dd0aa9c1f3ec69f4c829428200e0 | 42ab12a8c2b0e402b5fa1b8e5a7cdd2629d06c16 | refs/heads/master | 2022-09-03T21:14:18.946448 | 2020-05-18T18:15:39 | 2020-05-18T18:15:39 | 264,909,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,645 | py | """Build a project using PEP 517 hooks.
"""
import argparse
import logging
import os
<<<<<<< HEAD
import contextlib
from pip._vendor import pytoml
import shutil
import errno
import tempfile
from .envbuild import BuildEnvironment
from .wrappers import Pep517HookCaller
=======
from pip._vendor import toml
import shutil
from .envbuild import BuildEnvironment
from .wrappers import Pep517HookCaller
from .dirtools import tempdir, mkdir_p
from .compat import FileNotFoundError
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
log = logging.getLogger(__name__)
<<<<<<< HEAD
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
=======
def validate_system(system):
"""
Ensure build system has the requisite fields.
"""
required = {'requires', 'build-backend'}
if not (required <= set(system)):
message = "Missing required fields: {missing}".format(
missing=required-set(system),
)
raise ValueError(message)
def load_system(source_dir):
"""
Load the build system from a source dir (pyproject.toml).
"""
pyproject = os.path.join(source_dir, 'pyproject.toml')
with open(pyproject) as f:
pyproject_data = toml.load(f)
return pyproject_data['build-system']
def compat_system(source_dir):
"""
Given a source dir, attempt to get a build system backend
and requirements from pyproject.toml. Fallback to
setuptools but only if the file was not found or a build
system was not indicated.
"""
try:
system = load_system(source_dir)
except (FileNotFoundError, KeyError):
system = {}
system.setdefault(
'build-backend',
'setuptools.build_meta:__legacy__',
)
system.setdefault('requires', ['setuptools', 'wheel'])
return system
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
def _do_build(hooks, env, dist, dest):
get_requires_name = 'get_requires_for_build_{dist}'.format(**locals())
get_requires = getattr(hooks, get_requires_name)
reqs = get_requires({})
log.info('Got build requires: %s', reqs)
env.pip_install(reqs)
log.info('Installed dynamic build dependencies')
with tempdir() as td:
log.info('Trying to build %s in %s', dist, td)
build_name = 'build_{dist}'.format(**locals())
build = getattr(hooks, build_name)
filename = build(td, {})
source = os.path.join(td, filename)
shutil.move(source, os.path.join(dest, os.path.basename(filename)))
<<<<<<< HEAD
def mkdir_p(*args, **kwargs):
"""Like `mkdir`, but does not raise an exception if the
directory already exists.
"""
try:
return os.mkdir(*args, **kwargs)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def build(source_dir, dist, dest=None):
pyproject = os.path.join(source_dir, 'pyproject.toml')
dest = os.path.join(source_dir, dest or 'dist')
mkdir_p(dest)
with open(pyproject) as f:
pyproject_data = pytoml.load(f)
# Ensure the mandatory data can be loaded
buildsys = pyproject_data['build-system']
requires = buildsys['requires']
backend = buildsys['build-backend']
hooks = Pep517HookCaller(source_dir, backend)
with BuildEnvironment() as env:
env.pip_install(requires)
=======
def build(source_dir, dist, dest=None, system=None):
system = system or load_system(source_dir)
dest = os.path.join(source_dir, dest or 'dist')
mkdir_p(dest)
validate_system(system)
hooks = Pep517HookCaller(
source_dir, system['build-backend'], system.get('backend-path')
)
with BuildEnvironment() as env:
env.pip_install(system['requires'])
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
_do_build(hooks, env, dist, dest)
parser = argparse.ArgumentParser()
parser.add_argument(
'source_dir',
help="A directory containing pyproject.toml",
)
parser.add_argument(
'--binary', '-b',
action='store_true',
default=False,
)
parser.add_argument(
'--source', '-s',
action='store_true',
default=False,
)
parser.add_argument(
'--out-dir', '-o',
help="Destination in which to save the builds relative to source dir",
)
def main(args):
# determine which dists to build
dists = list(filter(None, (
'sdist' if args.source or not args.binary else None,
'wheel' if args.binary or not args.source else None,
)))
for dist in dists:
build(args.source_dir, dist, args.out_dir)
if __name__ == '__main__':
main(parser.parse_args())
| [
"[email protected]"
] | |
e1f4ff4fde1cb02d80fa4d2b94bbc9519caf75d7 | aa9f8d7b48dbe3cbecca5eaa2ad3bbea262dbf24 | /qualite/commentaire.py | e75f0efb9eee8d63fab5ff076e2945b6b187921a | [] | no_license | ssinayoko/Pyhton_Cours | 5381a98c42cba021f34b482776933accd3442a6c | 56b391aeb673b40b564c59053295ac68e2576a1c | refs/heads/master | 2020-08-30T13:32:59.662715 | 2019-10-25T12:50:25 | 2019-10-25T12:50:25 | 218,395,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | # -*- coding: utf-8 -*-
def addition(a, b):
    """
    Return ``a + b``.

    Works for any operands supporting "+": ints are summed, strings and
    sequences are concatenated (this module itself calls it with strings
    below, so the previous ``int`` annotations were incorrect and have
    been dropped).

    :param a: left operand
    :param b: right operand
    :return: ``a + b``

    :Example:

    >>> addition(2, 2)
    4
    >>> addition("abc", "d")
    'abcd'
    """
    return a + b
help(addition)
print(addition("abc", "d"))
| [
"[email protected]"
] | |
0713d0f46f779f0a3d49497f7ce75e67e8204e77 | 67553d46a257631810f394908013b82c337e0fbd | /goat/temp/test.py | 44fa0a89196104b700f5eda1a88be7e07ae37d41 | [] | no_license | bopopescu/goat-python | 3f9d79eb1a9c2733345d699c98d82f91968ca5fa | c139488e2b5286033954df50ae1ca834144446f5 | refs/heads/master | 2022-11-21T11:25:27.921210 | 2020-03-06T01:02:57 | 2020-03-06T01:02:57 | 281,066,748 | 0 | 0 | null | 2020-07-20T09:00:08 | 2020-07-20T09:00:08 | null | UTF-8 | Python | false | false | 1,067 | py |
# Minimal scraping experiment: fetch one Baidu Baike article page and
# print its title (<h1>) and its summary paragraph, extracted via XPath.
import requests
from lxml import etree
# Get a session used to open the URL.
sessions = requests.session()
'''
给sssion设置代理,
因为一般的网站没有这个的话,
会拒绝我们的爬虫访问,
因此我们在这模拟谷歌浏览器访问
'''
# (The stray string literal above explains, in Chinese: set a browser-like
# User-Agent on the session, because sites commonly reject crawlers
# without one — here we imitate Chrome.)
sessions.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36'
# Perform the request and fetch the page source.
r = sessions.get(
    'https://baike.baidu.com/item/%E4%B8%AD%E5%9B%BD%E5%9C%B0%E9%9C%87%E5%B1%80%E9%83%91%E5%B7%9E%E5%9F%BA%E7%A1%80%E5%B7%A5%E7%A8%8B%E5%8B%98%E5%AF%9F%E7%A0%94%E7%A9%B6%E9%99%A2%E6%A1%A9%E5%9F%BA%E6%A3%80%E6%B5%8B%E4%B8%AD%E5%BF%83')
# Specify the decoding format for the fetched data.
r.encoding = 'utf-8'
text = r.text
# Parse the page source into an element tree so XPath can be used.
content = etree.HTML(text)
# Use XPath to extract the content of the <h1> tag.
h = content.xpath('//h1')
h1 = h[0].xpath('string(.)').strip()
print(h1)
# Same technique for the article's summary block.
d = content.xpath("//div[@label-module='lemmaSummary']")
d1 = d[0].xpath('string(.)').strip()
print(d1)
| [
"[email protected]"
] | |
23d890921b1774bbc78f6653f655c81c69604fe4 | 20d8a89124008c96fa59225926ce39f113522daa | /UL_NanoAODv8/2017/step1_cfg.py | 122f3ba6db11246c43b88205c06a08dbd288cabb | [] | no_license | MiT-HEP/MCProduction | 113a132a2ff440e13225be518ff8d52b0136e1eb | df019d7a15717a9eafd9502f2a310023dcd584f5 | refs/heads/master | 2022-05-06T20:25:34.372363 | 2022-04-12T11:55:15 | 2022-04-12T11:55:15 | 37,586,559 | 5 | 7 | null | 2015-08-24T11:13:58 | 2015-06-17T09:45:12 | Python | UTF-8 | Python | false | false | 5,862 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Configuration/GenProduction/python/PPD-RunIISummer20UL17wmLHEGEN-00001-fragment.py --python_filename step1_cfg.py --eventcontent RAWSIM,LHE --customise Configuration/DataProcessing/Utils.addMonitoring --datatier GEN,LHE --fileout file:step1.root --conditions 106X_mc2017_realistic_v6 --beamspot Realistic25ns13TeVEarly2017Collision --step LHE,GEN --geometry DB:Extended --era Run2_2017 --no_exec --mc -n 500
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
options = VarParsing ('analysis')
options.register('jobNum', 0, VarParsing.multiplicity.singleton,VarParsing.varType.int,"jobNum")
options.register('chain', 'hbbg', VarParsing.multiplicity.singleton,VarParsing.varType.string,'chain')
options.parseArguments()
from Configuration.Eras.Era_Run2_2017_cff import Run2_2017
process = cms.Process('GEN',Run2_2017)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic25ns13TeVEarly2017Collision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(500)
)
# Input source
firstLumi=10*options.jobNum+1 ## eventsPerJob/eventsPerLumi*jobNum +1
process.source = cms.Source("EmptySource",
firstLuminosityBlock = cms.untracked.uint32(firstLumi),
numberEventsInLuminosityBlock = cms.untracked.uint32(100)
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('Configuration/GenProduction/python/PPD-RunIISummer20UL17wmLHEGEN-00001-fragment.py nevts:500'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
),
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(1),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(20971520),
fileName = cms.untracked.string('file:step1.root'),
outputCommands = process.RAWSIMEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
process.LHEoutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('LHE'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:step1_inLHE.root'),
outputCommands = process.LHEEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '106X_mc2017_realistic_v6', '')
print ("Loading chain",options.chain)
process.load("fragment_"+ options.chain)
#process.externalLHEProducer.args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/2017/13TeV/powheg/V2/gg_H_quark-mass-effects_NNPDF31_13TeV_M125/v1/gg_H_quark-mass-effects_NNPDF31_13TeV_M125_slc6_amd64_gcc630_CMSSW_9_3_0.tgz'),
# Path and EndPath definitions
process.lhe_step = cms.Path(process.externalLHEProducer)
process.generation_step = cms.Path(process.pgen)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
process.LHEoutput_step = cms.EndPath(process.LHEoutput)
# Schedule definition
process.schedule = cms.Schedule(process.lhe_step,process.generation_step,process.genfiltersummary_step,process.endjob_step,process.RAWSIMoutput_step,process.LHEoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# filter all path with the production filter sequence
for path in process.paths:
if path in ['lhe_step']: continue
getattr(process,path).insert(0, process.generator)
# customisation of the process.
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# End of customisation functions
# Customisation from command line
import os,random
random.seed = os.urandom(10) #~10^14
process.RandomNumberGeneratorService.externalLHEProducer.initialSeed = random.randint(0,999999)
process.RandomNumberGeneratorService.generator.initialSeed = random.randint(0,999999)
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| [
"[email protected]"
] | |
4383b6408419e66072835b37d23d97ba2d7e7aae | 84a19fe0b89bb19caa1641aeadc9623c1a181767 | /abc/117/d.py | a815ba914748d87fb00aa584570fd4079a6fb59d | [
"MIT"
] | permissive | wotsushi/competitive-programming | 75abae653cff744189c53ad7e6dbd2ca1a62e3a8 | 17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86 | refs/heads/master | 2021-06-10T06:42:40.846666 | 2021-05-31T10:32:51 | 2021-05-31T10:32:51 | 175,002,279 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | # 入力
# Input: N and K, then the N values of A.
N, K = map(int, input().split())
A = list(map(int, input().split()))
# Decide each bit of X, 0 or 1, starting from the MSB.
# When a bit is fixed to 0, the optimum for the lower bits can be
# obtained greedily (the bound on X no longer constrains them).
def s(i):
    """Count of elements of A whose i-th lowest bit (1-based) is set."""
    return sum((a >> (i - 1)) & 1 for a in A)
def g(L, b):
    """Best total the lowest ``b`` bits can contribute over all X with
    0 <= X <= L (maximising sum of (X xor A_i) — ABC 117 D).

    Cases, with m = 2**(b-1):
      * b == 0: nothing left to choose.
      * L < m: this bit of X must be 0, so the s(b) elements with the
        bit set each contribute m.
      * L == 2**b - 1: the bound is saturated; pick bit 0 or 1 freely
        (max of s(b) vs N - s(b)) and recurse, bound still saturated.
      * otherwise: either keep the bit 0 and leave lower bits
        unconstrained (bound m - 1), or set it to 1 and recurse with
        the reduced bound L - m.
    """
    m = 2**(b - 1)
    return (
        0 if b == 0 else
        g(L, b - 1) + m * s(b) if L < m else
        g(L >> 1, b - 1) + m * max(
            s(b),
            N - s(b)
        ) if L == (2**b - 1) else
        max(
            g(m - 1, b - 1) + m * s(b),
            g(L - m, b - 1) + m * (N - s(b))
        )
    )
ans = g(K, max(K, *A).bit_length())
# Output
print(ans)
| [
"[email protected]"
] | |
729118943cf7ba2ea251dbbaf8252aa5b343c9d4 | b87ea98bc166cade5c78d246aeb0e23c59183d56 | /samples/openapi3/client/petstore/python/petstore_api/paths/pet_pet_id_upload_image/post.pyi | c6e9baa0d0a84e9b7daf752e4577153d3c64a34f | [
"Apache-2.0"
] | permissive | holisticon/openapi-generator | 88f8e6a3d7bc059c8f56563c87f6d473694d94e5 | 6a67551ea54a1aa9a49eb48ee26b4e9bb7fb1272 | refs/heads/master | 2023-05-12T02:55:19.037397 | 2023-04-14T08:31:59 | 2023-04-14T08:31:59 | 450,034,139 | 1 | 0 | Apache-2.0 | 2022-01-20T09:34:14 | 2022-01-20T09:34:13 | null | UTF-8 | Python | false | false | 16,643 | pyi | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from petstore_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from petstore_api import schemas # noqa: F401
from petstore_api.model.api_response import ApiResponse
# Path params
PetIdSchema = schemas.Int64Schema
RequestRequiredPathParams = typing_extensions.TypedDict(
'RequestRequiredPathParams',
{
'petId': typing.Union[PetIdSchema, decimal.Decimal, int, ],
}
)
RequestOptionalPathParams = typing_extensions.TypedDict(
'RequestOptionalPathParams',
{
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_pet_id = api_client.PathParameter(
name="petId",
style=api_client.ParameterStyle.SIMPLE,
schema=PetIdSchema,
required=True,
)
# body param
class SchemaForRequestBodyMultipartFormData(
schemas.DictSchema
):
class MetaOapg:
class properties:
additionalMetadata = schemas.StrSchema
file = schemas.BinarySchema
__annotations__ = {
"additionalMetadata": additionalMetadata,
"file": file,
}
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["additionalMetadata"]) -> MetaOapg.properties.additionalMetadata: ...
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["file"]) -> MetaOapg.properties.file: ...
@typing.overload
def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
def __getitem__(self, name: typing.Union[typing_extensions.Literal["additionalMetadata", "file", ], str]):
# dict_instance[name] accessor
return super().__getitem__(name)
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["additionalMetadata"]) -> typing.Union[MetaOapg.properties.additionalMetadata, schemas.Unset]: ...
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["file"]) -> typing.Union[MetaOapg.properties.file, schemas.Unset]: ...
@typing.overload
def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["additionalMetadata", "file", ], str]):
return super().get_item_oapg(name)
def __new__(
cls,
*_args: typing.Union[dict, frozendict.frozendict, ],
additionalMetadata: typing.Union[MetaOapg.properties.additionalMetadata, str, schemas.Unset] = schemas.unset,
file: typing.Union[MetaOapg.properties.file, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset,
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
) -> 'SchemaForRequestBodyMultipartFormData':
return super().__new__(
cls,
*_args,
additionalMetadata=additionalMetadata,
file=file,
_configuration=_configuration,
**kwargs,
)
request_body_body = api_client.RequestBody(
content={
'multipart/form-data': api_client.MediaType(
schema=SchemaForRequestBodyMultipartFormData),
},
)
SchemaFor200ResponseBodyApplicationJson = ApiResponse
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _upload_image_oapg(
self,
content_type: typing_extensions.Literal["multipart/form-data"] = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _upload_image_oapg(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _upload_image_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _upload_image_oapg(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _upload_image_oapg(
self,
content_type: str = 'multipart/form-data',
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
uploads an image
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_pet_id,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
_fields = None
_body = None
if body is not schemas.unset:
serialized_data = request_body_body.serialize(body, content_type)
_headers.add('Content-Type', content_type)
if 'fields' in serialized_data:
_fields = serialized_data['fields']
elif 'body' in serialized_data:
_body = serialized_data['body']
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
fields=_fields,
body=_body,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(
status=response.status,
reason=response.reason,
api_response=api_response
)
return api_response
class UploadImage(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def upload_image(
self,
content_type: typing_extensions.Literal["multipart/form-data"] = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def upload_image(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def upload_image(
self,
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def upload_image(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def upload_image(
self,
content_type: str = 'multipart/form-data',
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._upload_image_oapg(
body=body,
path_params=path_params,
content_type=content_type,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def post(
self,
content_type: typing_extensions.Literal["multipart/form-data"] = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def post(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def post(
self,
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def post(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def post(
self,
content_type: str = 'multipart/form-data',
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._upload_image_oapg(
body=body,
path_params=path_params,
content_type=content_type,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
| [
"[email protected]"
] | |
988ece8807fab60fc99385a71a88b6c2591b1b91 | 94bb879816dbdd69559ecfcc70a09f33d104af67 | /source/functions/sqlmap/thirdparty/beautifulsoup/beautifulsoup.py | bc8889f76f8e3f8c42f748b57e1955c554ec750d | [
"LicenseRef-scancode-unknown",
"MIT",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"LicenseRef-scancode-commercial-license",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"GPL-CC-1.0"
] | permissive | 51000000/CampusCyberInspectionTool2021 | f328ad571ab88051aa6928a67209dd94ce25eb6c | 27a2de7ff3707ba6ab084acfce79a7d3f42b8f84 | refs/heads/main | 2023-03-28T01:11:22.678066 | 2021-04-01T05:23:54 | 2021-04-01T05:23:54 | 353,502,239 | 0 | 0 | MIT | 2021-03-31T22:06:49 | 2021-03-31T22:06:48 | null | UTF-8 | Python | false | false | 80,066 | py | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2010, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
from __future__ import print_function
__author__ = "Leonard Richardson ([email protected])"
__version__ = "3.2.1"
__copyright__ = "Copyright (c) 2004-2012 Leonard Richardson"
__license__ = "New-style BSD"
import codecs
import types
import re
import sys
# Python 2/3 compatibility aliases.  Code further down refers to the
# Python 2 builtins ``xrange``, ``basestring`` and ``unichr`` as well as
# the ``text_type``/``binary_type`` pair, so provide them all on Python 3.
if sys.version_info >= (3, 0):
    xrange = range
    text_type = str
    binary_type = bytes
    basestring = str
    # ``unichr`` only exists on Python 2; Tag._convertEntities (below)
    # calls it, so alias it to its Python 3 equivalent.
    unichr = chr
else:
    text_type = unicode
    binary_type = str
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
try:
import sgmllib
except ImportError:
from lib.utils import sgmllib
try:
import markupbase
except ImportError:
import _markupbase as markupbase
#These hacks make Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(str):
"""Build a RE to match the given CSS class."""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
# First, the classes that represent markup elements.
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)"""

    def _invert(h):
        "Cheap function to invert a hash."
        # NOTE: no 'self' -- this is invoked once, below, while the class
        # body is being evaluated, to build the reverse entity table.
        i = {}
        for k,v in h.items():
            i[v] = k
        return i

    # XML entity name -> literal character.
    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
                                      "quot" : '"',
                                      "amp" : "&",
                                      "lt" : "<",
                                      "gt" : ">" }

    # Reverse map: literal character -> XML entity name.
    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)

    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            # This element follows the parent's current last child.
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self

    def replaceWith(self, replaceWith):
        """Replaces this element in the tree with a different element."""
        oldParent = self.parent
        myIndex = self.parent.index(self)
        if hasattr(replaceWith, "parent")\
           and replaceWith.parent is self.parent:
            # We're replacing this element with one of its siblings.
            index = replaceWith.parent.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)

    def replaceWithChildren(self):
        """Replaces this element with its own children, hoisting them
        up one level in the tree."""
        myParent = self.parent
        myIndex = self.parent.index(self)
        self.extract()
        reversedChildren = list(self.contents)
        reversedChildren.reverse()
        # Inserting in reverse order at a fixed index preserves the
        # children's original document order.
        for child in reversedChildren:
            myParent.insert(myIndex, child)

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                del self.parent.contents[self.parent.index(self)]
            except ValueError:
                pass

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next

        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None

        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        return self

    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild

    def insert(self, position, newChild):
        """Inserts newChild at the given position among this element's
        contents, rewiring all parse-order and sibling links."""
        if isinstance(newChild, basestring) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)

        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent is self:
                index = self.index(newChild)
                if index > position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()

        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            # In parse order, the element before newChild is the deepest
            # last descendant of its previous sibling.
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild

        newChildsLastElement = newChild._lastRecursiveChild()

        if position >= len(self.contents):
            newChild.nextSibling = None

            # Walk up the tree to find the element that follows this
            # subtree in parse order (an ancestor's next sibling).
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild

        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)

    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)

    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x

    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)

    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r

    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x

    #These methods do the real heavy lifting.

    def _findOne(self, method, name, attrs, text, **kwargs):
        # Delegates to a findAll*-style method with limit=1 and unwraps
        # the single result (or None).
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."

        if isinstance(name, SoupStrainer):
            strainer = name
        # (Possibly) special case some findAll*(...) searches
        elif text is None and not limit and not attrs and not kwargs:
            # findAll*(True)
            if name is True:
                return [element for element in generator()
                        if isinstance(element, Tag)]
            # findAll*('tag-name')
            elif isinstance(name, basestring):
                return [element for element in generator()
                        if isinstance(element, Tag) and
                        element.name == name]
            else:
                strainer = SoupStrainer(name, attrs, text, **kwargs)
        # Build a SoupStrainer
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                i = next(g)
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    def nextGenerator(self):
        i = self
        while i is not None:
            i = i.next
            yield i

    def nextSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.nextSibling
            yield i

    def previousGenerator(self):
        i = self
        while i is not None:
            i = i.previous
            yield i

    def previousSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.previousSibling
            yield i

    def parentGenerator(self):
        i = self
        while i is not None:
            i = i.parent
            yield i

    # Utility methods
    def substituteEncoding(self, str, encoding=None):
        """Replaces the %SOUP-ENCODING% placeholder with the actual
        output encoding (defaulting to utf-8)."""
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)

    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode."""
        if isinstance(s, text_type):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, binary_type):
            # NOTE(review): on Python 3, bytes has no .encode(), so this
            # branch would raise AttributeError there -- it presumably
            # relies on Python 2's implicit str.encode() behavior.
            # Confirm before exercising this path on Python 3.
            s = s.encode(encoding or "utf8")
        else:
            s = self.toEncoding(str(s), encoding or "utf8")
        return s

    # Matches a bare "<" or ">" or an "&" that does not start an entity.
    BARE_AMPERSAND_OR_BRACKET = re.compile(r"([<>]|&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;))")

    def _sub_entity(self, x):
        """Used with a regular expression to substitute the
        appropriate XML entity for an XML special character."""
        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
class NavigableString(text_type, PageElement):
    """A text node: a plain string that also carries PageElement tree
    links.  Subclasses the module's text type (``str`` on Python 3,
    ``unicode`` on Python 2), so it can be used anywhere a string can."""

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, text_type):
            return text_type.__new__(cls, value)
        return text_type.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickle/copy support: recreate the object from its rendered form.
        return (NavigableString.__str__(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr))

    def __unicode__(self):
        # Python 2 only (Python 3 never calls __unicode__).
        # NOTE(review): on Python 3 str has no .decode(), so this would
        # fail if called directly there.
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # Substitute outgoing XML entities.
        data = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, self)
        if encoding:
            # NOTE(review): .encode() returns bytes on Python 3, which
            # makes str(node) misbehave there; this preserves the
            # Python 2 contract of returning an encoded byte string.
            return data.encode(encoding)
        else:
            return data
class CData(NavigableString):
    """A CDATA section, rendered with the <![CDATA[...]]> wrapper."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        inner = NavigableString.__str__(self, encoding)
        return "<![CDATA[%s]]>" % inner
class ProcessingInstruction(NavigableString):
    """A processing instruction, rendered with the <?...?> wrapper."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = self
        # Honor the declared-encoding placeholder, if present.
        if "%SOUP-ENCODING%" in text:
            text = self.substituteEncoding(text, encoding)
        encoded = self.toEncoding(text, encoding)
        return "<?%s?>" % encoded
class Comment(NavigableString):
    """An HTML/XML comment, rendered with the <!--...--> wrapper."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        inner = NavigableString.__str__(self, encoding)
        return "<!--%s-->" % inner
class Declaration(NavigableString):
    """A declaration (e.g. DOCTYPE), rendered with the <!...> wrapper."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        inner = NavigableString.__str__(self, encoding)
        return "<!%s>" % inner
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""

    def _convertEntities(self, match):
        """Used in a call to re.sub to replace HTML, XML, and numeric
        entities with the appropriate Unicode characters. If HTML
        entities are being converted, any unrecognized entities are
        escaped."""
        # NOTE: ``unichr`` is a Python 2 builtin; the module's
        # compatibility shims are expected to alias it on Python 3.
        try:
            x = match.group(1)
            if self.convertHTMLEntities and x in name2codepoint:
                return unichr(name2codepoint[x])
            elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
                if self.convertXMLEntities:
                    return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
                else:
                    return u'&%s;' % x
            elif len(x) > 0 and x[0] == '#':
                # Handle numeric entities
                if len(x) > 1 and x[1] == 'x':
                    return unichr(int(x[2:], 16))
                else:
                    return unichr(int(x[1:]))
            elif self.escapeUnrecognizedEntities:
                # BUGFIX: escape the leading ampersand.  Returning
                # u'&%s;' here (as before) was identical to the
                # non-escaping path, making this branch a no-op.
                return u'&amp;%s;' % x
        except ValueError: # e.g. ValueError: unichr() arg not in range(0x10000)
            pass
        # Unrecognized entity, passed through unchanged.
        return u'&%s;' % x

    def __init__(self, parser, name, attrs=None, parent=None,
                 previous=None):
        "Basic constructor."

        # We don't actually store the parser object: that lets extracted
        # chunks be garbage-collected
        self.parserClass = parser.__class__
        self.isSelfClosing = parser.isSelfClosingTag(name)
        self.name = name
        if attrs is None:
            attrs = []
        elif isinstance(attrs, dict):
            attrs = attrs.items()
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        self.containsSubstitutions = False
        self.convertHTMLEntities = parser.convertHTMLEntities
        self.convertXMLEntities = parser.convertXMLEntities
        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities

        # Convert any HTML, XML, or numeric entities in the attribute values.
        # Reference: https://github.com/pkrumins/xgoogle/pull/16/commits/3dba1165c436b0d6e5bdbd09e53ca0dbf8a043f8
        convert = lambda k_val: (k_val[0],
                                 re.sub(r"&(#\d+|#x[0-9a-fA-F]+|\w+);",
                                        self._convertEntities,
                                        k_val[1]))
        # BUGFIX: wrap in list().  On Python 3 ``map`` returns a one-shot
        # iterator, but self.attrs is iterated repeatedly (rendering,
        # __eq__, _getAttrMap, ...), so it must be a real list.
        self.attrs = list(map(convert, self.attrs))

    def getString(self):
        """Returns the single NavigableString child, if this tag has
        exactly one; otherwise None (implicitly)."""
        if (len(self.contents) == 1
            and isinstance(self.contents[0], NavigableString)):
            return self.contents[0]

    def setString(self, string):
        """Replace the contents of the tag with a string"""
        self.clear()
        self.append(string)

    string = property(getString, setString)

    def getText(self, separator=u""):
        """Returns all text in this subtree, stripped and joined with
        the given separator."""
        if not len(self.contents):
            return u""
        stopNode = self._lastRecursiveChild().next
        strings = []
        current = self.contents[0]
        while current and current is not stopNode:
            if isinstance(current, NavigableString):
                strings.append(current.strip())
            current = current.next
        return separator.join(strings)

    text = property(getText)

    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self._getAttrMap().get(key, default)

    def clear(self):
        """Extract all children."""
        for child in self.contents[:]:
            child.extract()

    def index(self, element):
        """Returns the position of the given child (by identity, not
        equality) in self.contents."""
        for i, child in enumerate(self.contents):
            if child is element:
                return i
        raise ValueError("Tag.index: element not in tag")

    def has_key(self, key):
        """True if the tag has an attribute named ``key``.

        BUGFIX: uses ``in`` instead of ``dict.has_key``, which no longer
        exists on Python 3.  Behavior on Python 2 is identical.
        """
        return key in self._getAttrMap()

    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self._getAttrMap()[key]

    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)

    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)

    def __contains__(self, x):
        return x in self.contents

    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True

    # BUGFIX: Python 3 looks for __bool__, not __nonzero__.  Without this
    # alias an empty tag would fall back to __len__ and evaluate as
    # False, contradicting the contract documented above.
    __bool__ = __nonzero__

    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self._getAttrMap()
        self.attrMap[key] = value
        found = False
        # self.attrs is the ordered list of (key, value) pairs; update
        # every occurrence of the key (bad HTML can repeat attributes).
        for i in xrange(0, len(self.attrs)):
            if self.attrs[i][0] == key:
                self.attrs[i] = (key, value)
                found = True
        if not found:
            self.attrs.append((key, value))
        self._getAttrMap()[key] = value

    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        # BUGFIX: rebuild the list instead of calling list.remove()
        # while iterating.  Bad HTML can define the same attribute
        # multiple times, and removing items from a list during
        # iteration skips adjacent duplicates.
        self.attrs = [item for item in self.attrs if item[0] != key]
        self._getAttrMap()
        # BUGFIX: ``in`` instead of dict.has_key (removed in Python 3).
        if key in self.attrMap:
            del self.attrMap[key]

    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        findAll() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return self.findAll(*args, **kwargs)

    def __getattr__(self, tag):
        #print "Getattr %s.%s" % (self.__class__, tag)
        # tag.fooTag -> first 'foo' child; tag.foo -> same, unless the
        # name looks like a dunder attribute.
        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
            return self.find(tag[:-3])
        elif tag.find('__') != 0:
            return self.find(tag)
        raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__, tag))

    def __eq__(self, other):
        """Returns true iff this tag has the same name, the same attributes,
        and the same contents (recursively) as the given tag.

        NOTE: right now this will return false if two tags have the
        same attributes in a different order. Should this be fixed?"""
        if other is self:
            return True
        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
            return False
        for i in xrange(0, len(self.contents)):
            if self.contents[i] != other.contents[i]:
                return False
        return True

    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other

    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.__str__(encoding)

    def __unicode__(self):
        # Python 2 only: render without encoding, yielding unicode.
        return self.__str__(None)

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
                prettyPrint=False, indentLevel=0):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding.

        NOTE: since Python's HTML parser consumes whitespace, this
        method is not certain to reproduce the whitespace present in
        the original string."""

        encodedName = self.toEncoding(self.name, encoding)

        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isinstance(val, basestring):
                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
                        val = self.substituteEncoding(val, encoding)

                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. Now we have a bit of a
                    #   problem. We solve it by enclosing the
                    #   attribute in single quotes, and escaping any
                    #   embedded single quotes to XML entities.
                    if '"' in val:
                        fmt = "%s='%s'"
                        if "'" in val:
                            # TODO: replace with apos when
                            # appropriate.
                            val = val.replace("'", "&squot;")

                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)

                attrs.append(fmt % (self.toEncoding(key, encoding),
                                    self.toEncoding(val, encoding)))
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % encodedName

        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.renderContents(encoding, prettyPrint, indentContents)
        if self.hidden:
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (encodedName, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s

    def decompose(self):
        """Recursively destroys the contents of this tree."""
        self.extract()
        if len(self.contents) == 0:
            return
        current = self.contents[0]
        while current is not None:
            next = current.next
            if isinstance(current, Tag):
                del current.contents[:]
            # Sever every navigational link so the nodes can be GCed.
            current.parent = None
            current.previous = None
            current.previousSibling = None
            current.next = None
            current.nextSibling = None
            current = next

    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag with pretty-printed (indented) output."""
        return self.__str__(encoding, True)

    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string."""
        s=[]
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.__str__(encoding)
            elif isinstance(c, Tag):
                s.append(c.__str__(encoding, prettyPrint, indentLevel))
            if text and prettyPrint:
                text = text.strip()
            if text:
                if prettyPrint:
                    s.append(" " * (indentLevel-1))
                s.append(text)
                if prettyPrint:
                    s.append("\n")
        return ''.join(s)

    #Soup methods

    def find(self, name=None, attrs={}, recursive=True, text=None,
             **kwargs):
        """Return only the first child of this Tag matching the given
        criteria."""
        r = None
        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findChild = find

    def findAll(self, name=None, attrs={}, recursive=True, text=None,
                limit=None, **kwargs):
        """Extracts a list of Tag objects that match the given
        criteria.  You can specify the name of the Tag and any
        attributes you want the Tag to have.

        The value of a key-value pair in the 'attrs' map can be a
        string, a list of strings, a regular expression object, or a
        callable that takes a string and returns whether or not the
        string matches for some custom definition of 'matches'. The
        same is true of the tag name."""
        generator = self.recursiveChildGenerator
        if not recursive:
            generator = self.childGenerator
        return self._findAll(name, attrs, text, limit, generator, **kwargs)
    findChildren = findAll

    # Pre-3.x compatibility methods
    first = find
    fetch = findAll

    def fetchText(self, text=None, recursive=True, limit=None):
        """Pre-3.x compatibility: find all matching text nodes."""
        return self.findAll(text=text, recursive=recursive, limit=limit)

    def firstText(self, text=None, recursive=True):
        """Pre-3.x compatibility: find the first matching text node."""
        return self.find(text=text, recursive=recursive)

    #Private methods

    def _getAttrMap(self):
        """Initializes a map representation of this tag's attributes,
        if not already initialized."""
        # BUGFIX: give getattr a default.  The old two-argument form only
        # avoided AttributeError by accident: __getattr__ turned the
        # missing 'attrMap' into a (slow) tree search that returned None.
        if not getattr(self, 'attrMap', None):
            self.attrMap = {}
            for (key, value) in self.attrs:
                self.attrMap[key] = value
        return self.attrMap

    #Generator methods
    def childGenerator(self):
        # Just use the iterator from the contents
        return iter(self.contents)

    def recursiveChildGenerator(self):
        if not len(self.contents):
            return # Note: https://stackoverflow.com/a/30217723 (PEP 479)
        stopNode = self._lastRecursiveChild().next
        current = self.contents[0]
        while current and current is not stopNode:
            yield current
            current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        # A bare string for 'attrs' is shorthand for a CSS class match.
        if isinstance(attrs, basestring):
            kwargs['class'] = _match_css_class(attrs)
            attrs = None
        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        """Matches this strainer against a tag (or a prospective tag
        given as a name plus an attribute collection).  Returns the
        matched object, or None."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        callFunctionWithTagData = callable(self.name) \
                                and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    # Lazily build a dict view of the attributes.
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        # BUGFIX: strings are excluded explicitly.  On Python 3 strings
        # have __iter__, so without the check a plain string would be
        # iterated character by character here instead of reaching the
        # text-matching branch below.  Identical behavior on Python 2,
        # where strings have no __iter__.
        if hasattr(markup, "__iter__") \
                and not isinstance(markup, (Tag, basestring)):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception("I don't know how to match against a %s" \
                  % markup.__class__)
        return found

    def _matches(self, markup, matchAgainst):
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst is True:
            result = markup is not None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isinstance(markup, basestring):
                markup = text_type(markup)
            #Now we know that markup is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif hasattr(matchAgainst, '__iter__') \
                    and not isinstance(matchAgainst, basestring):
                # List-like.  BUGFIX: strings are excluded -- on Python 3
                # they are iterable, and without the check a string
                # matchAgainst became a substring test instead of falling
                # through to the equality comparison below (the Python 2
                # behavior).
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # Dict-like: true if the markup string is one of its keys.
                # BUGFIX: was ``markup.has_key(matchAgainst)``, which
                # could never work -- markup is a string or None here.
                result = markup in matchAgainst
            elif matchAgainst and isinstance(markup, basestring):
                if isinstance(markup, text_type):
                    matchAgainst = text_type(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # BUGFIX: the original called ``list.__init__([])``, which
        # initialized a throwaway list literal rather than ``self``
        # (harmless only because a new list is already empty).
        list.__init__(self)
        # The SoupStrainer that produced these results.
        self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            #It's a map. Merge it.
            for k,v in portion.items():
                built[k] = v
        elif hasattr(portion, '__iter__') and not isinstance(portion, (str, bytes)):
            #It's a list. Map each item to the default.
            # BUGFIX: strings are excluded explicitly.  On Python 3
            # ``str`` is iterable, so without the check a single tag
            # name would be split into one entry per character instead
            # of being treated as a scalar.  (On Python 2, str/unicode
            # have no __iter__, so this is behavior-identical there.)
            for k in portion:
                built[k] = default
        else:
            #It's a scalar. Map it to the default.
            built[portion] = default
    return built
# Now, the parser classes.
class BeautifulStoneSoup(Tag, sgmllib.SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
# Per-parser tag-behavior tables; all empty here, to be overridden by
# HTML-aware subclasses.
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
PRESERVE_WHITESPACE_TAGS = []

# (regex, replacement) pairs applied to the raw markup before parsing,
# fixing the two common sgmllib-killers: "<br/>" (no space before the
# tag close) and "<! --...>" (extra whitespace in a declaration).
MARKUP_MASSAGE = [(re.compile(r'(<[^<>]*)/>'),
                   lambda x: x.group(1) + ' />'),
                  (re.compile(r'<!\s+([^<>]*)>'),
                   lambda x: '<!' + x.group(1) + '>')
                  ]

# Name given to the synthetic root tag that wraps the whole document.
ROOT_TAG_NAME = u'[document]'

# Accepted values for the ``convertEntities`` constructor argument.
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES

# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
             markupMassage=True, smartQuotesTo=XML_ENTITIES,
             convertEntities=None, selfClosingTags=None, isHTML=False):
    """The Soup object is initialized as the 'root tag', and the
    provided markup (which can be a string or a file-like object)
    is fed into the underlying parser.

    sgmllib will process most bad HTML, and the BeautifulSoup
    class has some tricks for dealing with some HTML that kills
    sgmllib, but Beautiful Soup can nonetheless choke or lose data
    if your data uses self-closing tags or declarations
    incorrectly.

    By default, Beautiful Soup uses regexes to sanitize input,
    avoiding the vast majority of these problems. If the problems
    don't apply to you, pass in False for markupMassage, and
    you'll get better performance.

    The default parser massage techniques fix the two most common
    instances of invalid HTML that choke sgmllib:

     <br/> (No space between name of closing tag and tag close)
     <! --Comment--> (Extraneous whitespace in declaration)

    You can pass in a custom list of (RE object, replace method)
    tuples to get Beautiful Soup to scrub your input the way you
    want."""
    self.parseOnlyThese = parseOnlyThese
    self.fromEncoding = fromEncoding
    self.smartQuotesTo = smartQuotesTo
    self.convertEntities = convertEntities
    # Set the rules for how we'll deal with the entities we
    # encounter
    if self.convertEntities:
        # It doesn't make sense to convert encoded characters to
        # entities even while you're converting entities to Unicode.
        # Just convert it all to Unicode.
        self.smartQuotesTo = None
        if convertEntities == self.HTML_ENTITIES:
            self.convertXMLEntities = False
            self.convertHTMLEntities = True
            self.escapeUnrecognizedEntities = True
        elif convertEntities == self.XHTML_ENTITIES:
            self.convertXMLEntities = True
            self.convertHTMLEntities = True
            self.escapeUnrecognizedEntities = False
        elif convertEntities == self.XML_ENTITIES:
            self.convertXMLEntities = True
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False
    else:
        self.convertXMLEntities = False
        self.convertHTMLEntities = False
        self.escapeUnrecognizedEntities = False

    # Per-instance self-closing tags, merged with the class-level table
    # by isSelfClosingTag().
    self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
    sgmllib.SGMLParser.__init__(self)

    if hasattr(markup, 'read'): # It's a file-type object.
        markup = markup.read()
    self.markup = markup
    self.markupMassage = markupMassage
    try:
        self._feed(isHTML=isHTML)
    except StopParsing:
        pass
    self.markup = None # The markup can now be GCed
def convert_charref(self, name):
    """This method fixes a bug in Python's SGMLParser."""
    try:
        codepoint = int(name)
    except ValueError:
        return
    # Only handle the ASCII range (0-127, not 0-255); anything else
    # is left for other entity-handling machinery.
    if not 0 <= codepoint <= 127:
        return
    return self.convert_codepoint(codepoint)
    def _feed(self, inDocumentEncoding=None, isHTML=False):
        """Decode self.markup to Unicode, scrub it with the markup-massage
        regexes, and run it through the SGML parser to build the tree.

        inDocumentEncoding -- an encoding discovered inside the document
            (e.g. by start_meta) to try first when re-parsing.
        isHTML -- whether the markup should be treated as HTML while
            sniffing encodings.
        """
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, text_type):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            # Not Unicode yet: let UnicodeDammit sniff and convert,
            # trying the caller-supplied encoding first, then the one
            # declared inside the document.
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                # A non-iterable truthy value (e.g. the default True)
                # means "use the standard fix-up regexes".
                if not hasattr(self.markupMassage, "__iter__"):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()

        sgmllib.SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
#print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.startswith('start_') or methodName.startswith('end_') \
or methodName.startswith('do_'):
return sgmllib.SGMLParser.__getattr__(self, methodName)
elif not methodName.startswith('__'):
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return name in self.SELF_CLOSING_TAGS \
or name in self.instanceSelfClosingTags
    def reset(self):
        """Reset parser state so a fresh document can be fed:
        reinitialize this object as the root Tag, clear the parse
        stacks, and push the soup itself as the bottom of the stack."""
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        sgmllib.SGMLParser.reset(self)
        self.currentData = []   # pending text chunks, joined by endData()
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []    # open QUOTE_TAGS (e.g. <script>) sections
        self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
    def endData(self, containerClass=NavigableString):
        """Join the buffered character data and attach it to the tree
        as a single node of `containerClass`."""
        if self.currentData:
            currentData = u''.join(self.currentData)
            # Collapse a pure-whitespace run to a single newline/space,
            # unless we are inside a whitespace-preserving tag (<pre>,
            # <textarea>).
            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
                not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            # With a parse filter active, drop top-level text the filter
            # rejects (parseOnlyThese is presumably a SoupStrainer --
            # defined elsewhere in this file).
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            # Link the new node into both the parent's contents and the
            # document-order sibling chain.
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instqance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in xrange(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in xrange(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.

        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.

         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        # NESTABLE_TAGS maps a tag name to the tags that reset its
        # nesting; mere membership marks the tag as nestable.
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = name in self.RESET_NESTING_TAGS
        popTo = None
        inclusive = True
        # Walk the open-tag stack from the top down, looking for the
        # point this tag should implicitly close.
        for i in xrange(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurance.
                popTo = name
                break
            if (nestingResetTriggers is not None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers is None and isResetNesting
                    and p.name in self.RESET_NESTING_TAGS):

                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            # NOTE(review): this assignment is overwritten at the top of
            # the next iteration; it appears to be vestigial.
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)
    def unknown_starttag(self, name, attrs, selfClosing=0):
        """Build a Tag node for an opening tag, implicitly closing tags
        as dictated by the nesting rules, and entering literal mode for
        QUOTE_TAGS (e.g. <script>)."""
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
            self.handle_data('<%s%s>' % (name, attrs))
            return
        # Flush any buffered text before opening a new tag.
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            # Apply nesting rules: implicitly close tags this tag must
            # not be nested inside.
            self._smartPop(name)

        # With a parse filter active, skip top-level tags the filter
        # rejects.
        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        # Link the new tag into the tree and the sibling chain.
        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            # Self-closing tags never stay open on the stack.
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            self.quoteStack.append(name)
            self.literal = 1
        return tag
def unknown_endtag(self, name):
#print "End tag %s" % name
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
    def handle_data(self, data):
        # Buffer raw character data; it is joined into one string and
        # attached to the tree by endData().
        self.currentData.append(data)
    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass (e.g. Comment, CData, Declaration)."""
        # Flush pending text first so `text` becomes its own node.
        self.endData()
        self.handle_data(text)
        self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
    def handle_comment(self, text):
        """Attach an HTML/XML comment to the tree as a Comment node."""
        self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML and/or XML entity references to the corresponding Unicode
characters."""
data = None
if self.convertHTMLEntities:
try:
data = unichr(name2codepoint[ref])
except KeyError:
pass
if not data and self.convertXMLEntities:
data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
if not data and self.convertHTMLEntities and \
not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
# TODO: We've got a problem here. We're told this is
# an entity reference, but it's not an XML entity
# reference or an HTML entity reference. Nonetheless,
# the logical thing to do is to pass it through as an
# unrecognized entity reference.
#
# Except: when the input is "&carol;" this function
# will be called with input "carol". When the input is
# "AT&T", this function will be called with input
# "T". We have no way of knowing whether a semicolon
# was present originally, so we don't know whether
# this is an unknown entity or just a misplaced
# ampersand.
#
# The more common case is a misplaced ampersand, so I
# escape the ampersand and omit the trailing semicolon.
data = "&%s" % ref
if not data:
# This case is different from the one above, because we
# haven't already gone through a supposedly comprehensive
# mapping of entities to Unicode characters. We might not
# have gone through any mapping at all. So the chances are
# very high that this is a real entity, and not a
# misplaced ampersand.
data = "&%s;" % ref
self.handle_data(data)
    def handle_decl(self, data):
        """Handle DOCTYPEs and the like as Declaration objects."""
        self._toStringSubclass(data, Declaration)
    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object. Returns the index just past the
        declaration."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            # CDATA section: capture everything up to ']]>' (or the end
            # of the input if it is unterminated).
            k = self.rawdata.find(']]>', i)
            if k == -1:
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = sgmllib.SGMLParser.parse_declaration(self, i)
            except sgmllib.SGMLParseError:
                # Malformed declaration: swallow the remaining input as
                # plain text instead of failing the whole parse.
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulSoup(BeautifulStoneSoup):

    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurrence of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurrence
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
       but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        # HTML defaults: convert MS smart quotes to HTML entities and
        # enable HTML-specific encoding sniffing.
        if 'smartQuotesTo' not in kwargs:
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    # Tags that never receive a closing tag.
    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ('br' , 'hr', 'input', 'img', 'meta',
                                     'spacer', 'link', 'frame', 'base', 'col'))

    # Whitespace inside these tags is significant and preserved.
    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    # The contents of these tags are treated as literal text, not markup.
    QUOTE_TAGS = {'script' : None, 'textarea' : None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center')

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)

    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        # Scan the attributes for http-equiv and content.
        for i in xrange(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        raise StopParsing
                pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    # Raised by start_meta to abort the current parse so the document
    # can be re-fed with a newly discovered encoding.
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):

    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-so-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # NOTE(review): 'strong' and 'big' each appear twice below; the
    # duplicates are harmless (buildTagMap builds a mapping) but could
    # be removed.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big')

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)

    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # Only <noscript> resets nesting, and no tags are treated as
    # nestable.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        # Before popping, promote a tag whose only child is a string to
        # an attribute of its parent, unless the parent already has an
        # attribute by that name.
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            # Bug fix: dict.has_key() was removed in Python 3; the rest
            # of this file uses Python-3-compatible idioms, so use the
            # `in` operator here as well.
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                tag.name not in parent.attrMap):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }

    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        # NOTE(review): the mutable default [] for overrideEncodings is
        # only iterated, never mutated, so it is harmless here.
        self.declaredHTMLEncoding = None
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        if markup == '' or isinstance(markup, text_type):
            # Already Unicode (or empty): nothing to convert.
            self.originalEncoding = None
            self.unicode = text_type(markup)
            return

        u = None
        # Try encodings in priority order: caller-supplied overrides,
        # then the encoding declared inside the document, then the one
        # sniffed from a BOM.
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, text_type):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break

        self.unicode = u
        if not u: self.originalEncoding = None

    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        if isinstance(sub, tuple):
            # A (entity-name, hex-codepoint) pair: emit whichever flavor
            # the caller asked for.
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub

    def _convertFrom(self, proposed):
        """Try to decode self.markup from the `proposed` encoding.
        Returns the decoded markup on success, None on failure."""
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda x: self._subMSChar(x.group(1)),
                      markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception as e:
            # Decoding failed; the caller will try the next candidate.
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''
        # NOTE(review): these BOM comparisons use str literals; if
        # `data` is a bytes object under Python 3 they never match.
        # Verify against how callers feed data in.
        # strip Byte Order Mark (if present)
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = text_type(data, encoding)
        return newdata

    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding.
        Returns (data, declared encoding, BOM-sniffed encoding)."""
        xml_encoding = sniffed_xml_encoding = None
        try:
            # Sniff BOMs and fixed byte patterns of the '<' and '?'
            # characters in the various Unicode encodings.
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = text_type(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = text_type(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = text_type(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = text_type(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = text_type(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = text_type(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = text_type(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = text_type(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = text_type(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            xml_encoding_match = None
        # Look for a declared encoding in the XML declaration, or (for
        # HTML) in a META charset attribute.
        xml_encoding_match = re.compile(
            r'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
        if not xml_encoding_match and isHTML:
            regexp = re.compile(r'<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
            # A declared generic UTF-16/32 name is less specific than
            # what the BOM told us; prefer the sniffed encoding.
            if sniffed_xml_encoding and \
               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
                                 'utf16', 'u16')):
                xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding


    def find_codec(self, charset):
        """Map a charset name to a codec Python recognizes, trying the
        alias table and dash/underscore variants."""
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset

    def _codec(self, charset):
        # Return `charset` if Python has a codec for it, else None.
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec

    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        """Translate an EBCDIC-encoded byte string to ASCII, building
        the translation table lazily on first use."""
        # NOTE(review): string.maketrans does not exist under Python 3
        # (it became bytes.maketrans/str.maketrans) -- this path only
        # works on Python 2. Verify whether EBCDIC input is still
        # supported.
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
                ''.join(map(chr, xrange(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    # Windows-1252 "smart" punctuation bytes mapped to either an
    # (entity-name, hex-codepoint) pair or a plain replacement string.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################

#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    # Parse markup from stdin and pretty-print the resulting tree.
    soup = BeautifulSoup(sys.stdin)
    print(soup.prettify())
| [
"[email protected]"
] | |
b2cf7f56580add4bcda1e1232608b9276c0b1eb1 | 946e03c8f6cddf9f0de5b30356c4926f80d12e8a | /h3/defs/hlmt.py | 3ae00d0222aafb14f5d2bcf89fb7a2faa128efb1 | [
"MIT"
] | permissive | holy-crust/reclaimer | 790f51ea2ef45f91cee28c81ea94a74cc2181078 | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | refs/heads/master | 2020-07-21T13:46:24.096505 | 2019-09-05T20:30:06 | 2019-09-05T20:30:06 | 206,885,681 | 0 | 0 | MIT | 2019-09-06T22:57:01 | 2019-09-06T22:57:01 | null | UTF-8 | Python | false | false | 16,943 | py | ############# Credits and version info #############
# Definition generated from Assembly XML tag def
# Date generated: 2018/12/03 04:56
#
# revision: 1 author: Assembly
# Generated plugin from scratch.
# revision: 2 author: -DeToX-
# Named Some Values...
# revision: 3 author: Halodu03de
# added chunk code
# revision: 4 author: Lord Zedd
# Lots of updates
# revision: 5 author: Moses_of_Egypt
# Cleaned up and converted to SuPyr definition
#
####################################################
from ..common_descs import *
from .objs.tag import *
from supyr_struct.defs.tag_def import TagDef
# Enum option tuples referenced by the tag structs below.  The integer value
# serialized in the tag is the option's index within the tuple, so order and
# spelling must not change.

# Origin of the model-object bounding data (see hlmt_model_object_data).
hlmt_model_object_data_type = (
    "not_set",
    "user_defined",
    "auto_generated",
    )

# How much of a physics-constraint group an instant response affects.
hlmt_new_damage_info_damage_section_instant_response_constraint_damage_type = (
    "none",
    "destroy_one_of_group",
    "destroy_entire_group",
    "loosen_one_of_group",
    "loosen_entire_group",
    )

# Which damage an instant response reacts to.  The misspelled "recieves_*"
# names are kept verbatim: they are the serialized option names.
hlmt_new_damage_info_damage_section_instant_response_response_type = (
    "recieves_all_damage",
    "recieves_area_effect_damage",
    "recieves_local_damage",
    )

# Meaning unknown (mirrors the tag def); used by the SEnum16
# "unknown_special_damage" field of the instant response struct.
hlmt_new_damage_info_damage_section_instant_response_unknown_special_damage = (
    "none",
    "_1",
    "_2",
    "_3",
    )

# Render sort order of a variant region relative to the base model.
hlmt_variant_region_sort_order = (
    "no_sorting",
    "neg_5_closest",
    "neg_4",
    "neg_3",
    "neg_2",
    "neg_1",
    "_0_same_as_model",
    "_1",
    "_2",
    "_3",
    "_4",
    "_5_farthest",
    )
hlmt_variant_region_permutation_state = Struct("state",
h3_string_id("name"),
SInt8("model_permutation_index"),
Bool8("property_flags",
"blurred",
"hella_blurred",
"shielded",
),
SEnum16("state", *vehi_friction_point_model_state_destroyed),
h3_dependency("looping_effect"),
h3_string_id("looping_effect_marker_name"),
Float("initial_probability"),
ENDIAN=">", SIZE=32
)
hlmt_variant_region_permutation = Struct("permutation",
h3_string_id("name"),
SInt8("model_permutation_index"),
Bool8("flags",
"copy_states_to_all_permutations",
),
SInt8("unknown_0", VISIBLE=False),
SInt8("unknown_1", VISIBLE=False),
Float("probability"),
h3_reflexive("states", hlmt_variant_region_permutation_state),
BytesRaw("unknown_2", SIZE=12, VISIBLE=False),
ENDIAN=">", SIZE=36
)
hlmt_variant_region = Struct("region",
h3_string_id("name"),
SInt8("model_region_index"),
SInt8("unknown", VISIBLE=False),
SInt16("parent_variant_index"),
h3_reflexive("permutations", hlmt_variant_region_permutation),
SEnum32("sort_order", *hlmt_variant_region_sort_order),
ENDIAN=">", SIZE=24
)
hlmt_variant_object = Struct("object",
h3_string_id("parent_marker"),
h3_string_id("child_marker"),
h3_string_id("child_variant"),
h3_dependency("child_object"),
ENDIAN=">", SIZE=28
)
hlmt_variant = Struct("variant",
h3_string_id("name"),
Array("model_region_index_array", SUB_STRUCT=SInt8("model_region_index"), SIZE=16),
h3_reflexive("regions", hlmt_variant_region),
h3_reflexive("objects", hlmt_variant_object),
SInt32("instance_group_index"),
BytesRaw("unknown", SIZE=8, VISIBLE=False),
ENDIAN=">", SIZE=56
)
hlmt_instance_group_instance_member = Struct("instance_member",
SInt32("unknown", VISIBLE=False),
h3_string_id("instance_name"),
Float("probability"),
Bool32("instance_flags_1", *("instance_%s" % i for i in range(32))),
Bool32("instance_flags_2", *("instance_%s" % i for i in range(32, 64))),
ENDIAN=">", SIZE=20
)
hlmt_instance_group = Struct("instance_group",
h3_string_id("name"),
SInt32("unknown", VISIBLE=False),
h3_reflexive("instance_members", hlmt_instance_group_instance_member),
Float("probability"),
ENDIAN=">", SIZE=24
)
# Material table entry: links a named material to a damage section and to a
# global material index.  The "unknown_*" fields are unidentified padding or
# data carried over from the generated tag definition.
hlmt_material = Struct("material",
    h3_string_id("name"),
    SInt16("unknown_0", VISIBLE=False),
    SInt16("damage_section_index"),
    SInt16("unknown_1", VISIBLE=False),
    SInt16("unknown_2", VISIBLE=False),
    h3_string_id("material_name"),
    SInt16("global_material_index"),
    SInt16("unknown_3", VISIBLE=False),
    ENDIAN=">", SIZE=20
    )
hlmt_new_damage_info_damage_section_instant_response = Struct("instant_response",
SEnum16("response_type", *hlmt_new_damage_info_damage_section_instant_response_response_type),
SEnum16("constraint_damage_type", *hlmt_new_damage_info_damage_section_instant_response_constraint_damage_type),
h3_string_id("trigger"),
Bool32("flags",
"kills_object",
"inhibits_melee_attack",
"inhibits_weapon_attack",
"inhibits_walking",
"forces_drop_weapon",
"kills_weapon_primary_trigger",
"kills_weapon_secondary_trigger",
"destroys_object",
"damages_weapon_primary_trigger",
"damages_weapon_secondary_trigger",
"light_damage_left_turn",
"major_damage_left_turn",
"light_damage_right_turn",
"major_damage_right_turn",
"light_damage_engine",
"major_damage_engine",
"kills_object_no_player_solo",
"causes_detonation",
"destroy_all_group_constraints",
"kills_variant_objects",
"force_unattached_effects",
"fires_under_threshold",
"triggers_special_death",
"only_on_special_death",
"only_not_on_special_death",
("causes_detonation_in_single_player", 1 << 26),
),
Float("damage_threshold"),
h3_dependency("primary_transition_effect"),
h3_dependency("secondary_transition_effect"),
h3_dependency("transition_damage_effect"),
h3_string_id("region"),
SEnum16("new_state", *vehi_friction_point_model_state_destroyed),
SInt16("runtime_region_index"),
h3_string_id("secondary_region"),
SEnum16("secondary_new_state", *vehi_friction_point_model_state_destroyed),
SInt16("secondary_runtime_region_index"),
SInt16("unknown"),
SEnum16("unknown_special_damage", *hlmt_new_damage_info_damage_section_instant_response_unknown_special_damage),
h3_string_id("special_damage_case"),
h3_string_id("effect_marker_name"),
h3_string_id("damage_effect_marker_name"),
Float("response_delay"),
h3_dependency("delay_effect"),
h3_string_id("delay_effect_marker_name"),
h3_string_id("ejecting_seat_label"),
Float("skip_fraction"),
h3_string_id("destroyed_child_object_marker_name"),
Float("total_damage_threshold"),
ENDIAN=">", SIZE=136
)
hlmt_new_damage_info_damage_section = Struct("damage_section",
h3_string_id("name"),
Bool32("flags",
"absorbs_body_damage",
"takes_full_damage_when_object_dies",
"cannot_die_with_riders",
"takes_full_damage_when_object_destroyed",
"restored_on_ressurection",
("headshotable", 1 << 7),
"ignores_shields",
),
Float("vitality_percentage"),
h3_reflexive("instant_responses", hlmt_new_damage_info_damage_section_instant_response),
BytesRaw("unknown_0", SIZE=24, VISIBLE=False),
Float("stun_time"),
Float("recharge_time"),
Float("unknown_1", VISIBLE=False),
h3_string_id("resurrection_region_name"),
SInt16("ressurection_region_runtime_index"),
SInt16("unknown_2", VISIBLE=False),
ENDIAN=">", SIZE=68
)
hlmt_new_damage_info_node = Struct("node",
SInt16("unknown_0", VISIBLE=False),
SInt16("unknown_1", VISIBLE=False),
BytesRaw("unknown_2", SIZE=12, VISIBLE=False),
VISIBLE=False,
ENDIAN=">", SIZE=16
)
hlmt_new_damage_info_damage_seat_unknown = Struct("unknown",
h3_string_id("node"),
SInt16("unknown_0", VISIBLE=False),
SInt16("unknown_1", VISIBLE=False),
BytesRaw("unknown_2", SIZE=36, VISIBLE=False),
VISIBLE=False,
ENDIAN=">", SIZE=44
)
hlmt_new_damage_info_damage_seat = Struct("damage_seat",
h3_string_id("seat_label"),
Float("direct_damage_scale"),
Float("damage_transfer_fall_off_radius"),
Float("maximum_transfer_damage_scale"),
Float("minimum_transfer_damage_scale"),
h3_reflexive("unknown", hlmt_new_damage_info_damage_seat_unknown),
ENDIAN=">", SIZE=32
)
hlmt_new_damage_info_damage_constraint = Struct("damage_constraint",
h3_string_id("physics_model_constraint_name"),
h3_string_id("damage_constraint_name"),
h3_string_id("damage_constraint_group_name"),
Float("group_probability_scale"),
SEnum16("type", *phmo_node_edge_constraint_type),
SInt16("idx"),
ENDIAN=">", SIZE=20
)
hlmt_new_damage_info = Struct("new_damage_info",
Bool32("flags",
"takes_shield_damage_for_children",
"takes_body_damage_for_children",
"always_shields_friendly_damage",
"passes_area_damage_to_children",
"parent_never_takes_body_damage_for_children",
"only_damaged_by_explosives",
"parent_never_takes_shield_damage_for_children",
"cannot_die_from_damage",
"passes_attached_damage_to_riders",
("only_damaged_by_player", 1 << 11),
),
h3_string_id("global_indirect_material_name"),
SInt16("indirect_damage_section"),
SInt16("unknown_0", VISIBLE=False),
BytesRaw("unknown_1", SIZE=4, VISIBLE=False),
SEnum8("collision_damage_reporting_type", *proj_damage_reporting_type),
SEnum8("response_damage_reporting_type", *proj_damage_reporting_type),
SInt16("unknown_2", VISIBLE=False),
BytesRaw("unknown_3", SIZE=20, VISIBLE=False),
Float("maximum_vitality"),
Float("minimum_stun_damage_0"),
Float("stun_time_0"),
Float("recharge_time"),
Float("recharge_fraction"),
Pad(64),
Float("maximum_shield_vitality"),
h3_string_id("global_shield_material_name"),
Float("minimum_stun_damage_1"),
Float("stun_time_1"),
Float("shield_recharge_time"),
Float("shield_damaged_threshold"),
h3_dependency("shield_damaged_effect"),
h3_dependency("shield_depleted_effect"),
h3_dependency("shield_recharging_effect"),
h3_reflexive("damage_sections", hlmt_new_damage_info_damage_section),
h3_reflexive("nodes", hlmt_new_damage_info_node),
SInt16("global_shield_material_index"),
SInt16("global_indirect_material_index"),
Float("unknown_5", VISIBLE=False),
Float("unknown_6", VISIBLE=False),
h3_reflexive("damage_seats", hlmt_new_damage_info_damage_seat),
h3_reflexive("damage_constraints", hlmt_new_damage_info_damage_constraint),
ENDIAN=">", SIZE=256
)
hlmt_target = Struct("target",
h3_string_id("marker_name"),
Float("size"),
float_rad("cone_angle", VISIBLE=False),
SInt16("damage_section"),
SInt16("variant"),
Float("targeting_relevance"),
BytesRaw("unknown", SIZE=4, VISIBLE=False),
Bool32("flags",
"locked_by_human_tracking",
"locked_by_plasma_tracking",
"headshot",
("vulnerable", 1 << 4),
("always_locked_by_plasma_tracking", 1 << 6),
),
Float("lock_on_distance"),
VISIBLE=False,
ENDIAN=">", SIZE=32
)
hlmt_collision_region_permutation = Struct("permutation",
h3_string_id("name"),
Bool8("flags",
"cannot_be_chosen_randomly",
),
SInt8("collision_permutation_index"),
SInt8("physics_permutation_index"),
SInt8("unknown", VISIBLE=False),
ENDIAN=">", SIZE=8
)
hlmt_collision_region = Struct("collision_region",
h3_string_id("name"),
SInt8("collision_region_index"),
SInt8("physics_region_index"),
SInt8("unknown_0", VISIBLE=False),
SInt8("unknown_1", VISIBLE=False),
h3_reflexive("permutations", hlmt_collision_region_permutation),
ENDIAN=">", SIZE=20
)
# Model node (bone): hierarchy indices, the default local transform, and the
# "inverse_*" basis vectors/position (presumably the inverse bind pose --
# not confirmed by this file, names come from the generated tag def).
hlmt_node = Struct("node",
    h3_string_id("name"),
    SInt16("parent_node"),
    SInt16("first_child_node"),
    SInt16("next_sibling_node"),
    SInt16("import_node_index"),
    QStruct("default_translation", INCLUDE=xyz_float),
    QStruct("default_rotation", INCLUDE=ijkw_float),
    Float("default_scale"),
    QStruct("inverse_forward", INCLUDE=ijk_float),
    QStruct("inverse_left", INCLUDE=ijk_float),
    QStruct("inverse_up", INCLUDE=ijk_float),
    QStruct("inverse_position", INCLUDE=xyz_float),
    ENDIAN=">", SIZE=92
    )
hlmt_model_object_data = Struct("model_object_data",
SEnum16("type", *hlmt_model_object_data_type),
SInt16("unknown", VISIBLE=False),
QStruct("offset", INCLUDE=xyz_float),
Float("radius"),
ENDIAN=">", SIZE=20
)
hlmt_unknown_3 = Struct("unknown_3",
h3_string_id("region"),
h3_string_id("permutation"),
VISIBLE=False,
ENDIAN=">", SIZE=8
)
hlmt_unknown_4 = Struct("unknown_4",
h3_string_id("unknown_0", VISIBLE=False),
BytesRaw("unknown_1", SIZE=4, VISIBLE=False),
VISIBLE=False,
ENDIAN=">", SIZE=8
)
hlmt_unknown_5 = Struct("unknown_5",
h3_string_id("marker_0", VISIBLE=False),
BytesRaw("unknown_0", SIZE=4, VISIBLE=False),
h3_string_id("marker_1", VISIBLE=False),
BytesRaw("unknown_1", SIZE=8, VISIBLE=False),
VISIBLE=False,
ENDIAN=">", SIZE=20
)
hlmt_body = Struct("tagdata",
h3_dependency("model"),
h3_dependency("collision_model"),
h3_dependency("animation"),
h3_dependency("physics_model"),
Float("reduce_to_l1_super_low"),
Float("reduce_to_l2_low"),
Float("reduce_to_l3_medium"),
Float("reduce_to_l4_high"),
Float("reduce_to_l5_super_high"),
h3_dependency("lod_model"),
h3_reflexive("variants", hlmt_variant),
h3_reflexive("instance_groups", hlmt_instance_group),
h3_reflexive("materials", hlmt_material),
h3_reflexive("new_damage_info", hlmt_new_damage_info),
h3_reflexive("targets", hlmt_target),
h3_reflexive("collision_regions", hlmt_collision_region),
h3_reflexive("nodes", hlmt_node),
BytesRaw("unknown_0", SIZE=4, VISIBLE=False),
h3_reflexive("model_object_data", hlmt_model_object_data),
h3_dependency("primary_dialog"),
h3_dependency("secondary_dialog"),
Bool32("flags",
"active_camo_always_on",
"active_camo_always_merge",
"active_camo_never_merge",
),
h3_string_id("default_dialogue_effect"),
Bool32("render_only_node_flags_1", *("node_%s" % i for i in range(32)), VISIBLE=False),
Bool32("render_only_node_flags_2", *("node_%s" % i for i in range(32, 64)), VISIBLE=False),
Bool32("render_only_node_flags_3", *("node_%s" % i for i in range(64, 96)), VISIBLE=False),
Bool32("render_only_node_flags_4", *("node_%s" % i for i in range(96, 128)), VISIBLE=False),
Bool32("render_only_node_flags_5", *("node_%s" % i for i in range(128, 160)), VISIBLE=False),
Bool32("render_only_node_flags_6", *("node_%s" % i for i in range(160, 192)), VISIBLE=False),
Bool32("render_only_node_flags_7", *("node_%s" % i for i in range(192, 224)), VISIBLE=False),
Bool32("render_only_node_flags_8", *("node_%s" % i for i in range(224, 256)), VISIBLE=False),
Bool32("render_only_section_flags_1", *("section_%s" % i for i in range(32)), VISIBLE=False),
Bool32("render_only_section_flags_2", *("section_%s" % i for i in range(32, 64)), VISIBLE=False),
Bool32("render_only_section_flags_3", *("section_%s" % i for i in range(64, 96)), VISIBLE=False),
Bool32("render_only_section_flags_4", *("section_%s" % i for i in range(96, 128)), VISIBLE=False),
Bool32("render_only_section_flags_5", *("section_%s" % i for i in range(128, 160)), VISIBLE=False),
Bool32("render_only_section_flags_6", *("section_%s" % i for i in range(160, 192)), VISIBLE=False),
Bool32("render_only_section_flags_7", *("section_%s" % i for i in range(192, 224)), VISIBLE=False),
Bool32("render_only_section_flags_8", *("section_%s" % i for i in range(224, 256)), VISIBLE=False),
Bool32("runtime_flags",
"contains_runtime_nodes",
VISIBLE=False,
),
BytesRaw("scenario_load_parameters_block", SIZE=12, VISIBLE=False),
SInt16("unknown_1", VISIBLE=False),
SInt16("unknown_2", VISIBLE=False),
h3_reflexive("unknown_3", hlmt_unknown_3),
h3_reflexive("unknown_4", hlmt_unknown_4),
h3_reflexive("unknown_5", hlmt_unknown_5),
h3_dependency("shield_impact_third_person"),
h3_dependency("shield_impact_first_person"),
ENDIAN=">", SIZE=392
)
def get():
    """Return this module's TagDef instance (hlmt_def, defined below)."""
    return hlmt_def
# The complete 'hlmt' tag definition: blam header followed by the body
# struct.  Extension is looked up from the shared class-fourcc table;
# all Halo 3 tag data is big-endian.
hlmt_def = TagDef("hlmt",
    h3_blam_header('hlmt'),
    hlmt_body,
    ext=".%s" % h3_tag_class_fcc_to_ext["hlmt"], endian=">", tag_cls=H3Tag
    )
"[email protected]"
] | |
204e9db02dcad09209555ab4e5630f11266c831d | a7b66311c2ce113789933ec3162f1128b2862f13 | /app/waterQual/model/basinRef_box.py | fda72761a329cd4eb4e8dc8d10146234f9f760a6 | [
"MIT"
] | permissive | ChanJeunlam/geolearn | 214b2c42359ea1164b39117fad2d7470adeb6d35 | 791caa54eb70920823ea7d46714dc8a3e7fa7445 | refs/heads/master | 2023-07-16T04:13:15.526364 | 2021-08-16T05:24:18 | 2021-08-16T05:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import numpy as np
import matplotlib.pyplot as plt
wqData = waterQuality.DataModelWQ('basinRef')
outLst = ['basinRef-first50-opt1', 'basinRef-first50-opt2']
trainSet = 'first50'
testSet = 'last50'
pLst1, pLst2, errMatLst1, errMatLst2 = [list() for x in range(4)]
for outName in outLst:
master = basins.loadMaster(outName)
yP1, ycP1 = basins.testModel(outName, trainSet, wqData=wqData)
yP2, ycP2 = basins.testModel(outName, testSet, wqData=wqData)
errMatC1 = wqData.errBySiteC(ycP1, subset=trainSet, varC=master['varYC'])
errMatC2 = wqData.errBySiteC(ycP2, subset=testSet, varC=master['varYC'])
pLst1.append(ycP1)
pLst2.append(ycP2)
errMatLst1.append(errMatC1)
errMatLst2.append(errMatC2)
# figure out number of sample
info = wqData.info
siteNoLst = info['siteNo'].unique().tolist()
ycT = wqData.c
nc = ycT.shape[1]
countMat = np.full([len(siteNoLst), nc], 0)
for i, siteNo in enumerate(siteNoLst):
indS = info[info['siteNo'] == siteNo].index.values
for iC in range(nc):
countMat[i, iC] = np.count_nonzero(~np.isnan(ycT[indS, iC]))
# plot box
codePdf = usgs.codePdf
groupLst = codePdf.group.unique().tolist()
for group in groupLst:
codeLst = codePdf[codePdf.group == group].index.tolist()
indLst = [wqData.varC.index(code) for code in codeLst]
labLst1 = [codePdf.loc[code]['shortName'] +
'\n'+code for code in codeLst]
labLst2 = ['train opt1', 'train opt2', 'test opt2', 'test opt2']
dataBox = list()
for ic in indLst:
temp = list()
for errMat in errMatLst1+errMatLst2:
ind = np.where(countMat[:, ic] > 50)[0]
temp.append(errMat[:, ic, 1])
dataBox.append(temp)
title = 'correlation of {} group'.format(group)
fig = figplot.boxPlot(dataBox, label1=labLst1, label2=labLst2)
fig.suptitle(title)
fig.show()
| [
"[email protected]"
] | |
3046525c5a55e398a35936ff75d8f76dbab9cea8 | 27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f | /tests/unit/modules/network/fortios/test_fortios_system_snmp_user.py | 7805b09ad91c46bcd4f141d7de7a21a2ed96fa56 | [] | no_license | coll-test/notstdlib.moveitallout | eb33a560070bbded5032385d0aea2f3cf60e690b | 0987f099b783c6cf977db9233e1c3d9efcbcb3c7 | refs/heads/master | 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,136 | py | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible_collections.notstdlib.moveitallout.plugins.modules import fortios_system_snmp_user
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Auto-applied fixture: patch the module's Connection class so no test
    ever opens a real connection."""
    connection_class_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.modules.fortios_system_snmp_user.Connection')
    return connection_class_mock


# Shared handler under test.  NOTE(review): this passes the fixture
# *function* object (not a Connection mock) to FortiOSHandler; it works
# here only because every test patches FortiOSHandler.set/.delete and the
# connection itself is never used -- confirm this is intended.
fos_instance = FortiOSHandler(connection_mock)
def test_system_snmp_user_creation(mocker):
    """state=present with a successful backend 'set' reports changed=True."""
    schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_snmp_user': {
            'auth_proto': 'md5',
            'auth_pwd': 'test_value_4',
            'ha_direct': 'enable',
            'name': 'default_name_6',
            'priv_proto': 'aes',
            'priv_pwd': 'test_value_8',
            'queries': 'enable',
            'query_port': '10',
            'security_level': 'no-auth-no-priv',
            'source_ip': '84.230.14.12',
            'source_ipv6': 'test_value_13',
            'status': 'enable',
            'trap_lport': '15',
            'trap_rport': '16',
            'trap_status': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_snmp_user.fortios_system_snmp(input_data, fos_instance)
    # The module must translate underscored option names into the hyphenated
    # FortiOS attribute names before calling the API.
    expected_data = {
        'auth-proto': 'md5',
        'auth-pwd': 'test_value_4',
        'ha-direct': 'enable',
        'name': 'default_name_6',
        'priv-proto': 'aes',
        'priv-pwd': 'test_value_8',
        'queries': 'enable',
        'query-port': '10',
        'security-level': 'no-auth-no-priv',
        'source-ip': '84.230.14.12',
        'source-ipv6': 'test_value_13',
        'status': 'enable',
        'trap-lport': '15',
        'trap-rport': '16',
        'trap-status': 'enable'
    }
    set_method_mock.assert_called_with('system.snmp', 'user', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_snmp_user_creation_fails(mocker):
    """state=present with a failing backend 'set' reports is_error=True and
    changed=False."""
    schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_snmp_user': {
            'auth_proto': 'md5',
            'auth_pwd': 'test_value_4',
            'ha_direct': 'enable',
            'name': 'default_name_6',
            'priv_proto': 'aes',
            'priv_pwd': 'test_value_8',
            'queries': 'enable',
            'query_port': '10',
            'security_level': 'no-auth-no-priv',
            'source_ip': '84.230.14.12',
            'source_ipv6': 'test_value_13',
            'status': 'enable',
            'trap_lport': '15',
            'trap_rport': '16',
            'trap_status': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_snmp_user.fortios_system_snmp(input_data, fos_instance)
    expected_data = {
        'auth-proto': 'md5',
        'auth-pwd': 'test_value_4',
        'ha-direct': 'enable',
        'name': 'default_name_6',
        'priv-proto': 'aes',
        'priv-pwd': 'test_value_8',
        'queries': 'enable',
        'query-port': '10',
        'security-level': 'no-auth-no-priv',
        'source-ip': '84.230.14.12',
        'source-ipv6': 'test_value_13',
        'status': 'enable',
        'trap-lport': '15',
        'trap-rport': '16',
        'trap-status': 'enable'
    }
    set_method_mock.assert_called_with('system.snmp', 'user', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_snmp_user_removal(mocker):
    """state=absent with a successful backend 'delete' reports changed=True."""
    schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_snmp_user': {
            'auth_proto': 'md5',
            'auth_pwd': 'test_value_4',
            'ha_direct': 'enable',
            'name': 'default_name_6',
            'priv_proto': 'aes',
            'priv_pwd': 'test_value_8',
            'queries': 'enable',
            'query_port': '10',
            'security_level': 'no-auth-no-priv',
            'source_ip': '84.230.14.12',
            'source_ipv6': 'test_value_13',
            'status': 'enable',
            'trap_lport': '15',
            'trap_rport': '16',
            'trap_status': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_snmp_user.fortios_system_snmp(input_data, fos_instance)
    # Deletion is keyed by mkey; the exact key value is not asserted here.
    delete_method_mock.assert_called_with('system.snmp', 'user', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_snmp_user_deletion_fails(mocker):
    """state=absent with a failing backend 'delete' reports is_error=True and
    changed=False."""
    schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_snmp_user': {
            'auth_proto': 'md5',
            'auth_pwd': 'test_value_4',
            'ha_direct': 'enable',
            'name': 'default_name_6',
            'priv_proto': 'aes',
            'priv_pwd': 'test_value_8',
            'queries': 'enable',
            'query_port': '10',
            'security_level': 'no-auth-no-priv',
            'source_ip': '84.230.14.12',
            'source_ipv6': 'test_value_13',
            'status': 'enable',
            'trap_lport': '15',
            'trap_rport': '16',
            'trap_status': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_snmp_user.fortios_system_snmp(input_data, fos_instance)
    delete_method_mock.assert_called_with('system.snmp', 'user', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_snmp_user_idempotent(mocker):
    """A 404 on DELETE-style 'set' means the object already matched: no
    error, no change reported."""
    schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_snmp_user': {
            'auth_proto': 'md5',
            'auth_pwd': 'test_value_4',
            'ha_direct': 'enable',
            'name': 'default_name_6',
            'priv_proto': 'aes',
            'priv_pwd': 'test_value_8',
            'queries': 'enable',
            'query_port': '10',
            'security_level': 'no-auth-no-priv',
            'source_ip': '84.230.14.12',
            'source_ipv6': 'test_value_13',
            'status': 'enable',
            'trap_lport': '15',
            'trap_rport': '16',
            'trap_status': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_snmp_user.fortios_system_snmp(input_data, fos_instance)
    expected_data = {
        'auth-proto': 'md5',
        'auth-pwd': 'test_value_4',
        'ha-direct': 'enable',
        'name': 'default_name_6',
        'priv-proto': 'aes',
        'priv-pwd': 'test_value_8',
        'queries': 'enable',
        'query-port': '10',
        'security-level': 'no-auth-no-priv',
        'source-ip': '84.230.14.12',
        'source-ipv6': 'test_value_13',
        'status': 'enable',
        'trap-lport': '15',
        'trap-rport': '16',
        'trap-status': 'enable'
    }
    set_method_mock.assert_called_with('system.snmp', 'user', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_system_snmp_user_filter_foreign_attributes(mocker):
    """Attributes that are not part of the module schema must be stripped
    before the data is sent to the API."""
    schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_snmp_user': {
            'random_attribute_not_valid': 'tag',
            'auth_proto': 'md5',
            'auth_pwd': 'test_value_4',
            'ha_direct': 'enable',
            'name': 'default_name_6',
            'priv_proto': 'aes',
            'priv_pwd': 'test_value_8',
            'queries': 'enable',
            'query_port': '10',
            'security_level': 'no-auth-no-priv',
            'source_ip': '84.230.14.12',
            'source_ipv6': 'test_value_13',
            'status': 'enable',
            'trap_lport': '15',
            'trap_rport': '16',
            'trap_status': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_snmp_user.fortios_system_snmp(input_data, fos_instance)
    # 'random_attribute_not_valid' must not appear in the API payload.
    expected_data = {
        'auth-proto': 'md5',
        'auth-pwd': 'test_value_4',
        'ha-direct': 'enable',
        'name': 'default_name_6',
        'priv-proto': 'aes',
        'priv-pwd': 'test_value_8',
        'queries': 'enable',
        'query-port': '10',
        'security-level': 'no-auth-no-priv',
        'source-ip': '84.230.14.12',
        'source-ipv6': 'test_value_13',
        'status': 'enable',
        'trap-lport': '15',
        'trap-rport': '16',
        'trap-status': 'enable'
    }
    set_method_mock.assert_called_with('system.snmp', 'user', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| [
"[email protected]"
] | |
778767f7a0d4a6de15f577542a87425d58a718a4 | f9e6fd01ba0c8b5eff3680fd4c38237a540be1d0 | /config.py | f1f1dd0925475e74bad86fa3cfadbc91d9d865f9 | [] | no_license | kupacariibumu/NovelDownloader | 757e2a339e946e26bdf30debc5a84c0d54672e3d | ded5a2a4243b5f171c8d9daa448931321de2ea81 | refs/heads/master | 2023-06-02T05:16:39.140316 | 2019-12-16T15:45:09 | 2019-12-16T15:45:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py | from typing import Any, Dict, Union
import yaml
import utils
import websites
class Config:
    """Per-book configuration loaded from the book's YAML config file.

    Attribute lookups that are not found on the instance fall through to
    the raw YAML mapping via __getattr__, yielding None for absent keys.
    """

    def __init__(self, book: str, values: Dict[str, Any]):
        # values["website"] is either a known website name (str) or a dict
        # of custom scraping settings -- see _get_website().
        self.book = book
        self.website = websites.from_config(values["website"])
        self.values = values

    def __getattr__(self, name: str) -> Any:
        # Only called for names not found normally, so missing config keys
        # resolve to None instead of raising AttributeError.
        return self.values.get(name)
def _get_website(config: Dict[str, Any]) -> None:
    """Interactively ask which website to scrape and fill *config* in place.

    Option 0 lets the user describe a custom site (stored as a settings
    dict under ``config["website"]``); options 1..N pick a known website
    (stored by name, with the website's own config merged in).
    """
    print("[0] Custom")
    for i, website in enumerate(websites.WEBSITES, 1):
        print("[{}] {}".format(i, website.name))
    # Upper bound is the number of known websites.  (The original reused the
    # loop variable ``i`` here, which raised NameError when WEBSITES was
    # empty; len() is equivalent whenever the list is non-empty.)
    website_index = utils.input_int("Website: ", 0, len(websites.WEBSITES))
    if website_index > 0:
        website = websites.WEBSITES[website_index - 1]
        config["website"] = website.name
        config.update(website.create_config())
    else:
        config["website"] = {
            "toc_url": input("TOC url: "),
            "toc_start": input("TOC start: "),
            "toc_end": input("TOC end: "),
            "toc_link": input("TOC link regex (optional): ") or 'href="(.*?)"',
            "chapter_url": input("Chapter url: "),
            "chapter_start": input("Chapter start: "),
            "chapter_end": input("Chapter end: "),
        }
def create_config(book: str):
    """Interactively build and write a new YAML config file for *book*."""
    config: Dict[str, Any] = {}
    print("Creating new config for {}:".format(book))
    _get_website(config)
    # Optional human-readable title for the book.
    name = input("Name? (optional) ")
    if name:
        config["name"] = name
    with open(utils.get_config_file(book), "w") as f:
        yaml.dump(config, f, default_flow_style=False)
    print("Config created at:", utils.get_config_file(book))
    print()
def load_config(book: str) -> Config:
    """Read *book*'s YAML config file and wrap it in a Config instance."""
    with open(utils.get_config_file(book)) as f:
        values = yaml.safe_load(f)
    return Config(book, values)
| [
"[email protected]"
] | |
795ad238a3cee773a6c8a30b3dcfe36fc367688e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02696/s652919986.py | d04309a3129fc7b8360255b5cdba0b3e126b0677 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | a,b,n = map(int, input().split())
if n >= b-1:
print( (a*(b-1)) // b - a * ((b-1)//b))
else:
print( (a*n) // b - a * (n//b)) | [
"[email protected]"
] | |
c2be790281c74b3a097b6e5fcd55262d0ffe0919 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/interactions/picker/situation_picker_interaction.py | 5a6cfed4b77d651b4dc7af8cf2245a263c9e698e | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,819 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\interactions\picker\situation_picker_interaction.py
# Compiled at: 2017-08-29 22:16:16
# Size of source mod 2**32: 4323 bytes
from event_testing.resolver import InteractionResolver
from filters.tunable import FilterResult
from interactions.base.picker_interaction import SimPickerInteraction, AutonomousSimPickerSuperInteraction
from interactions.base.picker_strategy import SimPickerEnumerationStrategy
from sims4.tuning.tunable import TunableList, TunableVariant, TunablePackSafeReference
from sims4.tuning.tunable_base import GroupNames
from sims4.utils import flexmethod
from situations.situation_by_definition_or_tags import SituationSearchByDefinitionOrTagsVariant
from vet.vet_picker_strategy import VetCustomerPickerEnumerationStrategy
import services, sims4
class SituationSimsPickerMixin:
INSTANCE_TUNABLES = {'valid_situations':SituationSearchByDefinitionOrTagsVariant(description='\n Situations where the guest list will be collected to populate the picker.\n ',
tuning_group=GroupNames.PICKERTUNING),
'job_filter':TunableList(description='\n If provided, only looks for Sims with the specified jobs.\n ',
tunable=TunablePackSafeReference(manager=(services.get_instance_manager(sims4.resources.Types.SITUATION_JOB))),
tuning_group=GroupNames.PICKERTUNING)}
REMOVE_INSTANCE_TUNABLES = ('sim_filter', 'sim_filter_household_override', 'sim_filter_requesting_sim',
'include_uninstantiated_sims', 'include_instantiated_sims',
'include_actor_sim', 'include_target_sim')
@flexmethod
def _get_valid_sim_choices_gen(cls, inst, target, context, **kwargs):
inst_or_cls = inst if inst is not None else cls
for situation in cls.valid_situations.get_all_matching_situations():
for sim in situation.all_sims_in_situation_gen():
if cls.job_filter:
if situation.get_current_job_for_sim(sim) not in cls.job_filter:
continue
if inst_or_cls.sim_tests:
if inst:
interaction_parameters = inst.interaction_parameters.copy()
else:
interaction_parameters = kwargs.copy()
interaction_parameters['picked_item_ids'] = {
sim.sim_id}
resolver = InteractionResolver(cls, inst, target=target, context=context, **interaction_parameters)
if inst_or_cls.sim_tests.run_tests(resolver):
yield FilterResult(sim_info=(sim.sim_info))
else:
yield FilterResult(sim_info=(sim.sim_info))
class SituationSimsPickerInteraction(SituationSimsPickerMixin, SimPickerInteraction):
pass
class AutonomousSituationSimsPickerInteraction(SituationSimsPickerMixin, AutonomousSimPickerSuperInteraction):
INSTANCE_TUNABLES = {'choice_strategy': TunableVariant(description='\n Strategy to use for picking a Sim.\n ',
default='default_sim_picker',
default_sim_picker=(SimPickerEnumerationStrategy.TunableFactory()),
vet_customer_picker=(VetCustomerPickerEnumerationStrategy.TunableFactory()),
tuning_group=(GroupNames.PICKERTUNING))}
REMOVE_INSTANCE_TUNABLES = ('test_compatibility', )
def __init__(self, *args, **kwargs):
(super().__init__)(args, choice_enumeration_strategy=self.choice_strategy, **kwargs) | [
"[email protected]"
] | |
3903880ef11dddcfb52a460e340e38f17acd4533 | 2aff23f7efc101969df2d13c5de91208f1153ff7 | /pyexcel_matplotlib/__init__.py | 63181b955335972638426181671ca5d3dffa487d | [
"BSD-3-Clause"
] | permissive | mobanbot/pyexcel-matplotlib | 7a8c12cb897173647377b2656cbac246f58793fe | 8771fcf3cc82164b50dc7ec0314838bf3de63e3b | refs/heads/master | 2021-06-19T16:17:29.541971 | 2017-07-13T07:17:31 | 2017-07-13T07:18:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
pyexcel_matplotlib
~~~~~~~~~~~~~~~~~~~
chart drawing plugin for pyexcel
:copyright: (c) 2016-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for further details
"""
from pyexcel.plugins import PyexcelPluginChain
PyexcelPluginChain(__name__).add_a_renderer(
relative_plugin_class_path='plot.MatPlotter',
file_types=['svg', 'png']
)
| [
"[email protected]"
] | |
e01067024fe471232edab834f0a4d5da7c238f63 | 8906e04870524f190a11f3eb3caf8fe377ab3a24 | /Chapter13/Chapter_13/obs_tower2/labeler/main.py | 3e5a85559d1fdfeeedd004fdb99b45064e24782f | [
"MIT"
] | permissive | PacktPublishing/Hands-On-Reinforcement-Learning-for-Games | 8719c086c8410a2da2b4fb9852b029a4c8f67f60 | 609d63ee5389b80b760a17f7f43abe632d99a9bb | refs/heads/master | 2023-02-08T19:35:30.005167 | 2023-01-30T09:09:07 | 2023-01-30T09:09:07 | 231,567,217 | 54 | 32 | MIT | 2022-04-21T06:47:24 | 2020-01-03T10:43:21 | Python | UTF-8 | Python | false | false | 2,940 | py | """
Web server for the data labeling tool.
This web server looks for a trained classifier in the
scripts/ directory.
If such a classifier is found, its outputs are shown as
part of the web interface.
"""
import io
import json
import os
import random
from PIL import Image
from flask import Flask, send_file, send_from_directory
import numpy as np
import torch
from obs_tower2.labels import LabeledImage, load_all_labeled_images
from obs_tower2.model import StateClassifier
from obs_tower2.recording import load_all_data, sample_recordings
app = Flask(__name__, static_url_path='')
labelled = load_all_labeled_images()
recordings = load_all_data()
CLASSIFIER_PATH = '../scripts/save_classifier.pkl'
if os.path.exists(CLASSIFIER_PATH):
classifier = StateClassifier()
classifier.load_state_dict(torch.load(CLASSIFIER_PATH, map_location='cpu'))
else:
classifier = None
@app.route('/assets/<path:path>')
def handle_asset(path):
return send_from_directory('assets', path)
@app.route('/')
def handle_root():
return send_from_directory('.', 'index.html')
@app.route('/sample')
def handle_sample():
return sample_new_name()
@app.route('/frame/<name>')
def handle_frame(name):
buffer = io.BytesIO()
load_frame(name).save(buffer, 'PNG')
buffer.seek(0)
return send_file(buffer, mimetype='image/png')
@app.route('/key/<name>')
def handle_key(name):
return json.dumps(check_key(name))
@app.route('/classify/<name>')
def handle_classify(name):
if classifier is None:
return 'null'
img = np.array(load_frame(name))
inputs = torch.from_numpy(img[None])
outputs = torch.sigmoid(classifier(inputs)).detach().numpy()[0]
return json.dumps([float(x) for x in outputs])
@app.route('/save/<name>/<labels>')
def handle_save(name, labels):
frame = load_frame(name)
labels = [x == '1' for x in labels.split(',')]
img = LabeledImage(os.environ['OBS_TOWER_IMAGE_LABELS'], name, *labels)
img.save(frame)
labelled.append(img)
return 'success'
def sample_new_name():
while True:
rec = sample_recordings(recordings, 1)[0]
frame = random.randrange(rec.num_steps)
name = '%d_%d_%d' % (rec.seed, rec.uid, frame)
if any([x for x in labelled if x.name == name]):
continue
return name
def load_frame(name):
rec, frame = find_rec_frame(name)
return Image.fromarray(rec.load_frame(frame))
def check_key(name):
rec, frame = find_rec_frame(name)
for i in range(frame + 10, min(frame + 50, rec.num_steps), 5):
img = rec.load_frame(i)
if not (img[2] == 0).all():
return True
return False
def find_rec_frame(name):
parts = name.split('_')
seed = int(parts[0])
uid = int(parts[1])
frame = int(parts[2])
rec = next(x for x in recordings if x.seed == seed and x.uid == uid)
return rec, frame
if __name__ == '__main__':
app.run()
| [
"[email protected]"
] | |
9e96120750f7833d375a3b5ddf802df2de37e27f | 9cf179388a901089cd547d36eedf0fd7a42eb9bd | /config/settings/base.py | 9dbbf59ed9571d4f50ab378bb3a98edc393489b7 | [] | no_license | birkoss/stocks | 94015013bfef9d19beabfea854891eac95fa1f8d | 58d8f030add64962aea386ef72c50665381c6258 | refs/heads/master | 2023-02-16T02:29:55.500638 | 2020-12-22T19:30:01 | 2020-12-22T19:30:01 | 323,704,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,861 | py | import json
import os
from django.core.exceptions import ImproperlyConfigured
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
with open('secrets.json') as f:
secrets = json.load(f)
except FileNotFoundError:
raise ImproperlyConfigured('Fill the secrets.json file')
def get_secret(setting, secrets=secrets):
'''
Get the secret variable or return explicit exception.
'''
try:
return secrets[setting]
except KeyError:
error_msg = 'Set the {0} environment → variable'.format(setting)
raise ImproperlyConfigured(error_msg)
SECRET_KEY = get_secret('SECRET_KEY')
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'core',
'users',
'stocks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # nopep8
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # nopep8
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # nopep8
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # nopep8
},
]
AUTHENTICATION_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id,name,email',
}
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
)
LOGIN_URL = 'home'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'home'
SOCIAL_AUTH_FACEBOOK_KEY = get_secret('FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = get_secret('FACEBOOK_SECRET')
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = get_secret('GOOGLE_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = get_secret('GOOGLE_SECRET')
AUTH_USER_MODEL = 'users.User'
LANGUAGE_CODE = 'en-ca'
TIME_ZONE = 'America/Montreal'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
fb19cc288cacbf45c79fb602182a6a2014c7a09a | 4f2cdd9a34fce873ff5995436edf403b38fb2ea5 | /Data-Structures/List/Part2/P007.py | efd86d663d29f875ee66ef39f98c36aa37486be6 | [] | no_license | sanjeevseera/Python-Practice | 001068e9cd144c52f403a026e26e9942b56848b0 | 5ad502c0117582d5e3abd434a169d23c22ef8419 | refs/heads/master | 2021-12-11T17:24:21.136652 | 2021-08-17T10:25:01 | 2021-08-17T10:25:01 | 153,397,297 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | """
Write a Python program to find the index of an item in a specified list.
"""
num =[10, '30', 4, -6]
try:
print(num.index('30'))
except:
print("Value not in List") | [
"[email protected]"
] | |
61e8f3382590d817b94e5a1f6bb2299f795c7962 | 271886f348c3b72cd4b2a34ca456491d39bde520 | /component-clustering/model-variance-exploration_2.py | 143358c7e11e9548b3894d7d19ecc99fa6922c63 | [
"MIT"
] | permissive | tingard/Galaxy-builder-aggregation | 4ca99c7473d31d9a0b6909e3ccc9b08559dc04b1 | 78fec76eeb2ab4b38e241b66fa5643e0002ba3a7 | refs/heads/master | 2021-06-28T06:46:00.676450 | 2019-07-17T16:45:56 | 2019-07-17T16:45:56 | 126,490,129 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,613 | py | import numpy as np
import pandas as pd
import lib.galaxy_utilities as gu
import gzbuilderaggregation
from progress.bar import Bar
from multiprocessing import Pool
import argparse
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
N_SPLITS = 5
def get_pa_from_arms(arms):
try:
p = arms[0].get_parent()
return p.get_pitch_angle(arms)
except IndexError:
return (np.nan, np.nan)
def get_splits_df(ss_id, val_id, dr8id):
gal, angle = gu.get_galaxy_and_angle(ss_id)
cls_for_gal = gu.classifications.query(
'subject_ids == {} | subject_ids == {}'.format(ss_id, val_id)
)
results = []
for i in range(N_SPLITS):
cls_sample = cls_for_gal.sample(30)
results.append(
gzbuilderaggregation.make_model(
cls_sample,
gal, angle,
)
)
disk_df = pd.DataFrame([
i[0]['disk'] for i in results if i[0]['disk'] is not None
])
disk_df.columns = 'disk_' + disk_df.columns
bulge_df = pd.DataFrame([
i[0]['bulge'] for i in results if i[0]['bulge'] is not None
])
bulge_df.columns = 'bulge_' + bulge_df.columns
bar_df = pd.DataFrame([
i[0]['bar'] for i in results if i[0]['bar'] is not None
])
bar_df.columns = 'bar_' + bar_df.columns
pa_df = pd.DataFrame(
[get_pa_from_arms(i[-1]) for i in results],
columns=('pa', 'sigma_pa')
)
gal_df = pd.concat((disk_df, bulge_df, bar_df, pa_df), axis=1, sort=False)
return gal_df
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
'Perform Shuffle split variance explortation'
' on aggregate models'
)
)
parser.add_argument('--nsplits', '-N', metavar='N', default=5,
help='Number of splits to use')
args = parser.parse_args()
N_SPLITS = int(args.nsplits)
dr8ids, ss_ids, validation_ids = np.load('lib/duplicate_galaxies.npy').T
out = []
to_iter = np.stack((ss_ids, validation_ids, dr8ids), axis=-1)
bar = Bar('Calculating aggregate models', max=len(dr8ids),
suffix='%(percent).1f%% - %(eta)ds')
try:
for row in to_iter:
try:
out.append(get_splits_df(*row))
except Exception as e:
print('\n', row[0], e)
bar.next()
bar.finish()
except KeyboardInterrupt:
pass
df = pd.concat(out, keys=dr8ids, sort=False)
df.to_pickle('model-variances.pkl')
| [
"[email protected]"
] | |
9a2a46b9e35529dc5ec63c6c719c5b2d2bb9dffc | c1c3dc2d8a3bbe12eb60f49f277f605793fa7758 | /lesson06_multi_nasled/death_romb.py | 4cfa169b0949ef6301c53b7097827fb658c970e2 | [] | no_license | Bulgakoff/UdemyNew | 838e073b3ab61ae227fcc497f9ded5d6048f3077 | 97a27d0de164fcbd06def5e0edf1464ad46b5668 | refs/heads/master | 2020-09-23T03:29:20.789215 | 2019-12-09T11:04:28 | 2019-12-09T11:04:28 | 225,391,089 | 0 | 0 | null | 2019-12-09T11:04:29 | 2019-12-02T14:15:39 | Python | UTF-8 | Python | false | false | 994 | py | class Animal:
def set_health(self, health):
print('set in Animal')
class Carnivour(Animal):
def set_health(self, health):
super().set_health(health)# что бы не было двоцной инициализации базовых классов super().
print('set in Carnivour')
class Mammal(Animal):
def set_health(self, health):
super().set_health(health)# что бы не было двоцной инициализации базовых классов super().
print('set in Mammal')
class Dog(Carnivour, Mammal):
def set_health(self, health):
super().set_health(health)# что бы не было двоцной инициализации базовых классов super().
# Carnivour.set_health(self, health)
# Mammal.set_health(self, health)
print('set in Dog')
print('///////////////////////////////////////')
dog = Dog()
print(f'-----собака-------{dog.set_health(10)}--')
| [
"[email protected]"
] | |
fe4f9588384f0ada08e023ffb6a95d0de228157c | 8703982937001523f125cb65a80002e5ebb95477 | /config.py | 4a511a489406af5835497a3304fce1fb98252df1 | [
"MIT"
] | permissive | meddulla/GNN-Tutorial-Recsys2015 | 12638d2510859b08fc3249be638e756704b69cf4 | 82918c5ec824c1580c8c61a2bb76f3cbab08f19c | refs/heads/master | 2020-09-25T18:28:10.408024 | 2019-12-05T09:23:47 | 2019-12-05T09:23:47 | 226,063,496 | 0 | 0 | MIT | 2019-12-05T09:22:34 | 2019-12-05T09:22:34 | null | UTF-8 | Python | false | false | 740 | py | import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # sets device for model and PyTorch tensors
embed_dim = 128
batch_size = 1024
num_embeds = 52739
image_folder = 'data'
c_file = 'data/yoochoose-data/yoochoose-clicks.dat'
b_file = 'data/yoochoose-data/yoochoose-buys.dat'
c_index = ["session_id", "timestamp", "item_id", "category"]
b_index = ["session_id", "timestamp", "item_id", "price", "quantity"]
test_data_file = 'data/yoochoose-data/yoochoose-test.dat'
# Training parameters
num_workers = 4 # for data-loading
grad_clip = 5. # clip gradients at an absolute value of
print_freq = 10 # print training/validation stats every __ batches
checkpoint = None # path to checkpoint, None if none
| [
"[email protected]"
] | |
15b5a61186ff47009a360de4e660aa87ece8da91 | cbbdbdfa3d69a11de5dbd80f860986c97ec10b67 | /marrow/schema/transform/complex.py | 391f78fc1519129ad02552bb1b941af914cf6966 | [
"MIT"
] | permissive | lokeshmeher/schema | 757cbc837c91f124774d3a1562ceccc255f17026 | 3c7478d27f87a2f1a7f2c2da67beced4a76704cc | refs/heads/master | 2021-06-04T18:50:42.461646 | 2016-02-24T04:15:04 | 2016-02-24T04:15:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,730 | py | # encoding: utf-8
from __future__ import unicode_literals
import re
from inspect import isroutine
from ..compat import unicode
from .base import Concern, Transform, DataAttribute, Attribute
class TokenPatternAttribute(DataAttribute):
"""Lazy construction of the regular expression needed for token processing."""
def __get__(self, obj, cls=None):
# If this is class attribute (and not instance attribute) access, we return ourselves.
if obj is None:
return self
# Attempt to retrieve the cached value from the warehouse.
try:
return obj.__data__[self.__name__]
except KeyError:
pass
# No stored value? No problem! Let's calculate it.
separators = obj.separators
groups = obj.groups
quotes = obj.quotes
if groups and None not in groups:
groups = [None] + list(groups)
expression = ''.join((
# Trap possible leading space or separators.
('[\s%s]*' % (''.join(separators), )),
'(',
# Pass groups=('+','-') to handle optional leading + or -.
('[%s]%s' % (''.join([i for i in list(groups) if i is not None]), '?' if None in groups else '')) if groups else '',
# Match any amount of text (that isn't a quote) inside quotes.
''.join([(r'%s[^%s]+%s|' % (i, i, i)) for i in quotes]) if quotes else '',
# Match any amount of text that isn't whitespace.
('[^%s]+' % (''.join(separators), )),
')',
# Match possible separator character.
('[%s]*' % (''.join(separators), )),
))
value = (expression, re.compile(expression))
self.__set__(obj, value)
return value
class Token(Transform):
separators = Attribute(default=' \t')
quotes = Attribute(default="\"'")
groups = Attribute(default=[])
group = Attribute(default=None) # None or 'dict' or some other handler.
normalize = Attribute(default=None)
sort = Attribute(default=False)
cast = Attribute(default=list)
pattern = TokenPatternAttribute()
def native(self, value, context=None):
value = super(Token, self).native(value, context)
if value is None:
return None
pattern, regex = self.pattern
matches = regex.findall(value)
if isroutine(self.normalize):
matches = [self.normalize(i) for i in matches]
if self.sort:
matches.sort()
if not self.groups:
return self.cast(matches)
groups = dict([(i, list()) for i in self.groups])
if None not in groups:
groups[None] = list() # To prevent errors.
for i in matches:
if i[0] in self.groups:
groups[i[0]].append(i[1:])
else:
groups[None].append(i)
if self.group is dict:
return groups
if not self.group:
results = []
for group in self.groups:
results.extend([(group, match) for match in groups[group]])
return self.cast(results)
return self.group([[match for match in groups[group]] for group in self.groups])
def foreign(self, value, context=None):
value = super(Token, self).foreign(value, context)
if value is None:
return None
def sanatize(keyword):
if not self.quotes:
return keyword
for sep in self.separators:
if sep in keyword:
return self.quotes[0] + keyword + self.quotes[0]
return keyword
if self.group is dict:
if not isinstance(value, dict):
raise Concern("Dictionary grouped values must be passed as a dictionary.")
return self.separators[0].join([((prefix or '') + sanatize(keyword)) for prefix, keywords in sorted(list(value.items())) for keyword in sorted(value[prefix])])
if not isinstance(value, (list, tuple, set)):
raise Concern("Ungrouped values must be passed as a list, tuple, or set.")
value = [sanatize(keyword) for keyword in value]
return self.separators[0].join(sorted(value) if self.sort else value)
# A lowercase-normalized ungrouped tag set processor, returning only unique tags.
tags = Token(separators=' \t,', normalize=lambda s: s.lower().strip('"'), cast=set)
# A tag search; as per tags but grouped into a dictionary of sets for normal (None), forced inclusion (+) or exclusion (-).
tag_search = Token(separators=' \t,', normalize=lambda s: s.lower().strip('"'), cast=set, groups=['+', '-'], group=dict)
# A search keyword processor which retains quotes and groups into a dictionary of lists; no normalization is applied.
terms = Token(groups=['+', '-'], group=dict)
# VETO: Extract
'''
class DateTimeTransform(Transform):
base = Attribute(defualt=datetime.datetime)
format = "%Y-%m-%d %H:%M:%S"
def __call__(self, value):
if not value:
return ''
return super(DateTimeTransform, self)(value.strftime(self.format))
def native(self, value):
value = super(DateTimeTransform, self).native(value)
return self.base.strptime(value, self.format)
'''
| [
"[email protected]"
] | |
3d056240ccc91d11d0fa994fade9566d83649476 | 0ebf38d311d11f2473db301d08d906cf1a5d8825 | /testinfra/modules/blockdevice.py | 88e152d3b10bfe7658b43e1fe2782fb65fd3de93 | [
"Apache-2.0",
"CC-BY-ND-4.0"
] | permissive | disser/testinfra | 5b8baf35e36192f98ca879464e858eb06029df63 | 14af900fb305991cdf2b31b8825884955e0d8f2c | refs/heads/master | 2022-11-13T17:45:19.118394 | 2020-07-02T15:25:24 | 2020-07-02T15:25:24 | 276,672,242 | 0 | 0 | Apache-2.0 | 2020-07-02T14:46:20 | 2020-07-02T14:46:20 | null | UTF-8 | Python | false | false | 4,144 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testinfra.modules.base import Module
from testinfra.utils import cached_property
class BlockDevice(Module):
"""Information for block device.
Should be used with sudo or under root.
If device is not a block device, RuntimeError is raised.
"""
def _data(self):
raise NotImplementedError
def __init__(self, device):
self.device = device
super().__init__()
@property
def is_partition(self):
"""Return True if the device is a partition.
>>> host.block_device("/dev/sda1").is_partition
True
>>> host.block_device("/dev/sda").is_partition
False
"""
return self._data['start_sector'] > 0
@property
def size(self):
"""Return size if the device in bytes.
>>> host.block_device("/dev/sda1").size
512110190592
"""
return self._data['size']
@property
def sector_size(self):
"""Return sector size for the device in bytes.
>>> host.block_device("/dev/sda1").sector_size
512
"""
return self._data['sector_size']
@property
def block_size(self):
"""Return block size for the device in bytes.
>>> host.block_device("/dev/sda").block_size
4096
"""
return self._data['block_size']
@property
def start_sector(self):
"""Return start sector of the device on the underlaying device.
Usually the value is zero for full devices and is non-zero
for partitions.
>>> host.block_device("/dev/sda1").start_sector
2048
>>> host.block_device("/dev/md0").start_sector
0
"""
return self._data['sector_size']
@property
def is_writable(self):
"""Return True if device is writable (have no RO status)
>>> host.block_device("/dev/sda").is_writable
True
>>> host.block_device("/dev/loop1").is_writable
False
"""
mode = self._data['rw_mode']
if mode == 'rw':
return True
if mode == 'ro':
return False
raise ValueError('Unexpected value for rw: %s' % mode)
@property
def ra(self):
"""Return Read Ahead for the device in 512-bytes sectors.
>>> host.block_device("/dev/sda").ra
256
"""
return self._data['read_ahead']
@classmethod
def get_module_class(cls, host):
if host.system_info.type == 'linux':
return LinuxBlockDevice
raise NotImplementedError
def __repr__(self):
return '<BlockDevice(path=%s)>' % self.device
class LinuxBlockDevice(BlockDevice):
@cached_property
def _data(self):
header = ['RO', 'RA', 'SSZ', 'BSZ', 'StartSec', 'Size', 'Device']
command = 'blockdev --report %s'
blockdev = self.run(command % self.device)
if blockdev.rc != 0 or blockdev.stderr:
raise RuntimeError("Failed to gather data: %s" % blockdev.stderr)
output = blockdev.stdout.splitlines()
if len(output) < 2:
raise RuntimeError("No data from %s" % self.device)
if output[0].split() != header:
raise RuntimeError('Unknown output of blockdev: %s' % output[0])
fields = output[1].split()
return {
'rw_mode': str(fields[0]),
'read_ahead': int(fields[1]),
'sector_size': int(fields[2]),
'block_size': int(fields[3]),
'start_sector': int(fields[4]),
'size': int(fields[5])
}
| [
"[email protected]"
] | |
c5adb35910a3801181d1a6c8535732b8f9d6cf51 | 0facb323be8a76bb4c168641309972fa77cbecf2 | /Configurations/HWWSemiLepHighMass/nanoAODv5/v6_production/2017/NJET_biined_WJets/SKIM10/HMVAR10_Full_SBI/MassPoints/structure_M140_mu.py | 72b4463434ca67ffe33f216b0179abf363733d62 | [] | no_license | bhoh/SNuAnalytics | ef0a1ba9fa0d682834672a831739dfcfa1e7486b | 34d1fc062e212da152faa83be50561600819df0e | refs/heads/master | 2023-07-06T03:23:45.343449 | 2023-06-26T12:18:28 | 2023-06-26T12:18:28 | 242,880,298 | 0 | 1 | null | 2020-02-25T01:17:50 | 2020-02-25T01:17:49 | null | UTF-8 | Python | false | false | 1,031 | py | structure["DY"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["MultiV"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["WpWmJJ_EWK_QCD_noHiggs"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["top"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["Wjets"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["vbfHWWlnuqq_M125"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["ggHWWlnuqq_M125"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["QCD_MU"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["DATA"]={
"isSignal" : 0,
"isData" : 1 ,
}
structure["ggHWWlnuqq_M140"]={
"isSignal" : 1,
"isData" : 0 ,
}
structure["vbfHWWlnuqq_M140"]={
"isSignal" : 1,
"isData" : 0 ,
}
| [
"[email protected]"
] | |
4c21f061d0e7cd7fcb64320a3d50b43a7c06d22e | bba2bd15307d94707825057fe2790a72c707a363 | /awesome_glue/bert_classifier.py | 3a6808b7dfbf4131497a2415fec14b8310982d9e | [] | no_license | Xalp/dne | c78e8ef2f730b129623ed3eaa27f93d2cf85d6f6 | afa519eea9ccd29332c477d89b4691fc2520813b | refs/heads/master | 2023-02-16T14:27:48.089160 | 2021-01-15T12:30:44 | 2021-01-15T12:30:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,974 | py | import torch
import torch.nn.functional as F
from allennlp.models import Model
from allennlp.training.metrics import CategoricalAccuracy
from transformers import AdamW
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from allennlp.modules.seq2vec_encoders import ClsPooler
from luna import ram_globalize
class BertClassifier(Model):
def __init__(self, vocab, num_labels):
super().__init__(vocab)
self.bert_embedder = PretrainedTransformerEmbedder('bert-base-uncased')
self.pooler = ClsPooler(self.bert_embedder.get_output_dim())
self.linear = torch.nn.Sequential(
torch.nn.Dropout(0.1),
torch.nn.Linear(in_features=768, out_features=num_labels))
self.accuracy = CategoricalAccuracy()
self.loss_function = torch.nn.CrossEntropyLoss()
def forward(self, sent, label=None):
bert_embeddings = self.bert_embedder(
token_ids=sent['tokens']['token_ids'],
type_ids=sent['tokens']['type_ids'],
mask=sent['tokens']['mask'])
bert_vec = self.pooler(bert_embeddings)
logits = self.linear(bert_vec)
output = {"logits": logits, "probs": F.softmax(logits, dim=1)}
if label is not None:
self.accuracy(logits, label)
output["loss"] = self.loss_function(logits, label)
return output
def get_metrics(self, reset=False):
return {'accuracy': self.accuracy.get_metric(reset)}
def get_optimizer(self):
optimizer = AdamW(self.parameters(), lr=2e-5, eps=1e-8)
# get_linear_schedule_with_warmup(
# optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
# )
return optimizer
@ram_globalize()
def noise(tsr: torch.Tensor, scale=1.0):
return tsr
# if scale == 0:
# return tsr
# else:
# return tsr + torch.normal(0., tsr.std().item() * scale, tsr.size(), device=tsr.device)
| [
"[email protected]"
] | |
20c28a30e5a7a54696e106c9cce4973e2678a8dc | b921f8ffb559e90c2711f77dc8ceba960b721714 | /rocket_engine/__init__.py | 4cfbaab90c1017b1491be694e9f409501465594e | [
"BSD-2-Clause-Views"
] | permissive | xando/django-rocket-engine | 7b8af49d2665cd213b75b9bcc4a9ba405d63f339 | 3ef942cd0ddd2f88832725990bd0fe9bb07bbe84 | refs/heads/master | 2020-12-24T16:23:36.789339 | 2012-06-29T19:49:43 | 2012-06-29T19:49:43 | 3,860,429 | 1 | 0 | null | 2012-07-03T08:30:44 | 2012-03-28T22:46:21 | Python | UTF-8 | Python | false | false | 2,838 | py | import os
import sys
from django.core.handlers.wsgi import WSGIHandler
from django.core import signals
on_appengine_remote = os.getenv('SERVER_SOFTWARE','')\
.startswith('Google App Engine')
on_appengine = on_appengine_remote
os.path.abspath(os.curdir)
PROJECT_DIR = os.path.abspath(os.getcwd())
def get_appengine_sdk_path():
typical_sdk_paths = [
os.environ.get('APP_ENGINE_SDK',""),
'/usr/local/google_appengine',
'/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine'
] + os.environ.get('PATH', '').split(os.pathsep)
# List of files which will be used as a test for SQK lookup.
is_appengine_sdk = lambda path: all([
x in os.listdir(path) for x in [
'appcfg.py',
'dev_appserver.py',
'google'
]
])
for path in typical_sdk_paths:
if os.path.exists(path) and is_appengine_sdk(path):
return path
sys.stderr.write(
'The Google App Engine SDK could not be found!\n'
"Make sure it's accessible via your PATH "
"environment and called google_appengine.\n"
)
sys.exit(1)
def setup_appendine_sdk():
try:
import dev_appserver
except ImportError:
sdk_path = get_appengine_sdk_path()
sys.path.append(sdk_path)
import dev_appserver
sys.path.extend(dev_appserver.EXTRA_PATHS)
sys.path.extend(dev_appserver.GOOGLE_SQL_EXTRA_PATHS)
def path_appendine_sdk():
if not os.environ.get('DJANGO_SETTINGS_MODULE'):
os.environ.update({'DJANGO_SETTINGS_MODULE': 'settings'})
if not on_appengine_remote:
# add SQLlite to allowed modules
from google.appengine.tools import dev_appserver
from google.appengine import dist27
dist27.MODULE_OVERRIDES = []
dev_appserver.HardenedModulesHook._WHITE_LIST_C_MODULES.extend(
('parser', '_ssl', '_io', '_sqlite3', 'os', '_os', 'tempfile'))
dev_appserver.HardenedModulesHook._MODULE_OVERRIDES['os'] = os.__dict__
dev_appserver.HardenedModulesHook._PY27_ALLOWED_MODULES.append('os')
dev_appserver.HardenedModulesHook._HardenedModulesHook__PY27_OPTIONAL_ALLOWED_MODULES = {}
dev_appserver.FakeFile.NOT_ALLOWED_DIRS = set([])
dev_appserver.FakeFile.IsFileAccessible = staticmethod(
lambda *args, **kwargs: True
)
else:
# loogging exceptions hook
from .utils import log_traceback
signals.got_request_exception.connect(log_traceback)
# add production site
import site
site.addsitedir(os.path.join(PROJECT_DIR, 'appengine_libs'))
if not on_appengine_remote:
setup_appendine_sdk()
path_appendine_sdk()
wsgi = WSGIHandler()
| [
"[email protected]"
] | |
71dab8b1dc580f2d08efed954c2be67f8bdb700e | 72b1d8b44520d1757d379d8013eb3912b005bef3 | /ml/visualizations/word_cloud/demo_cloud.py | 85787dfd91e13e926b6a51745f2ce8dd67c2ce84 | [] | no_license | joshuaNewman10/ml | 14d8d5821bd952e77272b740cf05cef69ebee383 | 3ec43868004d421814f8e056205e77a2b8cb92dc | refs/heads/master | 2021-04-03T06:29:33.655495 | 2018-09-17T19:03:40 | 2018-09-17T19:03:40 | 124,795,304 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | import matplotlib.pyplot as plt
from argparse import ArgumentParser
from wordcloud import WordCloud, STOPWORDS
def main(text_file_path):
    """Render two word-cloud figures (default and small-font) for a text file.

    Parameters
    ----------
    text_file_path : str
        Path to the plain-text file to visualise.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original `open(...).read()` leaked the handle); also dropped a
    # leftover debug print.
    with open(text_file_path) as text_file:
        text = text_file.read()
    # Generate a word cloud image with common stopwords removed.
    wordcloud = WordCloud(stopwords=STOPWORDS).generate(text)
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    # Second figure: same text rendered with a lower max_font_size.
    wordcloud = WordCloud(max_font_size=40, stopwords=STOPWORDS).generate(text)
    plt.figure()
    plt.imshow(wordcloud, interpolation="bilinear")
    plt.axis("off")
    plt.show()
if __name__ == '__main__':
    # CLI entry point: --text_file_path points at the corpus to visualise.
    parser = ArgumentParser()
    parser.add_argument('--text_file_path', required=True)
    args = parser.parse_args()
    main(args.text_file_path)
| [
"[email protected]"
] | |
b9682dd26ca433882646316da95ace105a4ee492 | c6382b3f6778edd5a64bfb2a4d22ff6e5e5c0f7d | /ipyparallel/client/_joblib.py | 7098a73b1bfb54d9cd43aa248681c69d35e554a3 | [
"BSD-3-Clause"
] | permissive | 195610087/ipyparallel | 17f382498bad28b339a3ca1f0b479151e9c0c955 | 6cd55b00a520b3f299e7db88a08b78dcbe713af8 | refs/heads/main | 2023-09-06T07:29:36.302391 | 2021-11-15T10:01:57 | 2021-11-15T10:01:57 | 429,255,340 | 0 | 0 | NOASSERTION | 2021-11-18T01:24:20 | 2021-11-18T01:24:20 | null | UTF-8 | Python | false | false | 2,308 | py | """joblib parallel backend for IPython Parallel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from joblib.parallel import AutoBatchingMixin
from joblib.parallel import ParallelBackendBase
import ipyparallel as ipp
class IPythonParallelBackend(AutoBatchingMixin, ParallelBackendBase):
    """joblib backend that schedules work on an IPython Parallel view."""

    def __init__(self, view=None, **kwargs):
        """Create the backend.

        Parameters
        ----------
        view : LoadBalancedView, optional
            View used to submit work. When omitted, the default cluster
            file is loaded (starting a fresh cluster if none exists) and
            this backend takes ownership of the client — and possibly the
            cluster — so `terminate` can shut them down again.
        """
        super().__init__(**kwargs)
        # Ownership flags decide what `terminate` is allowed to tear down.
        self._cluster_owner = False
        self._client_owner = False
        if view is None:
            self._client_owner = True
            try:
                # load the default cluster
                cluster = ipp.Cluster.from_file()
            except FileNotFoundError:
                # no cluster file: create and start a brand-new cluster
                cluster = self._cluster = ipp.Cluster()
                self._cluster_owner = True
                cluster.start_cluster_sync()
            else:
                # cluster running, ensure some engines are, too
                if not cluster.engines:
                    cluster.start_engines_sync()
            rc = cluster.connect_client_sync()
            rc.wait_for_engines(cluster.n or 1)
            view = rc.load_balanced_view()

            # use cloudpickle or dill for closures, if available.
            # joblib tends to create closures default pickle can't handle.
            try:
                import cloudpickle  # noqa
            except ImportError:
                try:
                    import dill  # noqa
                except ImportError:
                    pass
                else:
                    view.client[:].use_dill()
            else:
                view.client[:].use_cloudpickle()
        self._view = view

    def effective_n_jobs(self, n_jobs):
        """A View can run len(view) jobs at a time"""
        return len(self._view)

    def terminate(self):
        """Close the client if we created it"""
        if self._client_owner:
            self._view.client.close()
        if self._cluster_owner:
            self._cluster.stop_cluster_sync()

    def apply_async(self, func, callback=None):
        """Schedule a func to be run"""
        future = self._view.apply_async(func)
        if callback:
            future.add_done_callback(lambda f: callback(f.result()))
        return future
| [
"[email protected]"
] | |
24a9abcd14ccf38aa4edade81d64a646ca06c078 | 282ec49f8ce8aa176c24e4f13a8852c9b0752e4a | /jumble/gtkmm/SConstruct | bdf8811058ae5ba72e9070f2535846020b8fdf25 | [] | no_license | montreal91/workshop | b118b9358094f91defdae1d11ff8a1553d67cee6 | 8c05e15417e99d7236744fe9f960f4d6b09e4e31 | refs/heads/master | 2023-05-22T00:26:09.170584 | 2023-01-28T12:41:08 | 2023-01-28T12:41:08 | 40,283,198 | 3 | 1 | null | 2023-05-01T20:19:11 | 2015-08-06T03:53:44 | C++ | UTF-8 | Python | false | false | 238 |
# SCons build script for the gtkmm "hello world" demo.
env = Environment()
# Pull compiler and linker flags for gtkmm 3 from pkg-config.
env.ParseConfig("pkg-config --cflags --libs gtkmm-3.0")
sources = [
    "main.cc",
    "helloworld.cc"
]
flags = [
    "-std=c++11",
    "-Wall",
]
env.Program(target="gtk-hello-1", CXXFLAGS=flags, source=sources)
| [
"[email protected]"
] | ||
1f4d2eb377e64e487faba3cdf2c21c6ecabc8bbe | 775f887ab0933c8bb9263febceb702974966bb48 | /packages/pyright-internal/src/tests/samples/genericTypes46.py | 1395effc3f8c0f8d6f844f6afd1e87a98e644aac | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | isabella232/pyright | 160a4d9ce366cb61946949f9d5aebe7457539c67 | a192486099503353413e02078c41d0d82bd696e8 | refs/heads/master | 2023-03-13T05:04:51.852745 | 2021-03-03T07:51:18 | 2021-03-03T07:51:18 | 344,101,663 | 0 | 0 | NOASSERTION | 2021-03-03T11:24:10 | 2021-03-03T11:21:38 | null | UTF-8 | Python | false | false | 413 | py | # This sample tests the assignment of constrained TypeVars to a union
# that allows for all of the types in the constraint.
from typing import TypeVar, Union
def func(a: Union[int, float]):  # assignment target: accepts the full int/float union
    ...
_T1 = TypeVar("_T1", int, float)
def func1(a: _T1, b: _T1):
    return func(a)  # OK: every constraint of _T1 (int, float) is allowed by func
_T2 = TypeVar("_T2", int, float, complex)
def func2(a: _T2, b: _T2):
    # This should generate an error.
    return func(a)  # _T2 may be complex, which func's Union[int, float] rejects
| [
"[email protected]"
] | |
29e734cef324fc14d43b25079376467dbf6b876d | 41523dd4871e8ed1043d2b3ddf73417fcbdde209 | /day16/zuoye.py | 3a03ad98b7fd77f925c21ebaa2e63183f5f27041 | [] | no_license | WayneChen1994/Python1805 | 2aa1c611f8902b8373b8c9a4e06354c25f8826d6 | a168cd3b7749afc326ec4326db413378fd3677d5 | refs/heads/master | 2020-03-30T23:19:00.773288 | 2018-11-02T10:47:40 | 2018-11-02T10:47:40 | 151,697,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:Wayne
'''
使用while循环遍历list,tuple,dict,set
使用try……except……
'''
from typing import Iterable
# If the argument is an iterable object, traverse it and print each element
def printIterByWhile(obj):
    """Traverse any iterable with a while loop and print each element.

    Dicts are traversed as (key, value) pairs; a non-iterable argument
    produces a notice instead of raising.
    """
    # Guard clause: bail out early for non-iterable arguments.
    if not isinstance(obj, Iterable):
        print("所给参数不是可迭代类型")
        return
    # Dicts yield (key, value) tuples so both halves are shown.
    source = iter(obj.items()) if isinstance(obj, dict) else iter(obj)
    while True:
        try:
            item = next(source)
        except StopIteration:
            # Iterator exhausted: the manual traversal is complete.
            break
        print(item)
if __name__ == "__main__":
alist = [x for x in range(39)]
atuple = (1, 2, 3, 4, 5, 6, 7)
aset = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
adict = {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
astr = "qwertyuiopasdfghjklzxcvbnm"
printIterByWhile(alist)
printIterByWhile(atuple)
printIterByWhile(aset)
printIterByWhile(adict)
printIterByWhile(astr)
printIterByWhile(123)
| [
"[email protected]"
] | |
06be33f39bb529376eefc5bb5e39140cf58c3760 | 0503295fd59e32bfda7f8fdf4f73a89217ad00bb | /non_geometry_features.py | 8b117dc1282a361a9ad94a1ea4973b8525367bad | [] | no_license | xinyu1905/Nomad2018 | f00e884c8f14bf1fcc4cf675a757b59a7f0dd3e8 | 864a2571f29e74821dbe6220a0143cdf97fac27c | refs/heads/master | 2020-03-19T01:22:23.514215 | 2018-02-01T19:00:37 | 2018-02-01T19:00:37 | 135,540,432 | 0 | 1 | null | 2018-05-31T06:18:26 | 2018-05-31T06:18:26 | null | UTF-8 | Python | false | false | 1,541 | py | import logging
import numpy as np
import global_flags_constanst as gfc
# Module-level logger using the project's shared format; the level comes
# from the global flags/constants module.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(gfc.LOGGING_LEVEL)
def add_number_of_symmetries(space_group_feature):
    """Map each space-group id to its symmetry count.

    Returns an (n, 1) numpy column vector where row i holds the number of
    symmetries (looked up in gfc.SPACE_GROUP_PROPERTIES) of the i-th
    space-group id in *space_group_feature*.
    """
    result = np.zeros((len(space_group_feature), 1))
    for row, raw_group in enumerate(space_group_feature):
        group_id = int(raw_group)
        n_symmetries = gfc.SPACE_GROUP_PROPERTIES[group_id]
        logger.info("space group: {0}; number of symmetries: {1}".format(group_id,
                                                                         n_symmetries))
        result[row] = n_symmetries
    return result
if __name__ == "__main__":
data = np.loadtxt("train.csv", delimiter=",", skiprows=1)
test_data = np.loadtxt("test.csv", delimiter=",", skiprows=1)
ids = data[:, 0].reshape(-1, 1)
space_group_feature = data[:, 1]
test_ids = test_data[:, 0].reshape(-1, 1)
test_space_group_feature = test_data[:, 1]
symmetries_data = add_number_of_symmetries(space_group_feature)
test_symmetries_data = add_number_of_symmetries(test_space_group_feature)
symmetries_data = np.hstack((ids, symmetries_data))
np.savetxt("train_symmetries_data.csv", symmetries_data, delimiter=",")
test_symmetries_data = np.hstack((test_ids, test_symmetries_data))
np.savetxt("test_symmetries_data.csv", test_symmetries_data, delimiter=",") | [
"[email protected]"
] | |
43aa10d574a4924007bedd0ace1fc6033a14c708 | d5b4601af52d5fb28fafd3610709423690996efb | /backend/g_toorie_24935/urls.py | e48c547f3b2f1db37c5c7967fca558e7cf735874 | [] | no_license | crowdbotics-apps/g-toorie-24935 | bd2267020868dd92c63e2d643850fc2d5689b753 | e6fe855538a13b0f140425231b7f646039929625 | refs/heads/master | 2023-03-21T10:32:40.747072 | 2021-03-10T18:37:57 | 2021-03-10T18:37:57 | 346,106,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | """g_toorie_24935 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Root URL map: app routes first, then auth, admin and API endpoints.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]

# Branding for the Django admin site.
admin.site.site_header = "g_toorie"
admin.site.site_title = "g_toorie Admin Portal"
admin.site.index_title = "g_toorie Admin"

# swagger: auto-generated API docs, restricted to authenticated users.
api_info = openapi.Info(
    title="g_toorie API",
    default_version="v1",
    description="API documentation for g_toorie App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]

# Serve the single-page frontend for the root and any unmatched path;
# client-side routing handles the rest.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| [
"[email protected]"
] | |
983e220e9d0025ef7610862a4a9836e32df1be59 | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/theano/gof/type.py | 1dbe55575435f7013f7d09f1709afb94480a31d9 | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 23,725 | py | """
Defines the `Type` class, the interface specification for the types
of `Variable`s in a Theano graph.
"""
from __future__ import absolute_import, print_function, division
import ctypes
from six import string_types
import theano
from theano.gof import utils
from theano.gof.utils import MethodNotDefined, object2
from theano.gof import graph
from theano.configparser import change_flags
########
# Type #
########
from theano.gof.op import CLinkerObject, Op
__docformat__ = "restructuredtext en"
class CLinkerType(CLinkerObject):
    """
    Interface specification for Types that can be arguments to a `CLinkerOp`.

    A CLinkerType instance is mainly responsible for providing the C code
    that interfaces Python objects with a C `CLinkerOp` implementation.

    """

    def c_is_simple(self):
        """
        Optional: return True for small or builtin C types.

        A hint to tell the compiler that this type is a builtin C type or
        a small struct whose memory footprint is negligible; simple
        objects may be passed on the stack.

        """
        return False

    def c_literal(self, data):
        """
        Optional: return a C literal representing *data*.

        Raises
        ------
        MethodNotDefined
            Subclass does not implement this method.

        """
        raise MethodNotDefined("c_literal", type(self),
                               self.__class__.__name__)

    def c_declare(self, name, sub, check_input=True):
        """
        Required: return C code declaring the variables that `c_extract`
        will instantiate.

        Parameters
        ----------
        name : str
            Name of the ``PyObject *`` pointer holding the value for this
            Type. Include it in declared identifiers so name collisions do
            not occur in the generated source file; note the variable may
            not be defined yet at the insertion point (e.g. class members).
        sub : dict of str -> str
            Special codes, most importantly ``sub['fail']`` (see CLinker).

        Raises
        ------
        MethodNotDefined
            Subclass does not implement this method.

        """
        raise MethodNotDefined()

    def c_init(self, name, sub):
        """
        Required: return C code initializing the variables declared by
        `c_declare` (e.g. ``addr_of_%(name)s = NULL;``).

        """
        raise MethodNotDefined("c_init", type(self), self.__class__.__name__)

    def c_extract(self, name, sub, check_input=True):
        """
        Required: return C code extracting the Python value
        ``py_%(name)s`` into the variables declared by `c_declare`.

        On improper data the code must set an appropriate Python exception
        and jump to ``%(fail)s``.

        Parameters
        ----------
        name : str
            Name of the ``PyObject *`` pointer storing the value.
        sub : dict of str -> str
            Special codes, most importantly ``sub['fail']``.

        Raises
        ------
        MethodNotDefined
            Subclass does not implement this method.

        """
        raise MethodNotDefined("c_extract", type(self),
                               self.__class__.__name__)

    def c_extract_out(self, name, sub, check_input=True):
        """
        Optional: C code to extract a PyObject * instance.

        Unlike c_extract, c_extract_out has to accept Py_None,
        meaning that the variable should be left uninitialized.

        """
        return """
        if (py_%(name)s == Py_None)
        {
            %(c_init_code)s
        }
        else
        {
            %(c_extract_code)s
        }
        """ % dict(
            name=name,
            c_init_code=self.c_init(name, sub),
            c_extract_code=self.c_extract(name, sub, check_input))

    def c_cleanup(self, name, sub):
        """
        Return C code to clean up after `c_extract`: deallocate whatever
        `c_extract` allocated or decrease reference counts — but do not
        decrease ``py_%(name)s``'s reference count.

        Raises
        ------
        MethodNotDefined
            Subclass does not implement this method.

        """
        raise MethodNotDefined()

    def c_sync(self, name, sub):
        """
        Required: return C code packing the C value back into
        ``py_%(name)s`` (making it accessible from Python via
        ``variable.data``); adjust reference counts if it is replaced.

        Raises
        ------
        MethodNotDefined
            Subclass does not implement this method.

        """
        raise MethodNotDefined("c_sync", type(self), self.__class__.__name__)

    def c_code_cache_version(self):
        """
        Return a tuple of integers indicating the version of this Type.

        An empty tuple indicates an 'unversioned' Type that will not
        be cached between processes. The cache mechanism may erase cached
        modules superseded by newer versions (see `ModuleCache`).

        """
        return ()
class PureType(object):
    """
    Interface specification for variable type instances.

    A :term:`Type` instance is mainly responsible for two things:

    - creating `Variable` instances (conventionally, `__call__` does this), and

    - filtering a value assigned to a `Variable` so that the value
      conforms to restrictions imposed by the type (also known as
      casting, this is done by `filter`).

    """

    # the type that will be created by call to make_variable.
    Variable = graph.Variable

    # the type that will be created by call to make_constant
    Constant = graph.Constant

    def filter(self, data, strict=False, allow_downcast=None):
        """
        Required: Return data or an appropriately wrapped/converted data.

        Subclass implementation should raise a TypeError exception if
        the data is not of an acceptable type.

        If strict is True, the data returned must be the same as the data
        passed as an argument. If it is False, and allow_downcast is True,
        filter may cast it to an appropriate type; if allow_downcast is
        False, filter may only upcast, not lose precision; if
        allow_downcast is None (default), the behaviour is Type-dependent
        (currently: only Python floats may downcast, to floatX scalars).

        Raises
        ------
        MethodNotDefined
            Subclass doesn't implement this function.

        """
        raise MethodNotDefined("filter", type(self), self.__class__.__name__)

    # If filter_inplace is defined, it will be called instead of
    # filter() This is to allow reusing the old allocated memory. As
    # of this writing this is used only when we transfer new data to a
    # shared variable on the gpu.
    # def filter_inplace(value, storage, strict=False, allow_downcast=None)

    def filter_variable(self, other, allow_convert=True):
        """
        Convert a symbolic variable into this Type, if compatible.

        Non-Variable values are first wrapped in a Constant of this Type.
        Raises TypeError when no conversion is possible.

        """
        if not isinstance(other, graph.Variable):
            # The value is not a Variable: we cast it into
            # a Constant of the appropriate Type.
            other = self.Constant(type=self, data=other)

        if other.type != self and allow_convert:
            other2 = self.convert_variable(other)
            if other2 is not None:
                return other2

        if other.type != self:
            raise TypeError(
                'Cannot convert Type %(othertype)s '
                '(of Variable %(other)s) into Type %(self)s. '
                'You can try to manually convert %(other)s into a %(self)s.'
                % dict(othertype=other.type, other=other, self=self))
        return other

    def convert_variable(self, var):
        """
        Patch variable so that its type will match self, if possible.

        Return None when the variable can't be converted. Conversion is
        only legal when every value valid for self is also valid for
        var's type (e.g. non-broadcastable dims becoming broadcastable).
        The default of converting nothing is always safe.

        """
        return None

    def is_valid_value(self, a):
        """
        Required: Return True for any python object `a` that would be a
        legal value for a Variable of this Type.

        """
        try:
            self.filter(a, strict=True)
            return True
        except (TypeError, ValueError):
            return False

    def value_validity_msg(self, a):
        """
        Optional: Return a message explaining the output of
        is_valid_value.

        """
        return "none"

    def make_variable(self, name=None):
        """
        Return a new `Variable` instance of Type `self`.

        Parameters
        ----------
        name : None or str
            A pretty string for printing and debugging.

        """
        return self.Variable(self, name=name)

    def make_constant(self, value, name=None):
        """Return a new `Constant` of this Type wrapping *value*."""
        return self.Constant(type=self, data=value, name=name)

    def __call__(self, name=None):
        """
        Return a new, trace-tagged `Variable` instance of Type `self`.

        Parameters
        ----------
        name : None or str
            A pretty string for printing and debugging.

        """
        return utils.add_tag_trace(self.make_variable(name))

    def values_eq(self, a, b):
        """
        Return True if a and b can be considered exactly equal.

        a and b are assumed to be valid values of this Type.

        """
        return a == b

    def values_eq_approx(self, a, b):
        """
        Return True if a and b can be considered approximately equal.

        Used by theano debugging tools to decide whether two values are
        equivalent, admitting some numerical instability (e.g. for
        floating point). Defaults to exact comparison.

        Parameters
        ----------
        a
            A potential value for a Variable of this Type.
        b
            A potential value for a Variable of this Type.

        Returns
        -------
        bool

        """
        return self.values_eq(a, b)

    # def get_shape_info(self, obj):
    """
    Optional function. See TensorType().get_shape_info for definition.
    """

    # def get_size(self, shape_info):
    """
    Optional function. See TensorType().get_size for definition.
    """
_nothing = """
"""
class Type(object2, PureType, CLinkerType):
    """
    Convenience wrapper combining `PureType` and `CLinkerType`.

    Theano comes with several subclasses of `Type`, such as:

    - `Generic`: for any python type

    - `TensorType`: for numpy.ndarray

    - `SparseType`: for scipy.sparse

    But you are encouraged to write your own.

    The following code illustrates the use of a Type instance,
    here tensor.fvector:

    .. code-block:: python

        # Declare a symbolic floating-point vector using __call__
        b = tensor.fvector()

        # Create a second Variable with the same Type instance
        c = tensor.fvector()

    Whenever you create a symbolic variable in theano (technically,
    `Variable`) it will contain a reference to a Type instance. That
    reference is typically constant during the lifetime of the
    Variable. Many variables can refer to a single Type instance, as
    do b and c above. The Type instance defines the kind of value
    which might end up in that variable when executing a `Function`.
    In this sense, theano is like a strongly-typed language because
    the types are included in the graph before the values. In our
    example above, b is a Variable which is guaranteed to correspond
    to a numpy.ndarray of rank 1 when we try to do some computations
    with it.

    Many `Op` instances will raise an exception if they are applied to
    inputs with incorrect types. Type references are also useful to
    do type-checking in pattern-based optimizations.

    """
class SingletonType(Type):
    """
    Convenient base class for a Type subclass with no attributes.

    It saves having to implement __eq__ and __hash__.

    """

    __instance = None

    def __new__(cls):
        # If a sub-subclass of SingletonType doesn't redeclare __instance,
        # the lookup would find the parent's instance. We don't want that,
        # so we also check the class with isinstance. Each class caches
        # its own instance, so everything works correctly.
        if cls.__instance is None or not isinstance(cls.__instance, cls):
            cls.__instance = Type.__new__(cls)
        return cls.__instance

    def __str__(self):
        return self.__class__.__name__

    # Even when we try to make a singleton, that does not always hold
    # (e.g. across unpickling), so we compare types instead of identities.
    # See test_type_other.test_none_Constant for an example; hence the
    # explicit __eq__ and __hash__.
    def __eq__(self, other):
        if self is other:
            return True
        if type(self) is type(other):
            return True
        return False

    def __hash__(self):
        return hash(type(self))
class Generic(SingletonType):
    """
    Represents a generic Python object.

    This class implements the `PureType` and `CLinkerType` interfaces
    for generic PyObject instances: values pass through `filter`
    unchanged, and the C implementation simply moves the ``PyObject *``
    around while keeping reference counts consistent.

    """

    def filter(self, data, strict=False, allow_downcast=None):
        # Any Python object is acceptable; no conversion is performed.
        return data

    def is_valid_value(self, a):
        return True

    def c_declare(self, name, sub, check_input=True):
        return """
        PyObject* %(name)s;
        """ % locals()

    def c_init(self, name, sub):
        return """
        %(name)s = NULL;
        """ % locals()

    def c_extract(self, name, sub, check_input=True):
        return """
        Py_INCREF(py_%(name)s);
        %(name)s = py_%(name)s;
        """ % locals()

    def c_cleanup(self, name, sub):
        return """
        Py_XDECREF(%(name)s);
        """ % locals()

    def c_sync(self, name, sub):
        return """
        assert(py_%(name)s->ob_refcnt > 1);
        Py_DECREF(py_%(name)s);
        py_%(name)s = %(name)s ? %(name)s : Py_None;
        Py_INCREF(py_%(name)s);
        """ % locals()

    def c_code_cache_version(self):
        return (1,)

    def __str__(self):
        return self.__class__.__name__
# Shared singleton instance used throughout theano.
generic = Generic()

# Grab the PyCapsule type object via ctypes so isinstance checks against
# capsules are possible (there is no direct Python-level name for it).
_cdata_type = ctypes.py_object.from_address(
    ctypes.addressof(ctypes.pythonapi.PyCapsule_Type)).value
class _make_cdata(Op):
    """Helper Op turning an integer (pointer address) into a CDataType value."""

    __props__ = ('rtype',)

    def __init__(self, rtype):
        assert isinstance(rtype, CDataType)
        self.rtype = rtype

    def do_constant_folding(self, node):
        # Raw pointers must never be folded into compile-time constants.
        return False

    def make_node(self, val):
        from theano.scalar import as_scalar
        from theano import Apply
        # The pointer value travels through the graph as a uint64 scalar.
        val = as_scalar(val).astype('uint64')
        return Apply(self, [val], [self.rtype()])

    def c_code(self, node, name, inputs, outputs, sub):
        return """
        %(out)s = (%(ctype)s)%(inp)s;
        """ % dict(ctype=self.rtype.ctype, out=outputs[0], inp=inputs[0])

    def c_code_cache_version(self):
        return (0,)
class CDataType(Type):
    """
    Represents opaque C data to be passed around. The intent is to
    ease passing arbitrary data between ops C code.

    The constructor builds a type made to represent a C pointer in theano.

    Parameters
    ----------
    ctype
        The type of the pointer (complete with the `*`).

    freefunc
        A function to call to free the pointer. This function must
        have a `void` return and take a single pointer argument.

    """

    __props__ = ('ctype', 'freefunc', 'headers', 'header_dirs',
                 'libraries', 'lib_dirs', 'extra_support_code')

    def __init__(self, ctype, freefunc=None, headers=None, header_dirs=None,
                 libraries=None, lib_dirs=None, extra_support_code=""):
        assert isinstance(ctype, string_types)
        self.ctype = ctype
        if freefunc is not None:
            assert isinstance(freefunc, string_types)
        self.freefunc = freefunc
        # Normalize all compilation-related arguments to tuples so the
        # type stays hashable/comparable through __props__.
        if headers is None:
            headers = ()
        self.headers = tuple(headers)
        if header_dirs is None:
            header_dirs = ()
        self.header_dirs = tuple(header_dirs)
        if libraries is None:
            libraries = ()
        self.libraries = tuple(libraries)
        if lib_dirs is None:
            lib_dirs = ()
        self.lib_dirs = tuple(lib_dirs)
        self.extra_support_code = extra_support_code
        # Lazily-built conversion function, see _get_func().
        self._fn = None

    def filter(self, data, strict=False, allow_downcast=None):
        # Valid values are None or a PyCapsule wrapping the C pointer.
        if data is not None and not isinstance(data, _cdata_type):
            raise TypeError("expected None or a PyCapsule")
        return data

    def _get_func(self):
        """
        Return a function that makes a value from an integer.

        The integer value is assumed to be a valid pointer for the
        type and no check is done to ensure that.

        """
        from theano.scalar import get_scalar_type

        if self._fn is None:
            with change_flags(compute_test_value='off'):
                v = get_scalar_type('int64')()
                self._fn = theano.function([v], _make_cdata(self)(v),
                                           mode=theano.Mode(optimizer=None),
                                           profile=False)
        return self._fn

    def make_value(self, ptr):
        """
        Make a value of this type.

        Parameters
        ----------
        ptr : int
            Integer representation of a valid pointer value

        """
        return self._get_func()(ptr)

    def c_declare(self, name, sub, check_input=True):
        return """
        %(ctype)s %(name)s;
        """ % dict(ctype=self.ctype, name=name)

    def c_init(self, name, sub):
        return "%(name)s = NULL;" % dict(name=name)

    def c_extract(self, name, sub, check_input=True):
        return """
        %(name)s = (%(ctype)s)PyCapsule_GetPointer(py_%(name)s, NULL);
        if (%(name)s == NULL) %(fail)s
        """ % dict(name=name, ctype=self.ctype, fail=sub['fail'])

    def c_support_code(self):
        return """
        void _capsule_destructor(PyObject *o) {
            void *d = PyCapsule_GetContext(o);
            void *p = PyCapsule_GetPointer(o, NULL);
            void (*f)(void *) = (void (*)(void *))d;
            if (f != NULL) f(p);
        }
        """ + self.extra_support_code

    def c_sync(self, name, sub):
        freefunc = self.freefunc
        if freefunc is None:
            freefunc = "NULL"
        s = """
        Py_XDECREF(py_%(name)s);
        if (%(name)s == NULL) {
          py_%(name)s = Py_None;
          Py_INCREF(py_%(name)s);
        } else {
          py_%(name)s = PyCapsule_New((void *)%(name)s, NULL,
                                      _capsule_destructor);
          if (py_%(name)s != NULL) {
            if (PyCapsule_SetContext(py_%(name)s, (void *)%(freefunc)s) != 0) {
              /* This won't trigger a call to freefunc since it could not be
                 set. The error case below will do it. */
              Py_DECREF(py_%(name)s);
              /* Signal the error */
              py_%(name)s = NULL;
            }
          }
        }"""
        if self.freefunc is not None:
            s += """
        if (py_%(name)s == NULL) { %(freefunc)s(%(name)s); }
        """
        return s % dict(name=name, freefunc=freefunc)

    def c_cleanup(self, name, sub):
        # No need to do anything here since the CObject/Capsule will
        # free the data for us when released.
        return ""

    def c_headers(self):
        return self.headers

    def c_header_dirs(self):
        return self.header_dirs

    def c_libraries(self):
        return self.libraries

    def c_lib_dirs(self):
        return self.lib_dirs

    def c_code_cache_version(self):
        return (3,)

    def __str__(self):
        return "%s{%s}" % (self.__class__.__name__, self.ctype)

    def __setstate__(self, dct):
        # Backward compatibility for pickles made before the
        # compilation-related attributes were added.
        self.__dict__.update(dct)
        if not hasattr(self, 'headers'):
            self.headers = ()
            self.header_dirs = ()
            self.libraries = ()
            self.lib_dirs = ()
            self.extra_support_code = ""
class CDataTypeConstant(graph.Constant):
    """Constant wrapper for CDataType values (merged only by identity)."""

    def merge_signature(self):
        # We don't want to merge constants that don't point to the
        # same object.
        return id(self.data)

    def signature(self):
        # There is no way to put the data in the signature, so we
        # don't even try
        return (self.type,)
# Register as the Constant class produced by CDataType.make_constant.
CDataType.Constant = CDataTypeConstant
| [
"[email protected]"
] | |
ab002aea2d27eadf5bcf53b4f0a3367f06297f7f | b977a59c246230cfccf40a4b57283ab5bc278770 | /pyBN/learning/structure/constraint/fast_iamb.py | c3d930b055105dcd1195e8b083920c3948e5b649 | [] | no_license | baturayo/edbn | 6f5c92b413121ededa461afd537a88f965f2af1d | 4b3d3e2e97c1a35908c0237e5aac60e85d75a001 | refs/heads/master | 2020-04-29T17:13:36.986454 | 2019-03-25T16:58:32 | 2019-03-25T16:58:32 | 176,291,155 | 0 | 0 | null | 2019-03-18T13:23:44 | 2019-03-18T13:23:43 | null | UTF-8 | Python | false | false | 4,324 | py | """
*********
Fast-IAMB
Algorithm
*********
For Feature Selection (from [1]):
"A principled solution to the feature selection problem is
to determine a subset of attributes that can "shield" (render
independent) the attribute of interest from the effect of
the remaining attributes in the domain. Koller and Sahami
[4] first showed that the Markov blanket of a given target attribute
is the theoretically optimal set of attributes to predict
its value...
Because the Markov blanket of a target attribute T renders
it statistically independent from all the remaining attributes
(see the Markov blanket definition below), all information
that may influence its value is stored in the values
of the attributes of its Markov blanket. Any attribute
from the feature set outside its Markov blanket can be effectively
ignored from the feature set without adversely affecting
the performance of any classifier that predicts the
value of T"
References
----------
[1] Yaramakala and Maragritis, "Speculative Markov Blanket
Discovery for Optimal Feature Selection"
[2] Tsarmardinos, et al. "Algorithms for Large Scale
Markov Blanket Discovery"
"""
from __future__ import division
import numpy as np
from pyBN.utils.data import unique_bins, replace_strings
from pyBN.utils.independence_tests import are_independent, mi_test
def fast_iamb(data, k=5, alpha=0.05, feature_selection=None, debug=False):
    """
    From [1]:
        "A novel algorithm for the induction of
        Markov blankets from data, called Fast-IAMB, that employs
        a heuristic to quickly recover the Markov blanket. Empirical
        results show that Fast-IAMB performs in many cases
        faster and more reliably than existing algorithms without
        adversely affecting the accuracy of the recovered Markov
        blankets."

    Arguments
    ---------
    *data* : a nested numpy array

    *k* : an integer
        The max number of edges to add at each iteration of
        the algorithm.

    *alpha* : a float
        Probability of Type I error

    *feature_selection* : None or an integer
        When given, only the Markov blanket of this single variable is
        learned and returned (no BayesNet is built).

    *debug* : a boolean
        Whether to print progress messages.

    Returns
    -------
    *bn* : a BayesNet object, or the Markov blanket (a list) when
    *feature_selection* is given.

    Effects
    -------
    None

    Notes
    -----
    - Fixed relative to the original version: sets/lists are no longer
      mutated while being iterated over, the final return used the
      undefined name ``BN``, and the feature-selection branch indexed
      ``Mb`` with a list instead of the variable index. Convergence of
      the search loop itself has not been re-verified.
    """
    # Observed values per column (pre-encoding); used to build the
    # BayesNet value dictionary at the end.
    value_dict = dict(zip(range(data.shape[1]),
                          [list(np.unique(col)) for col in data.T]))
    # replace strings with integer codes
    data = replace_strings(data)

    n_rv = data.shape[1]
    Mb = dict([(rv, []) for rv in range(n_rv)])
    N = data.shape[0]
    card = dict(zip(range(n_rv), unique_bins(data)))

    if feature_selection is None:
        _T = range(n_rv)
    else:
        assert not isinstance(feature_selection, list), 'feature_selection must be only one value'
        _T = [feature_selection]

    # LEARN MARKOV BLANKET
    for T in _T:
        S = set(range(n_rv)) - {T}
        # Iterate over a snapshot: the original removed from S while
        # looping over it, which raises RuntimeError in Python 3.
        for A in list(S):
            if not are_independent(data[:, (A, T)]):
                S.remove(A)

        while S:
            insufficient_data = False

            #### GROW PHASE ####
            # Mutual information of each remaining candidate with T,
            # conditioned on the current blanket.
            mi_dict = dict([(s, mi_test(data[:, (s, T) + tuple(Mb[T])])) for s in S])
            for x_i in sorted(mi_dict, key=mi_dict.get, reverse=True):
                # Add top MI-score variables until there isn't enough data
                # for the contingency-table bins.
                # NOTE(review): precedence makes this
                # (N / card[x_i]) * card[T] * prod(...); the intended
                # heuristic may be N / (card[x_i] * card[T] * prod(...)).
                # Left unchanged to preserve existing results -- confirm.
                if (N / card[x_i] * card[T] * np.prod([card[b] for b in Mb[T]])) >= k:
                    Mb[T].append(x_i)
                else:
                    insufficient_data = True
                    break

            #### SHRINK PHASE ####
            removed_vars = False
            # Snapshot again: Mb[T] is mutated inside the loop.
            for A in list(Mb[T]):
                cols = (A, T) + tuple(set(Mb[T]) - {A})
                # if A is independent of T given the rest of Mb[T], drop A
                if are_independent(data[:, cols]):
                    Mb[T].remove(A)
                    removed_vars = True

            #### FINALIZE BLANKET FOR "T" OR MAKE ANOTHER PASS ####
            if insufficient_data and not removed_vars:
                if debug:
                    print('Breaking..')
                break
            else:
                # Rebuild the candidate set from everything outside the
                # current blanket that still shows dependence on T.
                remaining = set(range(n_rv)) - {T} - set(Mb[T])
                S = set()
                for a in remaining:
                    cols = (a, T) + tuple(Mb[T])
                    if are_independent(data[:, cols]):
                        S.add(a)

        if debug:
            print('Done with %s' % T)

    if feature_selection is None:
        # RESOLVE GRAPH STRUCTURE
        # NOTE(review): resolve_markov_blanket, orient_edges_MB and
        # BayesNet are not imported in this module -- confirm they are
        # available at call time.
        edge_dict = resolve_markov_blanket(Mb, data)
        # ORIENT EDGES
        oriented_edge_dict = orient_edges_MB(edge_dict, Mb, data, alpha)
        # CREATE BAYESNET OBJECT
        bn = BayesNet(oriented_edge_dict, value_dict)
        return bn  # original returned the undefined name `BN`
    else:
        # Original did `Mb[_T]`, indexing the dict with a list (TypeError).
        return Mb[feature_selection]
| [
"[email protected]"
] | |
3eecd1395fded62f776353bc6b65ee0e4ea3aadd | d0530d181fac2d5a1d04afaee1976ab8d74ed7dd | /argparse_demo.py | ea4313e8b581ccc365568dd6fc6d44333ed3bf61 | [] | no_license | guanguanboy/testPython | b587712c12f9a769872bebfe9eabcd4fca486690 | 4dbab2057a2b0264c3e38374283188d4fffc804f | refs/heads/master | 2023-05-16T00:18:52.795866 | 2023-05-09T06:56:27 | 2023-05-09T06:56:27 | 337,029,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | """
https://www.youtube.com/watch?v=q94B9n_2nf0
"""
import argparse
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1)."""
    current, following = 0, 1
    while n > 0:
        current, following = following, current + following
        n -= 1
    return current
def Main():
    """Parse command-line arguments, print the requested Fibonacci
    number at the chosen verbosity, and optionally append the result
    to fibonacci.txt."""
    parser = argparse.ArgumentParser()
    # -v and -q are mutually exclusive flags; both default to False.
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-v", "--verbose", action="store_true")
    group.add_argument("-q", "--quiet", action="store_true")
    parser.add_argument("num", help="The fibonacci number you wish to calculate.", type=int)
    parser.add_argument("-o", "--output", help="Output result to a file.", action="store_true")
    args = parser.parse_args()

    result = fib(args.num)
    if args.verbose:
        print("The " + str(args.num) + "th fib number is " + str(result))
    elif args.quiet:
        print(result)
    else:
        print("Fib(" + str(args.num) + ") = " + str(result))

    if args.output:
        # Context manager guarantees the handle is closed; the original
        # opened the file and never closed it.
        with open("fibonacci.txt", "a") as f:
            f.write(str(result) + '\n')

if __name__ == '__main__':
    Main()
"[email protected]"
] | |
b5cdd6986c2acbfb06b88e72bf32735d0a8eb004 | 2eb386991d9975f0f8440d90de26e950304ac42f | /HackTM2020/count_on_me/aes.py | 1e77d5ba471ace032b7e2f0397b4b6c18787dae7 | [] | no_license | Quintec/CTFs2020 | 2816a66e8a486537c31e5ac25253840bc3a8ffe9 | bdaa327c9f0b0ee16ff95bafcaf65f0df8acd8b9 | refs/heads/master | 2022-12-19T21:39:14.129702 | 2020-10-01T16:49:06 | 2020-10-01T16:49:06 | 281,812,929 | 1 | 0 | null | 2020-10-01T16:49:08 | 2020-07-23T00:37:44 | null | UTF-8 | Python | false | false | 422 | py | from Crypto.Cipher import AES
# This is a demo of the encryption / decryption process.
# NOTE(review): Python 2 only -- str.decode('hex') / str.encode('hex')
# were removed in Python 3 (use bytes.fromhex / bytes.hex instead).
a = 'flagflagflagflag'  # 16-byte plaintext: exactly one AES block
# 32-byte key (AES-256) and 16-byte IV, both supplied as hex strings.
key = '1111111111111111111111111111111111111111111111111111111111111111'.decode('hex')
iv = '42042042042042042042042042042042'.decode('hex')

#encrypt
aes = AES.new(key,AES.MODE_CBC, iv)
c = aes.encrypt(a).encode("hex")
print(c)

#decrypt
# A fresh cipher object is required: CBC mode keeps chaining state, so
# the object used for encryption cannot be reused for decryption.
aes = AES.new(key,AES.MODE_CBC, iv)
print(aes.decrypt(c.decode("hex")))
"[email protected]"
] | |
052adf70d0033e9a2f09e7ada0b84bae66757da5 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17r_2_00/routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/nud/__init__.py | 80a5b33532ac15c901e09a6277bcecb1a0451e96 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,074 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import retry
class nud(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-common-def - based on the path /routing-system/interface/ve/ipv6/ipv6-nd-ra/ipv6-intf-cmds/nd/nud. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # Restrict instances to the generated bookkeeping attributes plus the
  # single child container, `retry` (stored name-mangled as __retry).
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__retry',)

  _yang_name = 'nud'
  _rest_name = 'nud'

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):

    # Resolve the XPath helper: explicit kwarg wins, then the parent's
    # helper, otherwise disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Same resolution order for the extension-method table.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False

    # Default (empty) child container for /nd/nud/retry.
    self.__retry = YANGDynClass(base=retry.retry, is_container='container', presence=False, yang_name="retry", rest_name="retry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'retry attempts', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)

    load = kwargs.pop("load", None)
    # Copy-construction: a single positional argument must carry every
    # element named in _pyangbind_elements; changed values are copied in
    # through the generated setters.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # YANG data path of this container (parent path + own name when
    # attached; absolute default path otherwise).
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'routing-system', u'interface', u've', u'ipv6', u'ipv6-nd-ra', u'ipv6-intf-cmds', u'nd', u'nud']
  def _rest_path(self):
    # REST (tailf) path; differs from the YANG path above.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'interface', u'Ve', u'ipv6', u'nd', u'nud']

  def _get_retry(self):
    """
    Getter method for retry, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/nud/retry (container)
    """
    return self.__retry

  def _set_retry(self, v, load=False):
    """
    Setter method for retry, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/nud/retry (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_retry is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_retry() directly.
    """
    # Unwrap union-typed values before validation.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the supplied value; raises if it cannot be coerced to
      # the retry container type.
      t = YANGDynClass(v,base=retry.retry, is_container='container', presence=False, yang_name="retry", rest_name="retry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'retry attempts', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """retry must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=retry.retry, is_container='container', presence=False, yang_name="retry", rest_name="retry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'retry attempts', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)""",
        })

    self.__retry = t
    # Notify the enclosing object, if it tracks child changes.
    if hasattr(self, '_set'):
      self._set()

  def _unset_retry(self):
    # Reset the child container back to its default (empty) state.
    self.__retry = YANGDynClass(base=retry.retry, is_container='container', presence=False, yang_name="retry", rest_name="retry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'retry attempts', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)

  # Public property plus the element registry used by the pyangbind
  # machinery (and by the copy-constructor loop in __init__).
  retry = __builtin__.property(_get_retry, _set_retry)

  _pyangbind_elements = {'retry': retry, }
| [
"[email protected]"
] | |
cb1cfa2061ba0203e359d8a494bb3e4129ab3d99 | e3178ef1fcc2f11b2608881b11a2545e1b830804 | /pyfurby/restless.py | c9ce40bb815cf5cc596269aa2d9055b8a3a6678d | [
"MIT"
] | permissive | matteoferla/pyfurby | 2dd5b59b88e90777e693b3fd121981c4629a6e54 | 1383b93b432f09ac4fdd07562dedf66509b5809d | refs/heads/main | 2023-03-08T16:54:19.393819 | 2021-02-20T19:36:13 | 2021-02-20T19:36:13 | 330,163,453 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | from typing import get_type_hints
import pydoc
class RestlessFurby:  # "restful" furby mixin
    # NOTE: the docstring of ``restful`` is rendered verbatim by ``_home``
    # (via ``pydoc.getdoc``), so it is kept byte-identical.

    def _resolve_request(self, cmd):
        """Flask view bound to every non-root route: interpret the route
        name as a method of this object, call it with the URL query
        parameters as keyword arguments, and report the outcome as a
        JSON-able status dict."""
        try:
            from flask import Flask, request
            query = request.args
            print(f'Request {cmd}: {query} from {request.remote_addr}')
            getattr(self, cmd)(**query)
        except Exception as exc:
            return {'status': 'error',
                    'error': f'{type(exc).__name__}: {exc}'}
        return {'status': 'OK'}

    def _home(self):
        """Flask view bound to the root route: build a markdown help page
        listing every public command with its type hints and docstring."""
        parts = ['## Furby Restful API options\n\n']
        parts.append('To trigger a command, say `furby.yell`, use 198.162.1/0.xx:1998/yell?text=I%20hate%20you ' +
                     'where xx is the furby\'s netword address\n')
        parts.append('Namely, the route (part before the question mark) is the command, and its arguments are ' +
                     'key=value separated by an ampersand (that is a URL query).\n' +
                     'Using Pythod requests, just submit it as a dictionary\n')
        # Document every attribute whose name does not start with "_".
        for name in dir(self):
            if name.startswith('_'):
                continue
            member = getattr(self, name)
            parts.append(f'###{name}\n>{get_type_hints(member)}\n{pydoc.getdoc(member)}\n\n')
        return ''.join(parts)

    def restful(self):
        """
        The furby listens on port 1998, the year the Furby was introduced (Nawww).
        Note that it is using Flask's internal app serving method, so is not suitable for use over the internet...
        :return:
        """
        from flask import Flask
        import waitress
        site = Flask(__name__)
        site.add_url_rule('/', 'home', self._home)
        site.add_url_rule('/<cmd>', 'command', self._resolve_request)
        waitress.serve(site, port=1998, host='0.0.0.0')
| [
"[email protected]"
] | |
f731a090e91a2638b256e73ffab2478b8bd0d195 | b6472217400cfce4d12e50a06cd5cfc9e4deee1f | /sites/top/api/rest/FenxiaoProductImageDeleteRequest.py | b41002248a940d2d3ed32f64a24e4503466cfdc4 | [] | no_license | topwinner/topwinner | 2d76cab853b481a4963826b6253f3fb0e578a51b | 83c996b898cf5cfe6c862c9adb76a3d6a581f164 | refs/heads/master | 2021-01-22T22:50:09.653079 | 2012-08-26T19:11:16 | 2012-08-26T19:11:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | '''
Created by auto_sdk on 2012-08-26 16:43:44
'''
from top.api.base import RestApi
class FenxiaoProductImageDeleteRequest(RestApi):
    """Auto-generated request wrapper for the Taobao TOP API call
    ``taobao.fenxiao.product.image.delete``."""

    def __init__(self,domain,port):
        RestApi.__init__(self,domain, port)
        # Request parameters; the caller populates these before the
        # request is sent by the RestApi base class.
        self.position = None
        self.product_id = None
        self.properties = None

    def getapiname(self):
        # API method name consumed by the RestApi base class.
        return 'taobao.fenxiao.product.image.delete'
| [
"[email protected]"
] | |
6c67af0b7b41e76d5d8eb85986cbe08b3eb3aaac | 7233ff4c7cbb5d0f5e43a44800d0edddc2793b84 | /Players/Combine2.py | 682b4aa0bfe9b56f303869c63e5474dad35b2629 | [] | no_license | SoumitraAgarwal/Webscraping-Tennis-Grand | b3d97be13e67b285aa1303815ee58e5c693fa5df | 45d0023e7c20ebcb230827f4a89c2669fcaee6fd | refs/heads/master | 2021-01-02T08:23:28.373915 | 2017-09-30T23:02:44 | 2017-09-30T23:02:44 | 98,999,991 | 5 | 2 | null | 2017-09-11T21:32:45 | 2017-08-01T12:40:51 | HTML | UTF-8 | Python | false | false | 506 | py | import numpy as np
import cv2
import os
# Blend batches of images from Pictures2/ into composite output frames.
base = 'Pictures2/'
# NOTE(review): os.listdir order is arbitrary; sort if ordering matters.
images = os.listdir(base)

# Windows start every 50 files but span up to 100, so consecutive windows
# overlap -- presumably intentional; confirm.
for j in range(0,len(images), 50):
    output = cv2.imread(base+images[j])
    # NOTE(review): assumes images[j + 1] exists; a listing whose length
    # is an exact multiple of 50 plus one would raise IndexError here.
    image1 = cv2.imread(base+images[j + 1])
    # Seed the accumulator with a faint (1/100-weight) blend of the
    # first two images, written into `output` in place.
    cv2.addWeighted(image1, 1.0/100, output, 1.0/100, 0, output)
    for i in range(j + 2,min(j + 100, len(images))):
        # load the image
        image1 = cv2.imread(base+images[i])
        # Accumulate each remaining image into `output` in place.
        # NOTE(review): the per-image weight here differs from the seed
        # weights above -- verify the intended blending formula.
        cv2.addWeighted(image1, 1.0/min(100, len(images) - j), output, 1, 0, output)
    cv2.imwrite("OutputComb" + str(j) + ".jpg", output)
| [
"[email protected]"
] | |
b4633535520e64b12b41f1fcd1ab0e4c8e4330b6 | 41581af29553c967f1c3bfcce4cbb45128093b22 | /problems/mrna/mrna.py | e9fb08aa812a183bbac2f027437a528d3b708673 | [] | no_license | pratishhegde/rosalind | 818105992dd6169a09b737e5ec171a52cb58ccd8 | bd06ae574d08f1a8a34c79d7da3ae8b10cb63dbe | refs/heads/master | 2021-01-16T18:23:12.557850 | 2014-05-27T21:49:56 | 2014-05-27T21:49:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | # -*- coding: utf-8 -*-
"""
Problem
For positive integers a and n, a modulo n (written amodn in shorthand) is the
remainder when a is divided by n. For example, 29mod11=7 because 29=11×2+7.
Modular arithmetic is the study of addition, subtraction, multiplication, and
division with respect to the modulo operation. We say that a and b are congruent
modulo n if amodn=bmodn; in this case, we use the notation a≡bmodn.
Two useful facts in modular arithmetic are that if a≡bmodn and c≡dmodn, then
a+c≡b+dmodn and a×c≡b×dmodn. To check your understanding of these rules, you may
wish to verify these relationships for a=29, b=73, c=10, d=32, and n=11.
As you will see in this exercise, some Rosalind problems will ask for a (very
large) integer solution modulo a smaller number to avoid the computational
pitfalls that arise with storing such large numbers.
Given: A protein string of length at most 1000 aa.
Return: The total number of different RNA strings from which the protein could
have been translated, modulo 1,000,000. (Don't neglect the importance of the
stop codon in protein translation.)
Sample Dataset
MA
Sample Output
12
"""
import sys
sys.path.append('../../')
import rosalind_utils
def mrna():
seq = open("rosalind_mrna.txt").read().strip()
n = 1
for b in seq:
diff_code = len([x for x in rosalind_utils.GENCODE
if rosalind_utils.GENCODE[x]==b])
n = (n*diff_code) % 10**6
# stop codon
n = (n*3) % 10**6
print n
| [
"[email protected]"
] | |
77d3d4a0e6278746b35c702c3de1bad1ad5bd8af | ce9b0888a82fcb3d9fde60098ee4e9fbc186e7cd | /djangoapi/settings/base.py | e851795bc160542dd9ea32e0570efa53d2db58f1 | [] | no_license | Sushmi-pal/To-Do-List | b84faa3f449e1e646136543e3796c35194401eb1 | c2e04fadcc307e72d2accbae2783bc04cf74bcd9 | refs/heads/master | 2023-01-12T11:03:19.904300 | 2020-11-11T07:29:35 | 2020-11-11T07:29:35 | 311,892,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | """
Django settings for djangoapi project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# This module lives in the djangoapi/settings/ package, so step up one
# more level to point BASE_DIR at the project root.
BASE_DIR=os.path.dirname(BASE_DIR)

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): SECRET_KEY, DEBUG, ALLOWED_HOSTS and DATABASES are not set
# in this base module -- presumably the environment-specific settings
# modules provide them; verify before deploying.
# SECURITY WARNING: allowing every CORS origin is acceptable only for
# development.
CORS_ORIGIN_ALLOW_ALL=True

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'new',
    'rest_framework',
    'corsheaders',
]

MIDDLEWARE = [
    # CorsMiddleware must run before CommonMiddleware so CORS headers
    # are added to responses.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'djangoapi.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to app templates.
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'djangoapi.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
da8ecc7359ae6638d5fb2798338f29b32ef9b009 | 34c2d81e0887d0381096717eebc28e4e1a2a2234 | /setup.py | ef33f011bb234d9a151986128bfdd8c2597b838d | [] | no_license | digital4rensics/canari | 06634d9f5154f08b1d5407bd3972d9bd8d52c166 | ec0c9edb74958a388e8bea279289995dbc9cf5e7 | refs/heads/master | 2021-01-18T15:35:52.236835 | 2012-12-25T05:58:29 | 2012-12-25T05:58:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
from os import name
# Console scripts shipped with the package.
scripts = [
    'src/scripts/canari',
    'src/scripts/pysudo',
    'src/scripts/dispatcher',
]

# NOTE(review): `extras` is defined but never passed to setup() below.
extras = [
    'readline'
]

# On Windows, ship the .bat wrappers alongside the plain scripts.
if name == 'nt':
    scripts += ['%s.bat' % s for s in scripts]

setup(
    name='canari',
    author='Nadeem Douba',
    version='0.5',
    author_email='[email protected]',
    description='Rapid transform development and transform execution framework for Maltego.',
    license='GPL',
    packages=find_packages('src'),
    package_dir={ '' : 'src' },
    scripts=scripts,
    zip_safe=False,
    package_data={
        '' : [ '*.conf', '*.plate' ]
    },
    install_requires=[
        'pexpect',
        'argparse'
    ],
    dependency_links=[]
)
| [
"[email protected]"
] | |
0c2c82cd75939f224f4cb1936d9f3623cadbb4c0 | 4130bc72387f78ded674db06f04e759658dfbda4 | /setup.py | 1a331e0435675d937c4743cd37719b272f0d5192 | [
"BSD-2-Clause"
] | permissive | soukicz/nginx-amplify-agent | 231d9f2371627ab836adf3baea37a6e2b9c0716c | 484f20a902ed07dc4b50107c0ad6c5d7f14e4681 | refs/heads/master | 2021-01-12T11:12:22.770574 | 2016-11-02T17:25:22 | 2016-11-02T17:25:22 | 72,869,483 | 0 | 0 | null | 2016-11-04T17:18:48 | 2016-11-04T17:18:47 | null | UTF-8 | Python | false | false | 1,464 | py | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, '%s/amplify' % os.getcwd())
from setuptools import setup, find_packages
from amplify.agent.common.util.host import is_deb, is_rpm, is_amazon
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev"]
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "[email protected]"

# Configuration files installed outside the Python package tree.
data_files = [
    ('/etc/amplify-agent/', [
        'etc/agent.conf.default',
    ]),
    ('/etc/logrotate.d/', ['etc/logrotate.d/amplify-agent'])
]

# Pick the init script matching the host's packaging flavour.
if is_rpm() or is_amazon():
    data_files.append(
        ('/etc/init.d/', ['etc/chkconfig/amplify-agent'])
    )
elif is_deb():
    data_files.append(
        ('/etc/init.d/', ['etc/init.d/amplify-agent']),
    )

setup(
    name="nginx-amplify-agent",
    version="0.40",
    author="Mike Belov",
    author_email="[email protected]",
    description="NGINX Amplify Agent",
    keywords="amplify agent nginx",
    # Fixed malformed URL: the original read "https:/amplify.nginx.com/".
    url="https://amplify.nginx.com/",
    packages=find_packages(exclude=[
        "*.test", "*.test.*", "test.*", "test",
        "tools", "tools.*", "packages", "packages.*"]),
    # Pre-built shared objects bundled inside the package.
    package_data={'amplify': [
        'gevent/*.so',
        'psutil/*.so',
        '*.so',
    ]},
    data_files=data_files,
    scripts=[
        'nginx-amplify-agent.py'
    ],
    entry_points={},
    long_description='NGINX Amplify Agent',
)
| [
"[email protected]"
] | |
141adecde80698fe1f2a91f82fc04de378d19e86 | f151d2e8ce0f09069f76a2719fcc4bc106f90e15 | /venv/lib/python3.6/site-packages/itsdangerous/serializer.py | 840c030e6858dae807d520a4f6eada8bceda64b2 | [] | no_license | Ali-Khakpash/flask-admin | b8d71e85edb644f8f3754ea8bdbcc8f79e0425e3 | f2beab858368dabe5c9f48b2e41ff8ddbca0fdae | refs/heads/master | 2020-12-02T04:10:38.978578 | 2020-06-05T09:47:17 | 2020-06-05T09:47:17 | 230,882,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,650 | py | import hashlib
from ._compat import text_type
from ._json import json
from .encoding import want_bytes
from .exc import BadPayload
from .exc import BadSignature
from .signer import Signer
def is_text_serializer(serializer):
    """Checks whether a serializer generates text or binary."""
    probe = serializer.dumps({})
    return isinstance(probe, text_type)
class Serializer(object):
    """This class provides a serialization interface on top of the
    signer. It provides a similar API to json/pickle and other modules
    but is structured differently internally. If you want to change the
    underlying implementation for parsing and loading you have to
    override the :meth:`load_payload` and :meth:`dump_payload`
    functions.

    This implementation uses simplejson if available for dumping and
    loading and will fall back to the standard library's json module if
    it's not available.

    You do not need to subclass this class in order to switch out or
    customize the :class:`.Signer`. You can instead pass a different
    class to the constructor as well as keyword arguments as a dict that
    should be forwarded.

    .. code-block:: python

        s = Serializer(signer_kwargs={'key_derivation': 'hmac'})

    You may want to upgrade the auth parameters without invalidating
    existing signatures that are in use. Fallback signatures can be
    given that will be tried if unsigning with the current signer fails.

    Fallback signers can be defined by providing a list of
    ``fallback_signers``. Each item can be one of the following: a
    signer class (which is instantiated with ``signer_kwargs``,
    ``salt``, and ``secret_key``), a tuple
    ``(signer_class, signer_kwargs)``, or a dict of ``signer_kwargs``.

    For example, this is a serializer that signs using SHA-512, but will
    unsign using either SHA-512 or SHA1:

    .. code-block:: python

        s = Serializer(
            signer_kwargs={"digest_method": hashlib.sha512},
            fallback_signers=[{"digest_method": hashlib.sha1}]
        )

    .. versionchanged:: 0.14:
        The ``signer`` and ``signer_kwargs`` parameters were added to
        the constructor.

    .. versionchanged:: 1.1.0:
        Added support for ``fallback_signers`` and configured a default
        SHA-512 fallback. This fallback is for users who used the yanked
        1.0.0 release which defaulted to SHA-512.
    """

    #: If a serializer module or class is not passed to the constructor
    #: this one is picked up. This currently defaults to :mod:`json`.
    default_serializer = json

    #: The default :class:`Signer` class that is being used by this
    #: serializer.
    #:
    #: .. versionadded:: 0.14
    default_signer = Signer

    #: The default fallback signers.
    default_fallback_signers = [{"digest_method": hashlib.sha512}]

    def __init__(
        self,
        secret_key,
        salt=b"itsdangerous",
        serializer=None,
        serializer_kwargs=None,
        signer=None,
        signer_kwargs=None,
        fallback_signers=None,
    ):
        # Key material is normalized to bytes up front.
        self.secret_key = want_bytes(secret_key)
        self.salt = want_bytes(salt)
        if serializer is None:
            serializer = self.default_serializer
        self.serializer = serializer
        # Cached once: whether dumps() of this serializer produces text.
        self.is_text_serializer = is_text_serializer(serializer)
        if signer is None:
            signer = self.default_signer
        self.signer = signer
        self.signer_kwargs = signer_kwargs or {}
        if fallback_signers is None:
            # Copy the class default so instances never share the list.
            fallback_signers = list(self.default_fallback_signers or ())
        self.fallback_signers = fallback_signers
        self.serializer_kwargs = serializer_kwargs or {}

    def load_payload(self, payload, serializer=None):
        """Loads the encoded object. This function raises
        :class:`.BadPayload` if the payload is not valid. The
        ``serializer`` parameter can be used to override the serializer
        stored on the class. The encoded ``payload`` should always be
        bytes.
        """
        if serializer is None:
            serializer = self.serializer
            is_text = self.is_text_serializer
        else:
            is_text = is_text_serializer(serializer)
        try:
            if is_text:
                payload = payload.decode("utf-8")
            return serializer.loads(payload)
        except Exception as e:
            # Any failure in decode/deserialize is wrapped so callers
            # only need to handle BadPayload.
            raise BadPayload(
                "Could not load the payload because an exception"
                " occurred on unserializing the data.",
                original_error=e,
            )

    def dump_payload(self, obj):
        """Dumps the encoded object. The return value is always bytes.
        If the internal serializer returns text, the value will be
        encoded as UTF-8.
        """
        return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))

    def make_signer(self, salt=None):
        """Creates a new instance of the signer to be used. The default
        implementation uses the :class:`.Signer` base class.
        """
        if salt is None:
            salt = self.salt
        return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)

    def iter_unsigners(self, salt=None):
        """Iterates over all signers to be tried for unsigning. Starts
        with the configured signer, then constructs each signer
        specified in ``fallback_signers``.
        """
        if salt is None:
            salt = self.salt
        yield self.make_signer(salt)
        for fallback in self.fallback_signers:
            # Each fallback entry may be a kwargs dict, a
            # (signer_class, kwargs) tuple, or a bare signer class.
            if type(fallback) is dict:
                kwargs = fallback
                fallback = self.signer
            elif type(fallback) is tuple:
                fallback, kwargs = fallback
            else:
                kwargs = self.signer_kwargs
            yield fallback(self.secret_key, salt=salt, **kwargs)

    def dumps(self, obj, salt=None):
        """Returns a signed string serialized with the internal
        serializer. The return value can be either a byte or unicode
        string depending on the format of the internal serializer.
        """
        payload = want_bytes(self.dump_payload(obj))
        rv = self.make_signer(salt).sign(payload)
        if self.is_text_serializer:
            rv = rv.decode("utf-8")
        return rv

    def dump(self, obj, f, salt=None):
        """Like :meth:`dumps` but dumps into a file. The file handle has
        to be compatible with what the internal serializer expects.
        """
        f.write(self.dumps(obj, salt))

    def loads(self, s, salt=None):
        """Reverse of :meth:`dumps`. Raises :exc:`.BadSignature` if the
        signature validation fails.
        """
        s = want_bytes(s)
        last_exception = None
        # Try the primary signer first, then each fallback; only if every
        # signer rejects the signature is the last error re-raised.
        for signer in self.iter_unsigners(salt):
            try:
                return self.load_payload(signer.unsign(s))
            except BadSignature as err:
                last_exception = err
        raise last_exception

    def load(self, f, salt=None):
        """Like :meth:`loads` but loads from a file."""
        return self.loads(f.read(), salt)

    def loads_unsafe(self, s, salt=None):
        """Like :meth:`loads` but without verifying the signature. This
        is potentially very dangerous to use depending on how your
        serializer works. The return value is ``(signature_valid,
        payload)`` instead of just the payload. The first item will be a
        boolean that indicates if the signature is valid. This function
        never fails.

        Use it for debugging only and if you know that your serializer
        module is not exploitable (for example, do not use it with a
        pickle serializer).

        .. versionadded:: 0.15
        """
        return self._loads_unsafe_impl(s, salt)

    def _loads_unsafe_impl(self, s, salt, load_kwargs=None, load_payload_kwargs=None):
        """Low level helper function to implement :meth:`loads_unsafe`
        in serializer subclasses.
        """
        try:
            return True, self.loads(s, salt=salt, **(load_kwargs or {}))
        except BadSignature as e:
            # Signature failed: still attempt to decode the raw payload,
            # reporting it as unverified.
            if e.payload is None:
                return False, None
            try:
                return (
                    False,
                    self.load_payload(e.payload, **(load_payload_kwargs or {})),
                )
            except BadPayload:
                return False, None

    def load_unsafe(self, f, *args, **kwargs):
        """Like :meth:`loads_unsafe` but loads from a file.

        .. versionadded:: 0.15
        """
        return self.loads_unsafe(f.read(), *args, **kwargs)
| [
"[email protected]"
] | |
4c6cbdca46382716a0c157a4ee44adfc026d21b9 | 4908b1d34d69c1cb652f25049552562574e1075f | /2020/Day-24/Lobby_Layout/example.py | b97fbca0cf8b5178a0781e9cb2a4930254914c64 | [
"MIT"
] | permissive | sreekesari-vangeepuram/adventofcode | 3d4ad98a25a30640182d928538b421e00ad8259d | 645531be0208affe042ac0328105b9ef3cfc9dbf | refs/heads/main | 2023-07-26T13:36:03.036721 | 2021-08-11T08:27:25 | 2021-08-11T08:27:25 | 317,850,039 | 1 | 0 | MIT | 2021-08-11T08:27:26 | 2020-12-02T12:08:13 | Go | UTF-8 | Python | false | false | 882 | py | #!/usr/bin/env python
from collections import defaultdict
# Cube coordinates for the six hex directions (each triple sums to 0):
# nw, se in YZ plane
# ne, sw in XZ plane
# w , e  in XY plane
position = {
    "nw": (0, +1, -1), "ne": (+1, 0, -1),
    "w" : (-1, +1, 0), "e" : (+1, -1, 0),
    "sw": (-1, 0, +1), "se": (0, -1, +1),
}
# `position` source: https://www.redblobgames.com/grids/hexagons/
#    NW  *  NE
#  *   \   /   *
# W  * - * - *  E
#  *   /   \   *
#    SW  *  SE

# Tokenise a line such as "esenee" into ["e", "se", "ne", "e"]: every
# direction name ends in "e" or "w", so inserting a space after each of
# those letters splits the string cleanly.
fmt_line = lambda line: line \
    .replace("e", "e ") \
    .replace("w", "w ") \
    .split()

ins_list = list(map(fmt_line, open("sample.txt").read().strip().split("\n")))

# tiles maps a cube coordinate to 0 (white) or 1 (black); XOR flips the
# tile each time a path of moves ends on it.
tiles = defaultdict(int)
for ins in ins_list:
    x = y = z = 0
    for dx, dy, dz in [position[_in] for _in in ins]:
        x += dx; y += dy; z += dz
    tiles[x, y, z] ^= 1

print(f"Number of black sides facing-up: {sum(tiles.values())}")
| [
"[email protected]"
] | |
04479afbaa7d4c54e40051a2426054b6ca0c0aad | 169d809f45dedcaa3c7b1b49912d8b025abe18d9 | /date_connvert.py | 7f68a922ad5d38c34dcb486b4b74a0557a63f1f1 | [] | no_license | bermec/challenges | 8a82d1d38d1ed1a0fc3f258443bc0054efc977a6 | 9fb092f20f12b4eaa808e758f00f482a49346c88 | refs/heads/master | 2021-10-08T05:05:56.803332 | 2018-12-08T00:20:20 | 2018-12-08T00:20:20 | 109,448,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py |
import datetime
from datetime import date
def str2int(strng):
    """Convert each digit character of *strng* to an ``int``.

    Non-digit characters are kept unchanged; the results are returned
    as a tuple in their original order.
    """
    return tuple(int(ch) if ch.isdigit() else ch for ch in strng)
# Take the first of two ISO dates, build a datetime.date from it, and
# print it as "Month, D, YYYY".
a = '2015-07-01 2019-04-06'
a = a.split()
print('a ', a)
b = a[0].split('-')
print('b', b)
year = int(b[0])
month = int(b[1])
day = int(b[2])
z = datetime.date(year, month, day)
print(z)
print(type(z))
# Bug fix: the original '%#B, %#d, %Y' relies on the Windows-only '#'
# strftime flag (glibc interprets '%#B' as a case toggle, yielding e.g.
# 'jULY, 01').  Build the intended "July, 1, 2015" text portably instead.
nice_z = f"{z:%B}, {z.day}, {z.year}"
print('nice_z: ', nice_z)
| [
"[email protected]"
] | |
43b23642d653c51031076e53d722728e53160630 | 09ce9635b0e74ba178e98efd0d5229a25995713e | /submissions/arc034/b.py | 67f01974b68e9681ad402eff11836072f3cd9ee8 | [
"Unlicense"
] | permissive | m-star18/atcoder | 7575f1e1f3ee1dfa4a765493eb17b4ef0ad5f1f0 | 08e475810516602fa088f87daf1eba590b4e07cc | refs/heads/main | 2023-07-14T09:16:42.807150 | 2021-08-22T15:59:48 | 2021-08-22T15:59:48 | 364,458,316 | 1 | 0 | Unlicense | 2021-08-22T15:59:49 | 2021-05-05T04:13:03 | Python | UTF-8 | Python | false | false | 408 | py | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)


def solve(n):
    """Find every m < n with m + digitsum(m) == n.

    Returns ``[count, m1, m2, ...]`` with the matches in increasing
    order, mirroring the script's output lines.

    Bug fix: the original scanned ``range(max(0, n - 153), n)``, but an
    18-digit m can have digit sum up to 9 * 18 = 162, so answers near
    10**18 were missed (e.g. m = 10**18 - 1 for n = m + 162).  The
    window is widened to 162.
    """
    hits = []
    for m in range(max(0, n - 162), n):
        if m + sum(int(d) for d in str(m)) == n:
            hits.append(m)
    return [len(hits)] + hits


def main():
    """Read N from stdin and print the count followed by each match."""
    n = int(readline())
    for value in solve(n):
        print(value)


if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
278dd8023b33a8ac37253da6d123844c226c6d0b | 38372fcc2ca58798176267360ff07f886400bc7b | /core_arrears/filters.py | f5f875ecce205bf8110cb12b1ecce51054584da2 | [] | no_license | portman-asset-finance/_GO_PAF | 4eb22c980aae01e0ad45095eb5e55e4cb4eb5189 | ee93c49d55bb5717ff1ce73b5d2df6c8daf7678f | refs/heads/master | 2020-09-21T05:22:10.555710 | 2019-11-28T16:44:17 | 2019-11-28T16:44:17 | 224,691,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | import datetime
from django.contrib.auth.models import User
from .models import arrears_summary_agreement_level, arrears_summary_arrear_level
from core.models import ncf_dd_schedule
import django_filters
class arrears_summary_agreement_level_Filter(django_filters.FilterSet):
    """Filter set for agreement-level arrears summary rows.

    Declares case-insensitive substring search on the agreement id and
    the customer company name; every other model field gets the default
    filter generated by django-filter.
    """
    # icontains => case-insensitive "contains" match instead of exact equality.
    arr_agreement_id = django_filters.CharFilter(lookup_expr='icontains')
    arr_customercompanyname = django_filters.CharFilter(lookup_expr='icontains')
    class Meta:
        model = arrears_summary_agreement_level
        # Internal tier id is not a useful end-user filter.
        exclude = ['agreementproducttierid']
class arrears_summary_arrear_level_Filter(django_filters.FilterSet):
    """Filter set for arrear-level summary rows.

    Like the agreement-level filter, but also lets the user filter by due
    date and by the assigned primary collections agent, and seeds unbound
    query parameters from each filter's ``initial`` value.
    """
    def __init__(self, data=None, *args, **kwargs):
        # if filterset is bound, use initial values as defaults
        if data is not None:
            # get a mutable copy of the QueryDict
            data = data.copy()
            for name, f in self.base_filters.items():
                initial = f.extra.get('initial')
                # filter param is either missing or empty, use initial as default
                if not data.get(name) and initial:
                    data[name] = initial
        super(arrears_summary_arrear_level_Filter, self).__init__(data, *args, **kwargs)
    # Get current due date
    # NOTE(review): this runs a DB query at class-definition (import) time and
    # the result does not appear to be used by the filters below — presumably
    # it was meant to feed an `initial=` on ara_due_date; confirm and either
    # wire it up or drop it.  It will also raise if no row has dd_status_id 999.
    initial_default_date_queryset = ncf_dd_schedule.objects.filter(dd_status_id='999')[:1].get()
    ara_agreement_id = django_filters.CharFilter(lookup_expr='icontains')
    ara_customercompanyname = django_filters.CharFilter(lookup_expr='icontains')
    ara_due_date = django_filters.DateFilter()
    # Restrict the "Assigned" dropdown to users in the primary-agents group.
    ara_agent_id = django_filters.ModelChoiceFilter(queryset=User.objects.filter(groups__name='NCF_Collections_PrimaryAgents'),label=('Assigned'))
    class Meta:
        model = arrears_summary_arrear_level
        exclude = ['agreementproducttierid']
"[email protected]"
] | |
19aa0dcfc90a3620be3c0497b99296840035f999 | bc6681b8d8b294ec6a1c52bf0ad5275af3ecc9e4 | /wsex/utils/__init__.py | 612d2689fbeca4ffa46a83d9788a843f47c33064 | [] | no_license | zhuchen0310/WSEX | bab9d410148bcfefce5581eb26b811c634681378 | bf9ab77e07f64d9b759d2661c7481f0efbd38139 | refs/heads/master | 2022-07-22T10:16:36.829811 | 2020-04-28T15:18:05 | 2020-04-28T15:18:05 | 178,389,202 | 3 | 0 | null | 2022-07-05T21:32:10 | 2019-03-29T11:00:02 | Python | UTF-8 | Python | false | false | 74 | py | #! /usr/bin/python
# -*- coding:utf-8 -*-
# @zhuchen : 2019-07-02 10:24 | [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.