code (string, lengths 13–1.2M) | order_type (string, 1 class) | original_example (dict) | step_ids (list, lengths 1–5)
---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 7/02/2014
@author: marco
FACIL 2014 environment generator
'''
import wx
from formgenerador import FrameGeneral
from Dial_Pagina import ObjPagina


class IncioInterface(FrameGeneral):
    def __init__(self):
        #self.log = ObLog('Inicio programa')
        #self.log.setNivel(0)  # debug
        FrameGeneral.__init__(self, None)
        FrameGeneral.SetTitle(self, u"Administrador de Aplicacion FACIL")
        #iconFile = u"imagenes/2s.ico"
        #FrameGeneral.SetIcon(self, wx.Icon(iconFile, wx.BITMAP_TYPE_ICO))
        #self.Bind(wx.EVT_MENU, self.onConfig, self.f2s_mConfig)
        self.__inicio()
        self.dibujarPizarra()
        # Menu events
        self.Bind(wx.EVT_MENU, self.onDefPagina, self.f2s_menuTamPapel)
        self.f2s_Pizarra.Bind(wx.EVT_PAINT, self.onPaint)

    def __inicio(self):
        # Global variable assignment
        self.Guadar = False
        self.borde = 20
        self.AnchoPagina = 8.5 * 72
        self.AltoPagina = 11 * 72
        self.objfacil = []
        self.objFormatos = []
        self._initBuffer()

    def onDefPagina(self, event):
        pagina = ObjPagina(self.Parent)
        if pagina.orientar is None:
            return
        print pagina.orientar
        print pagina.papel
        if pagina.orientar == 0 or pagina.orientar == 2:  # Vertical
            self.AnchoPagina = pagina.papel[0] * 72
            self.AltoPagina = pagina.papel[1] * 72
        else:                                             # Horizontal
            self.AnchoPagina = pagina.papel[1] * 72
            self.AltoPagina = pagina.papel[0] * 72
        print self.AnchoPagina
        print self.AltoPagina
        self.dibujarPizarra()
        self.wrapDC = lambda dc: dc

    def dibujarPizarra(self):
        print "dibujar Pizarra"
        self.f2s_Pizarra.SetBackgroundColour('white')
        self.f2s_Pizarra.EnableScrolling(True, True)
        self.f2s_Pizarra.SetScrollbars(20, 20, (self.AnchoPagina + self.borde * 2) / 20,
                                       (self.AltoPagina + self.borde * 2) / 20)

    def onPaint(self, event):
        """Called when the window is exposed."""
        print "onPaint"
        # Create a buffered paint DC. It will create the real
        # wx.PaintDC and then blit the bitmap to it when dc is
        # deleted.
        dc = wx.BufferedPaintDC(self.f2s_Pizarra, self.buffer)
        # On Windows, if that's all we do things look a little rough.
        # So in order to make scrolling more polished-looking
        # we iterate over the exposed regions and fill in unknown
        # areas with a fall-back pattern.
        dc.SetPen(wx.Pen(wx.BLUE, 1, wx.SOLID))
        dc.DrawRectangle(self.borde, self.borde, self.AnchoPagina, self.AltoPagina)
        print self.borde, self.borde, self.AnchoPagina, self.AltoPagina
        if wx.Platform != '__WXMSW__':
            return
        print "Windows?"
        # First get the update rects and subtract off the part that
        # self.buffer already has correct
        region = self.f2s_Pizarra.GetUpdateRegion()
        panelRect = self.f2s_Pizarra.GetClientRect()
        offset = list(self.f2s_Pizarra.CalcUnscrolledPosition(0, 0))
        offset[0] -= self.saved_offset[0]
        offset[1] -= self.saved_offset[1]
        region.Subtract(-offset[0], -offset[1], panelRect.Width, panelRect.Height)
        # Now iterate over the remaining region rects and fill in with a pattern
        rgn_iter = wx.RegionIterator(region)
        if rgn_iter.HaveRects():
            self.setBackgroundMissingFillStyle(dc)
            offset = self.f2s_Pizarra.CalcUnscrolledPosition(0, 0)
        while rgn_iter:
            r = rgn_iter.GetRect()
            if r.Size != self.f2s_Pizarra.ClientSize:
                dc.DrawRectangleRect(r)
            rgn_iter.Next()

    #def onConfig(self, env):
    #    self.log.logger.info('onCofig')
    #    image = ObjConfig(self.Parent, self.log.getNivel())

    def _initBuffer(self):
        """Initialize the bitmap used for buffering the display."""
        print "_initBuffer"
        size = self.f2s_Pizarra.GetSize()
        self.buffer = wx.EmptyBitmap(max(1, size.width), max(1, size.height))
        dc = wx.BufferedDC(None, self.buffer)
        dc.SetBackground(wx.Brush(self.f2s_Pizarra.GetBackgroundColour()))
        dc.Clear()
        #self.drawContents(dc)
        del dc  # commits all drawing to the buffer
        self.saved_offset = self.f2s_Pizarra.CalcUnscrolledPosition(0, 0)
        self._reInitBuffer = False


class ObjInicio():
    def __init__(self, ActDebug=False):
        # Launch the application.
        #ActDebug = True
        #print "inicio"
        #if ActDebug:
        #    pass
        #    aplicacion = ObjDebug(redirect=True)
        #else:
        #    aplicacion = wx.PySimpleApp()
        #    frame_usuario = IncioInterface()
        #    frame_usuario.Maximize()
        #    frame_usuario.Show()
        aplicacion = wx.PySimpleApp()
        frame_usuario = IncioInterface()
        #frame_usuario.Maximize()
        frame_usuario.Show()
        aplicacion.MainLoop()
        aplicacion.Destroy()


if __name__ == '__main__':
    # Launch the application.
    j = ObjInicio(False)
|
normal
|
{
"blob_id": "9bd1fd2df7da068ac8aa4e6e24fe14d163a7e6b3",
"index": 2362,
"step-1": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n'''\nCreated on 7/02/2014\n\n@author: marco\nGenerador de ambientes FACIL 2014\n'''\n\n\nimport wx\n\nfrom formgenerador import FrameGeneral\nfrom Dial_Pagina import ObjPagina\n\n\nclass IncioInterface(FrameGeneral):\n\tdef __init__(self):\n\t\t#self.log = ObLog('Inicio programa')\n\t\t#self.log.setNivel(0) #debug\n\t\t\n\t\tFrameGeneral.__init__(self,None)\n\t\tFrameGeneral.SetTitle(self,u\"Administrador de Aplicacion FACIL\")\n\t\t#iconFile = u\"imagenes/2s.ico\"\n\t\t#FrameGeneral.SetIcon(self,wx.Icon(iconFile, wx.BITMAP_TYPE_ICO))\n\t\t#self.Bind(wx.EVT_MENU, self.onConfig,self.f2s_mConfig)\n\t\tself.__inicio()\n\t\tself.dibujarPizarra()\n\t\t\n\t\t#Eventos Menu\n\t\tself.Bind(wx.EVT_MENU,self.onDefPagina,self.f2s_menuTamPapel)\n\t\tself.f2s_Pizarra.Bind(wx.EVT_PAINT, self.onPaint)\n\t\t\n\tdef __inicio(self):\n\t\t#Asignacion Variables Globales\n\t\tself.Guadar=False\n\t\tself.borde=20\n\t\tself.AnchoPagina=8.5 * 72\n\t\tself.AltoPagina = 11 * 72\n\t\tself.objfacil=[]\n\t\tself.objFormatos=[]\n\t\tself._initBuffer()\n\n\t\t\n\tdef onDefPagina(self,event):\n\t\tpagina= ObjPagina(self.Parent)\n\t\tif pagina.orientar==None : \n\t\t\treturn\n\t\t\n\t\tprint pagina.orientar\n\t\tprint pagina.papel\n\t\t\n\t\tif pagina.orientar ==0 or pagina.orientar==2: \t#Vertical\n\t\t\tself.AnchoPagina=pagina.papel[0] * 72\n\t\t\tself.AltoPagina=pagina.papel[1] * 72\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t#Horizontal\n\t\t\tself.AnchoPagina=pagina.papel[1] * 72\n\t\t\tself.AltoPagina=pagina.papel[0] * 72\n\t\t\t\n\t\tprint self.AnchoPagina\n\t\tprint self.AltoPagina\n\t\t\n\t\tself.dibujarPizarra()\t\n\t\tself.wrapDC = lambda dc: dc\n\t\t\n\t\t\n\tdef dibujarPizarra(self):\n\t\tprint \"dibujar Pizarra\"\n\t\tself.f2s_Pizarra.SetBackgroundColour('white')\n\t\tself.f2s_Pizarra.EnableScrolling(True,True)\n\t\tself.f2s_Pizarra.SetScrollbars(20, 20, (self.AnchoPagina + self.borde *2) / 20, (self.AltoPagina + self.borde *2) / 20)\n\t\t\n\t\t\n\t\t\n\t\n\tdef onPaint(self, event):\n\t\tprint \"onPaint\"\n\t\t\"\"\"\n\t\tCalled when the window is exposed.\n\t\t\"\"\"\n\t\t# Create a buffered paint DC. 
It will create the real\n\t\t# wx.PaintDC and then blit the bitmap to it when dc is\n\t\t# deleted.\n\t\tdc = wx.BufferedPaintDC(self.f2s_Pizarra, self.buffer)\n\n\t\t# On Windows, if that's all we do things look a little rough\n\t\t# So in order to make scrolling more polished-looking\n\t\t# we iterate over the exposed regions and fill in unknown\n\t\t# areas with a fall-back pattern.\n\n\t\tdc.SetPen(wx.Pen(wx.BLUE, 1, wx.SOLID))\n\t\tdc.DrawRectangle(self.borde, self.borde, self.AnchoPagina, self.AltoPagina)\n\t\tprint self.borde, self.borde, self.AnchoPagina, self.AltoPagina\n\n\n\t\tif wx.Platform != '__WXMSW__':\n\t\t\treturn\n\t\t\n\t\tprint \"Windows?\"\n\n\n\t\t# First get the update rects and subtract off the part that\n\t\t# self.buffer has correct already\n\t\tregion = self.f2s_Pizarra.GetUpdateRegion()\n\t\tpanelRect = self.f2s_Pizarra.GetClientRect()\n\t\toffset = list(self.f2s_Pizarra.CalcUnscrolledPosition(0,0))\n\t\toffset[0] -= self.saved_offset[0]\n\t\toffset[1] -= self.saved_offset[1]\n\t\tregion.Subtract(-offset[0],- offset[1],panelRect.Width, panelRect.Height)\n\n\t\t# Now iterate over the remaining region rects and fill in with a pattern\n\t\trgn_iter = wx.RegionIterator(region)\n\t\tif rgn_iter.HaveRects():\n\t\t\tself.setBackgroundMissingFillStyle(dc)\n\t\t\toffset = self.f2s_Pizarra.CalcUnscrolledPosition(0,0)\n\t\twhile rgn_iter:\n\t\t\tr = rgn_iter.GetRect()\n\t\t\tif r.Size != self.f2s_Pizarra.ClientSize:\n\t\t\t\tdc.DrawRectangleRect(r)\n\t\t\trgn_iter.Next()\n\n\t\n\t\n\t#def onConfig(self,env):\n\t\t#self.log.logger.info('onCofig')\n\t\t#image=ObjConfig(self.Parent,self.log.getNivel())\n\n\n\n\n\tdef _initBuffer(self):\n\t\tprint \"_initBuffer\"\n\t\t\"\"\"Initialize the bitmap used for buffering the display.\"\"\"\n\t\tsize = self.f2s_Pizarra.GetSize()\n\t\tself.buffer = wx.EmptyBitmap(max(1,size.width),max(1,size.height))\n\t\tdc = wx.BufferedDC(None, self.buffer)\n\t\tdc.SetBackground(wx.Brush(self.f2s_Pizarra.GetBackgroundColour()))\n\t\tdc.Clear()\n\t\t#self.drawContents(dc)\n\t\tdel dc # commits all drawing to the buffer\n\t\tself.saved_offset = self.f2s_Pizarra.CalcUnscrolledPosition(0,0)\n\t\tself._reInitBuffer = False\n\n\n\nclass ObjInicio():\n\tdef __init__(self,ActDebug=False):\n\t\t# Lanzamos aplicación.\n\t\t#ActDebug=True\n\t\t# \n\t\t#print \"inicio\"\n\t\t#if ActDebug:\n\t\t#\tpass\n\t\t#\taplicacion = ObjDebug(redirect=True)\n\t\t#else:\n\t\t#\taplicacion=wx.PySimpleApp()\n\t\t#\tframe_usuario = IncioInterface()\t\t\t\n\t\t#\tframe_usuario.Maximize()\n\t\t#\tframe_usuario.Show()\n\t\t\n\t\taplicacion=wx.PySimpleApp()\n\t\tframe_usuario = IncioInterface()\t\t\t\n\t\t#frame_usuario.Maximize()\n\t\tframe_usuario.Show()\n\t\taplicacion.MainLoop()\t\t\n\t\taplicacion.Destroy()\n\n\n\n\n\n\nif __name__ == '__main__':\n\t# Lanzamos aplicación.\n\t\n\tj=ObjInicio(False)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
from flask import Flask, request, jsonify, Response, abort
from sesamutils import sesam_logger, VariablesConfig
from sesamutils.flask import serve

required_env_vars = ["SUBDOMAIN"]
optional_env_vars = ["DEBUG", "LOG_LEVEL", ("API_ROOT", "zendesk.com/api/v2/tickets/")]  # Default values can be given to optional environment variables by the use of tuples

app = Flask(__name__)
logger = sesam_logger('DemoMicroservice', app=app, timestamp=True)

orders = [
    {
        'id': 1,
        'Username': u'Unjudosely',
        'Orders': u'Thinkpad',
        'TotalSum': 8000
    },
    {
        'id': 2,
        'Username': u'Wimen1979',
        'Orders': u'MacbookPro',
        'TotalSum': 12000
    },
    {
        'id': 3,
        'Username': u'Gotin1984',
        'Orders': u'Chormebook',
        'TotalSum': 10000
    }
]


@app.route('/api/orders')
def get_orders():
    return jsonify({'orders': orders})


@app.route('/api/orders/update/<int:orderID>', methods=['GET', 'PUT', 'POST', 'DELETE'])
def update_ticket(orderID):
    try:
        if request.method != 'PUT':
            abort(405)  # Check closer what Flask abort does
            logger.error(f"ConnectionError issue while fetching tickets{request.method}")
        else:
            return jsonify(orders[orderID-1])
    except ConnectionError as e:
        logger.error(f"ConnectionError issue while fetching tickets{e}")
    except Exception as e:
        logger.error(f"Issue while fetching tickets from Zendesk {e}")


@app.route('/api/generic/<path:txt>', methods=['GET', 'PUT', 'POST', 'DELETE'])
def get_generic(txt):
    method = request.method
    if method == "POST" and request.is_json:
        returnList = []
        enteties = request.get_json()
        logger.info(type(enteties))
        for item in enteties:
            item['Hello'] = "Hello, this is a test."
            logger.info(type(item))
            returnList.append(item)
        return jsonify(returnList), 200, {"Content-Type": "application/json"}
    else:
        logger.info(f'Http method is {method}')
        return "Only JSON on POST is supported.", 500, {"Content-Type": "text/plain"}


@app.route('/api/show/config')
def get_config():
    return jsonify({'config': config})


if __name__ == "__main__":
    config = VariablesConfig(required_env_vars, optional_env_vars=optional_env_vars)
    # logger.info(str(config))
    # if not config.validate():
    #     os.sys.exit(1)
    serve(app)
|
normal
|
{
"blob_id": "bb58b4384eaeec45be1af865012c618af05f5a0a",
"index": 9667,
"step-1": "<mask token>\n\n\[email protected]('/api/orders/update/<int:orderID>', methods=['GET', 'PUT',\n 'POST', 'DELETE'])\ndef update_ticket(orderID):\n try:\n if request.method != 'PUT':\n abort(405)\n logger.error(\n f'ConnectionError issue while fetching tickets{request.method}'\n )\n else:\n return jsonify(orders[orderID - 1])\n except ConnectionError as e:\n logger.error(f'ConnectionError issue while fetching tickets{e}')\n except Exception as e:\n logger.error(f'Issue while fetching tickets from Zendesk {e}')\n\n\[email protected]('/api/generic/<path:txt>', methods=['GET', 'PUT', 'POST', 'DELETE'])\ndef get_generic(txt):\n method = request.method\n if method == 'POST' and request.is_json:\n returnList = []\n enteties = request.get_json()\n logger.info(type(enteties))\n for item in enteties:\n item['Hello'] = 'Hello, this is a test.'\n logger.info(type(item))\n returnList.append(item)\n return jsonify(returnList), 200, {'Content-Type': 'application/json'}\n else:\n logger.info(f'Http method is {method}')\n return 'Only JSON on POST is supported.', 500, {'Content-Type':\n 'text/plain'}\n\n\[email protected]('/api/show/config')\ndef get_config():\n return jsonify({'config': config})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/api/orders')\ndef get_orders():\n return jsonify({'orders': orders})\n\n\[email protected]('/api/orders/update/<int:orderID>', methods=['GET', 'PUT',\n 'POST', 'DELETE'])\ndef update_ticket(orderID):\n try:\n if request.method != 'PUT':\n abort(405)\n logger.error(\n f'ConnectionError issue while fetching tickets{request.method}'\n )\n else:\n return jsonify(orders[orderID - 1])\n except ConnectionError as e:\n logger.error(f'ConnectionError issue while fetching tickets{e}')\n except Exception as e:\n logger.error(f'Issue while fetching tickets from Zendesk {e}')\n\n\[email protected]('/api/generic/<path:txt>', methods=['GET', 'PUT', 'POST', 'DELETE'])\ndef get_generic(txt):\n method = request.method\n if method == 'POST' and request.is_json:\n returnList = []\n enteties = request.get_json()\n logger.info(type(enteties))\n for item in enteties:\n item['Hello'] = 'Hello, this is a test.'\n logger.info(type(item))\n returnList.append(item)\n return jsonify(returnList), 200, {'Content-Type': 'application/json'}\n else:\n logger.info(f'Http method is {method}')\n return 'Only JSON on POST is supported.', 500, {'Content-Type':\n 'text/plain'}\n\n\[email protected]('/api/show/config')\ndef get_config():\n return jsonify({'config': config})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/api/orders')\ndef get_orders():\n return jsonify({'orders': orders})\n\n\[email protected]('/api/orders/update/<int:orderID>', methods=['GET', 'PUT',\n 'POST', 'DELETE'])\ndef update_ticket(orderID):\n try:\n if request.method != 'PUT':\n abort(405)\n logger.error(\n f'ConnectionError issue while fetching tickets{request.method}'\n )\n else:\n return jsonify(orders[orderID - 1])\n except ConnectionError as e:\n logger.error(f'ConnectionError issue while fetching tickets{e}')\n except Exception as e:\n logger.error(f'Issue while fetching tickets from Zendesk {e}')\n\n\[email protected]('/api/generic/<path:txt>', methods=['GET', 'PUT', 'POST', 'DELETE'])\ndef get_generic(txt):\n method = request.method\n if method == 'POST' and request.is_json:\n returnList = []\n enteties = request.get_json()\n logger.info(type(enteties))\n for item in enteties:\n item['Hello'] = 'Hello, this is a test.'\n logger.info(type(item))\n returnList.append(item)\n return jsonify(returnList), 200, {'Content-Type': 'application/json'}\n else:\n logger.info(f'Http method is {method}')\n return 'Only JSON on POST is supported.', 500, {'Content-Type':\n 'text/plain'}\n\n\[email protected]('/api/show/config')\ndef get_config():\n return jsonify({'config': config})\n\n\nif __name__ == '__main__':\n config = VariablesConfig(required_env_vars, optional_env_vars=\n optional_env_vars)\n serve(app)\n",
"step-4": "import os\nfrom flask import Flask, request, jsonify, Response, abort\nfrom sesamutils import sesam_logger, VariablesConfig\nfrom sesamutils.flask import serve\nrequired_env_vars = ['SUBDOMAIN']\noptional_env_vars = ['DEBUG', 'LOG_LEVEL', ('API_ROOT',\n 'zendesk.com/api/v2/tickets/')]\napp = Flask(__name__)\nlogger = sesam_logger('DemoMicroservice', app=app, timestamp=True)\norders = [{'id': 1, 'Username': u'Unjudosely', 'Orders': u'Thinkpad',\n 'TotalSum': 8000}, {'id': 2, 'Username': u'Wimen1979', 'Orders':\n u'MacbookPro', 'TotalSum': 12000}, {'id': 3, 'Username': u'Gotin1984',\n 'Orders': u'Chormebook', 'TotalSum': 10000}]\n\n\[email protected]('/api/orders')\ndef get_orders():\n return jsonify({'orders': orders})\n\n\[email protected]('/api/orders/update/<int:orderID>', methods=['GET', 'PUT',\n 'POST', 'DELETE'])\ndef update_ticket(orderID):\n try:\n if request.method != 'PUT':\n abort(405)\n logger.error(\n f'ConnectionError issue while fetching tickets{request.method}'\n )\n else:\n return jsonify(orders[orderID - 1])\n except ConnectionError as e:\n logger.error(f'ConnectionError issue while fetching tickets{e}')\n except Exception as e:\n logger.error(f'Issue while fetching tickets from Zendesk {e}')\n\n\[email protected]('/api/generic/<path:txt>', methods=['GET', 'PUT', 'POST', 'DELETE'])\ndef get_generic(txt):\n method = request.method\n if method == 'POST' and request.is_json:\n returnList = []\n enteties = request.get_json()\n logger.info(type(enteties))\n for item in enteties:\n item['Hello'] = 'Hello, this is a test.'\n logger.info(type(item))\n returnList.append(item)\n return jsonify(returnList), 200, {'Content-Type': 'application/json'}\n else:\n logger.info(f'Http method is {method}')\n return 'Only JSON on POST is supported.', 500, {'Content-Type':\n 'text/plain'}\n\n\[email protected]('/api/show/config')\ndef get_config():\n return jsonify({'config': config})\n\n\nif __name__ == '__main__':\n config = VariablesConfig(required_env_vars, optional_env_vars=\n optional_env_vars)\n serve(app)\n",
"step-5": "import os\r\n\r\nfrom flask import Flask, request, jsonify, Response, abort\r\n\r\nfrom sesamutils import sesam_logger, VariablesConfig\r\nfrom sesamutils.flask import serve\r\n\r\nrequired_env_vars = [\"SUBDOMAIN\"]\r\noptional_env_vars = [\"DEBUG\", \"LOG_LEVEL\", (\"API_ROOT\",\"zendesk.com/api/v2/tickets/\")] # Default values can be given to optional environment variables by the use of tuples\r\n\r\napp = Flask(__name__)\r\n\r\nlogger = sesam_logger('DemoMicroservice', app=app,timestamp=True)\r\n\r\norders = [\r\n{\r\n 'id': 1,\r\n 'Username': u'Unjudosely',\r\n 'Orders': u'Thinkpad',\r\n 'TotalSum': 8000\r\n },\r\n {\r\n 'id': 2,\r\n 'Username': u'Wimen1979',\r\n 'Orders': u'MacbookPro',\r\n 'TotalSum': 12000\r\n },\r\n { 'id': 3,\r\n 'Username': u'Gotin1984',\r\n 'Orders': u'Chormebook',\r\n 'TotalSum': 10000\r\n }\r\n\r\n]\r\n\r\[email protected]('/api/orders')\r\ndef get_orders():\r\n return jsonify({'orders': orders})\r\n\r\[email protected]('/api/orders/update/<int:orderID>', methods=['GET','PUT','POST','DELETE']) \r\ndef update_ticket(orderID):\r\n try:\r\n if request.method != 'PUT':\r\n abort(405) # Check closer what Flask abort does\r\n logger.error(f\"ConnectionError issue while fetching tickets{request.method}\")\r\n else:\r\n return jsonify(orders[orderID-1])\r\n except ConnectionError as e:\r\n logger.error(f\"ConnectionError issue while fetching tickets{e}\")\r\n except Exception as e:\r\n logger.error(f\"Issue while fetching tickets from Zendesk {e}\")\r\n\r\[email protected]('/api/generic/<path:txt>', methods=['GET','PUT','POST','DELETE'])\r\ndef get_generic(txt):\r\n method = request.method\r\n if method == \"POST\" and request.is_json:\r\n returnList = []\r\n enteties = request.get_json()\r\n logger.info(type(enteties))\r\n for item in enteties:\r\n item['Hello'] = \"Hello, this is a test.\"\r\n logger.info(type(item))\r\n returnList.append(item)\r\n return jsonify(returnList) , 200, {\"Content-Type\": \"application/json\"}\r\n else: \r\n logger.info(f'Http method is {method}')\r\n return \"Only JSON on POST is supported.\", 500, {\"Content-Type\": \"text/plain\"}\r\n\r\[email protected]('/api/show/config')\r\ndef get_config():\r\n return jsonify({'config': config})\r\n\r\nif __name__ == \"__main__\":\r\n config = VariablesConfig(required_env_vars, optional_env_vars=optional_env_vars)\r\n # logger.info(str(config))\r\n # if not config.validate():\r\n # os.sys.exit(1)\r\n\r\n serve(app)",
"step-ids": [
3,
4,
5,
7,
8
]
}
|
[
3,
4,
5,
7,
8
] |
# -*- coding: utf-8 -*-
from django.db import models
from filebrowser.fields import FileBrowseField
from localisations.models import Ville, Lieu
from model_utils.managers import InheritanceManager
from services.models import Service
from equipements.models import Equipement
from localisations.models import Ville
from django.db.models import permalink


class Organisateur(models.Model):
    nom = models.CharField(max_length=255)
    slug = models.SlugField(max_length=255, unique=True)
    meta_description = models.CharField(max_length=200)
    description = models.TextField()
    logo = FileBrowseField("Image", max_length=255, directory="evenements",
                           extensions=[".jpg", ".png", ".gif", ".jpeg"], blank=True, null=True)
    url = models.URLField("Site de cet organisateur: (facultatif) ", blank=True)
    email = models.EmailField("Mail (facultatif)", max_length=255, blank=True)
    telephone = models.CharField(max_length=25)
    fax = models.CharField("Fax (facultatif)", max_length=25, blank=True)
    rue = models.CharField(max_length=255)
    ville = models.ForeignKey(Ville)

    # Not the prettiest design choice, but functionally the facilities, services and towns of the
    # agglomeration community can organise events, as can other outside entities, so ...
    orga_service = models.ForeignKey(Service, blank=True, null=True)
    orga_equipement = models.ForeignKey(Equipement, blank=True, null=True)
    orga_ville = models.ForeignKey(Ville, blank=True, null=True, related_name='orga_orga_ville')

    def __unicode__(self):
        return self.nom + " / " + self.ville.nom

    class Meta:
        verbose_name_plural = "Organisateurs"
        ordering = ['ville__nom']


class Saison(models.Model):
    nom = models.CharField(max_length=255)
    debut = models.DateTimeField("Date de début")
    fin = models.DateTimeField("date de fin")
    description = models.TextField()
    slug = models.SlugField(max_length=255, unique=True)

    objects = InheritanceManager()

    def __unicode__(self):
        return self.nom


class SaisonCulturelle(Saison):
    def __unicode__(self):
        return self.nom


class Festival(Saison):
    saison_culture = models.ForeignKey(SaisonCulturelle)

    def __unicode__(self):
        return self.nom


class TypeEvenement(models.Model):
    nom = models.CharField(max_length=255)
    slug = models.SlugField(unique=True)

    def __unicode__(self):
        return self.nom

    class Meta:
        ordering = ['nom']


EVENEMENT_CATEGORIES = (
    ('bib', u'Bibliothèques/Médiatèques'),
    ('crd', u'Conservatoires'),
    ('sty', u'Sothevy'),
    ('eco', u'Développement Économique'),
    ('aut', u'Autres'),
)

EVENEMENT_PUBLIC = (
    ('adt', u'Adulte'),
    ('enf', u'Enfant'),
    ('pub', u'Tout public'),
    ('ent', u'Entreprises'),
)


class Evenement(models.Model):
    nom = models.CharField(max_length=255)
    meta_description = models.CharField(max_length=200)
    description = models.TextField()
    debut = models.DateTimeField("Date de début")
    fin = models.DateTimeField("Date de fin")
    organisateur = models.ManyToManyField(Organisateur)
    image = FileBrowseField("Image (facultatif)", max_length=255, directory="evenements",
                            extensions=[".jpg", ".png", ".gif", ".jpeg", ".pdf"], blank=True, null=True)
    url = models.URLField("Un lien vers plus d'infos: (facultatif)", blank=True, null=True)
    url_reservation = models.URLField(
        "Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) ", blank=True, null=True)
    categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES, default='aut')
    public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC, default='pub')
    cadre_evenement = models.ForeignKey(Saison)
    type = models.ForeignKey(TypeEvenement)
    lieu = models.ForeignKey(Lieu)
    publish = models.BooleanField("Publié", default=False)
    page_accueil = models.BooleanField("Page d'accueil", default=False)
    complet = models.BooleanField("Ce spectacle est complet", default=False)
    slug = models.SlugField(max_length=255, unique=True)

    class Meta:
        ordering = ['-debut']

    def Organisateurs(self):
        return "\n;\n".join([s.nom for s in self.organisateur.all()])

    def __unicode__(self):
        return self.nom

    def monthyeardebut(self):
        return self.debut.strftime("%m") + "-" + self.debut.strftime("%Y")

    @permalink
    def get_absolute_url(self):
        return ('event-details', (), {'slug': self.cadre_evenement.slug, 'evenement_slug': self.slug})


class Prix(models.Model):
    intitule = models.CharField("Intitulé ", max_length=255, blank=False, null=False)
    prix = models.FloatField("Prix (séparateur point ex : 0.5 )", default=None, blank=False, null=True)
    evenement = models.ForeignKey(Evenement)

    class Meta:
        verbose_name_plural = u"Prix"


class DocumentAttache(models.Model):
    nom = models.CharField(max_length=255, verbose_name="Nom")
    document = FileBrowseField("Document", max_length=200, directory="evenements/docs",
                               extensions=[".pdf", ".doc", ".odt", ".docx", ".txt"])
    reference = models.ForeignKey(Evenement)


class EvenementBibManager(models.Manager):
    def get_queryset(self):
        return super(EvenementBibManager, self).get_queryset().filter(categorie='bib')


class EvenementBib(Evenement):
    objects = EvenementBibManager()

    class Meta:
        proxy = True
        verbose_name_plural = u"Événements Bibliothèques"
        verbose_name = u"Événement Bibliothèque"


class EvenementCrdManager(models.Manager):
    def get_queryset(self):
        return super(EvenementCrdManager, self).get_queryset().filter(categorie='crd')


class EvenementCrd(Evenement):
    objects = EvenementCrdManager()

    class Meta:
        proxy = True
        verbose_name_plural = u"Événements Conservatoires"
        verbose_name = u"Événement Conservatoire"


class EvenementDevEcoManager(models.Manager):
    def get_queryset(self):
        return super(EvenementDevEcoManager, self).get_queryset().filter(categorie='eco')


class EvenementDevEco(Evenement):
    objects = EvenementDevEcoManager()

    class Meta:
        proxy = True
        verbose_name_plural = u"Événements Dev Eco"
        verbose_name = u"Événement Dev Eco"
|
normal
|
{
"blob_id": "596fe474ae60dd6a06123df6fe246f7e947b3482",
"index": 1760,
"step-1": "<mask token>\n\n\nclass SaisonCulturelle(Saison):\n\n def __unicode__(self):\n return self.nom\n\n\nclass Festival(Saison):\n saison_culture = models.ForeignKey(SaisonCulturelle)\n\n def __unicode__(self):\n return self.nom\n\n\nclass TypeEvenement(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n\n def __unicode__(self):\n return self.nom\n\n\n class Meta:\n ordering = ['nom']\n\n\n<mask token>\n\n\nclass Evenement(models.Model):\n nom = models.CharField(max_length=255)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('Date de fin')\n organisateur = models.ManyToManyField(Organisateur)\n image = FileBrowseField('Image (facultatif)', max_length=255, directory\n ='evenements', extensions=['.jpg', '.png', '.gif', '.jpeg', '.pdf'],\n blank=True, null=True)\n url = models.URLField(\"Un lien vers plus d'infos: (facultatif)\", blank=\n True, null=True)\n url_reservation = models.URLField(\n \"Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) \"\n , blank=True, null=True)\n categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES,\n default='aut')\n public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC,\n default='pub')\n cadre_evenement = models.ForeignKey(Saison)\n type = models.ForeignKey(TypeEvenement)\n lieu = models.ForeignKey(Lieu)\n publish = models.BooleanField('Publié', default=False)\n page_accueil = models.BooleanField(\"Page d'accueil\", default=False)\n complet = models.BooleanField('Ce spectacle est complet', default=False)\n slug = models.SlugField(max_length=255, unique=True)\n\n\n class Meta:\n ordering = ['-debut']\n\n def Organisateurs(self):\n return '\\n;\\n'.join([s.nom for s in self.organisateur.all()])\n\n def __unicode__(self):\n return self.nom\n\n def monthyeardebut(self):\n return self.debut.strftime('%m') + '-' + self.debut.strftime('%Y')\n\n @permalink\n def get_absolute_url(self):\n return 'event-details', (), {'slug': self.cadre_evenement.slug,\n 'evenement_slug': self.slug}\n\n\nclass Prix(models.Model):\n intitule = models.CharField('Intitulé ', max_length=255, blank=False,\n null=False)\n prix = models.FloatField('Prix (séparateur point ex : 0.5 )', default=\n None, blank=False, null=True)\n evenement = models.ForeignKey(Evenement)\n\n\n class Meta:\n verbose_name_plural = u'Prix'\n\n\nclass DocumentAttache(models.Model):\n nom = models.CharField(max_length=255, verbose_name='Nom')\n document = FileBrowseField('Document', max_length=200, directory=\n 'evenements/docs', extensions=['.pdf', '.doc', '.odt', '.docx', '.txt']\n )\n reference = models.ForeignKey(Evenement)\n\n\nclass EvenementBibManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementBibManager, self).get_queryset().filter(categorie\n ='bib')\n\n\nclass EvenementBib(Evenement):\n objects = EvenementBibManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Bibliothèques'\n verbose_name = u'Événement Bibliothèque'\n\n\nclass EvenementCrdManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementCrdManager, self).get_queryset().filter(categorie\n ='crd')\n\n\nclass EvenementCrd(Evenement):\n objects = EvenementCrdManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Conservatoires'\n verbose_name = u'Événement Conservatoire'\n\n\nclass EvenementDevEcoManager(models.Manager):\n\n def 
get_queryset(self):\n return super(EvenementDevEcoManager, self).get_queryset().filter(\n categorie='eco')\n\n\nclass EvenementDevEco(Evenement):\n objects = EvenementDevEcoManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Dev Eco'\n verbose_name = u'Événement Dev Eco'\n",
"step-2": "<mask token>\n\n\nclass Saison(models.Model):\n nom = models.CharField(max_length=255)\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('date de fin')\n description = models.TextField()\n slug = models.SlugField(max_length=255, unique=True)\n objects = InheritanceManager()\n\n def __unicode__(self):\n return self.nom\n\n\nclass SaisonCulturelle(Saison):\n\n def __unicode__(self):\n return self.nom\n\n\nclass Festival(Saison):\n saison_culture = models.ForeignKey(SaisonCulturelle)\n\n def __unicode__(self):\n return self.nom\n\n\nclass TypeEvenement(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n\n def __unicode__(self):\n return self.nom\n\n\n class Meta:\n ordering = ['nom']\n\n\n<mask token>\n\n\nclass Evenement(models.Model):\n nom = models.CharField(max_length=255)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('Date de fin')\n organisateur = models.ManyToManyField(Organisateur)\n image = FileBrowseField('Image (facultatif)', max_length=255, directory\n ='evenements', extensions=['.jpg', '.png', '.gif', '.jpeg', '.pdf'],\n blank=True, null=True)\n url = models.URLField(\"Un lien vers plus d'infos: (facultatif)\", blank=\n True, null=True)\n url_reservation = models.URLField(\n \"Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) \"\n , blank=True, null=True)\n categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES,\n default='aut')\n public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC,\n default='pub')\n cadre_evenement = models.ForeignKey(Saison)\n type = models.ForeignKey(TypeEvenement)\n lieu = models.ForeignKey(Lieu)\n publish = models.BooleanField('Publié', default=False)\n page_accueil = models.BooleanField(\"Page d'accueil\", default=False)\n complet = models.BooleanField('Ce spectacle est complet', default=False)\n slug = models.SlugField(max_length=255, unique=True)\n\n\n class Meta:\n ordering = ['-debut']\n\n def Organisateurs(self):\n return '\\n;\\n'.join([s.nom for s in self.organisateur.all()])\n\n def __unicode__(self):\n return self.nom\n\n def monthyeardebut(self):\n return self.debut.strftime('%m') + '-' + self.debut.strftime('%Y')\n\n @permalink\n def get_absolute_url(self):\n return 'event-details', (), {'slug': self.cadre_evenement.slug,\n 'evenement_slug': self.slug}\n\n\nclass Prix(models.Model):\n intitule = models.CharField('Intitulé ', max_length=255, blank=False,\n null=False)\n prix = models.FloatField('Prix (séparateur point ex : 0.5 )', default=\n None, blank=False, null=True)\n evenement = models.ForeignKey(Evenement)\n\n\n class Meta:\n verbose_name_plural = u'Prix'\n\n\nclass DocumentAttache(models.Model):\n nom = models.CharField(max_length=255, verbose_name='Nom')\n document = FileBrowseField('Document', max_length=200, directory=\n 'evenements/docs', extensions=['.pdf', '.doc', '.odt', '.docx', '.txt']\n )\n reference = models.ForeignKey(Evenement)\n\n\nclass EvenementBibManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementBibManager, self).get_queryset().filter(categorie\n ='bib')\n\n\nclass EvenementBib(Evenement):\n objects = EvenementBibManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Bibliothèques'\n verbose_name = u'Événement Bibliothèque'\n\n\nclass EvenementCrdManager(models.Manager):\n\n def get_queryset(self):\n return 
super(EvenementCrdManager, self).get_queryset().filter(categorie\n ='crd')\n\n\nclass EvenementCrd(Evenement):\n objects = EvenementCrdManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Conservatoires'\n verbose_name = u'Événement Conservatoire'\n\n\nclass EvenementDevEcoManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementDevEcoManager, self).get_queryset().filter(\n categorie='eco')\n\n\nclass EvenementDevEco(Evenement):\n objects = EvenementDevEcoManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Dev Eco'\n verbose_name = u'Événement Dev Eco'\n",
"step-3": "<mask token>\n\n\nclass Organisateur(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name_plural = 'Organisateurs'\n ordering = ['ville__nom']\n\n\nclass Saison(models.Model):\n nom = models.CharField(max_length=255)\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('date de fin')\n description = models.TextField()\n slug = models.SlugField(max_length=255, unique=True)\n objects = InheritanceManager()\n\n def __unicode__(self):\n return self.nom\n\n\nclass SaisonCulturelle(Saison):\n\n def __unicode__(self):\n return self.nom\n\n\nclass Festival(Saison):\n saison_culture = models.ForeignKey(SaisonCulturelle)\n\n def __unicode__(self):\n return self.nom\n\n\nclass TypeEvenement(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n\n def __unicode__(self):\n return self.nom\n\n\n class Meta:\n ordering = ['nom']\n\n\n<mask token>\n\n\nclass Evenement(models.Model):\n nom = models.CharField(max_length=255)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('Date de fin')\n organisateur = models.ManyToManyField(Organisateur)\n image = FileBrowseField('Image (facultatif)', max_length=255, directory\n ='evenements', extensions=['.jpg', '.png', '.gif', '.jpeg', '.pdf'],\n blank=True, null=True)\n url = models.URLField(\"Un lien vers plus d'infos: (facultatif)\", blank=\n True, null=True)\n url_reservation = models.URLField(\n \"Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) \"\n , blank=True, null=True)\n categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES,\n default='aut')\n public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC,\n default='pub')\n cadre_evenement = models.ForeignKey(Saison)\n type = models.ForeignKey(TypeEvenement)\n lieu = models.ForeignKey(Lieu)\n publish = models.BooleanField('Publié', default=False)\n page_accueil = models.BooleanField(\"Page d'accueil\", default=False)\n complet = models.BooleanField('Ce spectacle est complet', default=False)\n slug = models.SlugField(max_length=255, unique=True)\n\n\n class Meta:\n ordering = ['-debut']\n\n def Organisateurs(self):\n return '\\n;\\n'.join([s.nom for s in self.organisateur.all()])\n\n def __unicode__(self):\n return self.nom\n\n def monthyeardebut(self):\n return self.debut.strftime('%m') + '-' + self.debut.strftime('%Y')\n\n @permalink\n def get_absolute_url(self):\n return 'event-details', (), {'slug': self.cadre_evenement.slug,\n 'evenement_slug': self.slug}\n\n\nclass Prix(models.Model):\n intitule = models.CharField('Intitulé ', max_length=255, blank=False,\n null=False)\n prix = models.FloatField('Prix (séparateur point ex : 0.5 )', default=\n None, blank=False, null=True)\n evenement = models.ForeignKey(Evenement)\n\n\n class Meta:\n verbose_name_plural = u'Prix'\n\n\nclass DocumentAttache(models.Model):\n nom = models.CharField(max_length=255, verbose_name='Nom')\n document = FileBrowseField('Document', max_length=200, directory=\n 'evenements/docs', extensions=['.pdf', '.doc', '.odt', '.docx', '.txt']\n )\n reference = models.ForeignKey(Evenement)\n\n\nclass EvenementBibManager(models.Manager):\n\n def get_queryset(self):\n return 
super(EvenementBibManager, self).get_queryset().filter(categorie\n ='bib')\n\n\nclass EvenementBib(Evenement):\n objects = EvenementBibManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Bibliothèques'\n verbose_name = u'Événement Bibliothèque'\n\n\nclass EvenementCrdManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementCrdManager, self).get_queryset().filter(categorie\n ='crd')\n\n\nclass EvenementCrd(Evenement):\n objects = EvenementCrdManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Conservatoires'\n verbose_name = u'Événement Conservatoire'\n\n\nclass EvenementDevEcoManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementDevEcoManager, self).get_queryset().filter(\n categorie='eco')\n\n\nclass EvenementDevEco(Evenement):\n objects = EvenementDevEcoManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Dev Eco'\n verbose_name = u'Événement Dev Eco'\n",
"step-4": "<mask token>\n\n\nclass Organisateur(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, unique=True)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n logo = FileBrowseField('Image', max_length=255, directory='evenements',\n extensions=['.jpg', '.png', '.gif', '.jpeg'], blank=True, null=True)\n url = models.URLField('Site de cet organisateur: (facultatif) ', blank\n =True)\n email = models.EmailField('Mail (facultatif)', max_length=255, blank=True)\n telephone = models.CharField(max_length=25)\n fax = models.CharField('Fax (facultatif)', max_length=25, blank=True)\n rue = models.CharField(max_length=255)\n ville = models.ForeignKey(Ville)\n orga_service = models.ForeignKey(Service, blank=True, null=True)\n orga_equipement = models.ForeignKey(Equipement, blank=True, null=True)\n orga_ville = models.ForeignKey(Ville, blank=True, null=True,\n related_name='orga_orga_ville')\n\n def __unicode__(self):\n return self.nom + ' / ' + self.ville.nom\n\n\n class Meta:\n verbose_name_plural = 'Organisateurs'\n ordering = ['ville__nom']\n\n\nclass Saison(models.Model):\n nom = models.CharField(max_length=255)\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('date de fin')\n description = models.TextField()\n slug = models.SlugField(max_length=255, unique=True)\n objects = InheritanceManager()\n\n def __unicode__(self):\n return self.nom\n\n\nclass SaisonCulturelle(Saison):\n\n def __unicode__(self):\n return self.nom\n\n\nclass Festival(Saison):\n saison_culture = models.ForeignKey(SaisonCulturelle)\n\n def __unicode__(self):\n return self.nom\n\n\nclass TypeEvenement(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n\n def __unicode__(self):\n return self.nom\n\n\n class Meta:\n ordering = ['nom']\n\n\n<mask token>\n\n\nclass Evenement(models.Model):\n nom = models.CharField(max_length=255)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('Date de fin')\n organisateur = models.ManyToManyField(Organisateur)\n image = FileBrowseField('Image (facultatif)', max_length=255, directory\n ='evenements', extensions=['.jpg', '.png', '.gif', '.jpeg', '.pdf'],\n blank=True, null=True)\n url = models.URLField(\"Un lien vers plus d'infos: (facultatif)\", blank=\n True, null=True)\n url_reservation = models.URLField(\n \"Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) \"\n , blank=True, null=True)\n categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES,\n default='aut')\n public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC,\n default='pub')\n cadre_evenement = models.ForeignKey(Saison)\n type = models.ForeignKey(TypeEvenement)\n lieu = models.ForeignKey(Lieu)\n publish = models.BooleanField('Publié', default=False)\n page_accueil = models.BooleanField(\"Page d'accueil\", default=False)\n complet = models.BooleanField('Ce spectacle est complet', default=False)\n slug = models.SlugField(max_length=255, unique=True)\n\n\n class Meta:\n ordering = ['-debut']\n\n def Organisateurs(self):\n return '\\n;\\n'.join([s.nom for s in self.organisateur.all()])\n\n def __unicode__(self):\n return self.nom\n\n def monthyeardebut(self):\n return self.debut.strftime('%m') + '-' + self.debut.strftime('%Y')\n\n @permalink\n def get_absolute_url(self):\n return 'event-details', (), 
{'slug': self.cadre_evenement.slug,\n 'evenement_slug': self.slug}\n\n\nclass Prix(models.Model):\n intitule = models.CharField('Intitulé ', max_length=255, blank=False,\n null=False)\n prix = models.FloatField('Prix (séparateur point ex : 0.5 )', default=\n None, blank=False, null=True)\n evenement = models.ForeignKey(Evenement)\n\n\n class Meta:\n verbose_name_plural = u'Prix'\n\n\nclass DocumentAttache(models.Model):\n nom = models.CharField(max_length=255, verbose_name='Nom')\n document = FileBrowseField('Document', max_length=200, directory=\n 'evenements/docs', extensions=['.pdf', '.doc', '.odt', '.docx', '.txt']\n )\n reference = models.ForeignKey(Evenement)\n\n\nclass EvenementBibManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementBibManager, self).get_queryset().filter(categorie\n ='bib')\n\n\nclass EvenementBib(Evenement):\n objects = EvenementBibManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Bibliothèques'\n verbose_name = u'Événement Bibliothèque'\n\n\nclass EvenementCrdManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementCrdManager, self).get_queryset().filter(categorie\n ='crd')\n\n\nclass EvenementCrd(Evenement):\n objects = EvenementCrdManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Conservatoires'\n verbose_name = u'Événement Conservatoire'\n\n\nclass EvenementDevEcoManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementDevEcoManager, self).get_queryset().filter(\n categorie='eco')\n\n\nclass EvenementDevEco(Evenement):\n objects = EvenementDevEcoManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Dev Eco'\n verbose_name = u'Événement Dev Eco'\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom filebrowser.fields import FileBrowseField\nfrom localisations.models import Ville, Lieu\nfrom model_utils.managers import InheritanceManager\nfrom services.models import Service\nfrom equipements.models import Equipement\nfrom localisations.models import Ville\nfrom django.db.models import permalink\n\n\nclass Organisateur(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, unique=True)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n logo = FileBrowseField(\"Image\", max_length=255, directory=\"evenements\",\n extensions=[\".jpg\", \".png\", \".gif\", \".jpeg\"], blank=True, null=True)\n url = models.URLField(\"Site de cet organisateur: (facultatif) \", blank=True)\n email = models.EmailField(\"Mail (facultatif)\", max_length=255, blank=True)\n telephone = models.CharField(max_length=25)\n fax = models.CharField(\"Fax (facultatif)\", max_length=25, blank=True)\n rue = models.CharField(max_length=255)\n ville = models.ForeignKey(Ville)\n\n # Un choix de design pas très beau, mais fonctionellement les équipements, services, communes de la\n # communauté d'agglo peuvent organiser des evènements ainsi que d'autres entités exterieures alors ...\n\n orga_service = models.ForeignKey(Service, blank=True, null=True)\n orga_equipement = models.ForeignKey(Equipement, blank=True, null=True)\n orga_ville = models.ForeignKey(Ville, blank=True, null=True, related_name='orga_orga_ville')\n\n def __unicode__(self):\n return self.nom + \" / \" + self.ville.nom\n\n class Meta:\n verbose_name_plural = \"Organisateurs\"\n ordering = ['ville__nom']\n\n\nclass Saison(models.Model):\n nom = models.CharField(max_length=255)\n debut = models.DateTimeField(\"Date de début\")\n fin = models.DateTimeField(\"date de fin\")\n description = models.TextField()\n slug = models.SlugField(max_length=255, unique=True)\n\n objects = InheritanceManager()\n\n def __unicode__(self):\n return self.nom\n\n\nclass SaisonCulturelle(Saison):\n def __unicode__(self):\n return self.nom\n\n\nclass Festival(Saison):\n saison_culture = models.ForeignKey(SaisonCulturelle)\n\n def __unicode__(self):\n return self.nom\n\n\nclass TypeEvenement(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n\n def __unicode__(self):\n return self.nom\n\n class Meta:\n ordering = ['nom']\n\n\nEVENEMENT_CATEGORIES = (\n ('bib', u'Bibliothèques/Médiatèques'),\n ('crd', u'Conservatoires'),\n ('sty', u'Sothevy'),\n ('eco', u'Développement Économique'),\n ('aut', u'Autres'),\n)\n\nEVENEMENT_PUBLIC = (\n ('adt', u'Adulte'),\n ('enf', u'Enfant'),\n ('pub', u'Tout public'),\n ('ent', u'Entreprises'),\n)\n\n\nclass Evenement(models.Model):\n nom = models.CharField(max_length=255)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n debut = models.DateTimeField(\"Date de début\")\n fin = models.DateTimeField(\"Date de fin\")\n organisateur = models.ManyToManyField(Organisateur)\n image = FileBrowseField(\"Image (facultatif)\", max_length=255, directory=\"evenements\",\n extensions=[\".jpg\", \".png\", \".gif\", \".jpeg\", \".pdf\"], blank=True, null=True)\n url = models.URLField(\"Un lien vers plus d'infos: (facultatif)\", blank=True, null=True)\n url_reservation = models.URLField(\n \"Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) \", blank=True, null=True)\n categorie = 
models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES, default='aut')\n public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC, default='pub')\n cadre_evenement = models.ForeignKey(Saison)\n type = models.ForeignKey(TypeEvenement)\n lieu = models.ForeignKey(Lieu)\n publish = models.BooleanField(\"Publié\", default=False)\n page_accueil = models.BooleanField(\"Page d'accueil\", default=False)\n complet = models.BooleanField(\"Ce spectacle est complet\", default=False)\n slug = models.SlugField(max_length=255, unique=True)\n\n class Meta:\n ordering = ['-debut']\n\n def Organisateurs(self):\n return \"\\n;\\n\".join([s.nom for s in self.organisateur.all()])\n\n def __unicode__(self):\n return self.nom\n\n def monthyeardebut(self):\n return self.debut.strftime(\"%m\") + \"-\" + self.debut.strftime(\"%Y\")\n\n @permalink\n def get_absolute_url(self):\n return ('event-details', (), {'slug': self.cadre_evenement.slug, 'evenement_slug': self.slug})\n\n\nclass Prix(models.Model):\n intitule = models.CharField(\"Intitulé \", max_length=255, blank=False, null=False)\n prix = models.FloatField(\"Prix (séparateur point ex : 0.5 )\", default=None, blank=False, null=True)\n evenement = models.ForeignKey(Evenement)\n\n class Meta:\n verbose_name_plural = u\"Prix\"\n\n\nclass DocumentAttache(models.Model):\n nom = models.CharField(max_length=255, verbose_name=\"Nom\")\n document = FileBrowseField(\"Document\", max_length=200, directory=\"evenements/docs\",\n extensions=[\".pdf\", \".doc\", \".odt\", \".docx\", \".txt\"])\n reference = models.ForeignKey(Evenement)\n\n\nclass EvenementBibManager(models.Manager):\n def get_queryset(self):\n return super(EvenementBibManager, self).get_queryset().filter(categorie='bib')\n\n\nclass EvenementBib(Evenement):\n objects = EvenementBibManager()\n\n class Meta:\n proxy = True\n verbose_name_plural = u\"Événements Bibliothèques\"\n verbose_name = u\"Événement Bibliothèque\"\n\n\nclass EvenementCrdManager(models.Manager):\n def get_queryset(self):\n return super(EvenementCrdManager, self).get_queryset().filter(categorie='crd')\n\n\nclass EvenementCrd(Evenement):\n objects = EvenementCrdManager()\n\n class Meta:\n proxy = True\n verbose_name_plural = u\"Événements Conservatoires\"\n verbose_name = u\"Événement Conservatoire\"\n\n\nclass EvenementDevEcoManager(models.Manager):\n def get_queryset(self):\n return super(EvenementDevEcoManager, self).get_queryset().filter(categorie='eco')\n\n\nclass EvenementDevEco(Evenement):\n objects = EvenementDevEcoManager()\n\n class Meta:\n proxy = True\n verbose_name_plural = u\"Événements Dev Eco\"\n verbose_name = u\"Événement Dev Eco\"",
"step-ids": [
30,
33,
34,
36,
39
]
}
|
[
30,
33,
34,
36,
39
] |
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmagic.registry import MODELS


def test_colorization_net():
    model_cfg = dict(
        type='ColorizationNet', input_nc=4, output_nc=2, norm_type='batch')

    # build model
    model = MODELS.build(model_cfg)

    # test attributes
    assert model.__class__.__name__ == 'ColorizationNet'

    # prepare data
    input_A = torch.rand(1, 1, 256, 256)
    input_B = torch.rand(1, 2, 256, 256)
    mask_B = torch.rand(1, 1, 256, 256)

    target_shape = (1, 2, 256, 256)

    # test on cpu
    (out_class, out_reg, feature_map) = model(input_A, input_B, mask_B)
    assert isinstance(feature_map, dict)
    assert feature_map['conv1_2'].shape == (1, 64, 256, 256) \
        and feature_map['out_reg'].shape == target_shape

    # test on gpu
    if torch.cuda.is_available():
        model = model.cuda()
        input_A = input_A.cuda()
        input_B = input_B.cuda()
        mask_B = mask_B.cuda()
        (out_class, out_reg, feature_map) = \
            model(input_A, input_B, mask_B)

        assert isinstance(feature_map, dict)
        for item in feature_map.keys():
            assert torch.is_tensor(feature_map[item])
|
normal
|
{
"blob_id": "94be205e516c1f1248b6028419c04c927236596e",
"index": 618,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_colorization_net():\n model_cfg = dict(type='ColorizationNet', input_nc=4, output_nc=2,\n norm_type='batch')\n model = MODELS.build(model_cfg)\n assert model.__class__.__name__ == 'ColorizationNet'\n input_A = torch.rand(1, 1, 256, 256)\n input_B = torch.rand(1, 2, 256, 256)\n mask_B = torch.rand(1, 1, 256, 256)\n target_shape = 1, 2, 256, 256\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n assert feature_map['conv1_2'].shape == (1, 64, 256, 256) and feature_map[\n 'out_reg'].shape == target_shape\n if torch.cuda.is_available():\n model = model.cuda()\n input_A = input_A.cuda()\n input_B = input_B.cuda()\n mask_B = mask_B.cuda()\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n for item in feature_map.keys():\n assert torch.is_tensor(feature_map[item])\n",
"step-3": "import torch\nfrom mmagic.registry import MODELS\n\n\ndef test_colorization_net():\n model_cfg = dict(type='ColorizationNet', input_nc=4, output_nc=2,\n norm_type='batch')\n model = MODELS.build(model_cfg)\n assert model.__class__.__name__ == 'ColorizationNet'\n input_A = torch.rand(1, 1, 256, 256)\n input_B = torch.rand(1, 2, 256, 256)\n mask_B = torch.rand(1, 1, 256, 256)\n target_shape = 1, 2, 256, 256\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n assert feature_map['conv1_2'].shape == (1, 64, 256, 256) and feature_map[\n 'out_reg'].shape == target_shape\n if torch.cuda.is_available():\n model = model.cuda()\n input_A = input_A.cuda()\n input_B = input_B.cuda()\n mask_B = mask_B.cuda()\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n for item in feature_map.keys():\n assert torch.is_tensor(feature_map[item])\n",
"step-4": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmagic.registry import MODELS\n\n\ndef test_colorization_net():\n\n model_cfg = dict(\n type='ColorizationNet', input_nc=4, output_nc=2, norm_type='batch')\n\n # build model\n model = MODELS.build(model_cfg)\n\n # test attributes\n assert model.__class__.__name__ == 'ColorizationNet'\n\n # prepare data\n input_A = torch.rand(1, 1, 256, 256)\n input_B = torch.rand(1, 2, 256, 256)\n mask_B = torch.rand(1, 1, 256, 256)\n\n target_shape = (1, 2, 256, 256)\n\n # test on cpu\n (out_class, out_reg, feature_map) = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n assert feature_map['conv1_2'].shape == (1, 64, 256, 256) \\\n and feature_map['out_reg'].shape == target_shape\n\n # test on gpu\n if torch.cuda.is_available():\n model = model.cuda()\n input_A = input_A.cuda()\n input_B = input_B.cuda()\n mask_B = mask_B.cuda()\n (out_class, out_reg, feature_map) = \\\n model(input_A, input_B, mask_B)\n\n assert isinstance(feature_map, dict)\n for item in feature_map.keys():\n assert torch.is_tensor(feature_map[item])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from matplotlib import cm
from datascience.visu.util import plt, save_fig, get_figure
from sklearn.metrics import roc_curve, auc, confusion_matrix
import numpy as np
y = np.array([
[0.8869, 1.],
[1.-0.578, 0.],
[0.7959, 1.],
[0.8618, 1.],
[1.-0.2278, 0.],
[0.6607, 1.],
[0.7006, 1.],
[1.-0.4859, 0.],
[0.6935, 1.],
[0.9048, 1.],
[0.6681, 1.],
[0.7585, 1.],
[1.-0.5063, 0.],
[1.-0.4516, 0.],
[1.-0.5158, 0.],
[1.-0.5873, 0.],
[1.-0.7682, 0.],
[0.8620, 1.],
[1-0.7337, 0.],
[0.9412, 1.],
[1.-0.5819, 0.],
[.2738, 1.],
[1.-.5136, 0.],
[.8819, 1.],
[1.-.4387, 0.],
[1.-.6257, 0.],
[.7857, 1.],
[1.-.3722, 0.],
[1.-0.8049, 0.],
[0.7864, 1.],
[1.-0.2372, 0.],
[0.7934, 1.],
[0.9583, 1.],
[0.9739, 1.],
[1.-0.3556, 0.],
[1.-0.2551, 0.],
[1.-0.4532, 0.],
[0.4605, 1.],
[0.7572, 1.],
[0.9496, 1.],
[0.8268, 1.],
[1.-0.4876, 0.],
[0.8523, 1.],
[1.-0.2629, 0.],
[1.-0.9021, 0.],
[0.6977, 1.],
[0.9142, 1.],
[1.-0.8175, 0.],
[1.-0.4865, 0.],
[0.9110, 1.],
[1.-0.2159, 0.],
[1.-0.6943, 0.],
[1.-0.2753, 0.],
[0.8590, 1.],
[0.8273, 1.],
[1.-0.5169, 0.],
[1.-0.7412, 0.]
])
fpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)
ax = plt('roc_curve').gca()
ax.set_xlim([-0.007, 1.0])
ax.set_ylim([0.0, 1.01])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))
ax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')
ax.plot(fpr, tpr, color='yellow', label='IArt')
ax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label='Perfect model')
ax.legend(loc="lower right")
ax = plt('confusion_matrix').gca()
y_threshold = (y > 0.7).astype(int)
matrix = confusion_matrix(y[:, 1], y_threshold[:, 0])
matrix = matrix / matrix.astype(float).sum(axis=1)  # builtin float: the np.float alias was removed in newer NumPy
im = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))
ax.axis('off')
get_figure('confusion_matrix').colorbar(im)
save_fig()
|
normal
|
{
"blob_id": "5b3514af839c132fda9a2e6e178ae62f780f291e",
"index": 3388,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label=\n 'Perfect model')\nax.legend(loc='lower right')\n<mask token>\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\nsave_fig()\n",
"step-3": "<mask token>\ny = np.array([[0.8869, 1.0], [1.0 - 0.578, 0.0], [0.7959, 1.0], [0.8618, \n 1.0], [1.0 - 0.2278, 0.0], [0.6607, 1.0], [0.7006, 1.0], [1.0 - 0.4859,\n 0.0], [0.6935, 1.0], [0.9048, 1.0], [0.6681, 1.0], [0.7585, 1.0], [1.0 -\n 0.5063, 0.0], [1.0 - 0.4516, 0.0], [1.0 - 0.5158, 0.0], [1.0 - 0.5873, \n 0.0], [1.0 - 0.7682, 0.0], [0.862, 1.0], [1 - 0.7337, 0.0], [0.9412, \n 1.0], [1.0 - 0.5819, 0.0], [0.2738, 1.0], [1.0 - 0.5136, 0.0], [0.8819,\n 1.0], [1.0 - 0.4387, 0.0], [1.0 - 0.6257, 0.0], [0.7857, 1.0], [1.0 - \n 0.3722, 0.0], [1.0 - 0.8049, 0.0], [0.7864, 1.0], [1.0 - 0.2372, 0.0],\n [0.7934, 1.0], [0.9583, 1.0], [0.9739, 1.0], [1.0 - 0.3556, 0.0], [1.0 -\n 0.2551, 0.0], [1.0 - 0.4532, 0.0], [0.4605, 1.0], [0.7572, 1.0], [\n 0.9496, 1.0], [0.8268, 1.0], [1.0 - 0.4876, 0.0], [0.8523, 1.0], [1.0 -\n 0.2629, 0.0], [1.0 - 0.9021, 0.0], [0.6977, 1.0], [0.9142, 1.0], [1.0 -\n 0.8175, 0.0], [1.0 - 0.4865, 0.0], [0.911, 1.0], [1.0 - 0.2159, 0.0], [\n 1.0 - 0.6943, 0.0], [1.0 - 0.2753, 0.0], [0.859, 1.0], [0.8273, 1.0], [\n 1.0 - 0.5169, 0.0], [1.0 - 0.7412, 0.0]])\nfpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)\nax = plt('roc_curve').gca()\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label=\n 'Perfect model')\nax.legend(loc='lower right')\nax = plt('confusion_matrix').gca()\ny_threshold = (y > 0.7).astype(int)\nmatrix = confusion_matrix(y[:, 1], y_threshold[:, 0])\nmatrix = matrix / matrix.astype(np.float).sum(axis=1)\nim = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\nsave_fig()\n",
"step-4": "from matplotlib import cm\nfrom datascience.visu.util import plt, save_fig, get_figure\nfrom sklearn.metrics import roc_curve, auc, confusion_matrix\nimport numpy as np\ny = np.array([[0.8869, 1.0], [1.0 - 0.578, 0.0], [0.7959, 1.0], [0.8618, \n 1.0], [1.0 - 0.2278, 0.0], [0.6607, 1.0], [0.7006, 1.0], [1.0 - 0.4859,\n 0.0], [0.6935, 1.0], [0.9048, 1.0], [0.6681, 1.0], [0.7585, 1.0], [1.0 -\n 0.5063, 0.0], [1.0 - 0.4516, 0.0], [1.0 - 0.5158, 0.0], [1.0 - 0.5873, \n 0.0], [1.0 - 0.7682, 0.0], [0.862, 1.0], [1 - 0.7337, 0.0], [0.9412, \n 1.0], [1.0 - 0.5819, 0.0], [0.2738, 1.0], [1.0 - 0.5136, 0.0], [0.8819,\n 1.0], [1.0 - 0.4387, 0.0], [1.0 - 0.6257, 0.0], [0.7857, 1.0], [1.0 - \n 0.3722, 0.0], [1.0 - 0.8049, 0.0], [0.7864, 1.0], [1.0 - 0.2372, 0.0],\n [0.7934, 1.0], [0.9583, 1.0], [0.9739, 1.0], [1.0 - 0.3556, 0.0], [1.0 -\n 0.2551, 0.0], [1.0 - 0.4532, 0.0], [0.4605, 1.0], [0.7572, 1.0], [\n 0.9496, 1.0], [0.8268, 1.0], [1.0 - 0.4876, 0.0], [0.8523, 1.0], [1.0 -\n 0.2629, 0.0], [1.0 - 0.9021, 0.0], [0.6977, 1.0], [0.9142, 1.0], [1.0 -\n 0.8175, 0.0], [1.0 - 0.4865, 0.0], [0.911, 1.0], [1.0 - 0.2159, 0.0], [\n 1.0 - 0.6943, 0.0], [1.0 - 0.2753, 0.0], [0.859, 1.0], [0.8273, 1.0], [\n 1.0 - 0.5169, 0.0], [1.0 - 0.7412, 0.0]])\nfpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)\nax = plt('roc_curve').gca()\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label=\n 'Perfect model')\nax.legend(loc='lower right')\nax = plt('confusion_matrix').gca()\ny_threshold = (y > 0.7).astype(int)\nmatrix = confusion_matrix(y[:, 1], y_threshold[:, 0])\nmatrix = matrix / matrix.astype(np.float).sum(axis=1)\nim = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\nsave_fig()\n",
"step-5": "from matplotlib import cm\n\nfrom datascience.visu.util import plt, save_fig, get_figure\n\nfrom sklearn.metrics import roc_curve, auc, confusion_matrix\n\nimport numpy as np\n\ny = np.array([\n [0.8869, 1.],\n [1.-0.578, 0.],\n [0.7959, 1.],\n [0.8618, 1.],\n [1.-0.2278, 0.],\n [0.6607, 1.],\n [0.7006, 1.],\n [1.-0.4859, 0.],\n [0.6935, 1.],\n [0.9048, 1.],\n [0.6681, 1.],\n [0.7585, 1.],\n [1.-0.5063, 0.],\n [1.-0.4516, 0.],\n [1.-0.5158, 0.],\n [1.-0.5873, 0.],\n [1.-0.7682, 0.],\n [0.8620, 1.],\n [1-0.7337, 0.],\n [0.9412, 1.],\n [1.-0.5819, 0.],\n [.2738, 1.],\n [1.-.5136, 0.],\n [.8819, 1.],\n [1.-.4387, 0.],\n [1.-.6257, 0.],\n [.7857, 1.],\n [1.-.3722, 0.],\n [1.-0.8049, 0.],\n [0.7864, 1.],\n [1.-0.2372, 0.],\n [0.7934, 1.],\n [0.9583, 1.],\n [0.9739, 1.],\n [1.-0.3556, 0.],\n [1.-0.2551, 0.],\n [1.-0.4532, 0.],\n [0.4605, 1.],\n [0.7572, 1.],\n [0.9496, 1.],\n [0.8268, 1.],\n [1.-0.4876, 0.],\n [0.8523, 1.],\n [1.-0.2629, 0.],\n [1.-0.9021, 0.],\n [0.6977, 1.],\n [0.9142, 1.],\n [1.-0.8175, 0.],\n [1.-0.4865, 0.],\n [0.9110, 1.],\n [1.-0.2159, 0.],\n [1.-0.6943, 0.],\n [1.-0.2753, 0.],\n [0.8590, 1.],\n [0.8273, 1.],\n [1.-0.5169, 0.],\n [1.-0.7412, 0.]\n])\n\nfpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)\n\nax = plt('roc_curve').gca()\n\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\n\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label='Perfect model')\n\nax.legend(loc=\"lower right\")\n\nax = plt('confusion_matrix').gca()\ny_threshold = (y > 0.7).astype(int)\n\nmatrix = confusion_matrix(y[:, 1], y_threshold[:, 0])\n\nmatrix = matrix / matrix.astype(np.float).sum(axis=1)\n\nim = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\n\nsave_fig()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This script is a wrapper for JSON primitives, such as validation.
# Using routines of this module permits us to replace the underlying
# implementation with a better one without disrupting client code.
#
# In particular, at the time of this writing, there weren't really great
# json validation packages available for python. We initially settled
# on validictory, but it has a number of shortcomings, such as:
# * format error diagnostic message isn't always helpful for diagnosis
# * doesn't support references
# * doesn't support application of defaults
# * doesn't support dependencies
#
# TODO: offer a combined json parsing/validation function that applies
# defaults from the schema
# TODO: duplicate of 'validate', 'ValidationError', 'loadJSONValueFromFile'
# in swarming.hypersearch.utils -- will want to remove that later
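# Illustrative usage of this wrapper (a sketch only; the schema and path below
# are hypothetical, not taken from this module's tests):
#
#   validate({"myBool": True}, schemaDict={"type": "object"})
#   validate(someValue, schemaPath="/path/to/schema.json")
#
# Both calls raise the ValidationError subclass defined below when the value
# fails json validation.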
import json
import math
import os
import validictory
class ValidationError(validictory.ValidationError):
pass
class NaNInvalidator(validictory.SchemaValidator):
""" validictory.SchemaValidator subclass to not accept NaN values as numbers.
Usage:
validate(value, schemaDict, validator_cls=NaNInvalidator)
"""
def validate_type_number(self, val):
return not math.isnan(val) \
and super(NaNInvalidator, self).validate_type_number(val)
def validate(value, **kwds):
""" Validate a python value against json schema:
validate(value, schemaPath)
validate(value, schemaDict)
value: python object to validate against the schema
The json schema may be specified either as a path of the file containing
the json schema or as a python dictionary using one of the
following keywords as arguments:
schemaPath: Path of file containing the json schema object.
schemaDict: Python dictionary containing the json schema object
Returns: nothing
Raises:
ValidationError when value fails json validation
"""
assert len(kwds.keys()) >= 1
assert 'schemaPath' in kwds or 'schemaDict' in kwds
schemaDict = None
if 'schemaPath' in kwds:
schemaPath = kwds.pop('schemaPath')
schemaDict = loadJsonValueFromFile(schemaPath)
elif 'schemaDict' in kwds:
schemaDict = kwds.pop('schemaDict')
try:
validictory.validate(value, schemaDict, **kwds)
except validictory.ValidationError as e:
raise ValidationError(e)
def loadJsonValueFromFile(inputFilePath):
""" Loads a json value from a file and converts it to the corresponding python
object.
inputFilePath:
Path of the json file;
Returns:
python value that represents the loaded json value
"""
with open(inputFilePath) as fileObj:
value = json.load(fileObj)
return value
def test():
"""
"""
import sys
schemaDict = {
"description":"JSON schema for jsonhelpers.py test code",
"type":"object",
"additionalProperties":False,
"properties":{
"myBool":{
"description":"Some boolean property",
"required":True,
"type":"boolean"
}
}
}
d = {
'myBool': False
}
print "Validating schemaDict method in positive test..."
validate(d, schemaDict=schemaDict)
print "ok\n"
print "Validating schemaDict method in negative test..."
try:
validate({}, schemaDict=schemaDict)
except ValidationError:
print "ok\n"
else:
print "FAILED\n"
sys.exit(1)
schemaPath = os.path.join(os.path.dirname(__file__), "testSchema.json")
print "Validating schemaPath method in positive test using %s..." % \
(os.path.abspath(schemaPath),)
validate(d, schemaPath=schemaPath)
print "ok\n"
print "Validating schemaPath method in negative test using %s..." % \
(os.path.abspath(schemaPath),)
try:
validate({}, schemaPath=schemaPath)
except ValidationError:
print "ok\n"
else:
print "FAILED\n"
sys.exit(1)
return
if __name__ == "__main__":
test()
|
normal
|
{
"blob_id": "f0f4573808253ca4bff808104afa9f350d305a9c",
"index": 3501,
"step-1": "# ----------------------------------------------------------------------\n# Numenta Platform for Intelligent Computing (NuPIC)\n# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement\n# with Numenta, Inc., for a separate license for this software code, the\n# following terms and conditions apply:\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU Affero Public License for more details.\n#\n# You should have received a copy of the GNU Affero Public License\n# along with this program. If not, see http://www.gnu.org/licenses.\n#\n# http://numenta.org/licenses/\n# ----------------------------------------------------------------------\n\n# This script is a wrapper for JSON primitives, such as validation.\n# Using routines of this module permits us to replace the underlying\n# implementation with a better one without disrupting client code.\n#\n# In particular, at the time of this writing, there weren't really great\n# json validation packages available for python. We initially settled\n# on validictory, but it has a number of shortcomings, such as:\n# * format error diagnostic message isn't always helpful for diagnosis\n# * doesn't support references\n# * doesn't support application of defaults\n# * doesn't support dependencies\n#\n# TODO: offer a combined json parsing/validation function that applies\n# defaults from the schema\n# TODO: duplicate of 'validate', 'ValidationError', 'loadJSONValueFromFile'\n# in swarming.hypersearch.utils -- will want to remove that later\n\nimport json\nimport math\nimport os\n\nimport validictory\n\n\nclass ValidationError(validictory.ValidationError):\n pass\n\n\nclass NaNInvalidator(validictory.SchemaValidator):\n \"\"\" validictory.SchemaValidator subclass to not accept NaN values as numbers.\n\n Usage:\n\n validate(value, schemaDict, validator_cls=NaNInvalidator)\n\n \"\"\"\n def validate_type_number(self, val):\n return not math.isnan(val) \\\n and super(NaNInvalidator, self).validate_type_number(val)\n\n\n\ndef validate(value, **kwds):\n \"\"\" Validate a python value against json schema:\n validate(value, schemaPath)\n validate(value, schemaDict)\n\n value: python object to validate against the schema\n\n The json schema may be specified either as a path of the file containing\n the json schema or as a python dictionary using one of the\n following keywords as arguments:\n schemaPath: Path of file containing the json schema object.\n schemaDict: Python dictionary containing the json schema object\n\n Returns: nothing\n\n Raises:\n ValidationError when value fails json validation\n \"\"\"\n\n assert len(kwds.keys()) >= 1\n assert 'schemaPath' in kwds or 'schemaDict' in kwds\n\n schemaDict = None\n if 'schemaPath' in kwds:\n schemaPath = kwds.pop('schemaPath')\n schemaDict = loadJsonValueFromFile(schemaPath)\n elif 'schemaDict' in kwds:\n schemaDict = kwds.pop('schemaDict')\n\n try:\n validictory.validate(value, schemaDict, **kwds)\n except validictory.ValidationError as e:\n raise ValidationError(e)\n\n\n\ndef loadJsonValueFromFile(inputFilePath):\n \"\"\" Loads a json value from a file and converts it to the corresponding python\n object.\n\n inputFilePath:\n Path of the json file;\n\n 
Returns:\n python value that represents the loaded json value\n\n \"\"\"\n with open(inputFilePath) as fileObj:\n value = json.load(fileObj)\n\n return value\n\n\n\ndef test():\n \"\"\"\n \"\"\"\n import sys\n\n schemaDict = {\n \"description\":\"JSON schema for jsonhelpers.py test code\",\n \"type\":\"object\",\n \"additionalProperties\":False,\n \"properties\":{\n \"myBool\":{\n \"description\":\"Some boolean property\",\n \"required\":True,\n \"type\":\"boolean\"\n }\n }\n }\n\n d = {\n 'myBool': False\n }\n\n print \"Validating schemaDict method in positive test...\"\n validate(d, schemaDict=schemaDict)\n print \"ok\\n\"\n\n print \"Validating schemaDict method in negative test...\"\n try:\n validate({}, schemaDict=schemaDict)\n except ValidationError:\n print \"ok\\n\"\n else:\n print \"FAILED\\n\"\n sys.exit(1)\n\n\n schemaPath = os.path.join(os.path.dirname(__file__), \"testSchema.json\")\n print \"Validating schemaPath method in positive test using %s...\" % \\\n (os.path.abspath(schemaPath),)\n validate(d, schemaPath=schemaPath)\n print \"ok\\n\"\n\n print \"Validating schemaPath method in negative test using %s...\" % \\\n (os.path.abspath(schemaPath),)\n try:\n validate({}, schemaPath=schemaPath)\n except ValidationError:\n print \"ok\\n\"\n else:\n print \"FAILED\\n\"\n sys.exit(1)\n\n\n\n return\n\n\n\nif __name__ == \"__main__\":\n test()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Write a Python program to print alphabet pattern 'G'.
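# For reference, hand-tracing the condition in the loops below suggests they
# render roughly this 7x7 'G' (an illustrative expectation; easiest to confirm
# by simply running the script):
#
#    *****
#   *     *
#   *
#   *  ****
#   *     *
#   *     *
#    *****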
result = ''
for row in range(0,7):
for col in range(0,7):
if ((col ==0) and (row !=0 and row !=6) or ((row ==0 or row == 6) and (col>0 and col<6))or ((row ==1 or row == 5 or row == 4)and (col ==6))or ((row ==3)and ((col!=2)and col!=1))):
result = result+'*'
else:
result = result+' '
result=result+'\n'
print(result)
|
normal
|
{
"blob_id": "e598091fc6c05b1d7f9f35f2ae58494fed53f9af",
"index": 5392,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor row in range(0, 7):\n for col in range(0, 7):\n if col == 0 and (row != 0 and row != 6) or (row == 0 or row == 6) and (\n col > 0 and col < 6) or (row == 1 or row == 5 or row == 4\n ) and col == 6 or row == 3 and (col != 2 and col != 1):\n result = result + '*'\n else:\n result = result + ' '\n result = result + '\\n'\nprint(result)\n",
"step-3": "result = ''\nfor row in range(0, 7):\n for col in range(0, 7):\n if col == 0 and (row != 0 and row != 6) or (row == 0 or row == 6) and (\n col > 0 and col < 6) or (row == 1 or row == 5 or row == 4\n ) and col == 6 or row == 3 and (col != 2 and col != 1):\n result = result + '*'\n else:\n result = result + ' '\n result = result + '\\n'\nprint(result)\n",
"step-4": "# Write a Python program to print alphabet pattern 'G'.\n\nresult = ''\nfor row in range(0,7):\n for col in range(0,7):\n if ((col ==0) and (row !=0 and row !=6) or ((row ==0 or row == 6) and (col>0 and col<6))or ((row ==1 or row == 5 or row == 4)and (col ==6))or ((row ==3)and ((col!=2)and col!=1))):\n result = result+'*'\n else:\n result = result+' '\n result=result+'\\n'\nprint(result)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
from logzero import logger as log
from extract import data_generator
from transform import create_dataframe
def bigquery(
datafile,
dataset=os.environ["BQDATASET"],
project=os.environ["GCPPROJECT"],
schema=[
{"name": "conversation", "type": "STRING"},
{"name": "id", "type": "INTEGER"},
{"name": "from", "type": "STRING"},
{"name": "text", "type": "STRING"},
{"name": "wordcount", "type": "INTEGER"},
{"name": "reply_to_message_id", "type": "INTEGER"},
{"name": "photo", "type": "STRING"},
{"name": "wait_time", "type": "FLOAT"},
],
):
log.info("creating bigquery dataset")
src = data_generator(datafile)
chatinfo = create_dataframe(src)
ts = chatinfo.to_gbq(
"{}".format(dataset),
project_id="{}".format(project),
if_exists="replace",
table_schema=schema,
)
if ts:
return True
else:
return False
|
normal
|
{
"blob_id": "d6046217308745b85455aed78734700b9622782c",
"index": 7559,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef bigquery(datafile, dataset=os.environ['BQDATASET'], project=os.environ[\n 'GCPPROJECT'], schema=[{'name': 'conversation', 'type': 'STRING'}, {\n 'name': 'id', 'type': 'INTEGER'}, {'name': 'from', 'type': 'STRING'}, {\n 'name': 'text', 'type': 'STRING'}, {'name': 'wordcount', 'type':\n 'INTEGER'}, {'name': 'reply_to_message_id', 'type': 'INTEGER'}, {'name':\n 'photo', 'type': 'STRING'}, {'name': 'wait_time', 'type': 'FLOAT'}]):\n log.info('creating bigquery dataset')\n src = data_generator(datafile)\n chatinfo = create_dataframe(src)\n ts = chatinfo.to_gbq('{}'.format(dataset), project_id='{}'.format(\n project), if_exists='replace', table_schema=schema)\n if ts:\n return True\n else:\n return False\n",
"step-3": "import os\nfrom logzero import logger as log\nfrom extract import data_generator\nfrom transform import create_dataframe\n\n\ndef bigquery(datafile, dataset=os.environ['BQDATASET'], project=os.environ[\n 'GCPPROJECT'], schema=[{'name': 'conversation', 'type': 'STRING'}, {\n 'name': 'id', 'type': 'INTEGER'}, {'name': 'from', 'type': 'STRING'}, {\n 'name': 'text', 'type': 'STRING'}, {'name': 'wordcount', 'type':\n 'INTEGER'}, {'name': 'reply_to_message_id', 'type': 'INTEGER'}, {'name':\n 'photo', 'type': 'STRING'}, {'name': 'wait_time', 'type': 'FLOAT'}]):\n log.info('creating bigquery dataset')\n src = data_generator(datafile)\n chatinfo = create_dataframe(src)\n ts = chatinfo.to_gbq('{}'.format(dataset), project_id='{}'.format(\n project), if_exists='replace', table_schema=schema)\n if ts:\n return True\n else:\n return False\n",
"step-4": "import os\nfrom logzero import logger as log\nfrom extract import data_generator\n\nfrom transform import create_dataframe\n\n\ndef bigquery(\n datafile,\n dataset=os.environ[\"BQDATASET\"],\n project=os.environ[\"GCPPROJECT\"],\n schema=[\n {\"name\": \"conversation\", \"type\": \"STRING\"},\n {\"name\": \"id\", \"type\": \"INTEGER\"},\n {\"name\": \"from\", \"type\": \"STRING\"},\n {\"name\": \"text\", \"type\": \"STRING\"},\n {\"name\": \"wordcount\", \"type\": \"INTEGER\"},\n {\"name\": \"reply_to_message_id\", \"type\": \"INTEGER\"},\n {\"name\": \"photo\", \"type\": \"STRING\"},\n {\"name\": \"wait_time\", \"type\": \"FLOAT\"},\n ],\n):\n\n log.info(\"creating bigquery dataset\")\n src = data_generator(datafile)\n chatinfo = create_dataframe(src)\n ts = chatinfo.to_gbq(\n \"{}\".format(dataset),\n project_id=\"{}\".format(project),\n if_exists=\"replace\",\n table_schema=schema,\n )\n if ts:\n return True\n else:\n return False\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 13:36:13 2019
@author: gennachiaro
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import pyrolite.plot
from pyrolite.plot.spider import spider
#read in data
df = pd.read_csv('/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv', index_col=0)
#set values
MG = df.loc[['ORA-2A-001','ORA-2A-005','ORA-2A-018','ORA-2A-031','ORA-2A-032','ORA-2A-035','ORA-2A-040']]
VCCR = df.loc [['ORA-5B-402','ORA-5B-404A','ORA-5B-404B','ORA-5B-405','ORA-5B-406','ORA-5B-407','ORA-5B-408-SITE2','ORA-5B-408-SITE7','ORA-5B-408-SITE8','ORA-5B-409','ORA-5B-411','ORA-5B-412A-CG','ORA-5B-412B-CG','ORA-5B-413','ORA-5B-414-CG','ORA-5B-415','ORA-5B-416','ORA-5B-417']]
FG = df.loc [['ORA-5B-410','ORA-5B-412A-FG','ORA-5B-412B-FG','ORA-5B-414-FG']]
FGCP = df.loc[['ORA-2A-002_Type1','ORA-2A-002_Type2','ORA-2A-002','ORA-2A-003','ORA-2A-016_Type1','ORA-2A-016-Type2','ORA-2A-016-Type3','ORA-2A-016-Type4','ORA-2A-023','ORA-2A-024','MINGLED1-ORA-2A-024','MINGLED2-ORA-2A-024','MINGLED3-ORA-2A-024']]
#plot diagrams
MG.pyroplot.spider(color="green",alpha = 0.5, mode = "fill")
VCCR.pyroplot.spider(color="red",alpha = 0.5, mode = "fill")
FG.pyroplot.spider(color="purple",alpha = 0.5, mode = "fill")
FGCP.pyroplot.spider(color="blue",alpha = 0.5, mode = "fill")
#set background
sns.set_style("darkgrid")
#plot graph
plt.show()
|
normal
|
{
"blob_id": "f6fee18898636ad6b0dc6d96d28dead4e09b8035",
"index": 1650,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsns.set()\n<mask token>\nMG.pyroplot.spider(color='green', alpha=0.5, mode='fill')\nVCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')\nFG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')\nFGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')\nsns.set_style('darkgrid')\nplt.show()\n",
"step-3": "<mask token>\nsns.set()\n<mask token>\ndf = pd.read_csv(\n '/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv'\n , index_col=0)\nMG = df.loc[['ORA-2A-001', 'ORA-2A-005', 'ORA-2A-018', 'ORA-2A-031',\n 'ORA-2A-032', 'ORA-2A-035', 'ORA-2A-040']]\nVCCR = df.loc[['ORA-5B-402', 'ORA-5B-404A', 'ORA-5B-404B', 'ORA-5B-405',\n 'ORA-5B-406', 'ORA-5B-407', 'ORA-5B-408-SITE2', 'ORA-5B-408-SITE7',\n 'ORA-5B-408-SITE8', 'ORA-5B-409', 'ORA-5B-411', 'ORA-5B-412A-CG',\n 'ORA-5B-412B-CG', 'ORA-5B-413', 'ORA-5B-414-CG', 'ORA-5B-415',\n 'ORA-5B-416', 'ORA-5B-417']]\nFG = df.loc[['ORA-5B-410', 'ORA-5B-412A-FG', 'ORA-5B-412B-FG', 'ORA-5B-414-FG']\n ]\nFGCP = df.loc[['ORA-2A-002_Type1', 'ORA-2A-002_Type2', 'ORA-2A-002',\n 'ORA-2A-003', 'ORA-2A-016_Type1', 'ORA-2A-016-Type2',\n 'ORA-2A-016-Type3', 'ORA-2A-016-Type4', 'ORA-2A-023', 'ORA-2A-024',\n 'MINGLED1-ORA-2A-024', 'MINGLED2-ORA-2A-024', 'MINGLED3-ORA-2A-024']]\nMG.pyroplot.spider(color='green', alpha=0.5, mode='fill')\nVCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')\nFG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')\nFGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')\nsns.set_style('darkgrid')\nplt.show()\n",
"step-4": "<mask token>\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nimport pyrolite.plot\nfrom pyrolite.plot.spider import spider\ndf = pd.read_csv(\n '/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv'\n , index_col=0)\nMG = df.loc[['ORA-2A-001', 'ORA-2A-005', 'ORA-2A-018', 'ORA-2A-031',\n 'ORA-2A-032', 'ORA-2A-035', 'ORA-2A-040']]\nVCCR = df.loc[['ORA-5B-402', 'ORA-5B-404A', 'ORA-5B-404B', 'ORA-5B-405',\n 'ORA-5B-406', 'ORA-5B-407', 'ORA-5B-408-SITE2', 'ORA-5B-408-SITE7',\n 'ORA-5B-408-SITE8', 'ORA-5B-409', 'ORA-5B-411', 'ORA-5B-412A-CG',\n 'ORA-5B-412B-CG', 'ORA-5B-413', 'ORA-5B-414-CG', 'ORA-5B-415',\n 'ORA-5B-416', 'ORA-5B-417']]\nFG = df.loc[['ORA-5B-410', 'ORA-5B-412A-FG', 'ORA-5B-412B-FG', 'ORA-5B-414-FG']\n ]\nFGCP = df.loc[['ORA-2A-002_Type1', 'ORA-2A-002_Type2', 'ORA-2A-002',\n 'ORA-2A-003', 'ORA-2A-016_Type1', 'ORA-2A-016-Type2',\n 'ORA-2A-016-Type3', 'ORA-2A-016-Type4', 'ORA-2A-023', 'ORA-2A-024',\n 'MINGLED1-ORA-2A-024', 'MINGLED2-ORA-2A-024', 'MINGLED3-ORA-2A-024']]\nMG.pyroplot.spider(color='green', alpha=0.5, mode='fill')\nVCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')\nFG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')\nFGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')\nsns.set_style('darkgrid')\nplt.show()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 18 13:36:13 2019\n\n@author: gennachiaro\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nimport pyrolite.plot\nfrom pyrolite.plot.spider import spider\n\n#read in data\ndf = pd.read_csv('/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv', index_col=0)\n\n#set values\nMG = df.loc[['ORA-2A-001','ORA-2A-005','ORA-2A-018','ORA-2A-031','ORA-2A-032','ORA-2A-035','ORA-2A-040']]\nVCCR = df.loc [['ORA-5B-402','ORA-5B-404A','ORA-5B-404B','ORA-5B-405','ORA-5B-406','ORA-5B-407','ORA-5B-408-SITE2','ORA-5B-408-SITE7','ORA-5B-408-SITE8','ORA-5B-409','ORA-5B-411','ORA-5B-412A-CG','ORA-5B-412B-CG','ORA-5B-413','ORA-5B-414-CG','ORA-5B-415','ORA-5B-416','ORA-5B-417']]\nFG = df.loc [['ORA-5B-410','ORA-5B-412A-FG','ORA-5B-412B-FG','ORA-5B-414-FG']]\nFGCP = df.loc[['ORA-2A-002_Type1','ORA-2A-002_Type2','ORA-2A-002','ORA-2A-003','ORA-2A-016_Type1','ORA-2A-016-Type2','ORA-2A-016-Type3','ORA-2A-016-Type4','ORA-2A-023','ORA-2A-024','MINGLED1-ORA-2A-024','MINGLED2-ORA-2A-024','MINGLED3-ORA-2A-024']]\n\n#plot diagrams\nMG.pyroplot.spider(color=\"green\",alpha = 0.5, mode = \"fill\")\n\nVCCR.pyroplot.spider(color=\"red\",alpha = 0.5, mode = \"fill\")\n\nFG.pyroplot.spider(color=\"purple\",alpha = 0.5, mode = \"fill\")\n\nFGCP.pyroplot.spider(color=\"blue\",alpha = 0.5, mode = \"fill\")\n\n\n#set background\nsns.set_style(\"darkgrid\")\n\n\n#plot graph\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib.auth import get_user_model
from rest_framework import generics
from rest_framework.response import Response
from rest_framework_jwt.settings import api_settings
from status.api.serializers import StatusInlineUserSerializer
from status.api.views import StatusAPIView
from status.models import Status
from .serializers import UserDetailSerializer
User = get_user_model()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER
class UserDetailAPIView(generics.RetrieveAPIView):
queryset = User.objects.filter(is_active=True)
serializer_class = UserDetailSerializer
lookup_field = 'username'
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = ('id',)
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get("username")
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({"detail": "Not allowed here"})
|
normal
|
{
"blob_id": "472a79767f5dc7dc3cd03d89999d322b3885dcbf",
"index": 1220,
"step-1": "<mask token>\n\n\nclass UserStatusAPIView(StatusAPIView):\n serializer_class = StatusInlineUserSerializer\n search_fields = 'id',\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get('username')\n if username is None:\n return Status.objects.none()\n return Status.objects.filter(user__username=username)\n\n def post(self, request, *args, **kwargs):\n return Response({'detail': 'Not allowed here'})\n",
"step-2": "<mask token>\n\n\nclass UserDetailAPIView(generics.RetrieveAPIView):\n queryset = User.objects.filter(is_active=True)\n serializer_class = UserDetailSerializer\n lookup_field = 'username'\n\n\nclass UserStatusAPIView(StatusAPIView):\n serializer_class = StatusInlineUserSerializer\n search_fields = 'id',\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get('username')\n if username is None:\n return Status.objects.none()\n return Status.objects.filter(user__username=username)\n\n def post(self, request, *args, **kwargs):\n return Response({'detail': 'Not allowed here'})\n",
"step-3": "<mask token>\nUser = get_user_model()\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\njwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n\n\nclass UserDetailAPIView(generics.RetrieveAPIView):\n queryset = User.objects.filter(is_active=True)\n serializer_class = UserDetailSerializer\n lookup_field = 'username'\n\n\nclass UserStatusAPIView(StatusAPIView):\n serializer_class = StatusInlineUserSerializer\n search_fields = 'id',\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get('username')\n if username is None:\n return Status.objects.none()\n return Status.objects.filter(user__username=username)\n\n def post(self, request, *args, **kwargs):\n return Response({'detail': 'Not allowed here'})\n",
"step-4": "from django.contrib.auth import get_user_model\nfrom rest_framework import generics\nfrom rest_framework.response import Response\nfrom rest_framework_jwt.settings import api_settings\nfrom status.api.serializers import StatusInlineUserSerializer\nfrom status.api.views import StatusAPIView\nfrom status.models import Status\nfrom .serializers import UserDetailSerializer\nUser = get_user_model()\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\njwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n\n\nclass UserDetailAPIView(generics.RetrieveAPIView):\n queryset = User.objects.filter(is_active=True)\n serializer_class = UserDetailSerializer\n lookup_field = 'username'\n\n\nclass UserStatusAPIView(StatusAPIView):\n serializer_class = StatusInlineUserSerializer\n search_fields = 'id',\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get('username')\n if username is None:\n return Status.objects.none()\n return Status.objects.filter(user__username=username)\n\n def post(self, request, *args, **kwargs):\n return Response({'detail': 'Not allowed here'})\n",
"step-5": "from django.contrib.auth import get_user_model\nfrom rest_framework import generics\nfrom rest_framework.response import Response\nfrom rest_framework_jwt.settings import api_settings\n\nfrom status.api.serializers import StatusInlineUserSerializer\nfrom status.api.views import StatusAPIView\nfrom status.models import Status\n\nfrom .serializers import UserDetailSerializer\n\nUser = get_user_model()\n\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\njwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n\n\nclass UserDetailAPIView(generics.RetrieveAPIView):\n queryset = User.objects.filter(is_active=True)\n serializer_class = UserDetailSerializer\n lookup_field = 'username'\n\n\nclass UserStatusAPIView(StatusAPIView):\n serializer_class = StatusInlineUserSerializer\n\n search_fields = ('id',)\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get(\"username\")\n if username is None:\n return Status.objects.none()\n return Status.objects.filter(user__username=username)\n\n def post(self, request, *args, **kwargs):\n return Response({\"detail\": \"Not allowed here\"})\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from meizi.items import MeiziItem
class MztspiderSpider(CrawlSpider):
name = 'mztspider2'
allowed_domains = ['meizitu.com']
start_urls = ['http://www.meizitu.com/a/list_1_%s.html' % urlnum for urlnum in range(1, 92)]
rules = (
Rule(LinkExtractor(allow='meizitu.com/a', restrict_xpaths='//ul[@class="wp-list clearfix"]/li/div/div/a'),
callback='parse_item', follow=True),
)
def parse_item(self, response):
sel = Selector(response)
srcs = sel.xpath('//div[@id="picture"]/p/img/@src').extract()
item = MeiziItem()
item['image_urls'] = srcs
yield item
|
normal
|
{
"blob_id": "a1ce43c3f64667619c4964bc4dc67215d3ecc1a0",
"index": 9215,
"step-1": "<mask token>\n\n\nclass MztspiderSpider(CrawlSpider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MztspiderSpider(CrawlSpider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse_item(self, response):\n sel = Selector(response)\n srcs = sel.xpath('//div[@id=\"picture\"]/p/img/@src').extract()\n item = MeiziItem()\n item['image_urls'] = srcs\n yield item\n",
"step-3": "<mask token>\n\n\nclass MztspiderSpider(CrawlSpider):\n name = 'mztspider2'\n allowed_domains = ['meizitu.com']\n start_urls = [('http://www.meizitu.com/a/list_1_%s.html' % urlnum) for\n urlnum in range(1, 92)]\n rules = Rule(LinkExtractor(allow='meizitu.com/a', restrict_xpaths=\n '//ul[@class=\"wp-list clearfix\"]/li/div/div/a'), callback=\n 'parse_item', follow=True),\n\n def parse_item(self, response):\n sel = Selector(response)\n srcs = sel.xpath('//div[@id=\"picture\"]/p/img/@src').extract()\n item = MeiziItem()\n item['image_urls'] = srcs\n yield item\n",
"step-4": "import scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.selector import Selector\nfrom meizi.items import MeiziItem\n\n\nclass MztspiderSpider(CrawlSpider):\n name = 'mztspider2'\n allowed_domains = ['meizitu.com']\n start_urls = [('http://www.meizitu.com/a/list_1_%s.html' % urlnum) for\n urlnum in range(1, 92)]\n rules = Rule(LinkExtractor(allow='meizitu.com/a', restrict_xpaths=\n '//ul[@class=\"wp-list clearfix\"]/li/div/div/a'), callback=\n 'parse_item', follow=True),\n\n def parse_item(self, response):\n sel = Selector(response)\n srcs = sel.xpath('//div[@id=\"picture\"]/p/img/@src').extract()\n item = MeiziItem()\n item['image_urls'] = srcs\n yield item\n",
"step-5": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.selector import Selector\nfrom meizi.items import MeiziItem\n\n\nclass MztspiderSpider(CrawlSpider):\n name = 'mztspider2'\n allowed_domains = ['meizitu.com']\n start_urls = ['http://www.meizitu.com/a/list_1_%s.html' % urlnum for urlnum in range(1, 92)]\n\n rules = (\n Rule(LinkExtractor(allow='meizitu.com/a', restrict_xpaths='//ul[@class=\"wp-list clearfix\"]/li/div/div/a'),\n callback='parse_item', follow=True),\n )\n\n def parse_item(self, response):\n sel = Selector(response)\n srcs = sel.xpath('//div[@id=\"picture\"]/p/img/@src').extract()\n item = MeiziItem()\n item['image_urls'] = srcs\n yield item\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#Some people are standing in a queue. A selection process follows a rule where people standing on even positions are selected. Of the selected people a new queue is formed, and again only the people on even positions of this queue are selected. This continues until one person is left. Find the position of that person in the original queue.
#Input:
#The first line of input contains an integer T denoting the number of test cases. The first line of each test case is N, the number of people standing in the queue.
#Output:
#Print the position (in the original queue) of the person who is left.
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
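# Worked example (illustrative): for n = 10 the queue thins out as
#   [1..10] -> [2, 4, 6, 8, 10] -> [4, 8] -> [8]
# so the survivor stood at position 8, i.e. the largest power of two <= n.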
def even(n):
    # The survivor stands at the largest power of two that does not exceed n.
    if n == 0:
        return
    elif n == 1:
        return 1
    elif n == 2:
        return 2
    else:
        for i in reversed(range(n+1)):
            if 2**i <= n:
                return 2**i
t = int(input("Enter number of test cases:"))
arr = []
for i in range(t):
n = int(input())
ans = even(n)
arr.append(ans)
for i in range(len(arr)):
print(arr[i], end = ' ')
# --------------------------------------------------------------------------------------------------------------------
import math
t = int(input())
for i in range(t):
n =int(input())
print(pow(2,int(math.log(n,2))))
|
normal
|
{
"blob_id": "358fd8efd5c3823255ab64d5f8b88b343415ed0e",
"index": 2708,
"step-1": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\n<mask token>\n",
"step-2": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\n<mask token>\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)):\n print(arr[i], end=' ')\n<mask token>\nfor i in range(t):\n n = int(input())\n print(pow(2, int(math.log(n, 2))))\n",
"step-3": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\nt = int(input('Enter number of test cases:'))\narr = []\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)):\n print(arr[i], end=' ')\n<mask token>\nt = int(input())\nfor i in range(t):\n n = int(input())\n print(pow(2, int(math.log(n, 2))))\n",
"step-4": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\nt = int(input('Enter number of test cases:'))\narr = []\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)):\n print(arr[i], end=' ')\nimport math\nt = int(input())\nfor i in range(t):\n n = int(input())\n print(pow(2, int(math.log(n, 2))))\n",
"step-5": "#Some people are standing in a queue. A selection process follows a rule where people standing on even positions are selected. Of the selected people a queue is formed and again out of these only people on even position are selected. This continues until we are left with one person. Find out the position of that person in the original queue.\n\n#Input:\n#The first line of input contains an integer T denoting the number of test cases.The first line of each test case is N,number of people standing in a queue.\n\n#Output:\n#Print the position(original queue) of that person who is left.\n#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef even(n):\n if n == 0 or n == 1:\n return \n elif n == 2:\n return 2\n else: \n for i in reversed(range(n+1)):\n if 2**i < n:\n return 2**i\nt = int(input(\"Enter number of test cases:\"))\narr = []\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)): \n print(arr[i], end = ' ')\n# --------------------------------------------------------------------------------------------------------------------\n\nimport math\nt = int(input())\nfor i in range(t):\n n =int(input())\n print(pow(2,int(math.log(n,2))))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class Box:
def __init__(self, id, capacity):
self.id = id
self.dogs = []
self.capacity = capacity
@property
def status(self):
return len(self.dogs)
def add_dog(self, dog):
if self.capacity > self.status:
self.dogs.append(dog)
return True
else:
return False
def remove_dog(self, dog):
if self.status > 0:
self.dogs.remove(dog)
return True
else:
return False
|
normal
|
{
"blob_id": "5f24c5a21dc151e9efbbfaff0fe1e71e65d1eb67",
"index": 1590,
"step-1": "class Box:\n\n def __init__(self, id, capacity):\n self.id = id\n self.dogs = []\n self.capacity = capacity\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class Box:\n\n def __init__(self, id, capacity):\n self.id = id\n self.dogs = []\n self.capacity = capacity\n <mask token>\n <mask token>\n\n def remove_dog(self, dog):\n if self.status > 0:\n self.dogs.remove(dog)\n return True\n else:\n return False\n",
"step-3": "class Box:\n\n def __init__(self, id, capacity):\n self.id = id\n self.dogs = []\n self.capacity = capacity\n\n @property\n def status(self):\n return len(self.dogs)\n <mask token>\n\n def remove_dog(self, dog):\n if self.status > 0:\n self.dogs.remove(dog)\n return True\n else:\n return False\n",
"step-4": "class Box:\n\n def __init__(self, id, capacity):\n self.id = id\n self.dogs = []\n self.capacity = capacity\n\n @property\n def status(self):\n return len(self.dogs)\n\n def add_dog(self, dog):\n if self.capacity > self.status:\n self.dogs.append(dog)\n return True\n else:\n return False\n\n def remove_dog(self, dog):\n if self.status > 0:\n self.dogs.remove(dog)\n return True\n else:\n return False\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
/Users/jhajhajhajha1/anaconda/lib/python3.6/codecs.py
|
normal
|
{
"blob_id": "0354445d255cc79d3cb9242f82d37e035ff61788",
"index": 2410,
"step-1": "/Users/jhajhajhajha1/anaconda/lib/python3.6/codecs.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from . import metrics
from . import matrices
from .pairwise import apply_pairwise_rect, apply_pairwise_sparse, apply_running_rect
from . import numba_tools as nb_tools
from . import running_metrics as running
__all__ = ['metrics', 'apply_pairwise_rect', 'apply_pairwise_sparse',
'apply_running_rect', 'nb_tools', 'matrices', 'running']
|
normal
|
{
"blob_id": "3605e8b8b2f8f49cc7c40fc436c147578b12091c",
"index": 6026,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['metrics', 'apply_pairwise_rect', 'apply_pairwise_sparse',\n 'apply_running_rect', 'nb_tools', 'matrices', 'running']\n",
"step-3": "from . import metrics\nfrom . import matrices\nfrom .pairwise import apply_pairwise_rect, apply_pairwise_sparse, apply_running_rect\nfrom . import numba_tools as nb_tools\nfrom . import running_metrics as running\n__all__ = ['metrics', 'apply_pairwise_rect', 'apply_pairwise_sparse',\n 'apply_running_rect', 'nb_tools', 'matrices', 'running']\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from gatheros_event.views.mixins import AccountMixin
from gatheros_subscription.helpers.extract import (
create_extract,
get_extract_file_name,
)
from gatheros_subscription.models import Subscription
class ExtractSubscriptionPDFView(AccountMixin):
subscription = None
def pre_dispatch(self, request):
uuid = self.kwargs.get('pk')
self.subscription = get_object_or_404(Subscription,
uuid=uuid)
return super().pre_dispatch(request)
def get_permission_denied_url(self):
""" Resgata url quando permissão negada. """
return reverse('subscription:subscription-view', kwargs={
'event_pk': self.kwargs.get('event_pk'),
'pk': self.kwargs.get('pk'),
})
def get(self, request, *args, **kwargs):
pdf = create_extract(subscription=self.subscription,
user=self.request.user)
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'inline; filename="{}"'.format(
get_extract_file_name(subscription=self.subscription)
)
return response
def can_access(self):
return self.subscription.lot.price > 0
|
normal
|
{
"blob_id": "431f109903e014a29aed7f125d47f327e17b9f65",
"index": 4366,
"step-1": "<mask token>\n\n\nclass ExtractSubscriptionPDFView(AccountMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ExtractSubscriptionPDFView(AccountMixin):\n <mask token>\n\n def pre_dispatch(self, request):\n uuid = self.kwargs.get('pk')\n self.subscription = get_object_or_404(Subscription, uuid=uuid)\n return super().pre_dispatch(request)\n\n def get_permission_denied_url(self):\n \"\"\" Resgata url quando permissão negada. \"\"\"\n return reverse('subscription:subscription-view', kwargs={'event_pk':\n self.kwargs.get('event_pk'), 'pk': self.kwargs.get('pk')})\n <mask token>\n\n def can_access(self):\n return self.subscription.lot.price > 0\n",
"step-3": "<mask token>\n\n\nclass ExtractSubscriptionPDFView(AccountMixin):\n subscription = None\n\n def pre_dispatch(self, request):\n uuid = self.kwargs.get('pk')\n self.subscription = get_object_or_404(Subscription, uuid=uuid)\n return super().pre_dispatch(request)\n\n def get_permission_denied_url(self):\n \"\"\" Resgata url quando permissão negada. \"\"\"\n return reverse('subscription:subscription-view', kwargs={'event_pk':\n self.kwargs.get('event_pk'), 'pk': self.kwargs.get('pk')})\n\n def get(self, request, *args, **kwargs):\n pdf = create_extract(subscription=self.subscription, user=self.\n request.user)\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"{}\"'.format(\n get_extract_file_name(subscription=self.subscription))\n return response\n\n def can_access(self):\n return self.subscription.lot.price > 0\n",
"step-4": "from django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom gatheros_event.views.mixins import AccountMixin\nfrom gatheros_subscription.helpers.extract import create_extract, get_extract_file_name\nfrom gatheros_subscription.models import Subscription\n\n\nclass ExtractSubscriptionPDFView(AccountMixin):\n subscription = None\n\n def pre_dispatch(self, request):\n uuid = self.kwargs.get('pk')\n self.subscription = get_object_or_404(Subscription, uuid=uuid)\n return super().pre_dispatch(request)\n\n def get_permission_denied_url(self):\n \"\"\" Resgata url quando permissão negada. \"\"\"\n return reverse('subscription:subscription-view', kwargs={'event_pk':\n self.kwargs.get('event_pk'), 'pk': self.kwargs.get('pk')})\n\n def get(self, request, *args, **kwargs):\n pdf = create_extract(subscription=self.subscription, user=self.\n request.user)\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"{}\"'.format(\n get_extract_file_name(subscription=self.subscription))\n return response\n\n def can_access(self):\n return self.subscription.lot.price > 0\n",
"step-5": "from django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\n\nfrom gatheros_event.views.mixins import AccountMixin\nfrom gatheros_subscription.helpers.extract import (\n create_extract,\n get_extract_file_name,\n)\nfrom gatheros_subscription.models import Subscription\n\n\nclass ExtractSubscriptionPDFView(AccountMixin):\n subscription = None\n\n def pre_dispatch(self, request):\n uuid = self.kwargs.get('pk')\n self.subscription = get_object_or_404(Subscription,\n uuid=uuid)\n\n return super().pre_dispatch(request)\n\n def get_permission_denied_url(self):\n \"\"\" Resgata url quando permissão negada. \"\"\"\n return reverse('subscription:subscription-view', kwargs={\n 'event_pk': self.kwargs.get('event_pk'),\n 'pk': self.kwargs.get('pk'),\n })\n\n def get(self, request, *args, **kwargs):\n pdf = create_extract(subscription=self.subscription,\n user=self.request.user)\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"{}\"'.format(\n get_extract_file_name(subscription=self.subscription)\n )\n\n return response\n\n def can_access(self):\n return self.subscription.lot.price > 0\n",
"step-ids": [
1,
4,
6,
7,
8
]
}
|
[
1,
4,
6,
7,
8
] |
import numpy as np
import cv2
import serial
import serial.tools.list_ports
import time
import random
import math
#import mcpi.minecraft as minecraft
#import mcpi.block as block
#from house import House
#Arduino Serials
ports = list(serial.tools.list_ports.comports())
print (ports)
for p in ports:
print (p[1])
if "Arduino" in p[1]:
ser=serial.Serial(port=p[0])
else :
print ("No Arduino Device was found connected to the computer")
#time.sleep(2)
#face detection
cap =cv2.VideoCapture(1)
face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml')
lastpos=0
currentpos=0
lastdis=0
currentdis=0
lastx_d=0
currentx_d=0
shoot=0
#MC
#mc=minecraft.Minecraft.create()
#pos=mc.player.getTilePos()
#pos0=[]
#pos0.append(pos.x)
#pos0.append(pos.y)
#pos0.append(pos.z)
#des=House([pos.x+20,pos.y,pos.z],mc,block.GOLD_BLOCK.id,block.GLASS.id)
#des.buildall()
ct=0
while(True):
ct+=1
    #Has the player reached the destination yet?
#if(des.isInsideHouse()):
#mc.postToChat("You win")
#break
    #Face detection: drives the catapult tracking on one hand and, on the other, moves the player in MC toward the destination
ret,img=cap.read()
center=[img.shape[0]/2,img.shape[1]/2]
faces = face_cascade.detectMultiScale(img, 1.3, 5)
tmp=0
for(x,y,w,h) in faces:
tmp+=1
if(tmp>1):
print("too many faces")
else:
for (x,y,w,h) in faces:
img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_color = img[y:y+h, x:x+w]
x_d=x+w/2-325-73
dis=(-0.88*w+220)
angle=x_d#math.atan(x_d/dis)/3.1415926535897*180
currentpos=angle
currentdis=dis
currentx_d=x_d
if(ct==1):
lastpos=currentpos
lastdis=currentdis
lastx_d=currentx_d
#pos=mc.player.getTilePos()
#mc.player.setTilePos([pos.x+(currentx_d-lastx_d)/5,pos.y,pos.z+(currentdis-lastdis)/5])
#print(x_d)
#print(angle)
#ser.write
print(str(int(angle)).encode())
#ser.write
if(angle<0):
ser.write(str(int(angle)).encode())
else:
ser.write(("+"+str(int(angle))).encode())
time.sleep(1)
if((lastpos-currentpos)<10 and abs(angle)<15):
shoot+=1
if(shoot>1):
time.sleep(2)
#mc.player.setTilePos([0,-1000,0])
ser.write(str(10000).encode())
time.sleep(2)
shoot=0
lastpos=currentpos
lastdis=currentdis
lastx_d=currentx_d
cv2.imshow('img',img)
if cv2.waitKey(1)& 0xFF==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "7c80c98e32f386362003ac3cd729fa9b279b8e8e",
"index": 7316,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(ports)\nfor p in ports:\n print(p[1])\n if 'Arduino' in p[1]:\n ser = serial.Serial(port=p[0])\n else:\n print('No Arduino Device was found connected to the computer')\n<mask token>\nwhile True:\n ct += 1\n ret, img = cap.read()\n center = [img.shape[0] / 2, img.shape[1] / 2]\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n tmp = 0\n for x, y, w, h in faces:\n tmp += 1\n if tmp > 1:\n print('too many faces')\n else:\n for x, y, w, h in faces:\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_color = img[y:y + h, x:x + w]\n x_d = x + w / 2 - 325 - 73\n dis = -0.88 * w + 220\n angle = x_d\n currentpos = angle\n currentdis = dis\n currentx_d = x_d\n if ct == 1:\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n print(str(int(angle)).encode())\n if angle < 0:\n ser.write(str(int(angle)).encode())\n else:\n ser.write(('+' + str(int(angle))).encode())\n time.sleep(1)\n if lastpos - currentpos < 10 and abs(angle) < 15:\n shoot += 1\n if shoot > 1:\n time.sleep(2)\n ser.write(str(10000).encode())\n time.sleep(2)\n shoot = 0\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n cv2.imshow('img', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nports = list(serial.tools.list_ports.comports())\nprint(ports)\nfor p in ports:\n print(p[1])\n if 'Arduino' in p[1]:\n ser = serial.Serial(port=p[0])\n else:\n print('No Arduino Device was found connected to the computer')\ncap = cv2.VideoCapture(1)\nface_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml')\nlastpos = 0\ncurrentpos = 0\nlastdis = 0\ncurrentdis = 0\nlastx_d = 0\ncurrentx_d = 0\nshoot = 0\nct = 0\nwhile True:\n ct += 1\n ret, img = cap.read()\n center = [img.shape[0] / 2, img.shape[1] / 2]\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n tmp = 0\n for x, y, w, h in faces:\n tmp += 1\n if tmp > 1:\n print('too many faces')\n else:\n for x, y, w, h in faces:\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_color = img[y:y + h, x:x + w]\n x_d = x + w / 2 - 325 - 73\n dis = -0.88 * w + 220\n angle = x_d\n currentpos = angle\n currentdis = dis\n currentx_d = x_d\n if ct == 1:\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n print(str(int(angle)).encode())\n if angle < 0:\n ser.write(str(int(angle)).encode())\n else:\n ser.write(('+' + str(int(angle))).encode())\n time.sleep(1)\n if lastpos - currentpos < 10 and abs(angle) < 15:\n shoot += 1\n if shoot > 1:\n time.sleep(2)\n ser.write(str(10000).encode())\n time.sleep(2)\n shoot = 0\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n cv2.imshow('img', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-4": "import numpy as np\nimport cv2\nimport serial\nimport serial.tools.list_ports\nimport time\nimport random\nimport math\nports = list(serial.tools.list_ports.comports())\nprint(ports)\nfor p in ports:\n print(p[1])\n if 'Arduino' in p[1]:\n ser = serial.Serial(port=p[0])\n else:\n print('No Arduino Device was found connected to the computer')\ncap = cv2.VideoCapture(1)\nface_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml')\nlastpos = 0\ncurrentpos = 0\nlastdis = 0\ncurrentdis = 0\nlastx_d = 0\ncurrentx_d = 0\nshoot = 0\nct = 0\nwhile True:\n ct += 1\n ret, img = cap.read()\n center = [img.shape[0] / 2, img.shape[1] / 2]\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n tmp = 0\n for x, y, w, h in faces:\n tmp += 1\n if tmp > 1:\n print('too many faces')\n else:\n for x, y, w, h in faces:\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_color = img[y:y + h, x:x + w]\n x_d = x + w / 2 - 325 - 73\n dis = -0.88 * w + 220\n angle = x_d\n currentpos = angle\n currentdis = dis\n currentx_d = x_d\n if ct == 1:\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n print(str(int(angle)).encode())\n if angle < 0:\n ser.write(str(int(angle)).encode())\n else:\n ser.write(('+' + str(int(angle))).encode())\n time.sleep(1)\n if lastpos - currentpos < 10 and abs(angle) < 15:\n shoot += 1\n if shoot > 1:\n time.sleep(2)\n ser.write(str(10000).encode())\n time.sleep(2)\n shoot = 0\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n cv2.imshow('img', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-5": "import numpy as np\nimport cv2\nimport serial\nimport serial.tools.list_ports\nimport time\nimport random\nimport math\n#import mcpi.minecraft as minecraft\n#import mcpi.block as block\n#from house import House\n\n\n\n#Arduino Serials\nports = list(serial.tools.list_ports.comports())\nprint (ports)\nfor p in ports:\n print (p[1])\n if \"Arduino\" in p[1]:\n\t ser=serial.Serial(port=p[0])\n else :\n\t print (\"No Arduino Device was found connected to the computer\")\n#time.sleep(2)\n#face detection\t \ncap =cv2.VideoCapture(1)\nface_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml')\n\nlastpos=0\ncurrentpos=0\nlastdis=0\ncurrentdis=0\nlastx_d=0\ncurrentx_d=0\nshoot=0\n#MC\n#mc=minecraft.Minecraft.create()\n#pos=mc.player.getTilePos()\n#pos0=[]\n#pos0.append(pos.x)\n#pos0.append(pos.y)\n#pos0.append(pos.z)\n#des=House([pos.x+20,pos.y,pos.z],mc,block.GOLD_BLOCK.id,block.GLASS.id)\n#des.buildall()\n\nct=0\nwhile(True):\n ct+=1\n #到达目的地了吗\n #if(des.isInsideHouse()):\n #mc.postToChat(\"You win\")\n #break\n #人脸识别,一方面投石机追踪,一方面控制MC里面人到Destinatioin\n ret,img=cap.read()\n center=[img.shape[0]/2,img.shape[1]/2]\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n tmp=0\n for(x,y,w,h) in faces:\n tmp+=1\n if(tmp>1):\n print(\"too many faces\")\n else:\n for (x,y,w,h) in faces:\n img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_color = img[y:y+h, x:x+w]\n \n x_d=x+w/2-325-73\n dis=(-0.88*w+220)\n angle=x_d#math.atan(x_d/dis)/3.1415926535897*180\n currentpos=angle\n currentdis=dis\n currentx_d=x_d\n if(ct==1):\n lastpos=currentpos\n lastdis=currentdis\n lastx_d=currentx_d\n #pos=mc.player.getTilePos()\n #mc.player.setTilePos([pos.x+(currentx_d-lastx_d)/5,pos.y,pos.z+(currentdis-lastdis)/5])\n #print(x_d)\n #print(angle)\n #ser.write\n print(str(int(angle)).encode())\n #ser.write\n if(angle<0):\n ser.write(str(int(angle)).encode())\n else:\n ser.write((\"+\"+str(int(angle))).encode())\n time.sleep(1)\n if((lastpos-currentpos)<10 and abs(angle)<15):\n shoot+=1\n if(shoot>1):\n time.sleep(2)\n #mc.player.setTilePos([0,-1000,0])\n ser.write(str(10000).encode())\n time.sleep(2)\n shoot=0\n lastpos=currentpos\n lastdis=currentdis\n lastx_d=currentx_d\n cv2.imshow('img',img)\n if cv2.waitKey(1)& 0xFF==ord('q'):\n break\n \n\ncap.release()\ncv2.destroyAllWindows()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from .linked_list import LinkedList
class Queue:
    """FIFO queue backed by a linked list: enqueue at the tail, dequeue from the head."""

    def __init__(self):
        self.list = LinkedList()

    def enqueue(self, value):
        # New elements join the rear (tail) of the underlying list.
        self.list.insert_last(value)

    def dequeue(self):
        # Elements leave from the front (head) of the underlying list.
        element = self.list.get_head()
        self.list.remove_first()
        return element

    def front(self):
        # Next element to be dequeued.
        return self.list.get_head()

    def rear(self):
        # Most recently enqueued element.
        return self.list.get_tail()
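# --- Usage sketch (illustrative only, assuming the sibling linked_list module
# provides insert_last / remove_first, and that get_head / get_tail return the
# stored values; run as part of the package because of the relative import) ---
if __name__ == "__main__":
    q = Queue()
    for item in (1, 2, 3):
        q.enqueue(item)
    print(q.front())    # expected: 1 (next element out)
    print(q.rear())     # expected: 3 (last element in)
    print(q.dequeue())  # expected: 1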
|
normal
|
{
"blob_id": "4830da6bee6b19a5e5a82a73d2f3b220ca59d28b",
"index": 9025,
"step-1": "<mask token>\n\n\nclass Queue:\n <mask token>\n <mask token>\n <mask token>\n\n def front(self):\n return self.list.get_tail()\n\n def rear(self):\n return self.list.get_head()\n",
"step-2": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.list = LinkedList()\n <mask token>\n\n def dequeue(self):\n element = self.list.get_head()\n self.list.remove_first()\n return element\n\n def front(self):\n return self.list.get_tail()\n\n def rear(self):\n return self.list.get_head()\n",
"step-3": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.list = LinkedList()\n\n def enqueue(self, value):\n self.list.insert_last(value)\n\n def dequeue(self):\n element = self.list.get_head()\n self.list.remove_first()\n return element\n\n def front(self):\n return self.list.get_tail()\n\n def rear(self):\n return self.list.get_head()\n",
"step-4": "from .linked_list import LinkedList\n\n\nclass Queue:\n\n def __init__(self):\n self.list = LinkedList()\n\n def enqueue(self, value):\n self.list.insert_last(value)\n\n def dequeue(self):\n element = self.list.get_head()\n self.list.remove_first()\n return element\n\n def front(self):\n return self.list.get_tail()\n\n def rear(self):\n return self.list.get_head()\n",
"step-5": null,
"step-ids": [
3,
5,
6,
7
]
}
|
[
3,
5,
6,
7
] |
# Generated by Django 2.2.15 on 2020-09-16 03:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('api', '0005_cashiershift_couriershift_couriershiftexpenses_dailyransom_expensestype_vehicleservice'),
]
operations = [
migrations.AlterField(
model_name='address',
name='city',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='city_addresses', related_query_name='city_address', to='api.City'),
),
migrations.AlterField(
model_name='address',
name='district',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='district_addresses', related_query_name='district_address', to='api.District'),
),
migrations.AlterField(
model_name='address',
name='street',
field=models.ForeignKey(max_length=255, on_delete=django.db.models.fields.Empty, related_name='street_addresses', related_query_name='street_address', to='api.Street', verbose_name='Улица'),
),
migrations.AlterField(
model_name='couriershift',
name='courier',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='couriers', related_query_name='courier', to=settings.AUTH_USER_MODEL, verbose_name='Курьер'),
),
migrations.AlterField(
model_name='couriershift',
name='vehicle',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shift_vehicles', related_query_name='shift_vehicle', to='api.Vehicle', verbose_name='Транспортное средство'),
),
migrations.AlterField(
model_name='couriershift',
name='vehicle_accepted_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vehicle_accepted_bys', related_query_name='vehicle_accepted_by', to=settings.AUTH_USER_MODEL, verbose_name='Принял'),
),
migrations.AlterField(
model_name='couriershift',
name='vehicle_given_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vehicle_given_bys', related_query_name='vehicle_given_by', to=settings.AUTH_USER_MODEL, verbose_name='Выдал'),
),
migrations.AlterField(
model_name='district',
name='city',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='city_districts', related_query_name='city_district', to='api.City'),
),
migrations.AlterField(
model_name='technicalservice',
name='address',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_services', related_query_name='address_service', to='api.Address', verbose_name='Адрес СТО'),
),
migrations.AlterField(
model_name='vehicleservice',
name='service',
field=models.ForeignKey(on_delete=django.db.models.fields.Empty, related_name='service_vehicles', related_query_name='service_vehicle', to='api.TechnicalService'),
),
migrations.AlterField(
model_name='vehicleservice',
name='vehicle',
field=models.ForeignKey(on_delete=django.db.models.fields.Empty, related_name='vehicles', related_query_name='vehicle', to='api.Vehicle'),
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('deleted', models.DateTimeField(editable=False, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),
('status', models.CharField(choices=[('new', 'Новый'), ('accepted', 'Принят'), ('canceled', 'Отменен'), ('done', 'Завершен'), ('in_progress', 'Выполняется')], default='new', max_length=100, verbose_name='Статус заказа')),
('accepted_time', models.DateTimeField(blank=True, null=True, verbose_name='Время подтверждения заказа')),
('start_time', models.DateTimeField(blank=True, null=True, verbose_name='Время начала выполнения заказа')),
('end_time', models.DateTimeField(blank=True, null=True, verbose_name='Время завершения заказа')),
('reciever_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='Имя получателя')),
('info', models.TextField(blank=True, null=True, verbose_name='Дополнительные сведения')),
('ransom_sum', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Сумма выкупа')),
('wait_time', models.TimeField(blank=True, null=True, verbose_name='Время ожидания')),
('delivery_cost', models.IntegerField(blank=True, null=True, verbose_name='Стоимость даставки')),
('delivery_time', models.TimeField(blank=True, null=True, verbose_name='Время выполнения заказа')),
('courier_shift', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='courier_orders', related_query_name='courier_order', to='api.CourierShift', verbose_name='Смена курьера')),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders_created_by', related_query_name='order_created_by', to=settings.AUTH_USER_MODEL, verbose_name='Кем создан')),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customers_orders', related_query_name='customer_order', to=settings.AUTH_USER_MODEL, verbose_name='Клиент')),
('delivery_from', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_delivery_from', related_query_name='address_from', to='api.Address', verbose_name='Забрать от')),
('delivery_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_delivery_to', related_query_name='address_to', to='api.Address', verbose_name='Куда доставить')),
],
options={
'get_latest_by': '-created_at',
'abstract': False,
},
),
migrations.CreateModel(
name='OperatorShift',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('deleted', models.DateTimeField(editable=False, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),
('start_time', models.DateField(auto_now_add=True, verbose_name='Начало смены')),
('end_time', models.DateField(blank=True, null=True, verbose_name='Конец смены')),
('operator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operator_shifts', related_query_name='operator_shift', to=settings.AUTH_USER_MODEL, verbose_name='Оператор')),
],
options={
'get_latest_by': '-created_at',
'abstract': False,
},
),
]
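# Applied like any other Django migration, e.g. `python manage.py migrate api`.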
|
normal
|
{
"blob_id": "1c979d505b58025aae74865d6556c726ed3f0769",
"index": 2651,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api',\n '0005_cashiershift_couriershift_couriershiftexpenses_dailyransom_expensestype_vehicleservice'\n )]\n operations = [migrations.AlterField(model_name='address', name='city',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='city_addresses', related_query_name='city_address',\n to='api.City')), migrations.AlterField(model_name='address', name=\n 'district', field=models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='district_addresses',\n related_query_name='district_address', to='api.District')),\n migrations.AlterField(model_name='address', name='street', field=\n models.ForeignKey(max_length=255, on_delete=django.db.models.fields\n .Empty, related_name='street_addresses', related_query_name=\n 'street_address', to='api.Street', verbose_name='Улица')),\n migrations.AlterField(model_name='couriershift', name='courier',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='couriers', related_query_name='courier', to=settings.\n AUTH_USER_MODEL, verbose_name='Курьер')), migrations.AlterField(\n model_name='couriershift', name='vehicle', field=models.ForeignKey(\n blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='shift_vehicles', related_query_name='shift_vehicle',\n to='api.Vehicle', verbose_name='Транспортное средство')),\n migrations.AlterField(model_name='couriershift', name=\n 'vehicle_accepted_by', field=models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'vehicle_accepted_bys', related_query_name='vehicle_accepted_by',\n to=settings.AUTH_USER_MODEL, verbose_name='Принял')), migrations.\n AlterField(model_name='couriershift', name='vehicle_given_by',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.\n models.deletion.CASCADE, related_name='vehicle_given_bys',\n related_query_name='vehicle_given_by', to=settings.AUTH_USER_MODEL,\n verbose_name='Выдал')), migrations.AlterField(model_name='district',\n name='city', field=models.ForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'city_districts', related_query_name='city_district', to='api.City'\n )), migrations.AlterField(model_name='technicalservice', name=\n 'address', field=models.ForeignKey(blank=True, null=True, on_delete\n =django.db.models.deletion.CASCADE, related_name='address_services',\n related_query_name='address_service', to='api.Address',\n verbose_name='Адрес СТО')), migrations.AlterField(model_name=\n 'vehicleservice', name='service', field=models.ForeignKey(on_delete\n =django.db.models.fields.Empty, related_name='service_vehicles',\n related_query_name='service_vehicle', to='api.TechnicalService')),\n migrations.AlterField(model_name='vehicleservice', name='vehicle',\n field=models.ForeignKey(on_delete=django.db.models.fields.Empty,\n related_name='vehicles', related_query_name='vehicle', to=\n 'api.Vehicle')), migrations.CreateModel(name='Order', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('deleted', models.DateTimeField(\n editable=False, null=True)), ('created_at', models.DateTimeField(\n auto_now_add=True, verbose_name='Created at')), ('updated_at',\n models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),\n ('status', models.CharField(choices=[('new', 'Новый'), ('accepted',\n 'Принят'), ('canceled', 
'Отменен'), ('done', 'Завершен'), (\n 'in_progress', 'Выполняется')], default='new', max_length=100,\n verbose_name='Статус заказа')), ('accepted_time', models.\n DateTimeField(blank=True, null=True, verbose_name=\n 'Время подтверждения заказа')), ('start_time', models.DateTimeField\n (blank=True, null=True, verbose_name=\n 'Время начала выполнения заказа')), ('end_time', models.\n DateTimeField(blank=True, null=True, verbose_name=\n 'Время завершения заказа')), ('reciever_name', models.CharField(\n blank=True, max_length=255, null=True, verbose_name=\n 'Имя получателя')), ('info', models.TextField(blank=True, null=True,\n verbose_name='Дополнительные сведения')), ('ransom_sum', models.\n DecimalField(decimal_places=2, max_digits=6, verbose_name=\n 'Сумма выкупа')), ('wait_time', models.TimeField(blank=True, null=\n True, verbose_name='Время ожидания')), ('delivery_cost', models.\n IntegerField(blank=True, null=True, verbose_name=\n 'Стоимость даставки')), ('delivery_time', models.TimeField(blank=\n True, null=True, verbose_name='Время выполнения заказа')), (\n 'courier_shift', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='courier_orders', related_query_name\n ='courier_order', to='api.CourierShift', verbose_name=\n 'Смена курьера')), ('created_by', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='orders_created_by',\n related_query_name='order_created_by', to=settings.AUTH_USER_MODEL,\n verbose_name='Кем создан')), ('customer', models.ForeignKey(blank=\n True, null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='customers_orders', related_query_name=\n 'customer_order', to=settings.AUTH_USER_MODEL, verbose_name=\n 'Клиент')), ('delivery_from', models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'address_delivery_from', related_query_name='address_from', to=\n 'api.Address', verbose_name='Забрать от')), ('delivery_to', models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='address_delivery_to',\n related_query_name='address_to', to='api.Address', verbose_name=\n 'Куда доставить'))], options={'get_latest_by': '-created_at',\n 'abstract': False}), migrations.CreateModel(name='OperatorShift',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('deleted', models.\n DateTimeField(editable=False, null=True)), ('created_at', models.\n DateTimeField(auto_now_add=True, verbose_name='Created at')), (\n 'updated_at', models.DateTimeField(auto_now_add=True, verbose_name=\n 'Updated at')), ('start_time', models.DateField(auto_now_add=True,\n verbose_name='Начало смены')), ('end_time', models.DateField(blank=\n True, null=True, verbose_name='Конец смены')), ('operator', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='operator_shifts', related_query_name='operator_shift',\n to=settings.AUTH_USER_MODEL, verbose_name='Оператор'))], options={\n 'get_latest_by': '-created_at', 'abstract': False})]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.db.models.fields\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api',\n '0005_cashiershift_couriershift_couriershiftexpenses_dailyransom_expensestype_vehicleservice'\n )]\n operations = [migrations.AlterField(model_name='address', name='city',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='city_addresses', related_query_name='city_address',\n to='api.City')), migrations.AlterField(model_name='address', name=\n 'district', field=models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='district_addresses',\n related_query_name='district_address', to='api.District')),\n migrations.AlterField(model_name='address', name='street', field=\n models.ForeignKey(max_length=255, on_delete=django.db.models.fields\n .Empty, related_name='street_addresses', related_query_name=\n 'street_address', to='api.Street', verbose_name='Улица')),\n migrations.AlterField(model_name='couriershift', name='courier',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='couriers', related_query_name='courier', to=settings.\n AUTH_USER_MODEL, verbose_name='Курьер')), migrations.AlterField(\n model_name='couriershift', name='vehicle', field=models.ForeignKey(\n blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='shift_vehicles', related_query_name='shift_vehicle',\n to='api.Vehicle', verbose_name='Транспортное средство')),\n migrations.AlterField(model_name='couriershift', name=\n 'vehicle_accepted_by', field=models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'vehicle_accepted_bys', related_query_name='vehicle_accepted_by',\n to=settings.AUTH_USER_MODEL, verbose_name='Принял')), migrations.\n AlterField(model_name='couriershift', name='vehicle_given_by',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.\n models.deletion.CASCADE, related_name='vehicle_given_bys',\n related_query_name='vehicle_given_by', to=settings.AUTH_USER_MODEL,\n verbose_name='Выдал')), migrations.AlterField(model_name='district',\n name='city', field=models.ForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'city_districts', related_query_name='city_district', to='api.City'\n )), migrations.AlterField(model_name='technicalservice', name=\n 'address', field=models.ForeignKey(blank=True, null=True, on_delete\n =django.db.models.deletion.CASCADE, related_name='address_services',\n related_query_name='address_service', to='api.Address',\n verbose_name='Адрес СТО')), migrations.AlterField(model_name=\n 'vehicleservice', name='service', field=models.ForeignKey(on_delete\n =django.db.models.fields.Empty, related_name='service_vehicles',\n related_query_name='service_vehicle', to='api.TechnicalService')),\n migrations.AlterField(model_name='vehicleservice', name='vehicle',\n field=models.ForeignKey(on_delete=django.db.models.fields.Empty,\n related_name='vehicles', related_query_name='vehicle', to=\n 'api.Vehicle')), migrations.CreateModel(name='Order', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('deleted', models.DateTimeField(\n editable=False, null=True)), ('created_at', models.DateTimeField(\n auto_now_add=True, verbose_name='Created at')), ('updated_at',\n 
models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),\n ('status', models.CharField(choices=[('new', 'Новый'), ('accepted',\n 'Принят'), ('canceled', 'Отменен'), ('done', 'Завершен'), (\n 'in_progress', 'Выполняется')], default='new', max_length=100,\n verbose_name='Статус заказа')), ('accepted_time', models.\n DateTimeField(blank=True, null=True, verbose_name=\n 'Время подтверждения заказа')), ('start_time', models.DateTimeField\n (blank=True, null=True, verbose_name=\n 'Время начала выполнения заказа')), ('end_time', models.\n DateTimeField(blank=True, null=True, verbose_name=\n 'Время завершения заказа')), ('reciever_name', models.CharField(\n blank=True, max_length=255, null=True, verbose_name=\n 'Имя получателя')), ('info', models.TextField(blank=True, null=True,\n verbose_name='Дополнительные сведения')), ('ransom_sum', models.\n DecimalField(decimal_places=2, max_digits=6, verbose_name=\n 'Сумма выкупа')), ('wait_time', models.TimeField(blank=True, null=\n True, verbose_name='Время ожидания')), ('delivery_cost', models.\n IntegerField(blank=True, null=True, verbose_name=\n 'Стоимость даставки')), ('delivery_time', models.TimeField(blank=\n True, null=True, verbose_name='Время выполнения заказа')), (\n 'courier_shift', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='courier_orders', related_query_name\n ='courier_order', to='api.CourierShift', verbose_name=\n 'Смена курьера')), ('created_by', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='orders_created_by',\n related_query_name='order_created_by', to=settings.AUTH_USER_MODEL,\n verbose_name='Кем создан')), ('customer', models.ForeignKey(blank=\n True, null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='customers_orders', related_query_name=\n 'customer_order', to=settings.AUTH_USER_MODEL, verbose_name=\n 'Клиент')), ('delivery_from', models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'address_delivery_from', related_query_name='address_from', to=\n 'api.Address', verbose_name='Забрать от')), ('delivery_to', models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='address_delivery_to',\n related_query_name='address_to', to='api.Address', verbose_name=\n 'Куда доставить'))], options={'get_latest_by': '-created_at',\n 'abstract': False}), migrations.CreateModel(name='OperatorShift',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('deleted', models.\n DateTimeField(editable=False, null=True)), ('created_at', models.\n DateTimeField(auto_now_add=True, verbose_name='Created at')), (\n 'updated_at', models.DateTimeField(auto_now_add=True, verbose_name=\n 'Updated at')), ('start_time', models.DateField(auto_now_add=True,\n verbose_name='Начало смены')), ('end_time', models.DateField(blank=\n True, null=True, verbose_name='Конец смены')), ('operator', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='operator_shifts', related_query_name='operator_shift',\n to=settings.AUTH_USER_MODEL, verbose_name='Оператор'))], options={\n 'get_latest_by': '-created_at', 'abstract': False})]\n",
"step-5": "# Generated by Django 2.2.15 on 2020-09-16 03:20\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.db.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0005_cashiershift_couriershift_couriershiftexpenses_dailyransom_expensestype_vehicleservice'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='address',\n name='city',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='city_addresses', related_query_name='city_address', to='api.City'),\n ),\n migrations.AlterField(\n model_name='address',\n name='district',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='district_addresses', related_query_name='district_address', to='api.District'),\n ),\n migrations.AlterField(\n model_name='address',\n name='street',\n field=models.ForeignKey(max_length=255, on_delete=django.db.models.fields.Empty, related_name='street_addresses', related_query_name='street_address', to='api.Street', verbose_name='Улица'),\n ),\n migrations.AlterField(\n model_name='couriershift',\n name='courier',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='couriers', related_query_name='courier', to=settings.AUTH_USER_MODEL, verbose_name='Курьер'),\n ),\n migrations.AlterField(\n model_name='couriershift',\n name='vehicle',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shift_vehicles', related_query_name='shift_vehicle', to='api.Vehicle', verbose_name='Транспортное средство'),\n ),\n migrations.AlterField(\n model_name='couriershift',\n name='vehicle_accepted_by',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vehicle_accepted_bys', related_query_name='vehicle_accepted_by', to=settings.AUTH_USER_MODEL, verbose_name='Принял'),\n ),\n migrations.AlterField(\n model_name='couriershift',\n name='vehicle_given_by',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vehicle_given_bys', related_query_name='vehicle_given_by', to=settings.AUTH_USER_MODEL, verbose_name='Выдал'),\n ),\n migrations.AlterField(\n model_name='district',\n name='city',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='city_districts', related_query_name='city_district', to='api.City'),\n ),\n migrations.AlterField(\n model_name='technicalservice',\n name='address',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_services', related_query_name='address_service', to='api.Address', verbose_name='Адрес СТО'),\n ),\n migrations.AlterField(\n model_name='vehicleservice',\n name='service',\n field=models.ForeignKey(on_delete=django.db.models.fields.Empty, related_name='service_vehicles', related_query_name='service_vehicle', to='api.TechnicalService'),\n ),\n migrations.AlterField(\n model_name='vehicleservice',\n name='vehicle',\n field=models.ForeignKey(on_delete=django.db.models.fields.Empty, related_name='vehicles', related_query_name='vehicle', to='api.Vehicle'),\n ),\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('deleted', models.DateTimeField(editable=False, null=True)),\n ('created_at', 
models.DateTimeField(auto_now_add=True, verbose_name='Created at')),\n ('updated_at', models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),\n ('status', models.CharField(choices=[('new', 'Новый'), ('accepted', 'Принят'), ('canceled', 'Отменен'), ('done', 'Завершен'), ('in_progress', 'Выполняется')], default='new', max_length=100, verbose_name='Статус заказа')),\n ('accepted_time', models.DateTimeField(blank=True, null=True, verbose_name='Время подтверждения заказа')),\n ('start_time', models.DateTimeField(blank=True, null=True, verbose_name='Время начала выполнения заказа')),\n ('end_time', models.DateTimeField(blank=True, null=True, verbose_name='Время завершения заказа')),\n ('reciever_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='Имя получателя')),\n ('info', models.TextField(blank=True, null=True, verbose_name='Дополнительные сведения')),\n ('ransom_sum', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Сумма выкупа')),\n ('wait_time', models.TimeField(blank=True, null=True, verbose_name='Время ожидания')),\n ('delivery_cost', models.IntegerField(blank=True, null=True, verbose_name='Стоимость даставки')),\n ('delivery_time', models.TimeField(blank=True, null=True, verbose_name='Время выполнения заказа')),\n ('courier_shift', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='courier_orders', related_query_name='courier_order', to='api.CourierShift', verbose_name='Смена курьера')),\n ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders_created_by', related_query_name='order_created_by', to=settings.AUTH_USER_MODEL, verbose_name='Кем создан')),\n ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customers_orders', related_query_name='customer_order', to=settings.AUTH_USER_MODEL, verbose_name='Клиент')),\n ('delivery_from', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_delivery_from', related_query_name='address_from', to='api.Address', verbose_name='Забрать от')),\n ('delivery_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_delivery_to', related_query_name='address_to', to='api.Address', verbose_name='Куда доставить')),\n ],\n options={\n 'get_latest_by': '-created_at',\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='OperatorShift',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('deleted', models.DateTimeField(editable=False, null=True)),\n ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),\n ('updated_at', models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),\n ('start_time', models.DateField(auto_now_add=True, verbose_name='Начало смены')),\n ('end_time', models.DateField(blank=True, null=True, verbose_name='Конец смены')),\n ('operator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operator_shifts', related_query_name='operator_shift', to=settings.AUTH_USER_MODEL, verbose_name='Оператор')),\n ],\n options={\n 'get_latest_by': '-created_at',\n 'abstract': False,\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import logging
from logging import INFO
from typing import Dict, List
from .constants import Relations, POS
from .evaluator import *
from .general import DPHelper
from .general import *
from .utils import *
# ========================================= DRIVER =================================================
def generate(root: Dict):
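    """Extract relations from a dependency-parse tree rooted at `root` and
    return them as a flat list of (subject, relation, object) tuples."""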
# {"relation": <>, "subjs": [<>], "objs": [<>]}
relations: List[Dict] = []
# Is this applicable only to root?
subj = DPHelper.get_subject(root)
obj = DPHelper.get_object(root)
if subj is not None and DPHelper.is_proper_noun(subj) and \
obj is not None and DPHelper.is_proper_noun(obj):
if DPHelper.is_proper_noun(subj) and DPHelper.is_proper_noun(obj):
logging.log(INFO, "============ Rooted NNP SUBJECT and NNP OBJECT =============")
subjs = get_all_nouns(subj, proper_noun=True)
objs = [get_noun_phrase(obj, proper_noun=True)]
aux_relations = sub_obj_vbroot(root) # Relations between subject and object
relations = relations + create_relations(subjs, aux_relations, objs)
# Relations within clausal complements
open_comp: List[Dict] = DPHelper.get_child_type(root, Relations.OPEN_CLAUSAL_COMPLEMENT)
comp: List[Dict] = DPHelper.get_child_type(root, Relations.CLAUSAL_COMPLEMENT)
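        # NOTE: plain clausal complements (ccomp) are collected here but not used further below.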
if open_comp: # Assume for now open_comps all relate to object
subjs = [get_noun_phrase(obj, proper_noun=True)]
objs, xcomp_relations = x_comp(open_comp[0]) # TODO Can there be multiple xcomps?
relations = relations + create_relations(subjs, xcomp_relations, objs)
elif subj is not None and DPHelper.is_proper_noun(subj):
subjs = get_all_nouns(subj, proper_noun=True)
appos_rels, appos_objs = [], []
# Find direct appositional relations within NSUBJ block
appos_rel_objs = []
for appos in DPHelper.get_child_type(subj, Relations.APPOSITION):
a_objs, a_relations = direct_appositional_relations(appos)
relations += create_nested_relations(subjs, a_relations, a_objs)
# TODO Check for clausal complement for Subj (INDEPENDENT)
if DPHelper.get_child_type(root, Relations.CLAUSAL_COMPLEMENT):
pass
# Passive subject, look into preposition for predicate object with possessive
if DPHelper.is_proper_noun(subj) and subj["link"] == Relations.PASSIVE_NOM_SUBJECT:
logging.log(INFO, "============= NNP PASSIVE SUBJECT ===============")
objs, aux_relations, appos = subjpass(root)
for appos_instance in appos:
relations = relations + create_relations(subjs, appos_instance["relation"], appos_instance["obj"])
relations = relations + create_relations(subjs, aux_relations, objs)
# Possible case where root is noun and hence subject is not labeled passive but relation still exists
elif DPHelper.is_noun(root):
logging.log(INFO, "============= NNP SUBJECT with NOUN ROOT ===============")
objs, aux_relations = nnroot_subj(root)
relations = relations + create_relations(subjs, aux_relations, objs)
        # Usually the non-NNP direct object is what expresses the relation
elif DPHelper.is_verb(root) and obj is not None:
logging.log(INFO, "============= NNP SUBJECT with VERB ROOT (NON-NNP DOBJ present) ===============")
objs, aux_relations = vbroot_subj_xobj(root)
relations = relations + create_relations(subjs, aux_relations, objs)
# Root verb without concrete noun form but valid relation (E.g. lives, resides) TODO Do we require `in/from etc.` for preposition?
elif DPHelper.is_verb(root):
logging.log(INFO, "============= NNP SUBJECT with VERB ROOT ===============")
objs, aux_relations = vbroot_subj(root)
relations = relations + create_nested_relations(subjs, aux_relations, objs)
elif DPHelper.is_adjective(root):
logging.log(INFO, "============= NNP SUBJECT with ADJ ROOT ===============")
objs, aux_relations = vbroot_subj(root) # FIXME We assume this is similar to verb root for now
relations = relations + create_nested_relations(subjs, aux_relations, objs)
else:
logging.log(INFO, "============= NNP SUBJECT with UNKNOWN STRUCTURE ===============")
else:
logging.log(INFO, "============== NOUN ROOT - No Direct SUBJ and OBJ ================")
        if subj is not None: # Most likely a noun with a possessive or nested structure
if (subj["link"] == Relations.PASSIVE_NOM_SUBJECT): # Necessarily assume this since noun subj is possessive, else should Corefer
logging.log(INFO, "============= NESTED POSSESSIVE OF PASSIVE SUBJECT ===============")
subjs = subjpass_poss(subj)
if DPHelper.has_rc_modifier(root): # NNP still might be present in rc modifier
logging.log(INFO, "============= RELATIVE CLAUSE MODIFIER PRESENT ===============")
if DPHelper.is_proper_noun(root):
subj, relations, objs = nnproot(root)
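    # Flatten each {"relation", "subjs", "objs"} dict into (subject, relation, object) triples.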
all_rel_tuples = []
for relation in relations:
rel_tuples = [(sub, relation['relation'], obj) for sub in relation['subjs'] for obj in relation['objs']]
all_rel_tuples += rel_tuples
return all_rel_tuples
|
normal
|
{
"blob_id": "5923a12378225fb6389e7e0275af6d4aa476fe87",
"index": 1635,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate(root: Dict):\n relations: List[Dict] = []\n subj = DPHelper.get_subject(root)\n obj = DPHelper.get_object(root)\n if subj is not None and DPHelper.is_proper_noun(subj\n ) and obj is not None and DPHelper.is_proper_noun(obj):\n if DPHelper.is_proper_noun(subj) and DPHelper.is_proper_noun(obj):\n logging.log(INFO,\n '============ Rooted NNP SUBJECT and NNP OBJECT =============')\n subjs = get_all_nouns(subj, proper_noun=True)\n objs = [get_noun_phrase(obj, proper_noun=True)]\n aux_relations = sub_obj_vbroot(root)\n relations = relations + create_relations(subjs, aux_relations, objs\n )\n open_comp: List[Dict] = DPHelper.get_child_type(root, Relations\n .OPEN_CLAUSAL_COMPLEMENT)\n comp: List[Dict] = DPHelper.get_child_type(root, Relations.\n CLAUSAL_COMPLEMENT)\n if open_comp:\n subjs = [get_noun_phrase(obj, proper_noun=True)]\n objs, xcomp_relations = x_comp(open_comp[0])\n relations = relations + create_relations(subjs,\n xcomp_relations, objs)\n elif subj is not None and DPHelper.is_proper_noun(subj):\n subjs = get_all_nouns(subj, proper_noun=True)\n appos_rels, appos_objs = [], []\n appos_rel_objs = []\n for appos in DPHelper.get_child_type(subj, Relations.APPOSITION):\n a_objs, a_relations = direct_appositional_relations(appos)\n relations += create_nested_relations(subjs, a_relations, a_objs)\n if DPHelper.get_child_type(root, Relations.CLAUSAL_COMPLEMENT):\n pass\n if DPHelper.is_proper_noun(subj) and subj['link'\n ] == Relations.PASSIVE_NOM_SUBJECT:\n logging.log(INFO,\n '============= NNP PASSIVE SUBJECT ===============')\n objs, aux_relations, appos = subjpass(root)\n for appos_instance in appos:\n relations = relations + create_relations(subjs,\n appos_instance['relation'], appos_instance['obj'])\n relations = relations + create_relations(subjs, aux_relations, objs\n )\n elif DPHelper.is_noun(root):\n logging.log(INFO,\n '============= NNP SUBJECT with NOUN ROOT ===============')\n objs, aux_relations = nnroot_subj(root)\n relations = relations + create_relations(subjs, aux_relations, objs\n )\n elif DPHelper.is_verb(root) and obj is not None:\n logging.log(INFO,\n '============= NNP SUBJECT with VERB ROOT (NON-NNP DOBJ present) ==============='\n )\n objs, aux_relations = vbroot_subj_xobj(root)\n relations = relations + create_relations(subjs, aux_relations, objs\n )\n elif DPHelper.is_verb(root):\n logging.log(INFO,\n '============= NNP SUBJECT with VERB ROOT ===============')\n objs, aux_relations = vbroot_subj(root)\n relations = relations + create_nested_relations(subjs,\n aux_relations, objs)\n elif DPHelper.is_adjective(root):\n logging.log(INFO,\n '============= NNP SUBJECT with ADJ ROOT ===============')\n objs, aux_relations = vbroot_subj(root)\n relations = relations + create_nested_relations(subjs,\n aux_relations, objs)\n else:\n logging.log(INFO,\n '============= NNP SUBJECT with UNKNOWN STRUCTURE ==============='\n )\n else:\n logging.log(INFO,\n '============== NOUN ROOT - No Direct SUBJ and OBJ ================'\n )\n if subj is not None:\n if subj['link'] == Relations.PASSIVE_NOM_SUBJECT:\n logging.log(INFO,\n '============= NESTED POSSESSIVE OF PASSIVE SUBJECT ==============='\n )\n subjs = subjpass_poss(subj)\n if DPHelper.has_rc_modifier(root):\n logging.log(INFO,\n '============= RELATIVE CLAUSE MODIFIER PRESENT ==============='\n )\n if DPHelper.is_proper_noun(root):\n subj, relations, objs = nnproot(root)\n all_rel_tuples = []\n for relation in relations:\n rel_tuples = [(sub, relation['relation'], obj) for sub in 
relation[\n 'subjs'] for obj in relation['objs']]\n all_rel_tuples += rel_tuples\n return all_rel_tuples\n",
"step-3": "import logging\nfrom logging import INFO\nfrom typing import Dict, List\nfrom .constants import Relations, POS\nfrom .evaluator import *\nfrom .general import DPHelper\nfrom .general import *\nfrom .utils import *\n\n\ndef generate(root: Dict):\n relations: List[Dict] = []\n subj = DPHelper.get_subject(root)\n obj = DPHelper.get_object(root)\n if subj is not None and DPHelper.is_proper_noun(subj\n ) and obj is not None and DPHelper.is_proper_noun(obj):\n if DPHelper.is_proper_noun(subj) and DPHelper.is_proper_noun(obj):\n logging.log(INFO,\n '============ Rooted NNP SUBJECT and NNP OBJECT =============')\n subjs = get_all_nouns(subj, proper_noun=True)\n objs = [get_noun_phrase(obj, proper_noun=True)]\n aux_relations = sub_obj_vbroot(root)\n relations = relations + create_relations(subjs, aux_relations, objs\n )\n open_comp: List[Dict] = DPHelper.get_child_type(root, Relations\n .OPEN_CLAUSAL_COMPLEMENT)\n comp: List[Dict] = DPHelper.get_child_type(root, Relations.\n CLAUSAL_COMPLEMENT)\n if open_comp:\n subjs = [get_noun_phrase(obj, proper_noun=True)]\n objs, xcomp_relations = x_comp(open_comp[0])\n relations = relations + create_relations(subjs,\n xcomp_relations, objs)\n elif subj is not None and DPHelper.is_proper_noun(subj):\n subjs = get_all_nouns(subj, proper_noun=True)\n appos_rels, appos_objs = [], []\n appos_rel_objs = []\n for appos in DPHelper.get_child_type(subj, Relations.APPOSITION):\n a_objs, a_relations = direct_appositional_relations(appos)\n relations += create_nested_relations(subjs, a_relations, a_objs)\n if DPHelper.get_child_type(root, Relations.CLAUSAL_COMPLEMENT):\n pass\n if DPHelper.is_proper_noun(subj) and subj['link'\n ] == Relations.PASSIVE_NOM_SUBJECT:\n logging.log(INFO,\n '============= NNP PASSIVE SUBJECT ===============')\n objs, aux_relations, appos = subjpass(root)\n for appos_instance in appos:\n relations = relations + create_relations(subjs,\n appos_instance['relation'], appos_instance['obj'])\n relations = relations + create_relations(subjs, aux_relations, objs\n )\n elif DPHelper.is_noun(root):\n logging.log(INFO,\n '============= NNP SUBJECT with NOUN ROOT ===============')\n objs, aux_relations = nnroot_subj(root)\n relations = relations + create_relations(subjs, aux_relations, objs\n )\n elif DPHelper.is_verb(root) and obj is not None:\n logging.log(INFO,\n '============= NNP SUBJECT with VERB ROOT (NON-NNP DOBJ present) ==============='\n )\n objs, aux_relations = vbroot_subj_xobj(root)\n relations = relations + create_relations(subjs, aux_relations, objs\n )\n elif DPHelper.is_verb(root):\n logging.log(INFO,\n '============= NNP SUBJECT with VERB ROOT ===============')\n objs, aux_relations = vbroot_subj(root)\n relations = relations + create_nested_relations(subjs,\n aux_relations, objs)\n elif DPHelper.is_adjective(root):\n logging.log(INFO,\n '============= NNP SUBJECT with ADJ ROOT ===============')\n objs, aux_relations = vbroot_subj(root)\n relations = relations + create_nested_relations(subjs,\n aux_relations, objs)\n else:\n logging.log(INFO,\n '============= NNP SUBJECT with UNKNOWN STRUCTURE ==============='\n )\n else:\n logging.log(INFO,\n '============== NOUN ROOT - No Direct SUBJ and OBJ ================'\n )\n if subj is not None:\n if subj['link'] == Relations.PASSIVE_NOM_SUBJECT:\n logging.log(INFO,\n '============= NESTED POSSESSIVE OF PASSIVE SUBJECT ==============='\n )\n subjs = subjpass_poss(subj)\n if DPHelper.has_rc_modifier(root):\n logging.log(INFO,\n '============= RELATIVE CLAUSE MODIFIER PRESENT 
==============='\n )\n if DPHelper.is_proper_noun(root):\n subj, relations, objs = nnproot(root)\n all_rel_tuples = []\n for relation in relations:\n rel_tuples = [(sub, relation['relation'], obj) for sub in relation[\n 'subjs'] for obj in relation['objs']]\n all_rel_tuples += rel_tuples\n return all_rel_tuples\n",
"step-4": "import logging\nfrom logging import INFO\nfrom typing import Dict, List\nfrom .constants import Relations, POS\nfrom .evaluator import *\nfrom .general import DPHelper\nfrom .general import *\nfrom .utils import *\n\n# ========================================= DRIVER =================================================\n\ndef generate(root: Dict):\n\n # {\"relation\": <>, \"subjs\": [<>], \"objs\": [<>]}\n relations: List[Dict] = []\n\n # Is this applicable only to root?\n subj = DPHelper.get_subject(root)\n obj = DPHelper.get_object(root)\n\n\n if subj is not None and DPHelper.is_proper_noun(subj) and \\\n obj is not None and DPHelper.is_proper_noun(obj):\n\n if DPHelper.is_proper_noun(subj) and DPHelper.is_proper_noun(obj):\n logging.log(INFO, \"============ Rooted NNP SUBJECT and NNP OBJECT =============\")\n subjs = get_all_nouns(subj, proper_noun=True)\n objs = [get_noun_phrase(obj, proper_noun=True)]\n aux_relations = sub_obj_vbroot(root) # Relations between subject and object\n relations = relations + create_relations(subjs, aux_relations, objs)\n\n # Relations within clausal complements\n open_comp: List[Dict] = DPHelper.get_child_type(root, Relations.OPEN_CLAUSAL_COMPLEMENT)\n comp: List[Dict] = DPHelper.get_child_type(root, Relations.CLAUSAL_COMPLEMENT)\n if open_comp: # Assume for now open_comps all relate to object\n subjs = [get_noun_phrase(obj, proper_noun=True)]\n objs, xcomp_relations = x_comp(open_comp[0]) # TODO Can there be multiple xcomps?\n relations = relations + create_relations(subjs, xcomp_relations, objs)\n\n elif subj is not None and DPHelper.is_proper_noun(subj):\n subjs = get_all_nouns(subj, proper_noun=True)\n\n appos_rels, appos_objs = [], []\n # Find direct appositional relations within NSUBJ block\n appos_rel_objs = []\n for appos in DPHelper.get_child_type(subj, Relations.APPOSITION):\n a_objs, a_relations = direct_appositional_relations(appos)\n relations += create_nested_relations(subjs, a_relations, a_objs)\n\n # TODO Check for clausal complement for Subj (INDEPENDENT)\n if DPHelper.get_child_type(root, Relations.CLAUSAL_COMPLEMENT):\n pass\n\n # Passive subject, look into preposition for predicate object with possessive\n if DPHelper.is_proper_noun(subj) and subj[\"link\"] == Relations.PASSIVE_NOM_SUBJECT:\n logging.log(INFO, \"============= NNP PASSIVE SUBJECT ===============\")\n objs, aux_relations, appos = subjpass(root)\n for appos_instance in appos:\n relations = relations + create_relations(subjs, appos_instance[\"relation\"], appos_instance[\"obj\"])\n relations = relations + create_relations(subjs, aux_relations, objs)\n\n # Possible case where root is noun and hence subject is not labeled passive but relation still exists\n elif DPHelper.is_noun(root):\n logging.log(INFO, \"============= NNP SUBJECT with NOUN ROOT ===============\")\n objs, aux_relations = nnroot_subj(root)\n relations = relations + create_relations(subjs, aux_relations, objs)\n\n # Usually the case that the direct obj being non-NNP represents relation\n elif DPHelper.is_verb(root) and obj is not None:\n logging.log(INFO, \"============= NNP SUBJECT with VERB ROOT (NON-NNP DOBJ present) ===============\")\n objs, aux_relations = vbroot_subj_xobj(root)\n relations = relations + create_relations(subjs, aux_relations, objs)\n\n # Root verb without concrete noun form but valid relation (E.g. 
lives, resides) TODO Do we require `in/from etc.` for preposition?\n elif DPHelper.is_verb(root):\n logging.log(INFO, \"============= NNP SUBJECT with VERB ROOT ===============\")\n objs, aux_relations = vbroot_subj(root)\n relations = relations + create_nested_relations(subjs, aux_relations, objs)\n\n elif DPHelper.is_adjective(root):\n logging.log(INFO, \"============= NNP SUBJECT with ADJ ROOT ===============\")\n objs, aux_relations = vbroot_subj(root) # FIXME We assume this is similar to verb root for now\n relations = relations + create_nested_relations(subjs, aux_relations, objs)\n else:\n logging.log(INFO, \"============= NNP SUBJECT with UNKNOWN STRUCTURE ===============\")\n\n\n else:\n logging.log(INFO, \"============== NOUN ROOT - No Direct SUBJ and OBJ ================\")\n\n if subj is not None: # Mostly likely noun with possessive or nested\n if (subj[\"link\"] == Relations.PASSIVE_NOM_SUBJECT): # Necessarily assume this since noun subj is possessive, else should Corefer\n logging.log(INFO, \"============= NESTED POSSESSIVE OF PASSIVE SUBJECT ===============\")\n subjs = subjpass_poss(subj)\n if DPHelper.has_rc_modifier(root): # NNP still might be present in rc modifier\n logging.log(INFO, \"============= RELATIVE CLAUSE MODIFIER PRESENT ===============\")\n\n if DPHelper.is_proper_noun(root):\n subj, relations, objs = nnproot(root)\n\n all_rel_tuples = []\n for relation in relations:\n rel_tuples = [(sub, relation['relation'], obj) for sub in relation['subjs'] for obj in relation['objs']]\n all_rel_tuples += rel_tuples\n return all_rel_tuples\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
rak="hello\n"
n=input()
print(rak * int(n))
|
normal
|
{
"blob_id": "b0e4042ac4ed54cafedb9e53244c164527559e39",
"index": 5406,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(rak * int(n))\n",
"step-3": "rak = 'hello\\n'\nn = input()\nprint(rak * int(n))\n",
"step-4": "rak=\"hello\\n\"\nn=input()\nprint(rak * int(n)) \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 22:11:53 2020
@author: Rick
"""
sum = 0
with open('workRecord.txt') as fp:
    for line in fp.readlines():
        # A line may end with a multiplier written as "x<number>" within its
        # last few characters; lines without one count as a single unit.
        idx = line.rfind('x', len(line) - 8, len(line))
        if idx >= 0:
            sum += float(line.rstrip()[idx + 1:len(line)])
        else:
            sum += 1
print(sum)
print(sum * 3)
|
normal
|
{
"blob_id": "b838d2230cb3f3270e86807e875df4d3d55438cd",
"index": 8891,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('workRecord.txt') as fp:\n for line in fp.readlines():\n idx = line.rfind('x', len(line) - 8, len(line))\n if idx >= 0:\n sum += float(line.rstrip()[idx + 1:len(line)])\n else:\n sum += 1\nprint(sum)\nprint(sum * 3)\n",
"step-3": "<mask token>\nsum = 0\nwith open('workRecord.txt') as fp:\n for line in fp.readlines():\n idx = line.rfind('x', len(line) - 8, len(line))\n if idx >= 0:\n sum += float(line.rstrip()[idx + 1:len(line)])\n else:\n sum += 1\nprint(sum)\nprint(sum * 3)\n",
"step-4": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 8 22:11:53 2020\n\n@author: Rick\n\"\"\"\nsum= 0;\nwith open('workRecord.txt') as fp:\n for line in fp.readlines():\n idx= line.rfind('x',len(line)-8,len(line))\n if idx>=0:\n sum+= float(line.rstrip()[idx+1:len(line)])\n else:\n sum+= 1\nprint(sum)\nprint(sum*3)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 5 02:39:55 2017
@author: sparsh
"""
"""
Crop Disease Classification Project for Code Fun Do 2017 - IIT Roorkee
"""
"""
File for predicting a test image.
"""
import os
os.environ['THEANO_FLAGS'] = "device=gpu1, floatX=float32"
import theano
import numpy as np
np.random.seed(1)
import pandas as pd
import h5py
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from PIL import Image
K.set_image_dim_ordering('th')
#Path to model weights file
weights_path = "E:\\Interesting\\Code Fun Do 2017\\vgg16_weights.h5"
top_model_weights_path = "E:\\Interesting\\Code Fun Do 2017\\bottleneck_fc_model.h5"
#Unknown Image Location
validation_data_dir = "E:\\Interesting\\Code Fun Do 2017\\Trial\\cercospora_leaf_spot_365.jpg"
#validation_data_dir = "E:\\Interesting\\Code Fun Do 2017\\Trial"
#input image dimensions
img_width = 200
img_height = 200
input_shape = (3, img_height, img_width)
#Model parameters
batch_size = 32
nb_classes = 4
nb_epoch = 3
nb_train_samples = 50
nb_validation_samples = 25
# build the VGG16 network
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load the weights of the VGG16 networks
# (trained on ImageNet, won the ILSVRC competition in 2014)
# note: when there is a complete match between your model definition
# and your weight savefile, you can simply call model.load_weights(filename)
assert os.path.exists(weights_path), "Model weights not found (see 'weights_path' variable in script)."
f = h5py.File(weights_path)
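# Copy weights layer by layer from the HDF5 file; the loop below breaks once the
# convolution-only model runs out of layers, so the saved fully-connected weights
# are skipped.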
for k in range(f.attrs['nb_layers']):
if k >= len(model.layers):
# we don't look at the last (fully-connected) layers in the savefile
break
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
model.layers[k].set_weights(weights)
f.close()
print("Model loaded.\n")
# build a classifier model to put on top of the convolutional model
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(nb_classes, activation='softmax'))
# note that it is necessary to start with a fully-trained
# classifier, including the top classifier,
# in order to successfully do fine-tuning
top_model.load_weights(top_model_weights_path)
# add the model on top of the convolutional base
model.add(top_model)
#print("DC.\n")
print("Final Model Assembled.\n")
#datagen = ImageDataGenerator(rescale=1./255)
#generator = datagen.flow_from_directory(
# validation_data_dir,
# target_size=(img_width, img_height),
# batch_size=32,
# class_mode=None,
# shuffle=False)
#bottleneck_features_validation = model.predict_generator(generator, nb_validation_samples)
#np.save(open('bottleneck_features_validation.npy', 'w'), bottleneck_features_validation)
#print("Testing features stored.\n")
#data = np.load(open('bottleneck_features_validation.npy'))
img = Image.open(validation_data_dir)
img.load()
#print("chutiya.\n")
data = np.asarray(img, dtype="int32")
#print("harami.\n")
print(data.shape)
data = data.reshape(1, 3, 200, 200)
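# NOTE: np.asarray on a PIL RGB image is (height, width, channels); for a
# channels-first model a transpose such as data.transpose(2, 0, 1) is usually
# needed rather than a plain reshape.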
print("Prediction begins.\n")
output = model.predict_classes(data, batch_size=32, verbose=1)
print(output)
|
normal
|
{
"blob_id": "96210942b01c510300120913bed1bc6d497a39a9",
"index": 1945,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(1)\n<mask token>\nK.set_image_dim_ordering('th')\n<mask token>\nmodel.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))\nmodel.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nassert os.path.exists(weights_path\n ), \"Model weights not found (see 'weights_path' variable in script).\"\n<mask token>\nfor k in range(f.attrs['nb_layers']):\n if k >= len(model.layers):\n break\n g = f['layer_{}'.format(k)]\n weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]\n model.layers[k].set_weights(weights)\nf.close()\nprint('Model loaded.\\n')\n<mask token>\ntop_model.add(Flatten(input_shape=model.output_shape[1:]))\ntop_model.add(Dense(256, activation='relu'))\ntop_model.add(Dropout(0.5))\ntop_model.add(Dense(nb_classes, activation='softmax'))\ntop_model.load_weights(top_model_weights_path)\nmodel.add(top_model)\nprint('Final Model Assembled.\\n')\n<mask token>\nimg.load()\n<mask token>\nprint(data.shape)\n<mask token>\nprint('Prediction begins.\\n')\n<mask token>\nprint(output)\n",
"step-3": "<mask token>\nos.environ['THEANO_FLAGS'] = 'device=gpu1, floatX=float32'\n<mask token>\nnp.random.seed(1)\n<mask token>\nK.set_image_dim_ordering('th')\nweights_path = 'E:\\\\Interesting\\\\Code Fun Do 2017\\\\vgg16_weights.h5'\ntop_model_weights_path = (\n 'E:\\\\Interesting\\\\Code Fun Do 2017\\\\bottleneck_fc_model.h5')\nvalidation_data_dir = (\n 'E:\\\\Interesting\\\\Code Fun Do 2017\\\\Trial\\\\cercospora_leaf_spot_365.jpg')\nimg_width = 200\nimg_height = 200\ninput_shape = 3, img_height, img_width\nbatch_size = 32\nnb_classes = 4\nnb_epoch = 3\nnb_train_samples = 50\nnb_validation_samples = 25\nmodel = Sequential()\nmodel.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))\nmodel.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nassert os.path.exists(weights_path\n ), \"Model weights not found (see 'weights_path' variable in script).\"\nf = h5py.File(weights_path)\nfor k in range(f.attrs['nb_layers']):\n if k >= len(model.layers):\n break\n g = f['layer_{}'.format(k)]\n weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]\n model.layers[k].set_weights(weights)\nf.close()\nprint('Model loaded.\\n')\ntop_model = Sequential()\ntop_model.add(Flatten(input_shape=model.output_shape[1:]))\ntop_model.add(Dense(256, activation='relu'))\ntop_model.add(Dropout(0.5))\ntop_model.add(Dense(nb_classes, activation='softmax'))\ntop_model.load_weights(top_model_weights_path)\nmodel.add(top_model)\nprint('Final Model Assembled.\\n')\nimg = Image.open(validation_data_dir)\nimg.load()\ndata = np.asarray(img, dtype='int32')\nprint(data.shape)\ndata = data.reshape(1, 3, 200, 200)\nprint('Prediction begins.\\n')\noutput = model.predict_classes(data, batch_size=32, verbose=1)\nprint(output)\n",
"step-4": "<mask token>\nimport os\nos.environ['THEANO_FLAGS'] = 'device=gpu1, floatX=float32'\nimport theano\nimport numpy as np\nnp.random.seed(1)\nimport pandas as pd\nimport h5py\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\nfrom PIL import Image\nK.set_image_dim_ordering('th')\nweights_path = 'E:\\\\Interesting\\\\Code Fun Do 2017\\\\vgg16_weights.h5'\ntop_model_weights_path = (\n 'E:\\\\Interesting\\\\Code Fun Do 2017\\\\bottleneck_fc_model.h5')\nvalidation_data_dir = (\n 'E:\\\\Interesting\\\\Code Fun Do 2017\\\\Trial\\\\cercospora_leaf_spot_365.jpg')\nimg_width = 200\nimg_height = 200\ninput_shape = 3, img_height, img_width\nbatch_size = 32\nnb_classes = 4\nnb_epoch = 3\nnb_train_samples = 50\nnb_validation_samples = 25\nmodel = Sequential()\nmodel.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))\nmodel.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\nassert os.path.exists(weights_path\n ), \"Model weights not found (see 'weights_path' variable in script).\"\nf = h5py.File(weights_path)\nfor k in range(f.attrs['nb_layers']):\n if k >= len(model.layers):\n break\n g = f['layer_{}'.format(k)]\n weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]\n model.layers[k].set_weights(weights)\nf.close()\nprint('Model loaded.\\n')\ntop_model = Sequential()\ntop_model.add(Flatten(input_shape=model.output_shape[1:]))\ntop_model.add(Dense(256, activation='relu'))\ntop_model.add(Dropout(0.5))\ntop_model.add(Dense(nb_classes, activation='softmax'))\ntop_model.load_weights(top_model_weights_path)\nmodel.add(top_model)\nprint('Final Model Assembled.\\n')\nimg = Image.open(validation_data_dir)\nimg.load()\ndata = np.asarray(img, dtype='int32')\nprint(data.shape)\ndata = data.reshape(1, 3, 200, 200)\nprint('Prediction begins.\\n')\noutput = 
model.predict_classes(data, batch_size=32, verbose=1)\nprint(output)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 5 02:39:55 2017\n\n@author: sparsh\n\"\"\"\n\n\"\"\"\nCrop Disease Classification Project for Code Fun Do 2017 - IIT Roorkee\n\"\"\"\n\n\"\"\"\nFile for predicting a test image.\n\"\"\"\n\nimport os\nos.environ['THEANO_FLAGS'] = \"device=gpu1, floatX=float32\"\nimport theano\nimport numpy as np\nnp.random.seed(1)\n\nimport pandas as pd\nimport h5py\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\nfrom PIL import Image\nK.set_image_dim_ordering('th')\n\n#Path to model weights file\nweights_path = \"E:\\\\Interesting\\\\Code Fun Do 2017\\\\vgg16_weights.h5\"\ntop_model_weights_path = \"E:\\\\Interesting\\\\Code Fun Do 2017\\\\bottleneck_fc_model.h5\"\n\n#Unknown Image Location\nvalidation_data_dir = \"E:\\\\Interesting\\\\Code Fun Do 2017\\\\Trial\\\\cercospora_leaf_spot_365.jpg\"\n#validation_data_dir = \"E:\\\\Interesting\\\\Code Fun Do 2017\\\\Trial\"\n\n#input image dimensions\nimg_width = 200\nimg_height = 200\ninput_shape = (3, img_height, img_width)\n\n#Model parameters\nbatch_size = 32\nnb_classes = 4\nnb_epoch = 3\nnb_train_samples = 50\nnb_validation_samples = 25\n\n# build the VGG16 network\nmodel = Sequential()\nmodel.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))\n\nmodel.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n# load the weights of the VGG16 networks\n# (trained on ImageNet, won the ILSVRC competition in 2014)\n# note: when there is a complete match between your model definition\n# and your weight savefile, you can simply call model.load_weights(filename)\nassert os.path.exists(weights_path), \"Model weights not found (see 'weights_path' variable in script).\"\nf = h5py.File(weights_path)\nfor k in range(f.attrs['nb_layers']):\n if k >= len(model.layers):\n # we don't 
look at the last (fully-connected) layers in the savefile\n break\n g = f['layer_{}'.format(k)]\n weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]\n model.layers[k].set_weights(weights)\nf.close()\nprint(\"Model loaded.\\n\")\n\n# build a classifier model to put on top of the convolutional model\ntop_model = Sequential()\ntop_model.add(Flatten(input_shape=model.output_shape[1:]))\ntop_model.add(Dense(256, activation='relu'))\ntop_model.add(Dropout(0.5))\ntop_model.add(Dense(nb_classes, activation='softmax'))\n\n# note that it is necessary to start with a fully-trained\n# classifier, including the top classifier,\n# in order to successfully do fine-tuning\ntop_model.load_weights(top_model_weights_path)\n\n# add the model on top of the convolutional base\nmodel.add(top_model)\n#print(\"DC.\\n\")\nprint(\"Final Model Assembled.\\n\")\n\n#datagen = ImageDataGenerator(rescale=1./255)\n#generator = datagen.flow_from_directory(\n# validation_data_dir,\n# target_size=(img_width, img_height),\n# batch_size=32,\n# class_mode=None,\n# shuffle=False)\n#bottleneck_features_validation = model.predict_generator(generator, nb_validation_samples)\n#np.save(open('bottleneck_features_validation.npy', 'w'), bottleneck_features_validation)\n#print(\"Testing features stored.\\n\")\n\n#data = np.load(open('bottleneck_features_validation.npy'))\nimg = Image.open(validation_data_dir)\n\nimg.load()\n#print(\"chutiya.\\n\")\ndata = np.asarray(img, dtype=\"int32\")\n#print(\"harami.\\n\")\nprint(data.shape)\ndata = data.reshape(1, 3, 200, 200)\nprint(\"Prediction begins.\\n\")\noutput = model.predict_classes(data, batch_size=32, verbose=1)\nprint(output)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
try:
    fh = open("testfile", "w")
    fh.write("test")
except IOError:
    print("Error: file not found")
else:
    print("success")
    fh.close()
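# A minimal alternative sketch of the same flow using a context manager, which
# closes the file automatically even if the write raises:
try:
    with open("testfile", "w") as fh:
        fh.write("test")
except IOError:
    print("Error: file not found")
else:
    print("success")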
|
normal
|
{
"blob_id": "15e0b396a4726f98ce5ae2620338d7d48985707e",
"index": 9533,
"step-1": "<mask token>\n",
"step-2": "try:\n fh = open('testfile', 'w')\n fh.write('test')\nexcept IOError:\n print('Error:没有找到文件')\nelse:\n print('sucess')\n fh.close()\n",
"step-3": "try:\r\n\tfh = open(\"testfile\",\"w\")\r\n\tfh.write(\"test\")\r\nexcept IOError:\r\n\tprint(\"Error:没有找到文件\")\r\nelse:\r\n\tprint(\"sucess\")\r\n\tfh.close()\r\n\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2019 Linh Pham
# wwdtm_panelistvspanelist is released under the terms of the Apache License 2.0
"""WWDTM Panelist Appearance Report Generator"""
import argparse
from collections import OrderedDict
from datetime import datetime
import json
import os
import shutil
from typing import List, Dict, Text
import mysql.connector
import pytz
from jinja2 import Environment, FileSystemLoader
def retrieve_panelist_appearance_counts(panelist_id: int,
database_connection: mysql.connector.connect
) -> List[Dict]:
"""Retrieve yearly apperance count for the requested panelist ID"""
cursor = database_connection.cursor()
query = ("SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"JOIN ww_panelists p ON p.panelistid = pm.panelistid "
"WHERE pm.panelistid = %s AND s.bestof = 0 "
"AND s.repeatshowid IS NULL "
"GROUP BY p.panelist, YEAR(s.showdate) "
"ORDER BY p.panelist ASC, YEAR(s.showdate) ASC")
cursor.execute(query, (panelist_id, ))
result = cursor.fetchall()
if not result:
return None
appearances = OrderedDict()
total_appearances = 0
for row in result:
appearances[row[0]] = row[1]
total_appearances += row[1]
appearances["total"] = total_appearances
return appearances
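# Illustrative shape of the value returned above (hypothetical counts, not real
# data): an OrderedDict mapping each year to an appearance count, plus a
# "total" key, e.g. OrderedDict([(1998, 2), (1999, 11), ("total", 13)])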
def retrieve_all_panelist_appearance_counts(database_connection: mysql.connector.connect
) -> List[Dict]:
"""Retrieve all appearance counts for all panelists from the
database"""
cursor = database_connection.cursor()
query = ("SELECT DISTINCT p.panelistid, p.panelist "
"FROM ww_showpnlmap pm "
"JOIN ww_panelists p ON p.panelistid = pm.panelistid "
"JOIN ww_shows s ON s.showid = pm.showid "
"WHERE s.bestof = 0 AND s.repeatshowid IS NULL "
"ORDER BY p.panelist ASC")
cursor.execute(query)
result = cursor.fetchall()
if not result:
return None
panelists = []
for row in result:
panelist = {}
panelist_id = row[0]
panelist["name"] = row[1]
appearances = retrieve_panelist_appearance_counts(panelist_id=panelist_id,
database_connection=database_connection)
panelist["appearances"] = appearances
panelists.append(panelist)
return panelists
def retrieve_all_years(database_connection: mysql.connector.connect) -> List[int]:
"""Retrieve a list of all available show years"""
cursor = database_connection.cursor()
query = ("SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s "
"ORDER BY YEAR(s.showdate) ASC")
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
if not result:
return None
years = []
for row in result:
years.append(row[0])
return years
def load_config():
"""Load configuration values from configuration file and from
options passed into script execution"""
# Read in configuration file for default values
with open("config.json", "r") as config_file:
config_dict = json.load(config_file)
# Read in options passed in that override values from the config.json file
parser = argparse.ArgumentParser()
parser.add_argument("--ga-property-code",
dest="ga_property_code",
type=str,
help="Google Analytics Property Code (default: %(default)s)",
default=config_dict["report"]["ga_property_code"])
parser.add_argument("--css-directory",
dest="css_directory",
type=str,
help="Directory where the base CSS stylesheet file is stored "
"(default: %(default)s)",
default=config_dict["report"]["css_directory"])
parser.add_argument("--css-filename",
dest="css_filename",
type=str,
help="File name of the report CSS stylesheet file "
"(default: %(default)s)",
default=config_dict["report"]["css_filename"])
parser.add_argument("--output-directory",
dest="output_directory",
type=str,
help="Directory where the generated report will be saved "
"(default: %(default)s)",
default=config_dict["report"]["output_directory"])
parser.add_argument("--output-filename",
dest="output_filename",
type=str,
help="File name of the generated report will be saved "
"(default: %(default)s)",
default=config_dict["report"]["output_filename"])
args = parser.parse_args()
# Override the values from the config.json file if values were set via argparse
if args.ga_property_code != config_dict["report"]["ga_property_code"]:
config_dict["report"]["ga_property_code"] = args.ga_property_code
if args.css_directory != config_dict["report"]["css_directory"]:
config_dict["report"]["css_directory"] = args.css_directory
if args.css_filename != config_dict["report"]["css_filename"]:
config_dict["report"]["css_filename"] = args.css_filename
if args.output_directory != config_dict["report"]["output_directory"]:
config_dict["report"]["output_directory"] = args.output_directory
if args.output_filename != config_dict["report"]["output_filename"]:
config_dict["report"]["output_filename"] = args.output_filename
return config_dict
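# Rough sketch of the config.json layout load_config() expects; the "report"
# keys are the ones read above, while the "database" keys are whatever
# mysql.connector.connect() accepts (host/user/password/database shown here as
# an illustrative guess):
#
# {
#   "database": {"host": "localhost", "user": "wwdtm", "password": "...", "database": "wwdtm"},
#   "report": {
#     "ga_property_code": "UA-XXXXXXX-X",
#     "css_directory": "css",
#     "css_filename": "style.css",
#     "output_directory": "output",
#     "output_filename": "index.html"
#   }
# }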
def render_report(show_years: List[int],
panelists: List[Dict],
report_settings: Dict
) -> Text:
"""Render appearances report using Jinja2"""
# Setup Jinja2 Template
template_loader = FileSystemLoader("./template")
template_env = Environment(loader=template_loader,
trim_blocks=True,
lstrip_blocks=True)
template_file = "report.tmpl.html"
template = template_env.get_template(template_file)
# Generate timestamp to include in page footer
time_zone = pytz.timezone("America/Los_Angeles")
rendered_date_time = datetime.now(time_zone)
# Build dictionary to pass into template renderer
render_data = {}
render_data["show_years"] = show_years
render_data["panelists"] = panelists
render_data["settings"] = report_settings
render_data["rendered_at"] = rendered_date_time.strftime("%A, %B %d, %Y %H:%M:%S %Z")
# Render the report and write out to output directory
report = template.render(render_data=render_data)
return report
def generate_output_files(rendered_report: Text,
report_settings: Dict) -> None:
"""Writes out the generated report file to file in the output directory
and copies the base CSS file to the same directory"""
css_path = os.path.join(report_settings["css_directory"],
report_settings["css_filename"])
output_path = os.path.join(report_settings["output_directory"],
report_settings["output_filename"])
# Create the output directory if it does not exist
if not os.path.isdir(report_settings["output_directory"]):
os.mkdir(report_settings["output_directory"])
# Write out the generated report
with open(output_path, "w") as output_file:
if output_file.writable():
output_file.write(rendered_report)
else:
print("Error: {} is not writable".format(output_path))
# Copy CSS file into output directory
shutil.copy2(css_path, report_settings["output_directory"])
return
def main():
"""Bootstrap database connection, retrieve panelist appearance data,
generate the report and create an output bundle"""
app_config = load_config()
database_connection = mysql.connector.connect(**app_config["database"])
panelists = retrieve_all_panelist_appearance_counts(database_connection)
show_years = retrieve_all_years(database_connection)
rendered_report = render_report(show_years=show_years,
panelists=panelists,
report_settings=app_config["report"])
generate_output_files(rendered_report=rendered_report,
report_settings=app_config["report"])
# Only run if executed as a script and not imported
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "2d20bac0f11fa724b2d0a2e0676e5b9ce7682777",
"index": 7387,
"step-1": "<mask token>\n\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) ->List[int\n ]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s ORDER BY YEAR(s.showdate) ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n if not result:\n return None\n years = []\n for row in result:\n years.append(row[0])\n return years\n\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n with open('config.json', 'r') as config_file:\n config_dict = json.load(config_file)\n parser = argparse.ArgumentParser()\n parser.add_argument('--ga-property-code', dest='ga_property_code', type\n =str, help='Google Analytics Property Code (default: %(default)s)',\n default=config_dict['report']['ga_property_code'])\n parser.add_argument('--css-directory', dest='css_directory', type=str,\n help=\n 'Directory where the base CSS stylesheet file is stored (default: %(default)s)'\n , default=config_dict['report']['css_directory'])\n parser.add_argument('--css-filename', dest='css_filename', type=str,\n help=\n 'File name of the report CSS stylesheet file (default: %(default)s)',\n default=config_dict['report']['css_filename'])\n parser.add_argument('--output-directory', dest='output_directory', type\n =str, help=\n 'Directory where the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_directory'])\n parser.add_argument('--output-filename', dest='output_filename', type=\n str, help=\n 'File name of the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_filename'])\n args = parser.parse_args()\n if args.ga_property_code != config_dict['report']['ga_property_code']:\n config_dict['report']['ga_property_code'] = args.ga_property_code\n if args.css_directory != config_dict['report']['css_directory']:\n config_dict['report']['css_directory'] = args.css_directory\n if args.css_filename != config_dict['report']['css_filename']:\n config_dict['report']['css_filename'] = args.css_filename\n if args.output_directory != config_dict['report']['output_directory']:\n config_dict['report']['output_directory'] = args.output_directory\n if args.output_filename != config_dict['report']['output_filename']:\n config_dict['report']['output_filename'] = args.output_filename\n return config_dict\n\n\ndef render_report(show_years: List[int], panelists: List[Dict],\n report_settings: Dict) ->Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n template_loader = FileSystemLoader('./template')\n template_env = Environment(loader=template_loader, trim_blocks=True,\n lstrip_blocks=True)\n template_file = 'report.tmpl.html'\n template = template_env.get_template(template_file)\n time_zone = pytz.timezone('America/Los_Angeles')\n rendered_date_time = datetime.now(time_zone)\n render_data = {}\n render_data['show_years'] = show_years\n render_data['panelists'] = panelists\n render_data['settings'] = report_settings\n render_data['rendered_at'] = rendered_date_time.strftime(\n '%A, %B %d, %Y %H:%M:%S %Z')\n report = template.render(render_data=render_data)\n return report\n\n\ndef generate_output_files(rendered_report: Text, report_settings: Dict) ->None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n css_path = 
os.path.join(report_settings['css_directory'],\n report_settings['css_filename'])\n output_path = os.path.join(report_settings['output_directory'],\n report_settings['output_filename'])\n if not os.path.isdir(report_settings['output_directory']):\n os.mkdir(report_settings['output_directory'])\n with open(output_path, 'w') as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print('Error: {} is not writable'.format(output_path))\n shutil.copy2(css_path, report_settings['output_directory'])\n return\n\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output bundle\"\"\"\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config['database'])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n rendered_report = render_report(show_years=show_years, panelists=\n panelists, report_settings=app_config['report'])\n generate_output_files(rendered_report=rendered_report, report_settings=\n app_config['report'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef retrieve_all_panelist_appearance_counts(database_connection: mysql.\n connector.connect) ->List[Dict]:\n \"\"\"Retrieve all appearance counts for all panelists from the\n database\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT p.panelistid, p.panelist FROM ww_showpnlmap pm JOIN ww_panelists p ON p.panelistid = pm.panelistid JOIN ww_shows s ON s.showid = pm.showid WHERE s.bestof = 0 AND s.repeatshowid IS NULL ORDER BY p.panelist ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n if not result:\n return None\n panelists = []\n for row in result:\n panelist = {}\n panelist_id = row[0]\n panelist['name'] = row[1]\n appearances = retrieve_panelist_appearance_counts(panelist_id=\n panelist_id, database_connection=database_connection)\n panelist['appearances'] = appearances\n panelists.append(panelist)\n return panelists\n\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) ->List[int\n ]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s ORDER BY YEAR(s.showdate) ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n if not result:\n return None\n years = []\n for row in result:\n years.append(row[0])\n return years\n\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n with open('config.json', 'r') as config_file:\n config_dict = json.load(config_file)\n parser = argparse.ArgumentParser()\n parser.add_argument('--ga-property-code', dest='ga_property_code', type\n =str, help='Google Analytics Property Code (default: %(default)s)',\n default=config_dict['report']['ga_property_code'])\n parser.add_argument('--css-directory', dest='css_directory', type=str,\n help=\n 'Directory where the base CSS stylesheet file is stored (default: %(default)s)'\n , default=config_dict['report']['css_directory'])\n parser.add_argument('--css-filename', dest='css_filename', type=str,\n help=\n 'File name of the report CSS stylesheet file (default: %(default)s)',\n default=config_dict['report']['css_filename'])\n parser.add_argument('--output-directory', dest='output_directory', type\n =str, help=\n 'Directory where the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_directory'])\n parser.add_argument('--output-filename', dest='output_filename', type=\n str, help=\n 'File name of the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_filename'])\n args = parser.parse_args()\n if args.ga_property_code != config_dict['report']['ga_property_code']:\n config_dict['report']['ga_property_code'] = args.ga_property_code\n if args.css_directory != config_dict['report']['css_directory']:\n config_dict['report']['css_directory'] = args.css_directory\n if args.css_filename != config_dict['report']['css_filename']:\n config_dict['report']['css_filename'] = args.css_filename\n if args.output_directory != config_dict['report']['output_directory']:\n config_dict['report']['output_directory'] = args.output_directory\n if args.output_filename != config_dict['report']['output_filename']:\n config_dict['report']['output_filename'] = args.output_filename\n return config_dict\n\n\ndef render_report(show_years: List[int], panelists: List[Dict],\n report_settings: Dict) ->Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n 
template_loader = FileSystemLoader('./template')\n template_env = Environment(loader=template_loader, trim_blocks=True,\n lstrip_blocks=True)\n template_file = 'report.tmpl.html'\n template = template_env.get_template(template_file)\n time_zone = pytz.timezone('America/Los_Angeles')\n rendered_date_time = datetime.now(time_zone)\n render_data = {}\n render_data['show_years'] = show_years\n render_data['panelists'] = panelists\n render_data['settings'] = report_settings\n render_data['rendered_at'] = rendered_date_time.strftime(\n '%A, %B %d, %Y %H:%M:%S %Z')\n report = template.render(render_data=render_data)\n return report\n\n\ndef generate_output_files(rendered_report: Text, report_settings: Dict) ->None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n css_path = os.path.join(report_settings['css_directory'],\n report_settings['css_filename'])\n output_path = os.path.join(report_settings['output_directory'],\n report_settings['output_filename'])\n if not os.path.isdir(report_settings['output_directory']):\n os.mkdir(report_settings['output_directory'])\n with open(output_path, 'w') as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print('Error: {} is not writable'.format(output_path))\n shutil.copy2(css_path, report_settings['output_directory'])\n return\n\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output bundle\"\"\"\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config['database'])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n rendered_report = render_report(show_years=show_years, panelists=\n panelists, report_settings=app_config['report'])\n generate_output_files(rendered_report=rendered_report, report_settings=\n app_config['report'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef retrieve_panelist_appearance_counts(panelist_id: int,\n database_connection: mysql.connector.connect) ->List[Dict]:\n \"\"\"Retrieve yearly apperance count for the requested panelist ID\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count FROM ww_showpnlmap pm JOIN ww_shows s ON s.showid = pm.showid JOIN ww_panelists p ON p.panelistid = pm.panelistid WHERE pm.panelistid = %s AND s.bestof = 0 AND s.repeatshowid IS NULL GROUP BY p.panelist, YEAR(s.showdate) ORDER BY p.panelist ASC, YEAR(s.showdate) ASC'\n )\n cursor.execute(query, (panelist_id,))\n result = cursor.fetchall()\n if not result:\n return None\n appearances = OrderedDict()\n total_appearances = 0\n for row in result:\n appearances[row[0]] = row[1]\n total_appearances += row[1]\n appearances['total'] = total_appearances\n return appearances\n\n\ndef retrieve_all_panelist_appearance_counts(database_connection: mysql.\n connector.connect) ->List[Dict]:\n \"\"\"Retrieve all appearance counts for all panelists from the\n database\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT p.panelistid, p.panelist FROM ww_showpnlmap pm JOIN ww_panelists p ON p.panelistid = pm.panelistid JOIN ww_shows s ON s.showid = pm.showid WHERE s.bestof = 0 AND s.repeatshowid IS NULL ORDER BY p.panelist ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n if not result:\n return None\n panelists = []\n for row in result:\n panelist = {}\n panelist_id = row[0]\n panelist['name'] = row[1]\n appearances = retrieve_panelist_appearance_counts(panelist_id=\n panelist_id, database_connection=database_connection)\n panelist['appearances'] = appearances\n panelists.append(panelist)\n return panelists\n\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) ->List[int\n ]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s ORDER BY YEAR(s.showdate) ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n if not result:\n return None\n years = []\n for row in result:\n years.append(row[0])\n return years\n\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n with open('config.json', 'r') as config_file:\n config_dict = json.load(config_file)\n parser = argparse.ArgumentParser()\n parser.add_argument('--ga-property-code', dest='ga_property_code', type\n =str, help='Google Analytics Property Code (default: %(default)s)',\n default=config_dict['report']['ga_property_code'])\n parser.add_argument('--css-directory', dest='css_directory', type=str,\n help=\n 'Directory where the base CSS stylesheet file is stored (default: %(default)s)'\n , default=config_dict['report']['css_directory'])\n parser.add_argument('--css-filename', dest='css_filename', type=str,\n help=\n 'File name of the report CSS stylesheet file (default: %(default)s)',\n default=config_dict['report']['css_filename'])\n parser.add_argument('--output-directory', dest='output_directory', type\n =str, help=\n 'Directory where the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_directory'])\n parser.add_argument('--output-filename', dest='output_filename', type=\n str, help=\n 'File name of the generated report will be saved (default: %(default)s)'\n , 
default=config_dict['report']['output_filename'])\n args = parser.parse_args()\n if args.ga_property_code != config_dict['report']['ga_property_code']:\n config_dict['report']['ga_property_code'] = args.ga_property_code\n if args.css_directory != config_dict['report']['css_directory']:\n config_dict['report']['css_directory'] = args.css_directory\n if args.css_filename != config_dict['report']['css_filename']:\n config_dict['report']['css_filename'] = args.css_filename\n if args.output_directory != config_dict['report']['output_directory']:\n config_dict['report']['output_directory'] = args.output_directory\n if args.output_filename != config_dict['report']['output_filename']:\n config_dict['report']['output_filename'] = args.output_filename\n return config_dict\n\n\ndef render_report(show_years: List[int], panelists: List[Dict],\n report_settings: Dict) ->Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n template_loader = FileSystemLoader('./template')\n template_env = Environment(loader=template_loader, trim_blocks=True,\n lstrip_blocks=True)\n template_file = 'report.tmpl.html'\n template = template_env.get_template(template_file)\n time_zone = pytz.timezone('America/Los_Angeles')\n rendered_date_time = datetime.now(time_zone)\n render_data = {}\n render_data['show_years'] = show_years\n render_data['panelists'] = panelists\n render_data['settings'] = report_settings\n render_data['rendered_at'] = rendered_date_time.strftime(\n '%A, %B %d, %Y %H:%M:%S %Z')\n report = template.render(render_data=render_data)\n return report\n\n\ndef generate_output_files(rendered_report: Text, report_settings: Dict) ->None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n css_path = os.path.join(report_settings['css_directory'],\n report_settings['css_filename'])\n output_path = os.path.join(report_settings['output_directory'],\n report_settings['output_filename'])\n if not os.path.isdir(report_settings['output_directory']):\n os.mkdir(report_settings['output_directory'])\n with open(output_path, 'w') as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print('Error: {} is not writable'.format(output_path))\n shutil.copy2(css_path, report_settings['output_directory'])\n return\n\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output bundle\"\"\"\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config['database'])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n rendered_report = render_report(show_years=show_years, panelists=\n panelists, report_settings=app_config['report'])\n generate_output_files(rendered_report=rendered_report, report_settings=\n app_config['report'])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef retrieve_panelist_appearance_counts(panelist_id: int,\n database_connection: mysql.connector.connect) ->List[Dict]:\n \"\"\"Retrieve yearly apperance count for the requested panelist ID\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count FROM ww_showpnlmap pm JOIN ww_shows s ON s.showid = pm.showid JOIN ww_panelists p ON p.panelistid = pm.panelistid WHERE pm.panelistid = %s AND s.bestof = 0 AND s.repeatshowid IS NULL GROUP BY p.panelist, YEAR(s.showdate) ORDER BY p.panelist ASC, YEAR(s.showdate) ASC'\n )\n cursor.execute(query, (panelist_id,))\n result = cursor.fetchall()\n if not result:\n return None\n appearances = OrderedDict()\n total_appearances = 0\n for row in result:\n appearances[row[0]] = row[1]\n total_appearances += row[1]\n appearances['total'] = total_appearances\n return appearances\n\n\ndef retrieve_all_panelist_appearance_counts(database_connection: mysql.\n connector.connect) ->List[Dict]:\n \"\"\"Retrieve all appearance counts for all panelists from the\n database\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT p.panelistid, p.panelist FROM ww_showpnlmap pm JOIN ww_panelists p ON p.panelistid = pm.panelistid JOIN ww_shows s ON s.showid = pm.showid WHERE s.bestof = 0 AND s.repeatshowid IS NULL ORDER BY p.panelist ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n if not result:\n return None\n panelists = []\n for row in result:\n panelist = {}\n panelist_id = row[0]\n panelist['name'] = row[1]\n appearances = retrieve_panelist_appearance_counts(panelist_id=\n panelist_id, database_connection=database_connection)\n panelist['appearances'] = appearances\n panelists.append(panelist)\n return panelists\n\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) ->List[int\n ]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s ORDER BY YEAR(s.showdate) ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n if not result:\n return None\n years = []\n for row in result:\n years.append(row[0])\n return years\n\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n with open('config.json', 'r') as config_file:\n config_dict = json.load(config_file)\n parser = argparse.ArgumentParser()\n parser.add_argument('--ga-property-code', dest='ga_property_code', type\n =str, help='Google Analytics Property Code (default: %(default)s)',\n default=config_dict['report']['ga_property_code'])\n parser.add_argument('--css-directory', dest='css_directory', type=str,\n help=\n 'Directory where the base CSS stylesheet file is stored (default: %(default)s)'\n , default=config_dict['report']['css_directory'])\n parser.add_argument('--css-filename', dest='css_filename', type=str,\n help=\n 'File name of the report CSS stylesheet file (default: %(default)s)',\n default=config_dict['report']['css_filename'])\n parser.add_argument('--output-directory', dest='output_directory', type\n =str, help=\n 'Directory where the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_directory'])\n parser.add_argument('--output-filename', dest='output_filename', type=\n str, help=\n 'File name of the generated report will be saved (default: %(default)s)'\n , 
default=config_dict['report']['output_filename'])\n args = parser.parse_args()\n if args.ga_property_code != config_dict['report']['ga_property_code']:\n config_dict['report']['ga_property_code'] = args.ga_property_code\n if args.css_directory != config_dict['report']['css_directory']:\n config_dict['report']['css_directory'] = args.css_directory\n if args.css_filename != config_dict['report']['css_filename']:\n config_dict['report']['css_filename'] = args.css_filename\n if args.output_directory != config_dict['report']['output_directory']:\n config_dict['report']['output_directory'] = args.output_directory\n if args.output_filename != config_dict['report']['output_filename']:\n config_dict['report']['output_filename'] = args.output_filename\n return config_dict\n\n\ndef render_report(show_years: List[int], panelists: List[Dict],\n report_settings: Dict) ->Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n template_loader = FileSystemLoader('./template')\n template_env = Environment(loader=template_loader, trim_blocks=True,\n lstrip_blocks=True)\n template_file = 'report.tmpl.html'\n template = template_env.get_template(template_file)\n time_zone = pytz.timezone('America/Los_Angeles')\n rendered_date_time = datetime.now(time_zone)\n render_data = {}\n render_data['show_years'] = show_years\n render_data['panelists'] = panelists\n render_data['settings'] = report_settings\n render_data['rendered_at'] = rendered_date_time.strftime(\n '%A, %B %d, %Y %H:%M:%S %Z')\n report = template.render(render_data=render_data)\n return report\n\n\ndef generate_output_files(rendered_report: Text, report_settings: Dict) ->None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n css_path = os.path.join(report_settings['css_directory'],\n report_settings['css_filename'])\n output_path = os.path.join(report_settings['output_directory'],\n report_settings['output_filename'])\n if not os.path.isdir(report_settings['output_directory']):\n os.mkdir(report_settings['output_directory'])\n with open(output_path, 'w') as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print('Error: {} is not writable'.format(output_path))\n shutil.copy2(css_path, report_settings['output_directory'])\n return\n\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output bundle\"\"\"\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config['database'])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n rendered_report = render_report(show_years=show_years, panelists=\n panelists, report_settings=app_config['report'])\n generate_output_files(rendered_report=rendered_report, report_settings=\n app_config['report'])\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# -*- coding: utf-8 -*-\n# Copyright (c) 2018-2019 Linh Pham\n# wwdtm_panelistvspanelist is relased under the terms of the Apache License 2.0\n\"\"\"WWDTM Panelist Appearance Report Generator\"\"\"\n\nimport argparse\nfrom collections import OrderedDict\nfrom datetime import datetime\nimport json\nimport os\nimport shutil\nfrom typing import List, Dict, Text\nimport mysql.connector\nimport pytz\n\nfrom jinja2 import Environment, FileSystemLoader\n\ndef retrieve_panelist_appearance_counts(panelist_id: int,\n database_connection: mysql.connector.connect\n ) -> List[Dict]:\n \"\"\"Retrieve yearly apperance count for the requested panelist ID\"\"\"\n\n cursor = database_connection.cursor()\n query = (\"SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count \"\n \"FROM ww_showpnlmap pm \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"JOIN ww_panelists p ON p.panelistid = pm.panelistid \"\n \"WHERE pm.panelistid = %s AND s.bestof = 0 \"\n \"AND s.repeatshowid IS NULL \"\n \"GROUP BY p.panelist, YEAR(s.showdate) \"\n \"ORDER BY p.panelist ASC, YEAR(s.showdate) ASC\")\n cursor.execute(query, (panelist_id, ))\n result = cursor.fetchall()\n\n if not result:\n return None\n\n appearances = OrderedDict()\n total_appearances = 0\n for row in result:\n appearances[row[0]] = row[1]\n total_appearances += row[1]\n\n appearances[\"total\"] = total_appearances\n return appearances\n\ndef retrieve_all_panelist_appearance_counts(database_connection: mysql.connector.connect\n ) -> List[Dict]:\n \"\"\"Retrieve all appearance counts for all panelists from the\n database\"\"\"\n\n cursor = database_connection.cursor()\n query = (\"SELECT DISTINCT p.panelistid, p.panelist \"\n \"FROM ww_showpnlmap pm \"\n \"JOIN ww_panelists p ON p.panelistid = pm.panelistid \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"WHERE s.bestof = 0 AND s.repeatshowid IS NULL \"\n \"ORDER BY p.panelist ASC\")\n cursor.execute(query)\n result = cursor.fetchall()\n\n if not result:\n return None\n\n panelists = []\n for row in result:\n panelist = {}\n panelist_id = row[0]\n panelist[\"name\"] = row[1]\n appearances = retrieve_panelist_appearance_counts(panelist_id=panelist_id,\n database_connection=database_connection)\n panelist[\"appearances\"] = appearances\n panelists.append(panelist)\n\n return panelists\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) -> List[int]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\"SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s \"\n \"ORDER BY YEAR(s.showdate) ASC\")\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n\n if not result:\n return None\n\n years = []\n for row in result:\n years.append(row[0])\n\n return years\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n\n # Read in configuration file for default values\n with open(\"config.json\", \"r\") as config_file:\n config_dict = json.load(config_file)\n\n # Read in options passed in that override values from the config.json file\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ga-property-code\",\n dest=\"ga_property_code\",\n type=str,\n help=\"Google Analytics Property Code (default: %(default)s)\",\n default=config_dict[\"report\"][\"ga_property_code\"])\n parser.add_argument(\"--css-directory\",\n dest=\"css_directory\",\n type=str,\n help=\"Directory where the base CSS stylesheet file is stored \"\n 
\"(default: %(default)s)\",\n default=config_dict[\"report\"][\"css_directory\"])\n parser.add_argument(\"--css-filename\",\n dest=\"css_filename\",\n type=str,\n help=\"File name of the report CSS stylesheet file \"\n \"(default: %(default)s)\",\n default=config_dict[\"report\"][\"css_filename\"])\n parser.add_argument(\"--output-directory\",\n dest=\"output_directory\",\n type=str,\n help=\"Directory where the generated report will be saved \"\n \"(default: %(default)s)\",\n default=config_dict[\"report\"][\"output_directory\"])\n parser.add_argument(\"--output-filename\",\n dest=\"output_filename\",\n type=str,\n help=\"File name of the generated report will be saved \"\n \"(default: %(default)s)\",\n default=config_dict[\"report\"][\"output_filename\"])\n args = parser.parse_args()\n\n # Override the values from the config.json file if values were set via argparse\n if args.ga_property_code != config_dict[\"report\"][\"ga_property_code\"]:\n config_dict[\"report\"][\"ga_property_code\"] = args.ga_property_code\n\n if args.css_directory != config_dict[\"report\"][\"css_directory\"]:\n config_dict[\"report\"][\"css_directory\"] = args.css_directory\n\n if args.css_filename != config_dict[\"report\"][\"css_filename\"]:\n config_dict[\"report\"][\"css_filename\"] = args.css_filename\n\n if args.output_directory != config_dict[\"report\"][\"output_directory\"]:\n config_dict[\"report\"][\"output_directory\"] = args.output_directory\n\n if args.output_filename != config_dict[\"report\"][\"output_filename\"]:\n config_dict[\"report\"][\"output_filename\"] = args.output_filename\n\n return config_dict\n\ndef render_report(show_years: List[int],\n panelists: List[Dict],\n report_settings: Dict\n ) -> Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n\n # Setup Jinja2 Template\n template_loader = FileSystemLoader(\"./template\")\n template_env = Environment(loader=template_loader,\n trim_blocks=True,\n lstrip_blocks=True)\n template_file = \"report.tmpl.html\"\n template = template_env.get_template(template_file)\n\n # Generate timestamp to include in page footer\n time_zone = pytz.timezone(\"America/Los_Angeles\")\n rendered_date_time = datetime.now(time_zone)\n\n # Build dictionary to pass into template renderer\n render_data = {}\n render_data[\"show_years\"] = show_years\n render_data[\"panelists\"] = panelists\n render_data[\"settings\"] = report_settings\n render_data[\"rendered_at\"] = rendered_date_time.strftime(\"%A, %B %d, %Y %H:%M:%S %Z\")\n\n # Render the report and write out to output directory\n report = template.render(render_data=render_data)\n return report\n\ndef generate_output_files(rendered_report: Text,\n report_settings: Dict) -> None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n\n css_path = os.path.join(report_settings[\"css_directory\"],\n report_settings[\"css_filename\"])\n output_path = os.path.join(report_settings[\"output_directory\"],\n report_settings[\"output_filename\"])\n\n # Create the output directory if it does not exist\n if not os.path.isdir(report_settings[\"output_directory\"]):\n os.mkdir(report_settings[\"output_directory\"])\n\n # Write out the generated report\n with open(output_path, \"w\") as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print(\"Error: {} is not writable\".format(output_path))\n\n # Copy CSS file into output directory\n shutil.copy2(css_path, report_settings[\"output_directory\"])\n\n 
return\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output bundle\"\"\"\n\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config[\"database\"])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n\n rendered_report = render_report(show_years=show_years,\n panelists=panelists,\n report_settings=app_config[\"report\"])\n\n generate_output_files(rendered_report=rendered_report,\n report_settings=app_config[\"report\"])\n\n# Only run if executed as a script and not imported\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
print('-'*100)
print('BIENVENIDOS A TIENDA ELEGANCIA')
print('-'*100)
prendas = ('Remeras', 'Camisas', 'Pantalones', 'Faldas', 'Vestidos', 'Abrigos', 'Calzado')
precioSinPromo = 0
superPuntos = 0
# ITEM 1
tipoPrenda1 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))
prendaseleccionada1 = prendas[tipoPrenda1]
print(prendaseleccionada1)
precio1 = float(input('Ingrese precio: $'))
precioinicial1 = precio1
precioSinPromo = precioSinPromo + precio1
print("La prenda: ", tipoPrenda1,"participa de del plan SuperPuntos? s/n")
valor1 = input()
v1 = None
if(valor1 == "s"):
v1 = 's'
valor1 = precio1
superPuntos = superPuntos + precio1
else:
if(valor1 == "n"):
v1 = "n"
valor1 = 0
# ITEM 2
tipoPrenda2 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))
prendaseleccionada2 = prendas[tipoPrenda2]
print(prendaseleccionada2)
precio2 = float(input('Ingrese precio: $'))
precioinicial2 = precio2
precioSinPromo = precioSinPromo + precio2
print("La prenda: ", tipoPrenda2, "participa de del plan SuperPuntos? s/n")
valor2 = input()
v2 = None
if (valor2 == "s"):
v2 = "s"
valor2 = precio2
superPuntos = superPuntos + precio2
else:
if (valor2 == "n"):
v2 = "n"
valor2 = 0
# ITEM 3
tipoPrenda3 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))
prendaseleccionada3 = prendas[tipoPrenda3]
print(prendaseleccionada3)
precio3 = float(input('Ingrese precio: $'))
precioinicial3 = precio3
precioSinPromo = precioSinPromo + precio3
print("La prenda: ", tipoPrenda3, "participa de del plan SuperPuntos? s/n")
valor3 = input()
v3 = None
if (valor3 == "s"):
v3 = "s"
valor3 = precio3
superPuntos = superPuntos + precio3
else:
if (valor3 == "n"):
v3 = "n"
valor3 = 0
# 3-FOR-2 PROMO (three items of the same type: the cheapest one is free)
if tipoPrenda1 == tipoPrenda2 == tipoPrenda3:
if precio1 < precio2 and precio1 < precio3:
precio1 = 0
else:
if precio2 < precio3:
precio2 = 0
else:
precio3 = 0
# 50% PROMO (exactly two items of the same type: one of the pair at half price)
if tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:
if precio1 > precio2:
precio1 = precio1 / 2
else:
precio2 = precio2 / 2
if tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:
if precio1 > precio3:
precio1 = precio1 / 2
else:
precio3 = precio3 / 2
if tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:
if precio2 > precio3:
precio2 = precio2 / 2
else:
precio3 = precio3 / 2
precioTotal = precio1 + precio2 + precio3
ahorro = precioSinPromo - precioTotal
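# Worked example (hypothetical prices, three items of the same type): prices of
# 100, 200 and 300 trigger the 3-for-2 promo, the cheapest item (100) becomes
# free, so precioSinPromo = 600, precioTotal = 500 and ahorro = 100.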
# PAYMENT METHOD
formaDePago = int(input("Ingrese la forma de pago:/ 1=Contado/ 2=Tarjeta"))
montoAPagar = 0
if formaDePago == 1:
formaDePago = "Contado (%10 de Descuento)"
montoAPagar=precioTotal/100*90
else:
if(formaDePago == 2):
cuotas=int(input("ingrese en cuantas cuotas desea pagar:"))
        if(cuotas <= 0):
            formaDePago="Contado (%10 de Descuento)"
            montoAPagar=precioTotal/100*90
        else:
            if(cuotas <= 3):
                formaDePago="Tarjeta (%2 de Recarga) cantidad de cuotas:", cuotas
                montoAPagar=precioTotal/100*102
            else:
                formaDePago="Tarjeta (%5 de Recarga) cantidad de cuotas:", cuotas
                montoAPagar=precioTotal/100*105
if valor1 > 0 and valor2 > 0 and valor3 > 0:
superPuntos = superPuntos * 2
print("----------------------------------------------------")
print("Tienda Elegancia")
print("Tipo, Precio, SuperPuntos")
print(prendaseleccionada1 , precioinicial1, v1)
print(prendaseleccionada2 , precioinicial2 , v2)
print(prendaseleccionada3 , precioinicial3 , v3)
print("Total sin promo: ", precioSinPromo)
print("Ahorro: ", ahorro)
print("Total Con Promo: ", precioTotal)
print("Forma de Pago: ", formaDePago)
print("Monto a Pagar: ", montoAPagar)
print("Usted obtiene: ", superPuntos, "SuperPuntos")
print("----------------------------------------------------")
|
normal
|
{
"blob_id": "333d237dd4a203fcfde3668901d725f16fbc402e",
"index": 1684,
"step-1": "<mask token>\n",
"step-2": "print('-' * 100)\nprint('BIENVENIDOS A TIENDA ELEGANCIA')\nprint('-' * 100)\n<mask token>\nprint(prendaseleccionada1)\n<mask token>\nprint('La prenda: ', tipoPrenda1, 'participa de del plan SuperPuntos? s/n')\n<mask token>\nif valor1 == 's':\n v1 = 's'\n valor1 = precio1\n superPuntos = superPuntos + precio1\nelif valor1 == 'n':\n v1 = 'n'\n valor1 = 0\n<mask token>\nprint(prendaseleccionada2)\n<mask token>\nprint('La prenda: ', tipoPrenda2, 'participa de del plan SuperPuntos? s/n')\n<mask token>\nif valor2 == 's':\n v2 = 's'\n valor2 = precio2\n superPuntos = superPuntos + precio2\nelif valor2 == 'n':\n v2 = 'n'\n valor2 = 0\n<mask token>\nprint(prendaseleccionada3)\n<mask token>\nprint('La prenda: ', tipoPrenda3, 'participa de del plan SuperPuntos? s/n')\n<mask token>\nif valor3 == 's':\n v3 = 's'\n valor3 = precio3\n superPuntos = superPuntos + precio3\nelif valor3 == 'n':\n v3 = 'n'\n valor3 = 0\nif tipoPrenda1 == tipoPrenda2 == tipoPrenda3:\n if precio1 < precio2 and precio1 < precio3:\n precio1 = 0\n elif precio2 < precio3:\n precio2 = 0\n else:\n precio3 = 0\nif tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:\n if precio1 > precio2:\n precio1 = precio1 / 2\n else:\n precio2 = precio2 / 2\nif tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:\n if precio1 > precio3:\n precio1 = precio1 / 2\n else:\n precio3 = precio3 / 2\nif tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:\n if precio2 > precio3:\n precio2 = precio2 / 2\n else:\n precio3 = precio3 / 2\n<mask token>\nif formaDePago == 1:\n formaDePago = 'Contado (%10 de Descuento)'\n montoAPagar = precioTotal / 100 * 90\nelif formaDePago == 2:\n cuotas = int(input('ingrese en cuantas cuotas desea pagar:'))\n if cuotas <= 3:\n formaDePago = 'Tarjeta (%2 de Recarga) cantidad de cuotas:', cuotas\n montoAPagar = precioTotal / 100 * 102\n elif cuotas > 3:\n formaDePago = 'Tarjeta (%5 de Recarga) cantidad de cuotas:', cuotas\n montoAPagar = precioTotal / 100 * 105\n elif cuotas <= 0:\n formaDePago = 'Contado (%10 de Descuento)'\n montoAPagar = precioTotal / 100 * 90\nif valor1 > 0 and valor2 > 0 and valor3 > 0:\n superPuntos = superPuntos * 2\nprint('----------------------------------------------------')\nprint('Tienda Elegancia')\nprint('Tipo, Precio, SuperPuntos')\nprint(prendaseleccionada1, precioinicial1, v1)\nprint(prendaseleccionada2, precioinicial2, v2)\nprint(prendaseleccionada3, precioinicial3, v3)\nprint('Total sin promo: ', precioSinPromo)\nprint('Ahorro: ', ahorro)\nprint('Total Con Promo: ', precioTotal)\nprint('Forma de Pago: ', formaDePago)\nprint('Monto a Pagar: ', montoAPagar)\nprint('Usted obtiene: ', superPuntos, 'SuperPuntos')\nprint('----------------------------------------------------')\n",
"step-3": "print('-' * 100)\nprint('BIENVENIDOS A TIENDA ELEGANCIA')\nprint('-' * 100)\nprendas = ('Remeras', 'Camisas', 'Pantalones', 'Faldas', 'Vestidos',\n 'Abrigos', 'Calzado')\nprecioSinPromo = 0\nsuperPuntos = 0\ntipoPrenda1 = int(input(\n 'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '\n ))\nprendaseleccionada1 = prendas[tipoPrenda1]\nprint(prendaseleccionada1)\nprecio1 = float(input('Ingrese precio: $'))\nprecioinicial1 = precio1\nprecioSinPromo = precioSinPromo + precio1\nprint('La prenda: ', tipoPrenda1, 'participa de del plan SuperPuntos? s/n')\nvalor1 = input()\nv1 = None\nif valor1 == 's':\n v1 = 's'\n valor1 = precio1\n superPuntos = superPuntos + precio1\nelif valor1 == 'n':\n v1 = 'n'\n valor1 = 0\ntipoPrenda2 = int(input(\n 'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '\n ))\nprendaseleccionada2 = prendas[tipoPrenda2]\nprint(prendaseleccionada2)\nprecio2 = float(input('Ingrese precio: $'))\nprecioinicial2 = precio2\nprecioSinPromo = precioSinPromo + precio2\nprint('La prenda: ', tipoPrenda2, 'participa de del plan SuperPuntos? s/n')\nvalor2 = input()\nv2 = None\nif valor2 == 's':\n v2 = 's'\n valor2 = precio2\n superPuntos = superPuntos + precio2\nelif valor2 == 'n':\n v2 = 'n'\n valor2 = 0\ntipoPrenda3 = int(input(\n 'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '\n ))\nprendaseleccionada3 = prendas[tipoPrenda3]\nprint(prendaseleccionada3)\nprecio3 = float(input('Ingrese precio: $'))\nprecioinicial3 = precio3\nprecioSinPromo = precioSinPromo + precio3\nprint('La prenda: ', tipoPrenda3, 'participa de del plan SuperPuntos? 
s/n')\nvalor3 = input()\nv3 = None\nif valor3 == 's':\n v3 = 's'\n valor3 = precio3\n superPuntos = superPuntos + precio3\nelif valor3 == 'n':\n v3 = 'n'\n valor3 = 0\nif tipoPrenda1 == tipoPrenda2 == tipoPrenda3:\n if precio1 < precio2 and precio1 < precio3:\n precio1 = 0\n elif precio2 < precio3:\n precio2 = 0\n else:\n precio3 = 0\nif tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:\n if precio1 > precio2:\n precio1 = precio1 / 2\n else:\n precio2 = precio2 / 2\nif tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:\n if precio1 > precio3:\n precio1 = precio1 / 2\n else:\n precio3 = precio3 / 2\nif tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:\n if precio2 > precio3:\n precio2 = precio2 / 2\n else:\n precio3 = precio3 / 2\nprecioTotal = precio1 + precio2 + precio3\nahorro = precioSinPromo - precioTotal\nformaDePago = int(input('Ingrese la forma de pago:/ 1=Contado/ 2=Tarjeta'))\nmontoAPagar = 0\nif formaDePago == 1:\n formaDePago = 'Contado (%10 de Descuento)'\n montoAPagar = precioTotal / 100 * 90\nelif formaDePago == 2:\n cuotas = int(input('ingrese en cuantas cuotas desea pagar:'))\n if cuotas <= 3:\n formaDePago = 'Tarjeta (%2 de Recarga) cantidad de cuotas:', cuotas\n montoAPagar = precioTotal / 100 * 102\n elif cuotas > 3:\n formaDePago = 'Tarjeta (%5 de Recarga) cantidad de cuotas:', cuotas\n montoAPagar = precioTotal / 100 * 105\n elif cuotas <= 0:\n formaDePago = 'Contado (%10 de Descuento)'\n montoAPagar = precioTotal / 100 * 90\nif valor1 > 0 and valor2 > 0 and valor3 > 0:\n superPuntos = superPuntos * 2\nprint('----------------------------------------------------')\nprint('Tienda Elegancia')\nprint('Tipo, Precio, SuperPuntos')\nprint(prendaseleccionada1, precioinicial1, v1)\nprint(prendaseleccionada2, precioinicial2, v2)\nprint(prendaseleccionada3, precioinicial3, v3)\nprint('Total sin promo: ', precioSinPromo)\nprint('Ahorro: ', ahorro)\nprint('Total Con Promo: ', precioTotal)\nprint('Forma de Pago: ', formaDePago)\nprint('Monto a Pagar: ', montoAPagar)\nprint('Usted obtiene: ', superPuntos, 'SuperPuntos')\nprint('----------------------------------------------------')\n",
"step-4": "print('-'*100)\nprint('BIENVENIDOS A TIENDA ELEGANCIA')\nprint('-'*100)\n\nprendas = ('Remeras', 'Camisas', 'Pantalones', 'Faldas', 'Vestidos', 'Abrigos', 'Calzado')\n\nprecioSinPromo = 0\nsuperPuntos = 0\n\n#ARTICULO 1\ntipoPrenda1 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))\nprendaseleccionada1 = prendas[tipoPrenda1]\nprint(prendaseleccionada1)\nprecio1 = float(input('Ingrese precio: $'))\nprecioinicial1 = precio1\nprecioSinPromo = precioSinPromo + precio1\n\nprint(\"La prenda: \", tipoPrenda1,\"participa de del plan SuperPuntos? s/n\")\nvalor1 = input()\nv1 = None\nif(valor1 == \"s\"):\n v1 = 's'\n valor1 = precio1\n superPuntos = superPuntos + precio1\nelse:\n if(valor1 == \"n\"):\n v1 = \"n\"\n valor1 = 0\n\n# ARTICULO 2\ntipoPrenda2 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))\nprendaseleccionada2 = prendas[tipoPrenda2]\nprint(prendaseleccionada2)\nprecio2 = float(input('Ingrese precio: $'))\nprecioinicial2 = precio2\nprecioSinPromo = precioSinPromo + precio2\n\nprint(\"La prenda: \", tipoPrenda2, \"participa de del plan SuperPuntos? s/n\")\nvalor2 = input()\nv2 = None\nif (valor2 == \"s\"):\n v2 = \"s\"\n valor2 = precio2\n superPuntos = superPuntos + precio2\nelse:\n if (valor2 == \"n\"):\n v2 = \"n\"\n valor2 = 0\n\n# ARTICULO 3\ntipoPrenda3 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))\nprendaseleccionada3 = prendas[tipoPrenda3]\nprint(prendaseleccionada3)\nprecio3 = float(input('Ingrese precio: $'))\nprecioinicial3 = precio3\nprecioSinPromo = precioSinPromo + precio3\n\nprint(\"La prenda: \", tipoPrenda3, \"participa de del plan SuperPuntos? 
s/n\")\nvalor3 = input()\nv3 = None\nif (valor3 == \"s\"):\n v3 = \"s\"\n valor3 = precio3\n superPuntos = superPuntos + precio3\nelse:\n if (valor3 == \"n\"):\n v3 = \"n\"\n valor3 = 0\n\n#PROMO 3X2\nif tipoPrenda1 == tipoPrenda2 == tipoPrenda3:\n if precio1 < precio2 and precio1 < precio3:\n precio1 = 0\n else:\n if precio2 < precio3:\n precio2 = 0\n else:\n precio3 = 0\n\n#PROMO 50%\nif tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:\n if precio1 > precio2:\n precio1 = precio1 / 2\n else:\n precio2 = precio2 / 2\n\nif tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:\n if precio1 > precio3:\n precio1 = precio1 / 2\n else:\n precio3 = precio3 / 2\n\nif tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:\n if precio2 > precio3:\n precio2 = precio2 / 2\n else:\n precio3 = precio3 / 2\n\nprecioTotal = precio1 + precio2 + precio3\nahorro = precioSinPromo - precioTotal\n\n#FORMA DE PAGO\nformaDePago = int(input(\"Ingrese la forma de pago:/ 1=Contado/ 2=Tarjeta\"))\nmontoAPagar = 0\n\nif formaDePago == 1:\n formaDePago = \"Contado (%10 de Descuento)\"\n montoAPagar=precioTotal/100*90\nelse:\n if(formaDePago == 2):\n cuotas=int(input(\"ingrese en cuantas cuotas desea pagar:\"))\n if(cuotas <= 3):\n formaDePago=\"Tarjeta (%2 de Recarga) cantidad de cuotas:\", cuotas\n montoAPagar=precioTotal/100*102\n else:\n if(cuotas > 3):\n formaDePago=\"Tarjeta (%5 de Recarga) cantidad de cuotas:\", cuotas\n montoAPagar=precioTotal/100*105\n else:\n if(cuotas <= 0):\n formaDePago=\"Contado (%10 de Descuento)\"\n montoAPagar=precioTotal/100*90\n\nif valor1 > 0 and valor2 > 0 and valor3 > 0:\n superPuntos = superPuntos * 2\n\nprint(\"----------------------------------------------------\")\nprint(\"Tienda Elegancia\")\nprint(\"Tipo, Precio, SuperPuntos\")\nprint(prendaseleccionada1 , precioinicial1, v1)\nprint(prendaseleccionada2 , precioinicial2 , v2)\nprint(prendaseleccionada3 , precioinicial3 , v3)\nprint(\"Total sin promo: \", precioSinPromo)\nprint(\"Ahorro: \", ahorro)\nprint(\"Total Con Promo: \", precioTotal)\nprint(\"Forma de Pago: \", formaDePago)\nprint(\"Monto a Pagar: \", montoAPagar)\nprint(\"Usted obtiene: \", superPuntos, \"SuperPuntos\")\nprint(\"----------------------------------------------------\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Classic solution for merging two sorted arrays/lists into a new one.
# (Based on the merge step of Merge Sort.)
from typing import List


class Solution:
    def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> List[int]:
"""
m->Size of nums1 list
n->Size of nums2 list
"""
mergedArray = []
i = 0
j = 0
while(i < m and j < n):
if(nums1[i] <= nums2[j]):
mergedArray.append(nums1[i])
i += 1
else:
mergedArray.append(nums2[j])
j += 1
while(i < m):
mergedArray.append(nums1[i])
i += 1
while(j < n):
mergedArray.append(nums2[j])
j += 1
return mergedArray
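
# A quick sanity check added for illustration; it is not part of the original
# snippet and the sample inputs are made up.
if __name__ == "__main__":
    merged = Solution().merge([1, 3, 5], 3, [2, 4, 6], 3)
    print(merged)  # expected: [1, 2, 3, 4, 5, 6]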
|
normal
|
{
"blob_id": "a732e7141ffb403ca6c5d9c4204cb96c8e831aab",
"index": 6814,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) ->None:\n \"\"\"\n m->Size of nums1 list\n n->Size of nums2 list\n \"\"\"\n mergedArray = []\n i = 0\n j = 0\n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n mergedArray.append(nums1[i])\n i += 1\n else:\n mergedArray.append(nums2[j])\n j += 1\n while i < m:\n mergedArray.append(nums1[i])\n i += 1\n while j < n:\n mergedArray.append(nums2[j])\n j += 1\n return mergedArray\n",
"step-4": "# Classic solution for merging two sorted arrays/list to a new one.\n# (Based on Merge Sort)\nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n m->Size of nums1 list\n n->Size of nums2 list\n \"\"\"\n mergedArray = []\n i = 0 \n j = 0\n while(i < m and j < n):\n if(nums1[i] <= nums2[j]):\n mergedArray.append(nums1[i])\n i += 1\n else:\n mergedArray.append(nums2[j])\n j += 1\n while(i < m):\n mergedArray.append(nums1[i])\n i += 1\n while(j < n):\n mergedArray.append(nums2[j])\n j += 1\n return mergedArray",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from a10sdk.common.A10BaseClass import A10BaseClass
class MacAgeTime(A10BaseClass):
"""Class Description::
Set Aging period for all MAC Interfaces.
Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param aging_time: {"description": "Set aging period in seconds for all MAC interfaces (default 300 seconds)", "format": "number", "default": 300, "optional": true, "maximum": 600, "minimum": 10, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/mac-age-time`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "mac-age-time"
self.a10_url="/axapi/v3/mac-age-time"
self.DeviceProxy = ""
self.aging_time = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
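
# Illustrative usage sketch (an added note, not taken from the a10sdk sources;
# the proxy object below is hypothetical):
# mat = MacAgeTime(aging_time=120)    # aging period in seconds (10-600 per the schema)
# mat.DeviceProxy = my_device_proxy   # session handler from common/device_proxy.py
# CRUD requests for this object would then target /axapi/v3/mac-age-time.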
|
normal
|
{
"blob_id": "f08677430e54822abbce61d0cac5a6fea14d3872",
"index": 6078,
"step-1": "<mask token>\n\n\nclass MacAgeTime(A10BaseClass):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MacAgeTime(A10BaseClass):\n <mask token>\n\n def __init__(self, **kwargs):\n self.ERROR_MSG = ''\n self.required = []\n self.b_key = 'mac-age-time'\n self.a10_url = '/axapi/v3/mac-age-time'\n self.DeviceProxy = ''\n self.aging_time = ''\n for keys, value in kwargs.items():\n setattr(self, keys, value)\n",
"step-3": "<mask token>\n\n\nclass MacAgeTime(A10BaseClass):\n \"\"\"Class Description::\n Set Aging period for all MAC Interfaces.\n\n Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param aging_time: {\"description\": \"Set aging period in seconds for all MAC interfaces (default 300 seconds)\", \"format\": \"number\", \"default\": 300, \"optional\": true, \"maximum\": 600, \"minimum\": 10, \"type\": \"number\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https://<Hostname|Ip address>//axapi/v3/mac-age-time`.\n\n \n\n \n \"\"\"\n\n def __init__(self, **kwargs):\n self.ERROR_MSG = ''\n self.required = []\n self.b_key = 'mac-age-time'\n self.a10_url = '/axapi/v3/mac-age-time'\n self.DeviceProxy = ''\n self.aging_time = ''\n for keys, value in kwargs.items():\n setattr(self, keys, value)\n",
"step-4": "from a10sdk.common.A10BaseClass import A10BaseClass\n\n\nclass MacAgeTime(A10BaseClass):\n \"\"\"Class Description::\n Set Aging period for all MAC Interfaces.\n\n Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param aging_time: {\"description\": \"Set aging period in seconds for all MAC interfaces (default 300 seconds)\", \"format\": \"number\", \"default\": 300, \"optional\": true, \"maximum\": 600, \"minimum\": 10, \"type\": \"number\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https://<Hostname|Ip address>//axapi/v3/mac-age-time`.\n\n \n\n \n \"\"\"\n\n def __init__(self, **kwargs):\n self.ERROR_MSG = ''\n self.required = []\n self.b_key = 'mac-age-time'\n self.a10_url = '/axapi/v3/mac-age-time'\n self.DeviceProxy = ''\n self.aging_time = ''\n for keys, value in kwargs.items():\n setattr(self, keys, value)\n",
"step-5": "from a10sdk.common.A10BaseClass import A10BaseClass\n\n\nclass MacAgeTime(A10BaseClass):\n \n \"\"\"Class Description::\n Set Aging period for all MAC Interfaces.\n\n Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param aging_time: {\"description\": \"Set aging period in seconds for all MAC interfaces (default 300 seconds)\", \"format\": \"number\", \"default\": 300, \"optional\": true, \"maximum\": 600, \"minimum\": 10, \"type\": \"number\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https://<Hostname|Ip address>//axapi/v3/mac-age-time`.\n\n \n\n \n \"\"\"\n def __init__(self, **kwargs):\n self.ERROR_MSG = \"\"\n self.required=[]\n self.b_key = \"mac-age-time\"\n self.a10_url=\"/axapi/v3/mac-age-time\"\n self.DeviceProxy = \"\"\n self.aging_time = \"\"\n\n for keys, value in kwargs.items():\n setattr(self,keys, value)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import codecs
import json
#~ from lxml import etree
import lxml.html
target = "test/index.html"
url = "http://de.wikipedia.org/wiki/Liste_von_Bergen_in_der_Schweiz"
command = "wget %s -O %s" % (url, target)
#~ os.popen(command)
f = open(target)
html = lxml.html.fromstring(f.read())
f.close()
tables = html.xpath("//table")
table = tables[2]
rows = table.xpath("//tr/th")
#~ row = rows[2]
#~ ths = row.xpath("th")
#~ print len(rows)
for cell in rows[:8]:
text = cell.xpath("string()").replace("(km)","").replace("(m)","")
text = text.strip()
print text
#~ f = codecs.open("out.html","w", encoding="utf-8")
f_out = codecs.open("out.json","w", encoding="utf-8")
rows = table.xpath("//tr")
print len(rows)
#~ liste = {}
liste = []
for i, row in enumerate(rows):
cells = row.xpath("td")
if len(cells)==8:
#~ print cells[1].xpath("string()")
#~ cell = cells[1]
out = []
for cell in cells[1:3]:
links = cell.xpath("a")
if links:
out.append(links[0].xpath("string()"))
else:
out.append(cell.xpath("string()"))
#~ liste.update({"n%s"% (i):{"name":out[0], "hight":out[1]}})
liste.append({"name":out[0], "hight":out[1]})
#~ f.write('<li><a data-icon="info" data-rel="dialog" data-transition="pop" href="#no_info">%s (%s)</a></li>\n' % (out[0], out[1]))
#~ f.close()
f_out.write(json.dumps({"mountains" : {"Switzerland" : liste}}))
f_out.close()
#~ for table in tables:
#~ print len(table)
print lxml.html.tostring(table)[:100]
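
# Illustrative shape of the generated out.json (added note; the actual entries
# depend on the live Wikipedia page at scrape time):
# {"mountains": {"Switzerland": [{"name": "...", "hight": "..."}, ...]}}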
|
normal
|
{
"blob_id": "89499ea8dd02d5e1b2ff635ab5203a65ceee4276",
"index": 8536,
"step-1": "import os\nimport codecs\nimport json\n#~ from lxml import etree\nimport lxml.html\n\ntarget = \"test/index.html\"\nurl = \"http://de.wikipedia.org/wiki/Liste_von_Bergen_in_der_Schweiz\"\ncommand = \"wget %s -O %s\" % (url, target)\n#~ os.popen(command)\n\nf = open(target)\nhtml = lxml.html.fromstring(f.read())\nf.close()\n\ntables = html.xpath(\"//table\")\ntable = tables[2]\n\nrows = table.xpath(\"//tr/th\")\n#~ row = rows[2]\n#~ ths = row.xpath(\"th\")\n#~ print len(rows)\nfor cell in rows[:8]:\n text = cell.xpath(\"string()\").replace(\"(km)\",\"\").replace(\"(m)\",\"\")\n text = text.strip()\n print text\n\n#~ f = codecs.open(\"out.html\",\"w\", encoding=\"utf-8\")\nf_out = codecs.open(\"out.json\",\"w\", encoding=\"utf-8\")\n\nrows = table.xpath(\"//tr\")\nprint len(rows)\n#~ liste = {}\nliste = []\nfor i, row in enumerate(rows):\n cells = row.xpath(\"td\")\n if len(cells)==8:\n #~ print cells[1].xpath(\"string()\")\n #~ cell = cells[1]\n out = []\n for cell in cells[1:3]:\n links = cell.xpath(\"a\")\n if links:\n out.append(links[0].xpath(\"string()\"))\n else:\n out.append(cell.xpath(\"string()\"))\n\t\t#~ liste.update({\"n%s\"% (i):{\"name\":out[0], \"hight\":out[1]}})\n liste.append({\"name\":out[0], \"hight\":out[1]})\n \n \n #~ f.write('<li><a data-icon=\"info\" data-rel=\"dialog\" data-transition=\"pop\" href=\"#no_info\">%s (%s)</a></li>\\n' % (out[0], out[1]))\n\n#~ f.close()\n\nf_out.write(json.dumps({\"mountains\" : {\"Switzerland\" : liste}}))\nf_out.close()\n\n#~ for table in tables:\n #~ print len(table)\n\n\nprint lxml.html.tostring(table)[:100]\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import functools
import os
import platform
import sys
import webbrowser
import config
from pushbullet import Pushbullet
class Zui:
def __init__(self):
self.pb = Pushbullet(self.api_key())
self.target = self.make_devices()
self.dayone = config.URL_SCHEME
self.clear, self.pause = self.check_platform()
def api_key(self):
if config.API_KEY:
return config.API_KEY
else:
webbrowser.open('https://www.pushbullet.com/account')
API_KEY = input('Copy and Paste Access Token: ')
self.config_setting(API_KEY)
return API_KEY
def config_setting(self, api_key):
with open('config.py', 'r') as rf:
setting = rf.readlines()
setting[0] = 'API_KEY = "{0}"\n'.format(api_key)
with open('config.py', 'w') as wf:
wf.writelines(setting)
wf.flush()
def make_devices(self):
for d in self.pb.devices:
if config.PUSH_TARGET == d.nickname:
return d
else:
new_device = self.pb.new_device(config.PUSH_TARGET)
# model argument was not used, only nickname
self.pb.edit_device(
new_device,
nickname=config.PUSH_TARGET,
model=config.PUSH_TARGET
)
self.make_devices()
    def clear_notepad(f):
        # Clear the terminal before and after running the wrapped method.
        @functools.wraps(f)
        def wraps(*args):
            os.system(args[0].clear)
            result = f(*args)
            os.system(args[0].clear)
            return result
        return wraps
@clear_notepad
def push_to_dayone(self):
        '''Pushbullet can't create a link when the URL contains whitespace.
        So, it doesn't push_link, just push_note.
        The DayOne URL scheme is unavailable here.
'''
try:
# body = self.dayone + self.notepad()
body = self.notepad()
return self.pb.push_note('', body, device=self.target)
except KeyboardInterrupt as e:
return False
def notepad(self):
try:
print('Push: {}, Close: C-c'.format(self.pause))
lines = [line for line in sys.stdin.readlines()]
return ''.join(lines)
except KeyboardInterrupt as e:
raise e
def check_platform(self):
cp = {
'Windows': (
'CLS',
'C-z'
),
'Darwin': (
'clear',
'C-d'
),
}
return cp[platform.system()][0], cp[platform.system()][1]
def main():
z = Zui()
while z.push_to_dayone():
pass
else:
print('Bye.')
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "66cc9ca3d8cbe9690da841e43cef217f3518122c",
"index": 7939,
"step-1": "<mask token>\n\n\nclass Zui:\n\n def __init__(self):\n self.pb = Pushbullet(self.api_key())\n self.target = self.make_devices()\n self.dayone = config.URL_SCHEME\n self.clear, self.pause = self.check_platform()\n\n def api_key(self):\n if config.API_KEY:\n return config.API_KEY\n else:\n webbrowser.open('https://www.pushbullet.com/account')\n API_KEY = input('Copy and Paste Access Token: ')\n self.config_setting(API_KEY)\n return API_KEY\n\n def config_setting(self, api_key):\n with open('config.py', 'r') as rf:\n setting = rf.readlines()\n setting[0] = 'API_KEY = \"{0}\"\\n'.format(api_key)\n with open('config.py', 'w') as wf:\n wf.writelines(setting)\n wf.flush()\n\n def make_devices(self):\n for d in self.pb.devices:\n if config.PUSH_TARGET == d.nickname:\n return d\n else:\n new_device = self.pb.new_device(config.PUSH_TARGET)\n self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,\n model=config.PUSH_TARGET)\n self.make_devices()\n\n def clear_notepad(f):\n functools.wraps(f)\n\n def wraps(*args):\n os.system(args[0].clear)\n result = f(*args)\n os.system(args[0].clear)\n return result\n return wraps\n\n @clear_notepad\n def push_to_dayone(self):\n \"\"\"Pushbullet couldn't link then whitespace in URL.\n So, it doesn't push_link, just push_note.\n Unavilable DayOne URL shceme.\n \"\"\"\n try:\n body = self.notepad()\n return self.pb.push_note('', body, device=self.target)\n except KeyboardInterrupt as e:\n return False\n <mask token>\n\n def check_platform(self):\n cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}\n return cp[platform.system()][0], cp[platform.system()][1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Zui:\n\n def __init__(self):\n self.pb = Pushbullet(self.api_key())\n self.target = self.make_devices()\n self.dayone = config.URL_SCHEME\n self.clear, self.pause = self.check_platform()\n\n def api_key(self):\n if config.API_KEY:\n return config.API_KEY\n else:\n webbrowser.open('https://www.pushbullet.com/account')\n API_KEY = input('Copy and Paste Access Token: ')\n self.config_setting(API_KEY)\n return API_KEY\n\n def config_setting(self, api_key):\n with open('config.py', 'r') as rf:\n setting = rf.readlines()\n setting[0] = 'API_KEY = \"{0}\"\\n'.format(api_key)\n with open('config.py', 'w') as wf:\n wf.writelines(setting)\n wf.flush()\n\n def make_devices(self):\n for d in self.pb.devices:\n if config.PUSH_TARGET == d.nickname:\n return d\n else:\n new_device = self.pb.new_device(config.PUSH_TARGET)\n self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,\n model=config.PUSH_TARGET)\n self.make_devices()\n\n def clear_notepad(f):\n functools.wraps(f)\n\n def wraps(*args):\n os.system(args[0].clear)\n result = f(*args)\n os.system(args[0].clear)\n return result\n return wraps\n\n @clear_notepad\n def push_to_dayone(self):\n \"\"\"Pushbullet couldn't link then whitespace in URL.\n So, it doesn't push_link, just push_note.\n Unavilable DayOne URL shceme.\n \"\"\"\n try:\n body = self.notepad()\n return self.pb.push_note('', body, device=self.target)\n except KeyboardInterrupt as e:\n return False\n\n def notepad(self):\n try:\n print('Push: {}, Close: C-c'.format(self.pause))\n lines = [line for line in sys.stdin.readlines()]\n return ''.join(lines)\n except KeyboardInterrupt as e:\n raise e\n\n def check_platform(self):\n cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}\n return cp[platform.system()][0], cp[platform.system()][1]\n\n\ndef main():\n z = Zui()\n while z.push_to_dayone():\n pass\n else:\n print('Bye.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Zui:\n\n def __init__(self):\n self.pb = Pushbullet(self.api_key())\n self.target = self.make_devices()\n self.dayone = config.URL_SCHEME\n self.clear, self.pause = self.check_platform()\n\n def api_key(self):\n if config.API_KEY:\n return config.API_KEY\n else:\n webbrowser.open('https://www.pushbullet.com/account')\n API_KEY = input('Copy and Paste Access Token: ')\n self.config_setting(API_KEY)\n return API_KEY\n\n def config_setting(self, api_key):\n with open('config.py', 'r') as rf:\n setting = rf.readlines()\n setting[0] = 'API_KEY = \"{0}\"\\n'.format(api_key)\n with open('config.py', 'w') as wf:\n wf.writelines(setting)\n wf.flush()\n\n def make_devices(self):\n for d in self.pb.devices:\n if config.PUSH_TARGET == d.nickname:\n return d\n else:\n new_device = self.pb.new_device(config.PUSH_TARGET)\n self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,\n model=config.PUSH_TARGET)\n self.make_devices()\n\n def clear_notepad(f):\n functools.wraps(f)\n\n def wraps(*args):\n os.system(args[0].clear)\n result = f(*args)\n os.system(args[0].clear)\n return result\n return wraps\n\n @clear_notepad\n def push_to_dayone(self):\n \"\"\"Pushbullet couldn't link then whitespace in URL.\n So, it doesn't push_link, just push_note.\n Unavilable DayOne URL shceme.\n \"\"\"\n try:\n body = self.notepad()\n return self.pb.push_note('', body, device=self.target)\n except KeyboardInterrupt as e:\n return False\n\n def notepad(self):\n try:\n print('Push: {}, Close: C-c'.format(self.pause))\n lines = [line for line in sys.stdin.readlines()]\n return ''.join(lines)\n except KeyboardInterrupt as e:\n raise e\n\n def check_platform(self):\n cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}\n return cp[platform.system()][0], cp[platform.system()][1]\n\n\ndef main():\n z = Zui()\n while z.push_to_dayone():\n pass\n else:\n print('Bye.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import functools\nimport os\nimport platform\nimport sys\nimport webbrowser\nimport config\nfrom pushbullet import Pushbullet\n\n\nclass Zui:\n\n def __init__(self):\n self.pb = Pushbullet(self.api_key())\n self.target = self.make_devices()\n self.dayone = config.URL_SCHEME\n self.clear, self.pause = self.check_platform()\n\n def api_key(self):\n if config.API_KEY:\n return config.API_KEY\n else:\n webbrowser.open('https://www.pushbullet.com/account')\n API_KEY = input('Copy and Paste Access Token: ')\n self.config_setting(API_KEY)\n return API_KEY\n\n def config_setting(self, api_key):\n with open('config.py', 'r') as rf:\n setting = rf.readlines()\n setting[0] = 'API_KEY = \"{0}\"\\n'.format(api_key)\n with open('config.py', 'w') as wf:\n wf.writelines(setting)\n wf.flush()\n\n def make_devices(self):\n for d in self.pb.devices:\n if config.PUSH_TARGET == d.nickname:\n return d\n else:\n new_device = self.pb.new_device(config.PUSH_TARGET)\n self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,\n model=config.PUSH_TARGET)\n self.make_devices()\n\n def clear_notepad(f):\n functools.wraps(f)\n\n def wraps(*args):\n os.system(args[0].clear)\n result = f(*args)\n os.system(args[0].clear)\n return result\n return wraps\n\n @clear_notepad\n def push_to_dayone(self):\n \"\"\"Pushbullet couldn't link then whitespace in URL.\n So, it doesn't push_link, just push_note.\n Unavilable DayOne URL shceme.\n \"\"\"\n try:\n body = self.notepad()\n return self.pb.push_note('', body, device=self.target)\n except KeyboardInterrupt as e:\n return False\n\n def notepad(self):\n try:\n print('Push: {}, Close: C-c'.format(self.pause))\n lines = [line for line in sys.stdin.readlines()]\n return ''.join(lines)\n except KeyboardInterrupt as e:\n raise e\n\n def check_platform(self):\n cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}\n return cp[platform.system()][0], cp[platform.system()][1]\n\n\ndef main():\n z = Zui()\n while z.push_to_dayone():\n pass\n else:\n print('Bye.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport functools\nimport os\nimport platform\nimport sys\nimport webbrowser\n\nimport config\nfrom pushbullet import Pushbullet\n\n\nclass Zui:\n\n def __init__(self):\n self.pb = Pushbullet(self.api_key())\n self.target = self.make_devices()\n self.dayone = config.URL_SCHEME\n self.clear, self.pause = self.check_platform()\n\n def api_key(self):\n if config.API_KEY:\n return config.API_KEY\n else:\n webbrowser.open('https://www.pushbullet.com/account')\n API_KEY = input('Copy and Paste Access Token: ')\n self.config_setting(API_KEY)\n return API_KEY\n\n def config_setting(self, api_key):\n with open('config.py', 'r') as rf:\n setting = rf.readlines()\n setting[0] = 'API_KEY = \"{0}\"\\n'.format(api_key)\n with open('config.py', 'w') as wf:\n wf.writelines(setting)\n wf.flush()\n\n def make_devices(self):\n for d in self.pb.devices:\n if config.PUSH_TARGET == d.nickname:\n return d\n else:\n new_device = self.pb.new_device(config.PUSH_TARGET)\n # model argument was not used, only nickname\n self.pb.edit_device(\n new_device,\n nickname=config.PUSH_TARGET,\n model=config.PUSH_TARGET\n )\n self.make_devices()\n\n def clear_notepad(f):\n functools.wraps(f)\n def wraps(*args):\n os.system(args[0].clear)\n result = f(*args)\n os.system(args[0].clear)\n return result\n return wraps\n\n @clear_notepad\n def push_to_dayone(self):\n '''Pushbullet couldn't link then whitespace in URL.\n So, it doesn't push_link, just push_note.\n Unavilable DayOne URL shceme.\n '''\n try:\n # body = self.dayone + self.notepad()\n body = self.notepad()\n return self.pb.push_note('', body, device=self.target)\n except KeyboardInterrupt as e:\n return False\n\n def notepad(self):\n try:\n print('Push: {}, Close: C-c'.format(self.pause))\n lines = [line for line in sys.stdin.readlines()]\n return ''.join(lines)\n except KeyboardInterrupt as e:\n raise e\n\n def check_platform(self):\n cp = {\n 'Windows': (\n 'CLS',\n 'C-z'\n ),\n 'Darwin': (\n 'clear',\n 'C-d'\n ),\n }\n return cp[platform.system()][0], cp[platform.system()][1]\n\n\ndef main():\n z = Zui()\n while z.push_to_dayone():\n pass\n else:\n print('Bye.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
from django.conf.urls import url
from . import views
from .HouseView import CreateHouseView
app_name = 'voronoi'
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^search/$', views.search, name='search'),
url(r'^house/create/$', CreateHouseView.as_view(), name='create'),
#url(r'^get_search_results/$', views.get_search_results, name='get_search_results'),
url(r'^get_search_json/$', views.get_search_json, name='get_search_json'),
url(r'^get_search_suggestions/$', views.get_search_suggestions, name='get_search_suggestions'),
# ex: /polls/5/
url(r'^(?P<house_id>[0-9]+)/$', views.detail, name='detail'),
# ex: /polls/5/results/
url(r'^(?P<house_id>[0-9]+)/ratings/$', views.ratings, name='ratings'),
]
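
# Illustrative reverse() lookups for these routes (added note; the project-level
# prefix under which this app is included is an assumption):
# reverse('voronoi:detail', args=[7])   -> '<prefix>/7/'
# reverse('voronoi:ratings', args=[7])  -> '<prefix>/7/ratings/'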
|
normal
|
{
"blob_id": "e3ee00efa0e929b87ca33b79dc6a6064b8758d4a",
"index": 2640,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'voronoi'\nurlpatterns = [url('^$', views.index, name='index'), url('^search/$', views\n .search, name='search'), url('^house/create/$', CreateHouseView.as_view\n (), name='create'), url('^get_search_json/$', views.get_search_json,\n name='get_search_json'), url('^get_search_suggestions/$', views.\n get_search_suggestions, name='get_search_suggestions'), url(\n '^(?P<house_id>[0-9]+)/$', views.detail, name='detail'), url(\n '^(?P<house_id>[0-9]+)/ratings/$', views.ratings, name='ratings')]\n",
"step-3": "from django.conf.urls import url\nfrom . import views\nfrom .HouseView import CreateHouseView\napp_name = 'voronoi'\nurlpatterns = [url('^$', views.index, name='index'), url('^search/$', views\n .search, name='search'), url('^house/create/$', CreateHouseView.as_view\n (), name='create'), url('^get_search_json/$', views.get_search_json,\n name='get_search_json'), url('^get_search_suggestions/$', views.\n get_search_suggestions, name='get_search_suggestions'), url(\n '^(?P<house_id>[0-9]+)/$', views.detail, name='detail'), url(\n '^(?P<house_id>[0-9]+)/ratings/$', views.ratings, name='ratings')]\n",
"step-4": "from django.conf.urls import url\n\nfrom . import views\nfrom .HouseView import CreateHouseView\n\napp_name = 'voronoi'\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^search/$', views.search, name='search'),\n url(r'^house/create/$', CreateHouseView.as_view(), name='create'),\n #url(r'^get_search_results/$', views.get_search_results, name='get_search_results'),\n url(r'^get_search_json/$', views.get_search_json, name='get_search_json'),\n url(r'^get_search_suggestions/$', views.get_search_suggestions, name='get_search_suggestions'),\n\n \n \t# ex: /polls/5/\n url(r'^(?P<house_id>[0-9]+)/$', views.detail, name='detail'),\n # ex: /polls/5/results/\n url(r'^(?P<house_id>[0-9]+)/ratings/$', views.ratings, name='ratings'),\n\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
# -*- coding: latin-1 -*-
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html', titre="Ludovic DELSOL - Portfolio")
@app.route('/etude')
def etude():
return render_template('etude.html', titre="Portfolio Ludovic DELSOL - Etude")
@app.route('/experience')
def experience():
return render_template('experience.html', titre="Portfolio Ludovic DELSOL - Experiences Pros")
@app.route('/competence')
def compentence():
return render_template('compentence.html', titre="Portfolio Ludovic DELSOL - Compétences")
@app.route('/projet')
def project():
return render_template('projet.html', titre="Portfolio Ludovic DELSOL - Projets")
if __name__ == '__main__':
app.run(debug=True)
|
normal
|
{
"blob_id": "c7037b6a576374f211580b304f8447349bbbbea3",
"index": 9583,
"step-1": "<mask token>\n\n\[email protected]('/etude')\ndef etude():\n return render_template('etude.html', titre=\n 'Portfolio Ludovic DELSOL - Etude')\n\n\[email protected]('/experience')\ndef experience():\n return render_template('experience.html', titre=\n 'Portfolio Ludovic DELSOL - Experiences Pros')\n\n\[email protected]('/competence')\ndef compentence():\n return render_template('compentence.html', titre=\n 'Portfolio Ludovic DELSOL - Compétences')\n\n\[email protected]('/projet')\ndef project():\n return render_template('projet.html', titre=\n 'Portfolio Ludovic DELSOL - Projets')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('index.html', titre='Ludovic DELSOL - Portfolio')\n\n\[email protected]('/etude')\ndef etude():\n return render_template('etude.html', titre=\n 'Portfolio Ludovic DELSOL - Etude')\n\n\[email protected]('/experience')\ndef experience():\n return render_template('experience.html', titre=\n 'Portfolio Ludovic DELSOL - Experiences Pros')\n\n\[email protected]('/competence')\ndef compentence():\n return render_template('compentence.html', titre=\n 'Portfolio Ludovic DELSOL - Compétences')\n\n\[email protected]('/projet')\ndef project():\n return render_template('projet.html', titre=\n 'Portfolio Ludovic DELSOL - Projets')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return render_template('index.html', titre='Ludovic DELSOL - Portfolio')\n\n\[email protected]('/etude')\ndef etude():\n return render_template('etude.html', titre=\n 'Portfolio Ludovic DELSOL - Etude')\n\n\[email protected]('/experience')\ndef experience():\n return render_template('experience.html', titre=\n 'Portfolio Ludovic DELSOL - Experiences Pros')\n\n\[email protected]('/competence')\ndef compentence():\n return render_template('compentence.html', titre=\n 'Portfolio Ludovic DELSOL - Compétences')\n\n\[email protected]('/projet')\ndef project():\n return render_template('projet.html', titre=\n 'Portfolio Ludovic DELSOL - Projets')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, render_template\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return render_template('index.html', titre='Ludovic DELSOL - Portfolio')\n\n\[email protected]('/etude')\ndef etude():\n return render_template('etude.html', titre=\n 'Portfolio Ludovic DELSOL - Etude')\n\n\[email protected]('/experience')\ndef experience():\n return render_template('experience.html', titre=\n 'Portfolio Ludovic DELSOL - Experiences Pros')\n\n\[email protected]('/competence')\ndef compentence():\n return render_template('compentence.html', titre=\n 'Portfolio Ludovic DELSOL - Compétences')\n\n\[email protected]('/projet')\ndef project():\n return render_template('projet.html', titre=\n 'Portfolio Ludovic DELSOL - Projets')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "#!/usr/bin/python\n# -*- coding: latin-1 -*-\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return render_template('index.html', titre=\"Ludovic DELSOL - Portfolio\")\n\[email protected]('/etude')\ndef etude():\n return render_template('etude.html', titre=\"Portfolio Ludovic DELSOL - Etude\")\n\[email protected]('/experience')\ndef experience():\n return render_template('experience.html', titre=\"Portfolio Ludovic DELSOL - Experiences Pros\")\n\[email protected]('/competence')\ndef compentence():\n return render_template('compentence.html', titre=\"Portfolio Ludovic DELSOL - Compétences\")\n\[email protected]('/projet')\ndef project():\n return render_template('projet.html', titre=\"Portfolio Ludovic DELSOL - Projets\")\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import logging
from subprocess import Popen, PIPE
from .exceptions import VideoEncodingError, WrongVideoTypeError
# TODO: Create a switchable encoding engine.
logger = logging.getLogger('video.encoding')
cmd_ffmpeg = [
'ffmpeg',
'-i',
]
cmd_mp4 = [
'-vf', 'scale=640:360',
'-vcodec', 'h264',
'-acodec', 'aac',
'-y',
]
cmd_webm = [
'-vf', 'scale=640:360',
'-c:v', 'libvpx-vp9',
'-pix_fmt', 'yuv420p',
'-y',
]
# cmd_webm = [
# '-c:v'
# '-vf', 'scale=640:360',
# '-vcodec', 'libvpx',
# '-acodec', 'libvorbis',
# '-y',
# ]
cmd_jpg = [
'-frames', '1',
'-s', '640x360',
'-ss', '1',
'-y',
]
codecs = {
'jpg': cmd_jpg,
'mp4': cmd_mp4,
'webm': cmd_webm,
}
def _run_cmd(cmds):
try:
return Popen(
cmds,
shell=False,
close_fds=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
except OSError as ex:
raise VideoEncodingError('Video running error.') from ex
def encode_video_file(src_filname, dst_filename, file_type):
logger.info(
'Source file: %s, Destination file: %s, File Type: %s',
src_filname, dst_filename, file_type
)
try:
cmd = codecs[file_type]
except IndexError:
raise WrongVideoTypeError('Wrong video type.')
process = _run_cmd(
cmd_ffmpeg + [src_filname] + cmd + [dst_filename],
)
# TODO: timeout handling here.
stdout, stderr = process.communicate()
returncode = process.returncode
if returncode != 0:
logger.error(
'ffmpeg returncode %d, args: %s, output: %s',
returncode,
process.args,
stderr.decode(),
)
raise VideoEncodingError('Video encoding error.')
return returncode
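
# Illustrative calls (added example; the file names are hypothetical and ffmpeg
# must be available on PATH):
# encode_video_file('clip.mov', 'clip.mp4', 'mp4')    # 640x360 h264/aac
# encode_video_file('clip.mov', 'clip.webm', 'webm')  # 640x360 vp9
# encode_video_file('clip.mov', 'thumb.jpg', 'jpg')   # single-frame thumbnail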
|
normal
|
{
"blob_id": "163475bbe8a5b6eb161e2bb7e9b9a9a3ea0879d2",
"index": 8138,
"step-1": "<mask token>\n\n\ndef encode_video_file(src_filname, dst_filename, file_type):\n logger.info('Source file: %s, Destination file: %s, File Type: %s',\n src_filname, dst_filename, file_type)\n try:\n cmd = codecs[file_type]\n except IndexError:\n raise WrongVideoTypeError('Wrong video type.')\n process = _run_cmd(cmd_ffmpeg + [src_filname] + cmd + [dst_filename])\n stdout, stderr = process.communicate()\n returncode = process.returncode\n if returncode != 0:\n logger.error('ffmpeg returncode %d, args: %s, output: %s',\n returncode, process.args, stderr.decode())\n raise VideoEncodingError('Video encoding error.')\n return returncode\n",
"step-2": "<mask token>\n\n\ndef _run_cmd(cmds):\n try:\n return Popen(cmds, shell=False, close_fds=True, stdin=PIPE, stdout=\n PIPE, stderr=PIPE)\n except OSError as ex:\n raise VideoEncodingError('Video running error.') from ex\n\n\ndef encode_video_file(src_filname, dst_filename, file_type):\n logger.info('Source file: %s, Destination file: %s, File Type: %s',\n src_filname, dst_filename, file_type)\n try:\n cmd = codecs[file_type]\n except IndexError:\n raise WrongVideoTypeError('Wrong video type.')\n process = _run_cmd(cmd_ffmpeg + [src_filname] + cmd + [dst_filename])\n stdout, stderr = process.communicate()\n returncode = process.returncode\n if returncode != 0:\n logger.error('ffmpeg returncode %d, args: %s, output: %s',\n returncode, process.args, stderr.decode())\n raise VideoEncodingError('Video encoding error.')\n return returncode\n",
"step-3": "<mask token>\nlogger = logging.getLogger('video.encoding')\ncmd_ffmpeg = ['ffmpeg', '-i']\ncmd_mp4 = ['-vf', 'scale=640:360', '-vcodec', 'h264', '-acodec', 'aac', '-y']\ncmd_webm = ['-vf', 'scale=640:360', '-c:v', 'libvpx-vp9', '-pix_fmt',\n 'yuv420p', '-y']\ncmd_jpg = ['-frames', '1', '-s', '640x360', '-ss', '1', '-y']\ncodecs = {'jpg': cmd_jpg, 'mp4': cmd_mp4, 'webm': cmd_webm}\n\n\ndef _run_cmd(cmds):\n try:\n return Popen(cmds, shell=False, close_fds=True, stdin=PIPE, stdout=\n PIPE, stderr=PIPE)\n except OSError as ex:\n raise VideoEncodingError('Video running error.') from ex\n\n\ndef encode_video_file(src_filname, dst_filename, file_type):\n logger.info('Source file: %s, Destination file: %s, File Type: %s',\n src_filname, dst_filename, file_type)\n try:\n cmd = codecs[file_type]\n except IndexError:\n raise WrongVideoTypeError('Wrong video type.')\n process = _run_cmd(cmd_ffmpeg + [src_filname] + cmd + [dst_filename])\n stdout, stderr = process.communicate()\n returncode = process.returncode\n if returncode != 0:\n logger.error('ffmpeg returncode %d, args: %s, output: %s',\n returncode, process.args, stderr.decode())\n raise VideoEncodingError('Video encoding error.')\n return returncode\n",
"step-4": "import logging\nfrom subprocess import Popen, PIPE\nfrom .exceptions import VideoEncodingError, WrongVideoTypeError\nlogger = logging.getLogger('video.encoding')\ncmd_ffmpeg = ['ffmpeg', '-i']\ncmd_mp4 = ['-vf', 'scale=640:360', '-vcodec', 'h264', '-acodec', 'aac', '-y']\ncmd_webm = ['-vf', 'scale=640:360', '-c:v', 'libvpx-vp9', '-pix_fmt',\n 'yuv420p', '-y']\ncmd_jpg = ['-frames', '1', '-s', '640x360', '-ss', '1', '-y']\ncodecs = {'jpg': cmd_jpg, 'mp4': cmd_mp4, 'webm': cmd_webm}\n\n\ndef _run_cmd(cmds):\n try:\n return Popen(cmds, shell=False, close_fds=True, stdin=PIPE, stdout=\n PIPE, stderr=PIPE)\n except OSError as ex:\n raise VideoEncodingError('Video running error.') from ex\n\n\ndef encode_video_file(src_filname, dst_filename, file_type):\n logger.info('Source file: %s, Destination file: %s, File Type: %s',\n src_filname, dst_filename, file_type)\n try:\n cmd = codecs[file_type]\n except IndexError:\n raise WrongVideoTypeError('Wrong video type.')\n process = _run_cmd(cmd_ffmpeg + [src_filname] + cmd + [dst_filename])\n stdout, stderr = process.communicate()\n returncode = process.returncode\n if returncode != 0:\n logger.error('ffmpeg returncode %d, args: %s, output: %s',\n returncode, process.args, stderr.decode())\n raise VideoEncodingError('Video encoding error.')\n return returncode\n",
"step-5": "import logging\n\nfrom subprocess import Popen, PIPE\nfrom .exceptions import VideoEncodingError, WrongVideoTypeError\n\n# TODO: Create a switchable encoding engine.\n\nlogger = logging.getLogger('video.encoding')\n\ncmd_ffmpeg = [\n 'ffmpeg',\n '-i',\n]\n\ncmd_mp4 = [\n '-vf', 'scale=640:360',\n '-vcodec', 'h264',\n '-acodec', 'aac',\n '-y',\n]\n\ncmd_webm = [\n '-vf', 'scale=640:360',\n '-c:v', 'libvpx-vp9',\n '-pix_fmt', 'yuv420p',\n '-y',\n]\n\n# cmd_webm = [\n# '-c:v'\n# '-vf', 'scale=640:360',\n# '-vcodec', 'libvpx',\n# '-acodec', 'libvorbis',\n# '-y',\n# ]\n\ncmd_jpg = [\n '-frames', '1',\n '-s', '640x360',\n '-ss', '1',\n '-y',\n]\n\ncodecs = {\n 'jpg': cmd_jpg,\n 'mp4': cmd_mp4,\n 'webm': cmd_webm,\n}\n\n\ndef _run_cmd(cmds):\n\n try:\n return Popen(\n cmds,\n shell=False,\n close_fds=True,\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE,\n )\n except OSError as ex:\n raise VideoEncodingError('Video running error.') from ex\n\n\ndef encode_video_file(src_filname, dst_filename, file_type):\n\n logger.info(\n 'Source file: %s, Destination file: %s, File Type: %s',\n src_filname, dst_filename, file_type\n )\n\n try:\n cmd = codecs[file_type]\n except IndexError:\n raise WrongVideoTypeError('Wrong video type.')\n\n process = _run_cmd(\n cmd_ffmpeg + [src_filname] + cmd + [dst_filename],\n )\n\n # TODO: timeout handling here.\n stdout, stderr = process.communicate()\n\n returncode = process.returncode\n\n if returncode != 0:\n logger.error(\n 'ffmpeg returncode %d, args: %s, output: %s',\n returncode,\n process.args,\n stderr.decode(),\n )\n raise VideoEncodingError('Video encoding error.')\n\n return returncode\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# config {stack,buffer,label}
def get_features_da(config,sent_dict):
features = []
# TODO Improve Features
if len(config[0]) > 0:
# Top of stack.
top = config[0][-1]
top_stk_token_feature = 'TOP_STK_TOKEN_'+str(sent_dict['FORM'][top].lower())
features.append(top_stk_token_feature)
top_stk_lemma = 'TOP_STK_LEMMA_' + str(sent_dict['LEMMA'][top].lower()) # not converting to lower has helped to increase the f1 score slightly
features.append(top_stk_lemma)
top_stk_cpostag = 'TOP_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][top].lower())
features.append(top_stk_cpostag)
if len(config[1]) > 0:
top_buffer = config[1][-1] # top of buffer, since it is in descending order
top_buffer_token_feature = 'TOP_BUFFER_TOKEN'+str(sent_dict['FORM'][top_buffer].lower())
features.append(top_buffer_token_feature)
top_buffer_lemma = 'TOP_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][top_buffer].lower())
features.append(top_buffer_lemma)
top_buffer_cpostag = 'TOP_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][top_buffer].lower())
features.append(top_buffer_cpostag)
if len(config[0]) > 1:
two = config[0][-2] # 2nd from top in stack
# two_stk_token = 'two_stk_token_'+str(sent_dict['FORM'][two].lower())
# features.append(two_stk_token)
# two_stk_lemma = 'TWO_STK_LEMMA_' + str(sent_dict['LEMMA'][two].lower())
# features.append(two_stk_lemma)
two_stk_cpostag = 'TWO_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][two].lower())
features.append(two_stk_cpostag)
if len(config[1]) > 1:
two_buffer = config[1][-2] # 2nd from top in buffer
two_buffer_token = 'TWO_BUFFER_TOKEN_'+str(sent_dict['FORM'][two_buffer].lower())
features.append(two_buffer_token)
# two_buffer_lemma = 'TWO_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][two_buffer])
# features.append(two_buffer_lemma)
two_buffer_cpostag = 'TWO_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][two_buffer].lower())
features.append(two_buffer_cpostag)
# if len(config[0]) > 2:
# three = config[0][-3] # 3rd from top in stack
# three_stk_lemma = 'THREE_STACK_LEMMA_' + str(sent_dict['LEMMA'][three])
# features.append(three_stk_lemma)
# three_stk_cpostag = 'THREE_STACK_CPOSTAG_' + str(sent_dict['CPOSTAG'][three].lower())
# features.append(three_stk_cpostag)
if len(config[1]) > 2:
three_buffer = config[1][-3] # 3rd from top in buffer
# three_buffer_lemma = 'THREE_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][three_buffer].lower())
# features.append(three_buffer_lemma)
three_buffer_cpostag = 'THREE_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][three_buffer].lower())
features.append(three_buffer_cpostag)
# if len(config[0]) > 3:
# four = config[0][-4] # 4th from top in stack
# four_stk_lemma = 'FOUR_STK_LEMMA_' + str(sent_dict['LEMMA'][four].lower())
# features.append(four_stk_lemma)
# four_stk_cpostag = 'FOUR_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][four].lower())
# features.append(four_stk_cpostag)
if len(config[1]) > 3:
four_buffer = config[1][-4] # 4th from top in buffer
# four_buffer_lemma = 'FOUR_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][four_buffer].lower())
# features.append(four_buffer_lemma)
four_buffer_cpostag = 'FOUR_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][four_buffer].lower())
features.append(four_buffer_cpostag)
return features
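
# Illustrative call (added example with made-up indices; sent_dict holds the
# CoNLL-style columns used above and config is (stack, buffer, labels)):
# config = ([0], [1], [])
# sent_dict = {'FORM': ['The', 'cat', 'sat'],
#              'LEMMA': ['the', 'cat', 'sit'],
#              'CPOSTAG': ['DET', 'NOUN', 'VERB']}
# get_features_da(config, sent_dict)
# -> ['TOP_STK_TOKEN_the', 'TOP_STK_LEMMA_the', 'TOP_STK_CPOSTAG_det',
#     'TOP_BUFFER_TOKENcat', 'TOP_BUFFER_LEMMA_cat', 'TOP_BUFFER_CPOSTAG_noun']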
|
normal
|
{
"blob_id": "e0ce8a8ad9c842b013bbb1ea1c585b6c4c2a68f5",
"index": 2868,
"step-1": "<mask token>\n",
"step-2": "def get_features_da(config, sent_dict):\n features = []\n if len(config[0]) > 0:\n top = config[0][-1]\n top_stk_token_feature = 'TOP_STK_TOKEN_' + str(sent_dict['FORM'][\n top].lower())\n features.append(top_stk_token_feature)\n top_stk_lemma = 'TOP_STK_LEMMA_' + str(sent_dict['LEMMA'][top].lower())\n features.append(top_stk_lemma)\n top_stk_cpostag = 'TOP_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][top\n ].lower())\n features.append(top_stk_cpostag)\n if len(config[1]) > 0:\n top_buffer = config[1][-1]\n top_buffer_token_feature = 'TOP_BUFFER_TOKEN' + str(sent_dict[\n 'FORM'][top_buffer].lower())\n features.append(top_buffer_token_feature)\n top_buffer_lemma = 'TOP_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][\n top_buffer].lower())\n features.append(top_buffer_lemma)\n top_buffer_cpostag = 'TOP_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][top_buffer].lower())\n features.append(top_buffer_cpostag)\n if len(config[0]) > 1:\n two = config[0][-2]\n two_stk_cpostag = 'TWO_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][two\n ].lower())\n features.append(two_stk_cpostag)\n if len(config[1]) > 1:\n two_buffer = config[1][-2]\n two_buffer_token = 'TWO_BUFFER_TOKEN_' + str(sent_dict['FORM'][\n two_buffer].lower())\n features.append(two_buffer_token)\n two_buffer_cpostag = 'TWO_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][two_buffer].lower())\n features.append(two_buffer_cpostag)\n if len(config[1]) > 2:\n three_buffer = config[1][-3]\n three_buffer_cpostag = 'THREE_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][three_buffer].lower())\n features.append(three_buffer_cpostag)\n if len(config[1]) > 3:\n four_buffer = config[1][-4]\n four_buffer_cpostag = 'FOUR_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][four_buffer].lower())\n features.append(four_buffer_cpostag)\n return features\n",
"step-3": "# config {stack,buffer,label}\ndef get_features_da(config,sent_dict):\n features = []\n\n # TODO Improve Features\n \n if len(config[0]) > 0:\n # Top of stack.\n top = config[0][-1] \n \n top_stk_token_feature = 'TOP_STK_TOKEN_'+str(sent_dict['FORM'][top].lower())\n features.append(top_stk_token_feature)\n\t\n top_stk_lemma = 'TOP_STK_LEMMA_' + str(sent_dict['LEMMA'][top].lower()) # not converting to lower has helped to increase the f1 score slightly\n features.append(top_stk_lemma)\n\n top_stk_cpostag = 'TOP_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][top].lower())\n features.append(top_stk_cpostag)\n\t\n \n if len(config[1]) > 0:\n \ttop_buffer = config[1][-1] # top of buffer, since it is in descending order\n\n \ttop_buffer_token_feature = 'TOP_BUFFER_TOKEN'+str(sent_dict['FORM'][top_buffer].lower())\n \tfeatures.append(top_buffer_token_feature)\n\n \ttop_buffer_lemma = 'TOP_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][top_buffer].lower())\n \tfeatures.append(top_buffer_lemma)\n\n \ttop_buffer_cpostag = 'TOP_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][top_buffer].lower())\n \tfeatures.append(top_buffer_cpostag)\n\t\n\n if len(config[0]) > 1:\n \ttwo = config[0][-2] # 2nd from top in stack\n \t\n \t# two_stk_token = 'two_stk_token_'+str(sent_dict['FORM'][two].lower())\n \t# features.append(two_stk_token)\n\n \t# two_stk_lemma = 'TWO_STK_LEMMA_' + str(sent_dict['LEMMA'][two].lower())\n \t# features.append(two_stk_lemma)\n\n \ttwo_stk_cpostag = 'TWO_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][two].lower())\n \tfeatures.append(two_stk_cpostag)\n\n if len(config[1]) > 1:\n \ttwo_buffer = config[1][-2] # 2nd from top in buffer\n\n \ttwo_buffer_token = 'TWO_BUFFER_TOKEN_'+str(sent_dict['FORM'][two_buffer].lower())\n \tfeatures.append(two_buffer_token)\n\n \t# two_buffer_lemma = 'TWO_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][two_buffer])\n \t# features.append(two_buffer_lemma)\n\n \ttwo_buffer_cpostag = 'TWO_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][two_buffer].lower())\n \tfeatures.append(two_buffer_cpostag)\n\t\n\n # if len(config[0]) > 2:\n # \tthree = config[0][-3] # 3rd from top in stack\n\n # \tthree_stk_lemma = 'THREE_STACK_LEMMA_' + str(sent_dict['LEMMA'][three])\n # \tfeatures.append(three_stk_lemma)\n\n # \tthree_stk_cpostag = 'THREE_STACK_CPOSTAG_' + str(sent_dict['CPOSTAG'][three].lower())\n # \tfeatures.append(three_stk_cpostag)\n\n if len(config[1]) > 2:\n \tthree_buffer = config[1][-3] # 3rd from top in buffer\n\n \t# three_buffer_lemma = 'THREE_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][three_buffer].lower())\n \t# features.append(three_buffer_lemma)\n\n \tthree_buffer_cpostag = 'THREE_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][three_buffer].lower())\n \tfeatures.append(three_buffer_cpostag)\n\n # if len(config[0]) > 3:\n # \tfour = config[0][-4] # 4th from top in stack\n\n # \tfour_stk_lemma = 'FOUR_STK_LEMMA_' + str(sent_dict['LEMMA'][four].lower())\n # \tfeatures.append(four_stk_lemma)\n\n # \tfour_stk_cpostag = 'FOUR_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][four].lower())\n # \tfeatures.append(four_stk_cpostag)\n\n if len(config[1]) > 3:\n \tfour_buffer = config[1][-4] # 4th from top in buffer\n\n \t# four_buffer_lemma = 'FOUR_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][four_buffer].lower())\n \t# features.append(four_buffer_lemma)\n\n \tfour_buffer_cpostag = 'FOUR_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][four_buffer].lower())\n \tfeatures.append(four_buffer_cpostag)\n\n\n return features\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python2
# -*- coding: utf8 -*-
from __future__ import print_function, division, absolute_import
from flask.ext.login import current_user
from . import cert_record_process as record_process_base
from walis.thirdparty import thrift_client, thirdparty_svc
from walis.exception.util import raise_user_exc
from walis.exception.error_code import CERT_UPDATE_ERR
TRestaurantCertification = thirdparty_svc.ers.TRestaurantCertification
CERTIFICATION_TYPE_NONE = 0
RESTAURANT_NOT_EXIST_ID = -1
CERTIFICATION_NOT_EXIST = -2
CertType = thirdparty_svc.ers.CertificationConst
STATUS_PENDING = CertType.STATUS_PENDING
STATUS_PASSED = CertType.STATUS_PASSED
STATUS_FAILED = CertType.STATUS_FAILED
TYPE_CERT_PERSONAL = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_PERSONAL
TYPE_CERT_CORP = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_CORP
def get(restaurant_id):
with thrift_client('ers') as ers:
cert = ers.get_restaurant_certification(restaurant_id)
cert.comment = cert.comment.encode('utf-8')
return cert
def get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE):
limit = 250
with thrift_client('ers') as ers:
return ers.query_restaurant_certification_by_status(
status, offset, limit)
def add(cert):
with thrift_client('ers') as ers:
ers.add_restaurant_certification(cert)
record_process_base.add(
cert.restaurant_id,
cert.type,
CERTIFICATION_NOT_EXIST,
STATUS_PENDING,
comment='上传个人认证信息' if cert.type ==
TYPE_CERT_PERSONAL else '上传企业认证信息')
return ''
def update(cert):
with thrift_client('ers') as ers:
db_cert = ers.get_restaurant_certification(cert.restaurant_id)
if not db_cert:
raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)
with thrift_client('ers') as ers:
ers.update_restaurant_certification(cert)
record_process_base.add(
cert.restaurant_id,
cert.type,
cert.status,
STATUS_PENDING,
comment='修改认证信息')
return ''
def process_certification(restaurant_id, status_to):
with thrift_client('ers') as ers:
ers.process_certification(current_user.id,
restaurant_id, status_to)
def get_latest_record(restaurant_id):
nopass_record = record_process_base.get_latest_record(
restaurant_id)
comment = ''
cert_status = CERTIFICATION_NOT_EXIST
if nopass_record:
comment = nopass_record.comment
cert_status = nopass_record.status_to
return comment, cert_status
|
normal
|
{
"blob_id": "746971cd6c5bf65268e89303c8f4ce98a56eb111",
"index": 8011,
"step-1": "<mask token>\n\n\ndef get(restaurant_id):\n with thrift_client('ers') as ers:\n cert = ers.get_restaurant_certification(restaurant_id)\n cert.comment = cert.comment.encode('utf-8')\n return cert\n\n\n<mask token>\n\n\ndef add(cert):\n with thrift_client('ers') as ers:\n ers.add_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type,\n CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert\n .type == TYPE_CERT_PERSONAL else '上传企业认证信息')\n return ''\n\n\ndef update(cert):\n with thrift_client('ers') as ers:\n db_cert = ers.get_restaurant_certification(cert.restaurant_id)\n if not db_cert:\n raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)\n with thrift_client('ers') as ers:\n ers.update_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type, cert.status,\n STATUS_PENDING, comment='修改认证信息')\n return ''\n\n\n<mask token>\n\n\ndef get_latest_record(restaurant_id):\n nopass_record = record_process_base.get_latest_record(restaurant_id)\n comment = ''\n cert_status = CERTIFICATION_NOT_EXIST\n if nopass_record:\n comment = nopass_record.comment\n cert_status = nopass_record.status_to\n return comment, cert_status\n",
"step-2": "<mask token>\n\n\ndef get(restaurant_id):\n with thrift_client('ers') as ers:\n cert = ers.get_restaurant_certification(restaurant_id)\n cert.comment = cert.comment.encode('utf-8')\n return cert\n\n\ndef get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE):\n limit = 250\n with thrift_client('ers') as ers:\n return ers.query_restaurant_certification_by_status(status, offset,\n limit)\n\n\ndef add(cert):\n with thrift_client('ers') as ers:\n ers.add_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type,\n CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert\n .type == TYPE_CERT_PERSONAL else '上传企业认证信息')\n return ''\n\n\ndef update(cert):\n with thrift_client('ers') as ers:\n db_cert = ers.get_restaurant_certification(cert.restaurant_id)\n if not db_cert:\n raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)\n with thrift_client('ers') as ers:\n ers.update_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type, cert.status,\n STATUS_PENDING, comment='修改认证信息')\n return ''\n\n\n<mask token>\n\n\ndef get_latest_record(restaurant_id):\n nopass_record = record_process_base.get_latest_record(restaurant_id)\n comment = ''\n cert_status = CERTIFICATION_NOT_EXIST\n if nopass_record:\n comment = nopass_record.comment\n cert_status = nopass_record.status_to\n return comment, cert_status\n",
"step-3": "<mask token>\n\n\ndef get(restaurant_id):\n with thrift_client('ers') as ers:\n cert = ers.get_restaurant_certification(restaurant_id)\n cert.comment = cert.comment.encode('utf-8')\n return cert\n\n\ndef get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE):\n limit = 250\n with thrift_client('ers') as ers:\n return ers.query_restaurant_certification_by_status(status, offset,\n limit)\n\n\ndef add(cert):\n with thrift_client('ers') as ers:\n ers.add_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type,\n CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert\n .type == TYPE_CERT_PERSONAL else '上传企业认证信息')\n return ''\n\n\ndef update(cert):\n with thrift_client('ers') as ers:\n db_cert = ers.get_restaurant_certification(cert.restaurant_id)\n if not db_cert:\n raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)\n with thrift_client('ers') as ers:\n ers.update_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type, cert.status,\n STATUS_PENDING, comment='修改认证信息')\n return ''\n\n\ndef process_certification(restaurant_id, status_to):\n with thrift_client('ers') as ers:\n ers.process_certification(current_user.id, restaurant_id, status_to)\n\n\ndef get_latest_record(restaurant_id):\n nopass_record = record_process_base.get_latest_record(restaurant_id)\n comment = ''\n cert_status = CERTIFICATION_NOT_EXIST\n if nopass_record:\n comment = nopass_record.comment\n cert_status = nopass_record.status_to\n return comment, cert_status\n",
"step-4": "from __future__ import print_function, division, absolute_import\nfrom flask.ext.login import current_user\nfrom . import cert_record_process as record_process_base\nfrom walis.thirdparty import thrift_client, thirdparty_svc\nfrom walis.exception.util import raise_user_exc\nfrom walis.exception.error_code import CERT_UPDATE_ERR\nTRestaurantCertification = thirdparty_svc.ers.TRestaurantCertification\nCERTIFICATION_TYPE_NONE = 0\nRESTAURANT_NOT_EXIST_ID = -1\nCERTIFICATION_NOT_EXIST = -2\nCertType = thirdparty_svc.ers.CertificationConst\nSTATUS_PENDING = CertType.STATUS_PENDING\nSTATUS_PASSED = CertType.STATUS_PASSED\nSTATUS_FAILED = CertType.STATUS_FAILED\nTYPE_CERT_PERSONAL = (thirdparty_svc.ers.RestaurantConst.\n CERTIFICATION_TYPE_PERSONAL)\nTYPE_CERT_CORP = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_CORP\n\n\ndef get(restaurant_id):\n with thrift_client('ers') as ers:\n cert = ers.get_restaurant_certification(restaurant_id)\n cert.comment = cert.comment.encode('utf-8')\n return cert\n\n\ndef get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE):\n limit = 250\n with thrift_client('ers') as ers:\n return ers.query_restaurant_certification_by_status(status, offset,\n limit)\n\n\ndef add(cert):\n with thrift_client('ers') as ers:\n ers.add_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type,\n CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert\n .type == TYPE_CERT_PERSONAL else '上传企业认证信息')\n return ''\n\n\ndef update(cert):\n with thrift_client('ers') as ers:\n db_cert = ers.get_restaurant_certification(cert.restaurant_id)\n if not db_cert:\n raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)\n with thrift_client('ers') as ers:\n ers.update_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type, cert.status,\n STATUS_PENDING, comment='修改认证信息')\n return ''\n\n\ndef process_certification(restaurant_id, status_to):\n with thrift_client('ers') as ers:\n ers.process_certification(current_user.id, restaurant_id, status_to)\n\n\ndef get_latest_record(restaurant_id):\n nopass_record = record_process_base.get_latest_record(restaurant_id)\n comment = ''\n cert_status = CERTIFICATION_NOT_EXIST\n if nopass_record:\n comment = nopass_record.comment\n cert_status = nopass_record.status_to\n return comment, cert_status\n",
"step-5": "#!/usr/bin/env python2\n# -*- coding: utf8 -*-\n\nfrom __future__ import print_function, division, absolute_import\n\nfrom flask.ext.login import current_user\n\nfrom . import cert_record_process as record_process_base\nfrom walis.thirdparty import thrift_client, thirdparty_svc\nfrom walis.exception.util import raise_user_exc\nfrom walis.exception.error_code import CERT_UPDATE_ERR\n\nTRestaurantCertification = thirdparty_svc.ers.TRestaurantCertification\n\nCERTIFICATION_TYPE_NONE = 0\nRESTAURANT_NOT_EXIST_ID = -1\nCERTIFICATION_NOT_EXIST = -2\n\nCertType = thirdparty_svc.ers.CertificationConst\nSTATUS_PENDING = CertType.STATUS_PENDING\nSTATUS_PASSED = CertType.STATUS_PASSED\nSTATUS_FAILED = CertType.STATUS_FAILED\n\nTYPE_CERT_PERSONAL = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_PERSONAL\nTYPE_CERT_CORP = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_CORP\n\n\ndef get(restaurant_id):\n with thrift_client('ers') as ers:\n cert = ers.get_restaurant_certification(restaurant_id)\n cert.comment = cert.comment.encode('utf-8')\n return cert\n\n\ndef get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE):\n limit = 250\n with thrift_client('ers') as ers:\n return ers.query_restaurant_certification_by_status(\n status, offset, limit)\n\n\ndef add(cert):\n with thrift_client('ers') as ers:\n ers.add_restaurant_certification(cert)\n record_process_base.add(\n cert.restaurant_id,\n cert.type,\n CERTIFICATION_NOT_EXIST,\n STATUS_PENDING,\n comment='上传个人认证信息' if cert.type ==\n TYPE_CERT_PERSONAL else '上传企业认证信息')\n return ''\n\n\ndef update(cert):\n with thrift_client('ers') as ers:\n db_cert = ers.get_restaurant_certification(cert.restaurant_id)\n\n if not db_cert:\n raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)\n\n with thrift_client('ers') as ers:\n ers.update_restaurant_certification(cert)\n\n record_process_base.add(\n cert.restaurant_id,\n cert.type,\n cert.status,\n STATUS_PENDING,\n comment='修改认证信息')\n return ''\n\n\ndef process_certification(restaurant_id, status_to):\n with thrift_client('ers') as ers:\n ers.process_certification(current_user.id,\n restaurant_id, status_to)\n\n\ndef get_latest_record(restaurant_id):\n nopass_record = record_process_base.get_latest_record(\n restaurant_id)\n\n comment = ''\n cert_status = CERTIFICATION_NOT_EXIST\n if nopass_record:\n comment = nopass_record.comment\n cert_status = nopass_record.status_to\n\n return comment, cert_status\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
import requests
from lxml import html
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'}
mail_ru_link = "http://mail.ru"
lenta_link = "https://lenta.ru/"
req = requests.get(mail_ru_link, headers=headers).text
root = html.fromstring(req)
news = []
links = root.xpath(
"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/a[@name]/@href | "
"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/*/a[contains(@href, 'https://')]/@href")
titles = root.xpath("//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/a[@name]/*/*/h3/text() | "
"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/*/a[contains(@href, 'https://')]/text()")
if len(links) > 0:
for i, l in enumerate(links):
article = {'link': l, 'title': titles[i], 'source': mail_ru_link}
news.append(article)
else:
print("Error")
req = requests.get(lenta_link, headers=headers).text
root = html.fromstring(req)
links = root.xpath(
"//div[@class='item']/a/@href")
titles = root.xpath("//div[@class='item']/a/text()")
if len(links) > 0:
for i, l in enumerate(links):
article = {'link': lenta_link + l, 'title': titles[i], 'source': lenta_link}
news.append(article)
else:
print("Error")
print(news)
|
normal
|
{
"blob_id": "00d2a29774a4278b1b022571b3f16c88224f08fc",
"index": 5207,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(links) > 0:\n for i, l in enumerate(links):\n article = {'link': l, 'title': titles[i], 'source': mail_ru_link}\n news.append(article)\nelse:\n print('Error')\n<mask token>\nif len(links) > 0:\n for i, l in enumerate(links):\n article = {'link': lenta_link + l, 'title': titles[i], 'source':\n lenta_link}\n news.append(article)\nelse:\n print('Error')\nprint(news)\n",
"step-3": "<mask token>\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'\n }\nmail_ru_link = 'http://mail.ru'\nlenta_link = 'https://lenta.ru/'\nreq = requests.get(mail_ru_link, headers=headers).text\nroot = html.fromstring(req)\nnews = []\nlinks = root.xpath(\n \"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/a[@name]/@href | //div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/*/a[contains(@href, 'https://')]/@href\"\n )\ntitles = root.xpath(\n \"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/a[@name]/*/*/h3/text() | //div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/*/a[contains(@href, 'https://')]/text()\"\n )\nif len(links) > 0:\n for i, l in enumerate(links):\n article = {'link': l, 'title': titles[i], 'source': mail_ru_link}\n news.append(article)\nelse:\n print('Error')\nreq = requests.get(lenta_link, headers=headers).text\nroot = html.fromstring(req)\nlinks = root.xpath(\"//div[@class='item']/a/@href\")\ntitles = root.xpath(\"//div[@class='item']/a/text()\")\nif len(links) > 0:\n for i, l in enumerate(links):\n article = {'link': lenta_link + l, 'title': titles[i], 'source':\n lenta_link}\n news.append(article)\nelse:\n print('Error')\nprint(news)\n",
"step-4": "import requests\nfrom lxml import html\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'\n }\nmail_ru_link = 'http://mail.ru'\nlenta_link = 'https://lenta.ru/'\nreq = requests.get(mail_ru_link, headers=headers).text\nroot = html.fromstring(req)\nnews = []\nlinks = root.xpath(\n \"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/a[@name]/@href | //div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/*/a[contains(@href, 'https://')]/@href\"\n )\ntitles = root.xpath(\n \"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/a[@name]/*/*/h3/text() | //div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/*/a[contains(@href, 'https://')]/text()\"\n )\nif len(links) > 0:\n for i, l in enumerate(links):\n article = {'link': l, 'title': titles[i], 'source': mail_ru_link}\n news.append(article)\nelse:\n print('Error')\nreq = requests.get(lenta_link, headers=headers).text\nroot = html.fromstring(req)\nlinks = root.xpath(\"//div[@class='item']/a/@href\")\ntitles = root.xpath(\"//div[@class='item']/a/text()\")\nif len(links) > 0:\n for i, l in enumerate(links):\n article = {'link': lenta_link + l, 'title': titles[i], 'source':\n lenta_link}\n news.append(article)\nelse:\n print('Error')\nprint(news)\n",
"step-5": "import requests\nfrom lxml import html\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'}\nmail_ru_link = \"http://mail.ru\"\nlenta_link = \"https://lenta.ru/\"\n\nreq = requests.get(mail_ru_link, headers=headers).text\nroot = html.fromstring(req)\n\nnews = []\n\nlinks = root.xpath(\n \"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/a[@name]/@href | \"\n \"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/*/a[contains(@href, 'https://')]/@href\")\ntitles = root.xpath(\"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/a[@name]/*/*/h3/text() | \"\n \"//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/*/a[contains(@href, 'https://')]/text()\")\nif len(links) > 0:\n for i, l in enumerate(links):\n article = {'link': l, 'title': titles[i], 'source': mail_ru_link}\n news.append(article)\nelse:\n print(\"Error\")\n\n\nreq = requests.get(lenta_link, headers=headers).text\nroot = html.fromstring(req)\n\nlinks = root.xpath(\n \"//div[@class='item']/a/@href\")\ntitles = root.xpath(\"//div[@class='item']/a/text()\")\nif len(links) > 0:\n for i, l in enumerate(links):\n article = {'link': lenta_link + l, 'title': titles[i], 'source': lenta_link}\n news.append(article)\nelse:\n print(\"Error\")\nprint(news)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from .sgd import StochasticGradientDescent
from .momentum import Momentum
|
normal
|
{
"blob_id": "aa55f1dd4f363e07d5f9104346efaa24c0457d45",
"index": 9126,
"step-1": "<mask token>\n",
"step-2": "from .sgd import StochasticGradientDescent\nfrom .momentum import Momentum\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
# Third party imports
import numpy as np
# Local application imports
from mosqito.sound_level_meter import noct_spectrum
from mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness
from mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes
from mosqito.utils.conversion import amp2db
# Optional package import
try:
from SciDataTool import DataTime, DataLinspace, DataFreq
except ImportError:
DataTime = None
DataLinspace = None
DataFreq = None
def loudness_zwst(signal, fs=None, field_type="free", is_sdt_output=False):
"""Zwicker-loudness calculation for stationary signals
Calculates the acoustic loudness according to Zwicker method for
stationary signals.
    Normative reference:
ISO 532:1975 (method B)
DIN 45631:1991
ISO 532-1:2017 (method 1)
The code is based on BASIC program published in "Program for
calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
and H.Fastl, J.A.S.J (E) 12, 1 (1991).
Note that due to normative continuity, as defined in the
    preceding standards, the method is in accordance with
ISO 226:1987 equal loudness contours (instead of ISO 226:2003)
Parameters
----------
signal : numpy.array or DataTime object
Signal time values [Pa]
fs : float, optional
Sampling frequency, can be omitted if the input is a DataTime
object. Default to None
field_type : str
Type of soundfield corresponding to spec_third ("free" by
default or "diffuse").
is_sdt_output : Bool, optional
If True, the outputs are returned as SciDataTool objects.
Default to False
Outputs
-------
N : float or numpy.array
The overall loudness array [sones], size (Ntime,).
N_specific : numpy.ndarray or DataFreq object
The specific loudness array [sones/bark], size (Nbark, Ntime).
bark_axis: numpy.array
The Bark axis array, size (Nbark,).
"""
# Manage SciDataTool input type
if DataTime is not None and isinstance(signal, DataTime):
time = signal.get_along("time")["time"]
fs = 1 / (time[1] - time[0])
signal = signal.get_along("time")[signal.symbol]
# Compute third octave band spectrum
spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)
# Compute dB values
spec_third = amp2db(spec_third, ref=2e-5)
# Compute main loudness
Nm = _main_loudness(spec_third, field_type)
# Computation of specific loudness pattern and integration of overall
# loudness by attaching slopes towards higher frequencies
N, N_specific = _calc_slopes(Nm)
# Define Bark axis
bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
# Manage SciDataTool output type
if is_sdt_output:
if DataLinspace is None:
raise RuntimeError(
"In order to handle Data objects you need the 'SciDataTool' package."
)
else:
bark_data = DataLinspace(
name="Critical band rate",
unit="Bark",
initial=0,
final=24,
number=int(24 / 0.1),
include_endpoint=True,
)
N_specific = DataFreq(
name="Specific loudness (Zwicker method for stationnary signal)",
symbol="N'_{zwst}",
axes=[bark_data],
values=N_specific,
unit="sone/Bark",
)
return N, N_specific, bark_axis
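
# Minimal usage sketch (not part of the original module): the 1 kHz tone, its
# amplitude and the 48 kHz sampling rate below are illustrative values only.
if __name__ == "__main__":
    fs = 48000
    t = np.linspace(0, 1, fs, endpoint=False)
    test_signal = 0.02 * np.sin(2 * np.pi * 1000.0 * t)  # pressure signal in Pa
    N, N_specific, bark_axis = loudness_zwst(test_signal, fs, field_type="free")
    print(N)  # overall loudness in sones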
|
normal
|
{
"blob_id": "75716aaaca63f8ca6d32c885021c1dc0f9a12dac",
"index": 793,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along('time')['time']\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along('time')[signal.symbol]\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n spec_third = amp2db(spec_third, ref=2e-05)\n Nm = _main_loudness(spec_third, field_type)\n N, N_specific = _calc_slopes(Nm)\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(name='Critical band rate', unit='Bark',\n initial=0, final=24, number=int(24 / 0.1), include_endpoint\n =True)\n N_specific = DataFreq(name=\n 'Specific loudness (Zwicker method for stationnary signal)',\n symbol=\"N'_{zwst}\", axes=[bark_data], values=N_specific,\n unit='sone/Bark')\n return N, N_specific, bark_axis\n",
"step-3": "<mask token>\ntry:\n from SciDataTool import DataTime, DataLinspace, DataFreq\nexcept ImportError:\n DataTime = None\n DataLinspace = None\n DataFreq = None\n\n\ndef loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along('time')['time']\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along('time')[signal.symbol]\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n spec_third = amp2db(spec_third, ref=2e-05)\n Nm = _main_loudness(spec_third, field_type)\n N, N_specific = _calc_slopes(Nm)\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(name='Critical band rate', unit='Bark',\n initial=0, final=24, number=int(24 / 0.1), include_endpoint\n =True)\n N_specific = DataFreq(name=\n 'Specific loudness (Zwicker method for stationnary signal)',\n symbol=\"N'_{zwst}\", axes=[bark_data], values=N_specific,\n unit='sone/Bark')\n return N, N_specific, bark_axis\n",
"step-4": "import numpy as np\nfrom mosqito.sound_level_meter import noct_spectrum\nfrom mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness\nfrom mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes\nfrom mosqito.utils.conversion import amp2db\ntry:\n from SciDataTool import DataTime, DataLinspace, DataFreq\nexcept ImportError:\n DataTime = None\n DataLinspace = None\n DataFreq = None\n\n\ndef loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along('time')['time']\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along('time')[signal.symbol]\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n spec_third = amp2db(spec_third, ref=2e-05)\n Nm = _main_loudness(spec_third, field_type)\n N, N_specific = _calc_slopes(Nm)\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(name='Critical band rate', unit='Bark',\n initial=0, final=24, number=int(24 / 0.1), include_endpoint\n =True)\n N_specific = DataFreq(name=\n 'Specific loudness (Zwicker method for stationnary signal)',\n symbol=\"N'_{zwst}\", axes=[bark_data], values=N_specific,\n unit='sone/Bark')\n return N, N_specific, bark_axis\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Third party imports\nimport numpy as np\n\n# Local application imports\nfrom mosqito.sound_level_meter import noct_spectrum\nfrom mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness\nfrom mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes\nfrom mosqito.utils.conversion import amp2db\n\n# Optional package import\ntry:\n from SciDataTool import DataTime, DataLinspace, DataFreq\nexcept ImportError:\n DataTime = None\n DataLinspace = None\n DataFreq = None\n\n\ndef loudness_zwst(signal, fs=None, field_type=\"free\", is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n\n # Manage SciDataTool input type\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along(\"time\")[\"time\"]\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along(\"time\")[signal.symbol]\n\n # Compute third octave band spectrum\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n\n # Compute dB values\n spec_third = amp2db(spec_third, ref=2e-5)\n\n # Compute main loudness\n Nm = _main_loudness(spec_third, field_type)\n\n # Computation of specific loudness pattern and integration of overall\n # loudness by attaching slopes towards higher frequencies\n N, N_specific = _calc_slopes(Nm)\n\n # Define Bark axis\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n\n # Manage SciDataTool output type\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(\n name=\"Critical band rate\",\n unit=\"Bark\",\n initial=0,\n final=24,\n number=int(24 / 0.1),\n include_endpoint=True,\n )\n N_specific = DataFreq(\n name=\"Specific loudness (Zwicker method for stationnary signal)\",\n symbol=\"N'_{zwst}\",\n axes=[bark_data],\n values=N_specific,\n unit=\"sone/Bark\",\n )\n\n return N, N_specific, bark_axis\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Python version 3.8.5
#
# Author Maria Catharina van Veen
#
# Purpose To provide users with a tool to create
# or edit an html file.
#
# Tested OS This code was written and tested to
# work with Windows 10.
import os
from tkinter import *
import tkinter as tk
from tkinter.scrolledtext import ScrolledText
import WebPageGeneratorGui as gui
import WebPageGeneratorFunc as func
class MainWindow(Frame):
def __init__(self, root):
Frame.__init__(self, root)
self.root = root
self.root.title("Web Page Generator")
self.root.bgcolor = "#AA0000"
self.root.config(bg = self.root.bgcolor)
gui.loadGUI(self)
if __name__ == "__main__":
root = Tk()
app = MainWindow(root)
root.mainloop()
|
normal
|
{
"blob_id": "63e5ead200fb2884d93f19e7d9b8dc76c7f4f0e3",
"index": 4611,
"step-1": "<mask token>\n\n\nclass MainWindow(Frame):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MainWindow(Frame):\n\n def __init__(self, root):\n Frame.__init__(self, root)\n self.root = root\n self.root.title('Web Page Generator')\n self.root.bgcolor = '#AA0000'\n self.root.config(bg=self.root.bgcolor)\n gui.loadGUI(self)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MainWindow(Frame):\n\n def __init__(self, root):\n Frame.__init__(self, root)\n self.root = root\n self.root.title('Web Page Generator')\n self.root.bgcolor = '#AA0000'\n self.root.config(bg=self.root.bgcolor)\n gui.loadGUI(self)\n\n\nif __name__ == '__main__':\n root = Tk()\n app = MainWindow(root)\n root.mainloop()\n",
"step-4": "import os\nfrom tkinter import *\nimport tkinter as tk\nfrom tkinter.scrolledtext import ScrolledText\nimport WebPageGeneratorGui as gui\nimport WebPageGeneratorFunc as func\n\n\nclass MainWindow(Frame):\n\n def __init__(self, root):\n Frame.__init__(self, root)\n self.root = root\n self.root.title('Web Page Generator')\n self.root.bgcolor = '#AA0000'\n self.root.config(bg=self.root.bgcolor)\n gui.loadGUI(self)\n\n\nif __name__ == '__main__':\n root = Tk()\n app = MainWindow(root)\n root.mainloop()\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Python version 3.8.5\n#\n# Author Maria Catharina van Veen\n#\n# Purpose To provide users with a tool to create\n# or edit an html file.\n#\n# Tested OS This code was written and tested to\n# work with Windows 10.\n\n\nimport os\nfrom tkinter import *\nimport tkinter as tk\nfrom tkinter.scrolledtext import ScrolledText\n\nimport WebPageGeneratorGui as gui\nimport WebPageGeneratorFunc as func\n\nclass MainWindow(Frame):\n def __init__(self, root):\n Frame.__init__(self, root)\n self.root = root\n self.root.title(\"Web Page Generator\")\n self.root.bgcolor = \"#AA0000\"\n self.root.config(bg = self.root.bgcolor)\n gui.loadGUI(self)\n\n\nif __name__ == \"__main__\":\n root = Tk()\n app = MainWindow(root)\n root.mainloop()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
"""
Holds the AWS SNS email service that can be used to send emails.
"""
import boto3
import os
import cla
import uuid
import json
import datetime
from cla.models import email_service_interface
region = os.environ.get('REGION', '')
sender_email_address = os.environ.get('SES_SENDER_EMAIL_ADDRESS', '')
topic_arn = os.environ.get('SNS_EVENT_TOPIC_ARN', '')
class SNS(email_service_interface.EmailService):
"""
AWS SNS email client model.
"""
def __init__(self):
self.region = None
self.sender_email = None
self.topic_arn = None
def initialize(self, config):
self.region = region
self.sender_email = sender_email_address
self.topic_arn = topic_arn
def send(self, subject, body, recipient, attachment=None):
msg = self.get_email_message(subject, body, self.sender_email, recipient, attachment)
# Connect to SNS.
connection = self._get_connection()
# Send the email.
try:
self._send(connection, msg)
except Exception as err:
cla.log.error('Error while sending AWS SNS email to %s: %s', recipient, str(err))
def _get_connection(self):
"""
Mockable method to get a connection to the SNS service.
"""
return boto3.client('sns', region_name=self.region)
def _send(self, connection, msg): # pylint: disable=no-self-use
"""
Mockable send method.
"""
connection.publish(
TopicArn=self.topic_arn,
Message=msg,
)
def get_email_message(self, subject, body, sender, recipients, attachment=None): # pylint: disable=too-many-arguments
"""
Helper method to get a prepared email message given the subject,
body, and recipient provided.
:param subject: The email subject
:type subject: string
:param body: The email body
:type body: string
:param sender: The sender email
:type sender: string
:param recipients: An array of recipient email addresses
        :type recipients: string or list
:param attachment: The attachment dict (see EmailService.send() documentation).
:type: attachment: dict
:return: The json message
:rtype: string
"""
msg = {}
source = {}
data = {}
data["body"] = body
data["from"] = sender
data["subject"] = subject
data["type"] = "cla-email-event"
if isinstance(recipients, str):
data["recipients"] = [recipients]
else:
data["recipients"] = recipients
# Added MailChip/Mandrill support by setting the template and adding
# email body to the parameters list under the BODY attribute
data["template_name"] = "EasyCLA System Email Template"
data["parameters"] = {
"BODY": body
}
msg["data"] = data
source["client_id"] = "easycla-service"
source["description"] = "EasyCLA Service"
source["name"] = "EasyCLA Service"
msg["source_id"] = source
msg["id"] = str(uuid.uuid4())
msg["type"] = "cla-email-event"
msg["version"] = "0.1.0"
json_string = json.dumps(msg)
# cla.log.debug(f'Email JSON: {json_string}')
return json_string
class MockSNS(SNS):
"""
Mockable AWS SNS email client.
"""
def __init__(self):
super().__init__()
self.emails_sent = []
def _get_connection(self):
return None
def _send(self, connection, msg):
self.emails_sent.append(msg)
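
# Minimal usage sketch (not part of the original module): MockSNS records the
# JSON payload instead of publishing to AWS SNS, which makes it handy in unit
# tests. The subject, body and recipient below are illustrative placeholders.
if __name__ == "__main__":
    client = MockSNS()
    client.initialize(config=None)
    client.send('Test subject', 'Test body', 'user@example.com')
    print(len(client.emails_sent))  # 1 - the JSON message captured by _send()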
|
normal
|
{
"blob_id": "16dd73f2c85eff8d62cf0e605489d0db1616e36e",
"index": 8650,
"step-1": "<mask token>\n\n\nclass SNS(email_service_interface.EmailService):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MockSNS(SNS):\n \"\"\"\n Mockable AWS SNS email client.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.emails_sent = []\n\n def _get_connection(self):\n return None\n\n def _send(self, connection, msg):\n self.emails_sent.append(msg)\n",
"step-2": "<mask token>\n\n\nclass SNS(email_service_interface.EmailService):\n <mask token>\n\n def __init__(self):\n self.region = None\n self.sender_email = None\n self.topic_arn = None\n\n def initialize(self, config):\n self.region = region\n self.sender_email = sender_email_address\n self.topic_arn = topic_arn\n\n def send(self, subject, body, recipient, attachment=None):\n msg = self.get_email_message(subject, body, self.sender_email,\n recipient, attachment)\n connection = self._get_connection()\n try:\n self._send(connection, msg)\n except Exception as err:\n cla.log.error('Error while sending AWS SNS email to %s: %s',\n recipient, str(err))\n\n def _get_connection(self):\n \"\"\"\n Mockable method to get a connection to the SNS service.\n \"\"\"\n return boto3.client('sns', region_name=self.region)\n\n def _send(self, connection, msg):\n \"\"\"\n Mockable send method.\n \"\"\"\n connection.publish(TopicArn=self.topic_arn, Message=msg)\n\n def get_email_message(self, subject, body, sender, recipients,\n attachment=None):\n \"\"\"\n Helper method to get a prepared email message given the subject,\n body, and recipient provided.\n\n :param subject: The email subject\n :type subject: string\n :param body: The email body\n :type body: string\n :param sender: The sender email\n :type sender: string\n :param recipients: An array of recipient email addresses\n :type recipient: string\n :param attachment: The attachment dict (see EmailService.send() documentation).\n :type: attachment: dict\n :return: The json message\n :rtype: string\n \"\"\"\n msg = {}\n source = {}\n data = {}\n data['body'] = body\n data['from'] = sender\n data['subject'] = subject\n data['type'] = 'cla-email-event'\n if isinstance(recipients, str):\n data['recipients'] = [recipients]\n else:\n data['recipients'] = recipients\n data['template_name'] = 'EasyCLA System Email Template'\n data['parameters'] = {'BODY': body}\n msg['data'] = data\n source['client_id'] = 'easycla-service'\n source['description'] = 'EasyCLA Service'\n source['name'] = 'EasyCLA Service'\n msg['source_id'] = source\n msg['id'] = str(uuid.uuid4())\n msg['type'] = 'cla-email-event'\n msg['version'] = '0.1.0'\n json_string = json.dumps(msg)\n return json_string\n\n\nclass MockSNS(SNS):\n \"\"\"\n Mockable AWS SNS email client.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.emails_sent = []\n\n def _get_connection(self):\n return None\n\n def _send(self, connection, msg):\n self.emails_sent.append(msg)\n",
"step-3": "<mask token>\nregion = os.environ.get('REGION', '')\nsender_email_address = os.environ.get('SES_SENDER_EMAIL_ADDRESS', '')\ntopic_arn = os.environ.get('SNS_EVENT_TOPIC_ARN', '')\n\n\nclass SNS(email_service_interface.EmailService):\n \"\"\"\n AWS SNS email client model.\n \"\"\"\n\n def __init__(self):\n self.region = None\n self.sender_email = None\n self.topic_arn = None\n\n def initialize(self, config):\n self.region = region\n self.sender_email = sender_email_address\n self.topic_arn = topic_arn\n\n def send(self, subject, body, recipient, attachment=None):\n msg = self.get_email_message(subject, body, self.sender_email,\n recipient, attachment)\n connection = self._get_connection()\n try:\n self._send(connection, msg)\n except Exception as err:\n cla.log.error('Error while sending AWS SNS email to %s: %s',\n recipient, str(err))\n\n def _get_connection(self):\n \"\"\"\n Mockable method to get a connection to the SNS service.\n \"\"\"\n return boto3.client('sns', region_name=self.region)\n\n def _send(self, connection, msg):\n \"\"\"\n Mockable send method.\n \"\"\"\n connection.publish(TopicArn=self.topic_arn, Message=msg)\n\n def get_email_message(self, subject, body, sender, recipients,\n attachment=None):\n \"\"\"\n Helper method to get a prepared email message given the subject,\n body, and recipient provided.\n\n :param subject: The email subject\n :type subject: string\n :param body: The email body\n :type body: string\n :param sender: The sender email\n :type sender: string\n :param recipients: An array of recipient email addresses\n :type recipient: string\n :param attachment: The attachment dict (see EmailService.send() documentation).\n :type: attachment: dict\n :return: The json message\n :rtype: string\n \"\"\"\n msg = {}\n source = {}\n data = {}\n data['body'] = body\n data['from'] = sender\n data['subject'] = subject\n data['type'] = 'cla-email-event'\n if isinstance(recipients, str):\n data['recipients'] = [recipients]\n else:\n data['recipients'] = recipients\n data['template_name'] = 'EasyCLA System Email Template'\n data['parameters'] = {'BODY': body}\n msg['data'] = data\n source['client_id'] = 'easycla-service'\n source['description'] = 'EasyCLA Service'\n source['name'] = 'EasyCLA Service'\n msg['source_id'] = source\n msg['id'] = str(uuid.uuid4())\n msg['type'] = 'cla-email-event'\n msg['version'] = '0.1.0'\n json_string = json.dumps(msg)\n return json_string\n\n\nclass MockSNS(SNS):\n \"\"\"\n Mockable AWS SNS email client.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.emails_sent = []\n\n def _get_connection(self):\n return None\n\n def _send(self, connection, msg):\n self.emails_sent.append(msg)\n",
"step-4": "<mask token>\nimport boto3\nimport os\nimport cla\nimport uuid\nimport json\nimport datetime\nfrom cla.models import email_service_interface\nregion = os.environ.get('REGION', '')\nsender_email_address = os.environ.get('SES_SENDER_EMAIL_ADDRESS', '')\ntopic_arn = os.environ.get('SNS_EVENT_TOPIC_ARN', '')\n\n\nclass SNS(email_service_interface.EmailService):\n \"\"\"\n AWS SNS email client model.\n \"\"\"\n\n def __init__(self):\n self.region = None\n self.sender_email = None\n self.topic_arn = None\n\n def initialize(self, config):\n self.region = region\n self.sender_email = sender_email_address\n self.topic_arn = topic_arn\n\n def send(self, subject, body, recipient, attachment=None):\n msg = self.get_email_message(subject, body, self.sender_email,\n recipient, attachment)\n connection = self._get_connection()\n try:\n self._send(connection, msg)\n except Exception as err:\n cla.log.error('Error while sending AWS SNS email to %s: %s',\n recipient, str(err))\n\n def _get_connection(self):\n \"\"\"\n Mockable method to get a connection to the SNS service.\n \"\"\"\n return boto3.client('sns', region_name=self.region)\n\n def _send(self, connection, msg):\n \"\"\"\n Mockable send method.\n \"\"\"\n connection.publish(TopicArn=self.topic_arn, Message=msg)\n\n def get_email_message(self, subject, body, sender, recipients,\n attachment=None):\n \"\"\"\n Helper method to get a prepared email message given the subject,\n body, and recipient provided.\n\n :param subject: The email subject\n :type subject: string\n :param body: The email body\n :type body: string\n :param sender: The sender email\n :type sender: string\n :param recipients: An array of recipient email addresses\n :type recipient: string\n :param attachment: The attachment dict (see EmailService.send() documentation).\n :type: attachment: dict\n :return: The json message\n :rtype: string\n \"\"\"\n msg = {}\n source = {}\n data = {}\n data['body'] = body\n data['from'] = sender\n data['subject'] = subject\n data['type'] = 'cla-email-event'\n if isinstance(recipients, str):\n data['recipients'] = [recipients]\n else:\n data['recipients'] = recipients\n data['template_name'] = 'EasyCLA System Email Template'\n data['parameters'] = {'BODY': body}\n msg['data'] = data\n source['client_id'] = 'easycla-service'\n source['description'] = 'EasyCLA Service'\n source['name'] = 'EasyCLA Service'\n msg['source_id'] = source\n msg['id'] = str(uuid.uuid4())\n msg['type'] = 'cla-email-event'\n msg['version'] = '0.1.0'\n json_string = json.dumps(msg)\n return json_string\n\n\nclass MockSNS(SNS):\n \"\"\"\n Mockable AWS SNS email client.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.emails_sent = []\n\n def _get_connection(self):\n return None\n\n def _send(self, connection, msg):\n self.emails_sent.append(msg)\n",
"step-5": "# Copyright The Linux Foundation and each contributor to CommunityBridge.\n# SPDX-License-Identifier: MIT\n\n\"\"\"\nHolds the AWS SNS email service that can be used to send emails.\n\"\"\"\n\nimport boto3\nimport os\nimport cla\nimport uuid\nimport json\nimport datetime\nfrom cla.models import email_service_interface\n\nregion = os.environ.get('REGION', '')\nsender_email_address = os.environ.get('SES_SENDER_EMAIL_ADDRESS', '')\ntopic_arn = os.environ.get('SNS_EVENT_TOPIC_ARN', '')\n\n\nclass SNS(email_service_interface.EmailService):\n \"\"\"\n AWS SNS email client model.\n \"\"\"\n\n def __init__(self):\n self.region = None\n self.sender_email = None\n self.topic_arn = None\n\n def initialize(self, config):\n self.region = region\n self.sender_email = sender_email_address\n self.topic_arn = topic_arn\n\n def send(self, subject, body, recipient, attachment=None):\n msg = self.get_email_message(subject, body, self.sender_email, recipient, attachment)\n # Connect to SNS.\n connection = self._get_connection()\n # Send the email.\n try:\n self._send(connection, msg)\n except Exception as err:\n cla.log.error('Error while sending AWS SNS email to %s: %s', recipient, str(err))\n\n def _get_connection(self):\n \"\"\"\n Mockable method to get a connection to the SNS service.\n \"\"\"\n return boto3.client('sns', region_name=self.region)\n\n def _send(self, connection, msg): # pylint: disable=no-self-use\n \"\"\"\n Mockable send method.\n \"\"\"\n connection.publish(\n TopicArn=self.topic_arn,\n Message=msg,\n )\n\n def get_email_message(self, subject, body, sender, recipients, attachment=None): # pylint: disable=too-many-arguments\n \"\"\"\n Helper method to get a prepared email message given the subject,\n body, and recipient provided.\n\n :param subject: The email subject\n :type subject: string\n :param body: The email body\n :type body: string\n :param sender: The sender email\n :type sender: string\n :param recipients: An array of recipient email addresses\n :type recipient: string\n :param attachment: The attachment dict (see EmailService.send() documentation).\n :type: attachment: dict\n :return: The json message\n :rtype: string\n \"\"\"\n msg = {}\n source = {}\n data = {}\n\n data[\"body\"] = body\n data[\"from\"] = sender\n data[\"subject\"] = subject\n data[\"type\"] = \"cla-email-event\"\n if isinstance(recipients, str):\n data[\"recipients\"] = [recipients]\n else:\n data[\"recipients\"] = recipients\n # Added MailChip/Mandrill support by setting the template and adding\n # email body to the parameters list under the BODY attribute\n data[\"template_name\"] = \"EasyCLA System Email Template\"\n data[\"parameters\"] = {\n \"BODY\": body\n }\n\n msg[\"data\"] = data\n\n source[\"client_id\"] = \"easycla-service\"\n source[\"description\"] = \"EasyCLA Service\"\n source[\"name\"] = \"EasyCLA Service\"\n msg[\"source_id\"] = source\n\n msg[\"id\"] = str(uuid.uuid4())\n msg[\"type\"] = \"cla-email-event\"\n msg[\"version\"] = \"0.1.0\"\n json_string = json.dumps(msg)\n # cla.log.debug(f'Email JSON: {json_string}')\n return json_string\n\n\nclass MockSNS(SNS):\n \"\"\"\n Mockable AWS SNS email client.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.emails_sent = []\n\n def _get_connection(self):\n return None\n\n def _send(self, connection, msg):\n self.emails_sent.append(msg)\n",
"step-ids": [
6,
12,
14,
15,
16
]
}
|
[
6,
12,
14,
15,
16
] |
#Container sequences
#list tuple collections.deque dict
#Flat sequences
#str
#Mutable sequences
#list collections.deque dict
#Immutable sequences
# tuple str
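
# A minimal illustration of the split above (standard library only):
# a list supports in-place mutation, a tuple or str does not.
items = [1, 2, 3]
items.append(4)        # fine: list is a mutable container sequence
pair = (1, 2)
try:
    pair[0] = 0        # raises TypeError: tuple is an immutable sequence
except TypeError as exc:
    print(exc)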
|
normal
|
{
"blob_id": "1f45bdbfdd29a0b832ebac7e4ff91df1203ae158",
"index": 7712,
"step-1": "#容器序列\n#list tuple collections.deque dict\n#扁平序列\n#str \n#可变序列\n#list collections.deque dict\n#不变序列\n# tuple str",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
import time
t0 = time.time()
# ------------------------------
days_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def count_days(start_date, end_date, ref_date, target_day):
# ref_date must be exactly 1 year before start_date
month = start_date[0]
day = start_date[1]
year = start_date[2]
end_month = end_date[0]
end_day = end_date[1]
end_year = end_date[2]
ref_year = ref_date[2]
ref_day_of_week = ref_date[3]
if (ref_year % 100 == 0) & (ref_year % 400 == 0):
ref_days_in_year = 366
elif ref_year % 4 == 0:
ref_days_in_year = 366
else:
ref_days_in_year = 365
day_of_week = ref_day_of_week + ref_days_in_year % 7
day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week
day_counter = 0
if day_of_week != 1:
day_of_week += days_in_month[month] - day + 1
day_of_week %= 7
month += 1
while year <= end_year:
days_in_month[2] = 29 if year % 4 == 0 else 28
while ( (year != end_year) & (month <= 12) |
(year == end_year) & (month <= end_month) ):
day_of_week += days_in_month[month] % 7
day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week
day_counter += 1 if day_of_week == target_day else 0
month += 1
month = 1
year += 1
return day_counter
print(count_days( (1, 1, 1901), (12, 31, 2000), (1, 1, 1900, 1), 7))
# ------------------------------
t1 = time.time()
print(f"program took {(t1-t0)*1000} milliseconds")
|
normal
|
{
"blob_id": "9843f957435b74e63a6fe4827cc17c824f11c7d6",
"index": 5372,
"step-1": "<mask token>\n\n\ndef count_days(start_date, end_date, ref_date, target_day):\n month = start_date[0]\n day = start_date[1]\n year = start_date[2]\n end_month = end_date[0]\n end_day = end_date[1]\n end_year = end_date[2]\n ref_year = ref_date[2]\n ref_day_of_week = ref_date[3]\n if (ref_year % 100 == 0) & (ref_year % 400 == 0):\n ref_days_in_year = 366\n elif ref_year % 4 == 0:\n ref_days_in_year = 366\n else:\n ref_days_in_year = 365\n day_of_week = ref_day_of_week + ref_days_in_year % 7\n day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week\n day_counter = 0\n if day_of_week != 1:\n day_of_week += days_in_month[month] - day + 1\n day_of_week %= 7\n month += 1\n while year <= end_year:\n days_in_month[2] = 29 if year % 4 == 0 else 28\n while (year != end_year) & (month <= 12) | (year == end_year) & (month\n <= end_month):\n day_of_week += days_in_month[month] % 7\n day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week\n day_counter += 1 if day_of_week == target_day else 0\n month += 1\n month = 1\n year += 1\n return day_counter\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef count_days(start_date, end_date, ref_date, target_day):\n month = start_date[0]\n day = start_date[1]\n year = start_date[2]\n end_month = end_date[0]\n end_day = end_date[1]\n end_year = end_date[2]\n ref_year = ref_date[2]\n ref_day_of_week = ref_date[3]\n if (ref_year % 100 == 0) & (ref_year % 400 == 0):\n ref_days_in_year = 366\n elif ref_year % 4 == 0:\n ref_days_in_year = 366\n else:\n ref_days_in_year = 365\n day_of_week = ref_day_of_week + ref_days_in_year % 7\n day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week\n day_counter = 0\n if day_of_week != 1:\n day_of_week += days_in_month[month] - day + 1\n day_of_week %= 7\n month += 1\n while year <= end_year:\n days_in_month[2] = 29 if year % 4 == 0 else 28\n while (year != end_year) & (month <= 12) | (year == end_year) & (month\n <= end_month):\n day_of_week += days_in_month[month] % 7\n day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week\n day_counter += 1 if day_of_week == target_day else 0\n month += 1\n month = 1\n year += 1\n return day_counter\n\n\nprint(count_days((1, 1, 1901), (12, 31, 2000), (1, 1, 1900, 1), 7))\n<mask token>\nprint(f'program took {(t1 - t0) * 1000} milliseconds')\n",
"step-3": "<mask token>\nt0 = time.time()\ndays_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n\ndef count_days(start_date, end_date, ref_date, target_day):\n month = start_date[0]\n day = start_date[1]\n year = start_date[2]\n end_month = end_date[0]\n end_day = end_date[1]\n end_year = end_date[2]\n ref_year = ref_date[2]\n ref_day_of_week = ref_date[3]\n if (ref_year % 100 == 0) & (ref_year % 400 == 0):\n ref_days_in_year = 366\n elif ref_year % 4 == 0:\n ref_days_in_year = 366\n else:\n ref_days_in_year = 365\n day_of_week = ref_day_of_week + ref_days_in_year % 7\n day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week\n day_counter = 0\n if day_of_week != 1:\n day_of_week += days_in_month[month] - day + 1\n day_of_week %= 7\n month += 1\n while year <= end_year:\n days_in_month[2] = 29 if year % 4 == 0 else 28\n while (year != end_year) & (month <= 12) | (year == end_year) & (month\n <= end_month):\n day_of_week += days_in_month[month] % 7\n day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week\n day_counter += 1 if day_of_week == target_day else 0\n month += 1\n month = 1\n year += 1\n return day_counter\n\n\nprint(count_days((1, 1, 1901), (12, 31, 2000), (1, 1, 1900, 1), 7))\nt1 = time.time()\nprint(f'program took {(t1 - t0) * 1000} milliseconds')\n",
"step-4": "import time\nt0 = time.time()\ndays_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n\ndef count_days(start_date, end_date, ref_date, target_day):\n month = start_date[0]\n day = start_date[1]\n year = start_date[2]\n end_month = end_date[0]\n end_day = end_date[1]\n end_year = end_date[2]\n ref_year = ref_date[2]\n ref_day_of_week = ref_date[3]\n if (ref_year % 100 == 0) & (ref_year % 400 == 0):\n ref_days_in_year = 366\n elif ref_year % 4 == 0:\n ref_days_in_year = 366\n else:\n ref_days_in_year = 365\n day_of_week = ref_day_of_week + ref_days_in_year % 7\n day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week\n day_counter = 0\n if day_of_week != 1:\n day_of_week += days_in_month[month] - day + 1\n day_of_week %= 7\n month += 1\n while year <= end_year:\n days_in_month[2] = 29 if year % 4 == 0 else 28\n while (year != end_year) & (month <= 12) | (year == end_year) & (month\n <= end_month):\n day_of_week += days_in_month[month] % 7\n day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week\n day_counter += 1 if day_of_week == target_day else 0\n month += 1\n month = 1\n year += 1\n return day_counter\n\n\nprint(count_days((1, 1, 1901), (12, 31, 2000), (1, 1, 1900, 1), 7))\nt1 = time.time()\nprint(f'program took {(t1 - t0) * 1000} milliseconds')\n",
"step-5": "import time\n\nt0 = time.time()\n# ------------------------------\n\ndays_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\ndef count_days(start_date, end_date, ref_date, target_day):\n # ref_date must be exactly 1 year before start_date\n month = start_date[0]\n day = start_date[1]\n year = start_date[2]\n\n end_month = end_date[0]\n end_day = end_date[1]\n end_year = end_date[2] \n\n ref_year = ref_date[2] \n ref_day_of_week = ref_date[3]\n if (ref_year % 100 == 0) & (ref_year % 400 == 0):\n ref_days_in_year = 366\n elif ref_year % 4 == 0:\n ref_days_in_year = 366\n else:\n ref_days_in_year = 365\n\n day_of_week = ref_day_of_week + ref_days_in_year % 7\n day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week\n\n day_counter = 0\n\n if day_of_week != 1:\n day_of_week += days_in_month[month] - day + 1\n day_of_week %= 7\n month += 1 \n\n while year <= end_year:\n days_in_month[2] = 29 if year % 4 == 0 else 28\n while ( (year != end_year) & (month <= 12) |\n (year == end_year) & (month <= end_month) ):\n day_of_week += days_in_month[month] % 7\n day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week\n day_counter += 1 if day_of_week == target_day else 0\n month += 1\n month = 1\n year += 1\n\n return day_counter\n\nprint(count_days( (1, 1, 1901), (12, 31, 2000), (1, 1, 1900, 1), 7))\n\n# ------------------------------\nt1 = time.time()\nprint(f\"program took {(t1-t0)*1000} milliseconds\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from Tkinter import *
import time
def create_window():
window = Toplevel(root)
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
canvas = Canvas(window,width=w,height=h)
canvas.create_text(w/2,h/2,text="this will close after 3 seconds",font="Arial")
canvas.pack()
window.overrideredirect(1)
window.geometry("%dx%d+0+0" % (w, h))
window.after(3000, lambda: window.destroy())
root = Tk()
root.title("3 Second Splash")
root.geometry("250x250")
b = Button(root, text="Launch splash window", command=create_window)
b.place(relx=0.5,rely=0.5,anchor=CENTER)
#b.pack()
root.mainloop()
|
normal
|
{
"blob_id": "cac49a9a2cb753bb81c45ac1d2d887b1f48dd9bb",
"index": 9562,
"step-1": "<mask token>\n\n\ndef create_window():\n window = Toplevel(root)\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\n canvas = Canvas(window, width=w, height=h)\n canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds',\n font='Arial')\n canvas.pack()\n window.overrideredirect(1)\n window.geometry('%dx%d+0+0' % (w, h))\n window.after(3000, lambda : window.destroy())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_window():\n window = Toplevel(root)\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\n canvas = Canvas(window, width=w, height=h)\n canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds',\n font='Arial')\n canvas.pack()\n window.overrideredirect(1)\n window.geometry('%dx%d+0+0' % (w, h))\n window.after(3000, lambda : window.destroy())\n\n\n<mask token>\nroot.title('3 Second Splash')\nroot.geometry('250x250')\n<mask token>\nb.place(relx=0.5, rely=0.5, anchor=CENTER)\nroot.mainloop()\n",
"step-3": "<mask token>\n\n\ndef create_window():\n window = Toplevel(root)\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\n canvas = Canvas(window, width=w, height=h)\n canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds',\n font='Arial')\n canvas.pack()\n window.overrideredirect(1)\n window.geometry('%dx%d+0+0' % (w, h))\n window.after(3000, lambda : window.destroy())\n\n\nroot = Tk()\nroot.title('3 Second Splash')\nroot.geometry('250x250')\nb = Button(root, text='Launch splash window', command=create_window)\nb.place(relx=0.5, rely=0.5, anchor=CENTER)\nroot.mainloop()\n",
"step-4": "from Tkinter import *\nimport time\n\n\ndef create_window():\n window = Toplevel(root)\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\n canvas = Canvas(window, width=w, height=h)\n canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds',\n font='Arial')\n canvas.pack()\n window.overrideredirect(1)\n window.geometry('%dx%d+0+0' % (w, h))\n window.after(3000, lambda : window.destroy())\n\n\nroot = Tk()\nroot.title('3 Second Splash')\nroot.geometry('250x250')\nb = Button(root, text='Launch splash window', command=create_window)\nb.place(relx=0.5, rely=0.5, anchor=CENTER)\nroot.mainloop()\n",
"step-5": "from Tkinter import *\nimport time\n\ndef create_window():\n window = Toplevel(root)\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\n canvas = Canvas(window,width=w,height=h)\n canvas.create_text(w/2,h/2,text=\"this will close after 3 seconds\",font=\"Arial\")\n canvas.pack()\n window.overrideredirect(1)\n window.geometry(\"%dx%d+0+0\" % (w, h))\n window.after(3000, lambda: window.destroy())\n \nroot = Tk()\nroot.title(\"3 Second Splash\")\nroot.geometry(\"250x250\")\nb = Button(root, text=\"Launch splash window\", command=create_window)\nb.place(relx=0.5,rely=0.5,anchor=CENTER)\n#b.pack()\n\nroot.mainloop()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_vmware import vim_util
def build_recursive_traversal_spec(client_factory):
# Recurse through all ResourcePools
rp_to_rp = client_factory.create('ns0:TraversalSpec')
rp_to_rp.name = 'rpToRp'
rp_to_rp.type = 'ResourcePool'
rp_to_rp.path = 'resourcePool'
rp_to_rp.skip = False
rp_to_vm = client_factory.create('ns0:TraversalSpec')
rp_to_vm.name = 'rpToVm'
rp_to_vm.type = 'ResourcePool'
rp_to_vm.path = 'vm'
rp_to_vm.skip = False
spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_resource_pool[0].name = 'rpToRp'
spec_array_resource_pool[1].name = 'rpToVm'
rp_to_rp.selectSet = spec_array_resource_pool
# Traversal through resource pool branch
cr_to_rp = client_factory.create('ns0:TraversalSpec')
cr_to_rp.name = 'crToRp'
cr_to_rp.type = 'ComputeResource'
cr_to_rp.path = 'resourcePool'
cr_to_rp.skip = False
spec_array_compute_resource = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_compute_resource[0].name = 'rpToRp'
spec_array_compute_resource[1].name = 'rpToVm'
cr_to_rp.selectSet = spec_array_compute_resource
# Traversal through host branch
cr_to_h = client_factory.create('ns0:TraversalSpec')
cr_to_h.name = 'crToH'
cr_to_h.type = 'ComputeResource'
cr_to_h.path = 'host'
cr_to_h.skip = False
# Traversal through hostFolder branch
dc_to_hf = client_factory.create('ns0:TraversalSpec')
dc_to_hf.name = 'dcToHf'
dc_to_hf.type = 'Datacenter'
dc_to_hf.path = 'hostFolder'
dc_to_hf.skip = False
spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_host[0].name = 'visitFolders'
dc_to_hf.selectSet = spec_array_datacenter_host
# Traversal through vmFolder branch
dc_to_vmf = client_factory.create('ns0:TraversalSpec')
dc_to_vmf.name = 'dcToVmf'
dc_to_vmf.type = 'Datacenter'
dc_to_vmf.path = 'vmFolder'
dc_to_vmf.skip = False
spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_vm[0].name = 'visitFolders'
dc_to_vmf.selectSet = spec_array_datacenter_vm
# Traversal through datastore branch
dc_to_ds = client_factory.create('ns0:TraversalSpec')
dc_to_ds.name = 'dcToDs'
dc_to_ds.type = 'Datacenter'
dc_to_ds.path = 'datastore'
dc_to_ds.skip = False
spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_ds[0].name = 'visitFolders'
dc_to_ds.selectSet = spec_array_datacenter_ds
# Recurse through all hosts
h_to_vm = client_factory.create('ns0:TraversalSpec')
h_to_vm.name = 'hToVm'
h_to_vm.type = 'HostSystem'
h_to_vm.path = 'vm'
h_to_vm.skip = False
spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_host_vm[0].name = 'visitFolders'
h_to_vm.selectSet = spec_array_host_vm
# Recurse through all datastores
ds_to_vm = client_factory.create('ns0:TraversalSpec')
ds_to_vm.name = 'dsToVm'
ds_to_vm.type = 'Datastore'
ds_to_vm.path = 'vm'
ds_to_vm.skip = False
spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datastore_vm[0].name = 'visitFolders'
ds_to_vm.selectSet = spec_array_datastore_vm
# Recurse through the folders
visit_folders = client_factory.create('ns0:TraversalSpec')
visit_folders.name = 'visitFolders'
visit_folders.type = 'Folder'
visit_folders.path = 'childEntity'
visit_folders.skip = False
spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_visit_folders[0].name = 'visitFolders'
spec_array_visit_folders[1].name = 'dcToHf'
spec_array_visit_folders[2].name = 'dcToVmf'
spec_array_visit_folders[3].name = 'crToH'
spec_array_visit_folders[4].name = 'crToRp'
spec_array_visit_folders[5].name = 'dcToDs'
spec_array_visit_folders[6].name = 'hToVm'
spec_array_visit_folders[7].name = 'dsToVm'
spec_array_visit_folders[8].name = 'rpToVm'
visit_folders.selectSet = spec_array_visit_folders
# Add all of them here
spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,
cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]
return spec_array
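
# Illustrative sketch (not part of the original helpers): the spec array above
# is meant to seed an ObjectSpec rooted at the inventory root folder, e.g.:
#
#     object_spec = client_factory.create('ns0:ObjectSpec')
#     object_spec.obj = vim.service_content.rootFolder
#     object_spec.skip = False
#     object_spec.selectSet = build_recursive_traversal_spec(client_factory)
#
# get_objects() below shows the equivalent flow via the oslo_vmware helpers.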


def get_object_properties(vim, collector, mobj, type, properties):
"""Gets the properties of the Managed object specified."""
client_factory = vim.client.factory
if mobj is None:
return None
usecoll = collector
if usecoll is None:
usecoll = vim.service_content.propertyCollector
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = (properties is None or len(properties) == 0)
property_spec.pathSet = properties
property_spec.type = type
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = mobj
object_spec.skip = False
property_filter_spec.propSet = [property_spec]
property_filter_spec.objectSet = [object_spec]
return retrieve_properties_ex(vim,
usecoll,
[property_filter_spec])


def get_dynamic_property(vim, mobj, type, property_name):
"""Gets a particular property of the Managed Object."""
properties = get_dynamic_properties(vim, mobj, [property_name], type)
property_value = None
if property_name in properties:
property_value = properties.get(property_name)
return property_value
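
# Illustrative call (assumes a connected vim session and a VirtualMachine
# managed object reference vm_mor):
#
#     power_state = get_dynamic_property(vim, vm_mor, 'VirtualMachine',
#                                        'runtime.powerState')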


def get_dynamic_properties(vim, mobj, property_names, obj_type=None):
"""Gets specific properties of the Managed Object."""
if not obj_type:
obj_type = mobj._type
obj_content = get_object_properties(
vim, None, mobj, obj_type, property_names)
properties = {}
if obj_content:
dynamic_properties = obj_content[0].propSet
for dynamic_property in dynamic_properties:
property_name = dynamic_property.name
property_value = dynamic_property.val
properties[property_name] = property_value
return properties


def retrieve_properties_ex(vim, prop_coll, spec_set, max_count=500):
    """Retrieve properties.

    Retrieve properties using PropertyCollector.RetrievePropertiesEx
    and PropertyCollector.ContinueRetrievePropertiesEx.
    args:
    :param vim: Vim object
    :param prop_coll: PropertyCollector MOR
    :param spec_set: list of PropertyFilterSpec objects
    :param max_count: Max num of objects returned in a single call.
    """
objcont = []
client_factory = vim.client.factory
opts = client_factory.create('ns0:RetrieveOptions')
opts.maxObjects = max_count
res = vim.RetrievePropertiesEx(prop_coll,
specSet=spec_set,
options=opts)
while True:
if res and res.objects:
objcont.extend(res.objects)
if hasattr(res, "token") and res.token:
res = vim.ContinueRetrievePropertiesEx(prop_coll, token=res.token)
else:
break
return objcont
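
# Illustrative call (assumes a prepared PropertyFilterSpec); this mirrors how
# get_objects() below drives the paging loop:
#
#     objects = retrieve_properties_ex(
#         vim, vim.service_content.propertyCollector, [property_filter_spec])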


def get_objects(vim, type, properties_to_collect=None, all=False):
"""Gets the list of objects of the type specified."""
if not properties_to_collect:
properties_to_collect = ["name"]
client_factory = vim.client.factory
trav_spec = vim_util.build_recursive_traversal_spec(client_factory)
object_spec = vim_util.build_object_spec(client_factory,
vim.service_content.rootFolder,
[trav_spec])
property_spec = vim_util.build_property_spec(
client_factory, type_=type,
properties_to_collect=properties_to_collect,
all_properties=all)
property_filter_spec = vim_util.build_property_filter_spec(client_factory,
[property_spec],
[object_spec])
property_collector = vim.service_content.propertyCollector
return retrieve_properties_ex(vim,
property_collector,
[property_filter_spec])
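
# Example (illustrative): list the names of all virtual machines.
#
#     vm_contents = get_objects(vim, 'VirtualMachine', ['name'])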


def get_prop_spec(client_factory, spec_type, properties):
    """Builds the Property Spec Object."""
    prop_spec = client_factory.create('ns0:PropertySpec')
    prop_spec.type = spec_type
    prop_spec.pathSet = properties
    return prop_spec


def get_obj_spec(client_factory, obj, select_set=None):
    """Builds the Object Spec object."""
    obj_spec = client_factory.create('ns0:ObjectSpec')
    obj_spec.obj = obj
    obj_spec.skip = False
    if select_set is not None:
        obj_spec.selectSet = select_set
    return obj_spec


def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
    """Builds the Property Filter Spec Object."""
    prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    prop_filter_spec.propSet = prop_spec
    prop_filter_spec.objectSet = obj_spec
    return prop_filter_spec
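
# The three builders above are typically combined; illustrative sketch
# (ds_mor is an assumed Datastore managed object reference):
#
#     prop_spec = get_prop_spec(client_factory, 'Datastore',
#                               ['summary.capacity'])
#     obj_spec = get_obj_spec(client_factory, ds_mor)
#     filter_spec = get_prop_filter_spec(client_factory,
#                                        [obj_spec], [prop_spec])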


def get_property_filter_specs(vim, property_dict, objects=None):
    """Builds a property filter spec from a type -> properties mapping."""
client_factory = vim.client.factory
object_specs = []
if not objects:
objects = [vim.service_content.rootFolder]
for obj in objects:
if obj.value == get_root_folder_id(vim):
traversal_spec = [
vim_util.build_recursive_traversal_spec(client_factory)]
else:
traversal_spec = build_recursive_traversal_spec(client_factory)
object_spec = vim_util.build_object_spec(client_factory,
obj,
traversal_spec)
object_specs.append(object_spec)
property_specs = []
for obj_type in property_dict:
props = property_dict[obj_type]
property_spec = vim_util.build_property_spec(
client_factory, type_=obj_type, properties_to_collect=props)
property_specs.append(property_spec)
property_filter_spec = vim_util.build_property_filter_spec(client_factory,
property_specs,
object_specs)
return property_filter_spec
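
# Illustrative usage, collecting the same property for two object types:
#
#     filter_spec = get_property_filter_specs(
#         vim, {'VirtualMachine': ['name'], 'HostSystem': ['name']})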


def create_filter(vim, prop_filter_spec, collector=None):
    if not collector:
        collector = vim.service_content.propertyCollector
    return vim.CreateFilter(collector,
                            spec=prop_filter_spec,
                            partialUpdates=False)


def create_property_collector(vim, collector=None):
    if not collector:
        collector = vim.service_content.propertyCollector
    return vim.CreatePropertyCollector(collector)


def destroy_property_collector(vim, collector):
    if collector:
        return vim.DestroyPropertyCollector(collector)


def wait_for_updates_ex(vim, version, collector=None,
                        max_wait=85, max_update_count=-1):
    """Polling mechanism for property collection.

    args:
    :param vim: Vim object
    :param version: version string
    :param collector: PropertyCollector MOR
    :param max_wait: Max time in seconds before the call returns
                     (Default set to 85 as 90 is the http socket timeout)
    :param max_update_count: Max num of ObjectUpdates returned
                             in a single call. Not set if <= 0
    """
client_factory = vim.client.factory
waitopts = client_factory.create('ns0:WaitOptions')
waitopts.maxWaitSeconds = max_wait
if max_update_count > 0:
waitopts.maxObjectUpdates = max_update_count
if not collector:
collector = vim.service_content.propertyCollector
return vim.WaitForUpdatesEx(collector,
version=version,
options=waitopts)
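
# Illustrative polling loop (assumes a filter was registered via
# create_filter); WaitForUpdatesEx takes an empty version string on the first
# call and the version from the previously returned UpdateSet afterwards:
#
#     version = ''
#     while True:
#         update_set = wait_for_updates_ex(vim, version)
#         if update_set:
#             version = update_set.version
#             # process update_set.filterSet here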


def cancel_wait_for_updates(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CancelWaitForUpdates(collector)


def get_properties_for_a_collection_of_objects(vim, type,
obj_list, properties):
"""Gets the list of properties for the collection of objects."""
client_factory = vim.client.factory
if len(obj_list) == 0:
return []
prop_spec = get_prop_spec(client_factory, type, properties)
lst_obj_specs = []
for obj in obj_list:
lst_obj_specs.append(get_obj_spec(client_factory, obj))
prop_filter_spec = get_prop_filter_spec(client_factory,
lst_obj_specs, [prop_spec])
return retrieve_properties_ex(vim,
vim.service_content.propertyCollector,
[prop_filter_spec])
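
# Illustrative call (ds_mors is an assumed list of Datastore references):
#
#     props = get_properties_for_a_collection_of_objects(
#         vim, 'Datastore', ds_mors,
#         ['summary.capacity', 'summary.freeSpace'])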


def get_search_index(vim):
    return vim.service_content.searchIndex


def find_by_inventory_path(vim, search_index, path):
    return vim.FindByInventoryPath(search_index, inventoryPath=path)


def get_root_folder_id(vim):
    return vim.service_content.rootFolder.value


def get_dv_switch_manager(vim):
    """Get reference of DistributedVirtualSwitchManager."""
    return vim.service_content.dvSwitchManager


def get_dvs_mor_by_uuid(vim, uuid):
    """Query DVS by UUID."""
    dvs_mgr = get_dv_switch_manager(vim)
    return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)
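
# Illustrative lookups (the inventory path and UUID below are placeholders):
#
#     vm_mor = find_by_inventory_path(vim, get_search_index(vim),
#                                     'MyDatacenter/vm/my-vm')
#     dvs_mor = get_dvs_mor_by_uuid(vim, dvs_uuid)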
"step-ids": [
14,
18,
20,
22,
23
]
}
|
[
14,
18,
20,
22,
23
] |
import torch as th
from tpp.processes.hawkes.r_terms_recursive_v import get_r_terms
from tpp.utils.test import get_test_events_query
def run_test():
marks = 3
events, query = get_test_events_query(marks=marks)
beta = th.rand([marks, marks])
get_r_terms(events=events, beta=beta)
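# Illustrative, seeded variant of the smoke test (the seed value is an arbitrary
# assumption, not part of the original): fixing the RNG makes the sampled beta
# reproducible across runs.
def run_test_seeded(seed=0):
    th.manual_seed(seed)
    run_test()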
if __name__ == '__main__':
run_test()
|
normal
|
{
"blob_id": "2681bd9fe93a4d61214b7c45e5d73097ab73dc07",
"index": 5486,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_test():\n marks = 3\n events, query = get_test_events_query(marks=marks)\n beta = th.rand([marks, marks])\n get_r_terms(events=events, beta=beta)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run_test():\n marks = 3\n events, query = get_test_events_query(marks=marks)\n beta = th.rand([marks, marks])\n get_r_terms(events=events, beta=beta)\n\n\nif __name__ == '__main__':\n run_test()\n",
"step-4": "import torch as th\nfrom tpp.processes.hawkes.r_terms_recursive_v import get_r_terms\nfrom tpp.utils.test import get_test_events_query\n\n\ndef run_test():\n marks = 3\n events, query = get_test_events_query(marks=marks)\n beta = th.rand([marks, marks])\n get_r_terms(events=events, beta=beta)\n\n\nif __name__ == '__main__':\n run_test()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import warnings
from functools import wraps
import re
import logging
import pandas as pd
import requests
def return_df(field="data"):
"""return DataFrame data"""
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
resp = func(self, *args, **kwargs)
if resp.get("code") == 200 and self.return_df is True:
df = pd.DataFrame(resp["resp"][field])
if "date" in df.columns:
df['date'] = df['date'].apply(lambda x: datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S"))
df = df.set_index("date")
return df
return resp
return wrapper
return decorator
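# Response envelope assumed by the decorator above: {"code": 200, "resp": {"data": [...]}},
# optionally with a "date" column; any other payload is passed through unchanged.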
class RQOpenClient(object):
def __init__(self, username, password, logger=None, log_level=logging.DEBUG,
base_url="https://rqopen.ricequant.com", timeout=(5, 10), return_df=True):
"""
        :param username: login account
        :param password: password
        :param logger: logger instance
        :param log_level: logging level
        :param base_url: service address; defaults to the web endpoint, rqpro2.0 needs to be configured separately
        :param timeout: request timeout
        :param return_df: return data as a DataFrame; if False, a dict is returned
"""
self.base_url = base_url
        # mobile phone numbers need a "+86" prefix
if re.match(r'^[1]([3-9])[0-9]{9}$', username):
username = "+86" + username
self.username = username
self.password = password
self.client = requests.Session()
self.logger = logger if logger else logging.getLogger("RQOpenClient")
self.logger.setLevel(log_level)
self.timeout = timeout
self.return_df = return_df
def login(self):
self.logger.info("Try login. Username {}".format(self.username))
resp = self.client.post("{}/login".format(self.base_url),
{"username": self.username, "password": self.password}, timeout=self.timeout)
ret = resp.json()
self.logger.info("Login response {}".format(ret))
return ret
def _do(self, func, *args, **kwargs):
resp = func(*args, **kwargs)
if resp["code"] == 401:
login_resp = self.login()
if login_resp["code"] == 200:
self.logger.info("login success")
else:
return login_resp
elif resp["code"] == 200:
return resp
resp = func(*args, **kwargs)
return resp
def get_day_trades(self, run_id):
warnings.warn("get_day_trades will be abandoned, please use current_trades", DeprecationWarning)
return self._do(self._get_day_trades, run_id)
def get_positions(self, run_id):
warnings.warn("current_positions will be abandoned, please use current_positions", DeprecationWarning)
return self._do(self._get_positions, run_id)
def _get_day_trades(self, run_id):
resp = self.client.get("{}/pt/load_day_trades/{}".format(self.base_url, run_id), timeout=self.timeout)
return resp.json()
def _get_positions(self, run_id):
resp = self.client.get("{}/pt/load_current_positions/{}".format(self.base_url, run_id), timeout=self.timeout)
return resp.json()
# base
@return_df()
def trades(self, run_id):
"""get all trades"""
return self._do(self._get_base, "trades", run_id)
@return_df()
def positions(self, run_id):
"""get all positions (market_value)"""
return self._do(self._get_base, "positions", run_id)
@return_df()
def portfolio(self, run_id):
"""get all portfolio"""
return self._do(self._get_base, "portfolio", run_id)
@return_df("positions")
def current_positions(self, run_id):
"""get current positions"""
return self._do(self._get_base, "pt/load_current_positions", run_id)
@return_df("trades")
def current_trades(self, run_id):
"""get current positions"""
return self._do(self._get_base, "pt/load_day_trades", run_id)
def _get_base(self, api_path, run_id):
resp = self.client.get("{}/{}/{}".format(self.base_url, api_path, run_id), timeout=self.timeout)
return resp.json()
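# Illustrative usage sketch (credentials and run_id below are placeholders, not
# part of the library): the client logs in automatically on the first 401, so
# calling any data method is enough.
if __name__ == "__main__":
    client = RQOpenClient("your_username", "your_password")
    portfolio = client.portfolio(run_id=1)  # DataFrame when return_df=True
    print(portfolio)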
|
normal
|
{
"blob_id": "bd2edd5139a9c5050c582a54cdacca2b0739f333",
"index": 9151,
"step-1": "<mask token>\n\n\nclass RQOpenClient(object):\n\n def __init__(self, username, password, logger=None, log_level=logging.\n DEBUG, base_url='https://rqopen.ricequant.com', timeout=(5, 10),\n return_df=True):\n \"\"\"\n :param username: 登录账户\n :param password: 密码\n :param logger: 日志\n :param log_level: 日志级别\n :param base_url: 服务地址,默认web端 rqpro2.0需要单独配置\n :param timeout: 超时时间\n :param return_df: 返回数据是否为DataFrame False返回dict\n \"\"\"\n self.base_url = base_url\n if re.match('^[1]([3-9])[0-9]{9}$', username):\n username = '+86' + username\n self.username = username\n self.password = password\n self.client = requests.Session()\n self.logger = logger if logger else logging.getLogger('RQOpenClient')\n self.logger.setLevel(log_level)\n self.timeout = timeout\n self.return_df = return_df\n <mask token>\n\n def _do(self, func, *args, **kwargs):\n resp = func(*args, **kwargs)\n if resp['code'] == 401:\n login_resp = self.login()\n if login_resp['code'] == 200:\n self.logger.info('login success')\n else:\n return login_resp\n elif resp['code'] == 200:\n return resp\n resp = func(*args, **kwargs)\n return resp\n\n def get_day_trades(self, run_id):\n warnings.warn(\n 'get_day_trades will be abandoned, please use current_trades',\n DeprecationWarning)\n return self._do(self._get_day_trades, run_id)\n\n def get_positions(self, run_id):\n warnings.warn(\n 'current_positions will be abandoned, please use current_positions'\n , DeprecationWarning)\n return self._do(self._get_positions, run_id)\n\n def _get_day_trades(self, run_id):\n resp = self.client.get('{}/pt/load_day_trades/{}'.format(self.\n base_url, run_id), timeout=self.timeout)\n return resp.json()\n <mask token>\n\n @return_df()\n def trades(self, run_id):\n \"\"\"get all trades\"\"\"\n return self._do(self._get_base, 'trades', run_id)\n\n @return_df()\n def positions(self, run_id):\n \"\"\"get all positions (market_value)\"\"\"\n return self._do(self._get_base, 'positions', run_id)\n\n @return_df()\n def portfolio(self, run_id):\n \"\"\"get all portfolio\"\"\"\n return self._do(self._get_base, 'portfolio', run_id)\n\n @return_df('positions')\n def current_positions(self, run_id):\n \"\"\"get current positions\"\"\"\n return self._do(self._get_base, 'pt/load_current_positions', run_id)\n <mask token>\n\n def _get_base(self, api_path, run_id):\n resp = self.client.get('{}/{}/{}'.format(self.base_url, api_path,\n run_id), timeout=self.timeout)\n return resp.json()\n",
"step-2": "<mask token>\n\n\nclass RQOpenClient(object):\n\n def __init__(self, username, password, logger=None, log_level=logging.\n DEBUG, base_url='https://rqopen.ricequant.com', timeout=(5, 10),\n return_df=True):\n \"\"\"\n :param username: 登录账户\n :param password: 密码\n :param logger: 日志\n :param log_level: 日志级别\n :param base_url: 服务地址,默认web端 rqpro2.0需要单独配置\n :param timeout: 超时时间\n :param return_df: 返回数据是否为DataFrame False返回dict\n \"\"\"\n self.base_url = base_url\n if re.match('^[1]([3-9])[0-9]{9}$', username):\n username = '+86' + username\n self.username = username\n self.password = password\n self.client = requests.Session()\n self.logger = logger if logger else logging.getLogger('RQOpenClient')\n self.logger.setLevel(log_level)\n self.timeout = timeout\n self.return_df = return_df\n\n def login(self):\n self.logger.info('Try login. Username {}'.format(self.username))\n resp = self.client.post('{}/login'.format(self.base_url), {\n 'username': self.username, 'password': self.password}, timeout=\n self.timeout)\n ret = resp.json()\n self.logger.info('Login response {}'.format(ret))\n return ret\n\n def _do(self, func, *args, **kwargs):\n resp = func(*args, **kwargs)\n if resp['code'] == 401:\n login_resp = self.login()\n if login_resp['code'] == 200:\n self.logger.info('login success')\n else:\n return login_resp\n elif resp['code'] == 200:\n return resp\n resp = func(*args, **kwargs)\n return resp\n\n def get_day_trades(self, run_id):\n warnings.warn(\n 'get_day_trades will be abandoned, please use current_trades',\n DeprecationWarning)\n return self._do(self._get_day_trades, run_id)\n\n def get_positions(self, run_id):\n warnings.warn(\n 'current_positions will be abandoned, please use current_positions'\n , DeprecationWarning)\n return self._do(self._get_positions, run_id)\n\n def _get_day_trades(self, run_id):\n resp = self.client.get('{}/pt/load_day_trades/{}'.format(self.\n base_url, run_id), timeout=self.timeout)\n return resp.json()\n <mask token>\n\n @return_df()\n def trades(self, run_id):\n \"\"\"get all trades\"\"\"\n return self._do(self._get_base, 'trades', run_id)\n\n @return_df()\n def positions(self, run_id):\n \"\"\"get all positions (market_value)\"\"\"\n return self._do(self._get_base, 'positions', run_id)\n\n @return_df()\n def portfolio(self, run_id):\n \"\"\"get all portfolio\"\"\"\n return self._do(self._get_base, 'portfolio', run_id)\n\n @return_df('positions')\n def current_positions(self, run_id):\n \"\"\"get current positions\"\"\"\n return self._do(self._get_base, 'pt/load_current_positions', run_id)\n <mask token>\n\n def _get_base(self, api_path, run_id):\n resp = self.client.get('{}/{}/{}'.format(self.base_url, api_path,\n run_id), timeout=self.timeout)\n return resp.json()\n",
"step-3": "<mask token>\n\n\nclass RQOpenClient(object):\n\n def __init__(self, username, password, logger=None, log_level=logging.\n DEBUG, base_url='https://rqopen.ricequant.com', timeout=(5, 10),\n return_df=True):\n \"\"\"\n :param username: 登录账户\n :param password: 密码\n :param logger: 日志\n :param log_level: 日志级别\n :param base_url: 服务地址,默认web端 rqpro2.0需要单独配置\n :param timeout: 超时时间\n :param return_df: 返回数据是否为DataFrame False返回dict\n \"\"\"\n self.base_url = base_url\n if re.match('^[1]([3-9])[0-9]{9}$', username):\n username = '+86' + username\n self.username = username\n self.password = password\n self.client = requests.Session()\n self.logger = logger if logger else logging.getLogger('RQOpenClient')\n self.logger.setLevel(log_level)\n self.timeout = timeout\n self.return_df = return_df\n\n def login(self):\n self.logger.info('Try login. Username {}'.format(self.username))\n resp = self.client.post('{}/login'.format(self.base_url), {\n 'username': self.username, 'password': self.password}, timeout=\n self.timeout)\n ret = resp.json()\n self.logger.info('Login response {}'.format(ret))\n return ret\n\n def _do(self, func, *args, **kwargs):\n resp = func(*args, **kwargs)\n if resp['code'] == 401:\n login_resp = self.login()\n if login_resp['code'] == 200:\n self.logger.info('login success')\n else:\n return login_resp\n elif resp['code'] == 200:\n return resp\n resp = func(*args, **kwargs)\n return resp\n\n def get_day_trades(self, run_id):\n warnings.warn(\n 'get_day_trades will be abandoned, please use current_trades',\n DeprecationWarning)\n return self._do(self._get_day_trades, run_id)\n\n def get_positions(self, run_id):\n warnings.warn(\n 'current_positions will be abandoned, please use current_positions'\n , DeprecationWarning)\n return self._do(self._get_positions, run_id)\n\n def _get_day_trades(self, run_id):\n resp = self.client.get('{}/pt/load_day_trades/{}'.format(self.\n base_url, run_id), timeout=self.timeout)\n return resp.json()\n\n def _get_positions(self, run_id):\n resp = self.client.get('{}/pt/load_current_positions/{}'.format(\n self.base_url, run_id), timeout=self.timeout)\n return resp.json()\n\n @return_df()\n def trades(self, run_id):\n \"\"\"get all trades\"\"\"\n return self._do(self._get_base, 'trades', run_id)\n\n @return_df()\n def positions(self, run_id):\n \"\"\"get all positions (market_value)\"\"\"\n return self._do(self._get_base, 'positions', run_id)\n\n @return_df()\n def portfolio(self, run_id):\n \"\"\"get all portfolio\"\"\"\n return self._do(self._get_base, 'portfolio', run_id)\n\n @return_df('positions')\n def current_positions(self, run_id):\n \"\"\"get current positions\"\"\"\n return self._do(self._get_base, 'pt/load_current_positions', run_id)\n\n @return_df('trades')\n def current_trades(self, run_id):\n \"\"\"get current positions\"\"\"\n return self._do(self._get_base, 'pt/load_day_trades', run_id)\n\n def _get_base(self, api_path, run_id):\n resp = self.client.get('{}/{}/{}'.format(self.base_url, api_path,\n run_id), timeout=self.timeout)\n return resp.json()\n",
"step-4": "<mask token>\n\n\ndef return_df(field='data'):\n \"\"\"return DataFrame data\"\"\"\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n resp = func(self, *args, **kwargs)\n if resp.get('code') == 200 and self.return_df is True:\n df = pd.DataFrame(resp['resp'][field])\n if 'date' in df.columns:\n df['date'] = df['date'].apply(lambda x: datetime.\n datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))\n df = df.set_index('date')\n return df\n return resp\n return wrapper\n return decorator\n\n\nclass RQOpenClient(object):\n\n def __init__(self, username, password, logger=None, log_level=logging.\n DEBUG, base_url='https://rqopen.ricequant.com', timeout=(5, 10),\n return_df=True):\n \"\"\"\n :param username: 登录账户\n :param password: 密码\n :param logger: 日志\n :param log_level: 日志级别\n :param base_url: 服务地址,默认web端 rqpro2.0需要单独配置\n :param timeout: 超时时间\n :param return_df: 返回数据是否为DataFrame False返回dict\n \"\"\"\n self.base_url = base_url\n if re.match('^[1]([3-9])[0-9]{9}$', username):\n username = '+86' + username\n self.username = username\n self.password = password\n self.client = requests.Session()\n self.logger = logger if logger else logging.getLogger('RQOpenClient')\n self.logger.setLevel(log_level)\n self.timeout = timeout\n self.return_df = return_df\n\n def login(self):\n self.logger.info('Try login. Username {}'.format(self.username))\n resp = self.client.post('{}/login'.format(self.base_url), {\n 'username': self.username, 'password': self.password}, timeout=\n self.timeout)\n ret = resp.json()\n self.logger.info('Login response {}'.format(ret))\n return ret\n\n def _do(self, func, *args, **kwargs):\n resp = func(*args, **kwargs)\n if resp['code'] == 401:\n login_resp = self.login()\n if login_resp['code'] == 200:\n self.logger.info('login success')\n else:\n return login_resp\n elif resp['code'] == 200:\n return resp\n resp = func(*args, **kwargs)\n return resp\n\n def get_day_trades(self, run_id):\n warnings.warn(\n 'get_day_trades will be abandoned, please use current_trades',\n DeprecationWarning)\n return self._do(self._get_day_trades, run_id)\n\n def get_positions(self, run_id):\n warnings.warn(\n 'current_positions will be abandoned, please use current_positions'\n , DeprecationWarning)\n return self._do(self._get_positions, run_id)\n\n def _get_day_trades(self, run_id):\n resp = self.client.get('{}/pt/load_day_trades/{}'.format(self.\n base_url, run_id), timeout=self.timeout)\n return resp.json()\n\n def _get_positions(self, run_id):\n resp = self.client.get('{}/pt/load_current_positions/{}'.format(\n self.base_url, run_id), timeout=self.timeout)\n return resp.json()\n\n @return_df()\n def trades(self, run_id):\n \"\"\"get all trades\"\"\"\n return self._do(self._get_base, 'trades', run_id)\n\n @return_df()\n def positions(self, run_id):\n \"\"\"get all positions (market_value)\"\"\"\n return self._do(self._get_base, 'positions', run_id)\n\n @return_df()\n def portfolio(self, run_id):\n \"\"\"get all portfolio\"\"\"\n return self._do(self._get_base, 'portfolio', run_id)\n\n @return_df('positions')\n def current_positions(self, run_id):\n \"\"\"get current positions\"\"\"\n return self._do(self._get_base, 'pt/load_current_positions', run_id)\n\n @return_df('trades')\n def current_trades(self, run_id):\n \"\"\"get current positions\"\"\"\n return self._do(self._get_base, 'pt/load_day_trades', run_id)\n\n def _get_base(self, api_path, run_id):\n resp = self.client.get('{}/{}/{}'.format(self.base_url, api_path,\n run_id), timeout=self.timeout)\n return 
resp.json()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport datetime\nimport warnings\nfrom functools import wraps\nimport re\nimport logging\nimport pandas as pd\nimport requests\n\n\ndef return_df(field=\"data\"):\n \"\"\"return DataFrame data\"\"\"\n\n def decorator(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n resp = func(self, *args, **kwargs)\n if resp.get(\"code\") == 200 and self.return_df is True:\n df = pd.DataFrame(resp[\"resp\"][field])\n if \"date\" in df.columns:\n df['date'] = df['date'].apply(lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d %H:%M:%S\"))\n df = df.set_index(\"date\")\n return df\n return resp\n\n return wrapper\n\n return decorator\n\n\nclass RQOpenClient(object):\n def __init__(self, username, password, logger=None, log_level=logging.DEBUG,\n base_url=\"https://rqopen.ricequant.com\", timeout=(5, 10), return_df=True):\n \"\"\"\n :param username: 登录账户\n :param password: 密码\n :param logger: 日志\n :param log_level: 日志级别\n :param base_url: 服务地址,默认web端 rqpro2.0需要单独配置\n :param timeout: 超时时间\n :param return_df: 返回数据是否为DataFrame False返回dict\n \"\"\"\n self.base_url = base_url\n # tel number need \"+86\"\n if re.match(r'^[1]([3-9])[0-9]{9}$', username):\n username = \"+86\" + username\n self.username = username\n self.password = password\n self.client = requests.Session()\n self.logger = logger if logger else logging.getLogger(\"RQOpenClient\")\n self.logger.setLevel(log_level)\n self.timeout = timeout\n self.return_df = return_df\n\n def login(self):\n self.logger.info(\"Try login. Username {}\".format(self.username))\n resp = self.client.post(\"{}/login\".format(self.base_url),\n {\"username\": self.username, \"password\": self.password}, timeout=self.timeout)\n ret = resp.json()\n self.logger.info(\"Login response {}\".format(ret))\n return ret\n\n def _do(self, func, *args, **kwargs):\n resp = func(*args, **kwargs)\n if resp[\"code\"] == 401:\n login_resp = self.login()\n if login_resp[\"code\"] == 200:\n self.logger.info(\"login success\")\n else:\n return login_resp\n elif resp[\"code\"] == 200:\n return resp\n resp = func(*args, **kwargs)\n return resp\n\n def get_day_trades(self, run_id):\n warnings.warn(\"get_day_trades will be abandoned, please use current_trades\", DeprecationWarning)\n return self._do(self._get_day_trades, run_id)\n\n def get_positions(self, run_id):\n warnings.warn(\"current_positions will be abandoned, please use current_positions\", DeprecationWarning)\n return self._do(self._get_positions, run_id)\n\n def _get_day_trades(self, run_id):\n resp = self.client.get(\"{}/pt/load_day_trades/{}\".format(self.base_url, run_id), timeout=self.timeout)\n return resp.json()\n\n def _get_positions(self, run_id):\n resp = self.client.get(\"{}/pt/load_current_positions/{}\".format(self.base_url, run_id), timeout=self.timeout)\n return resp.json()\n\n # base\n @return_df()\n def trades(self, run_id):\n \"\"\"get all trades\"\"\"\n return self._do(self._get_base, \"trades\", run_id)\n\n @return_df()\n def positions(self, run_id):\n \"\"\"get all positions (market_value)\"\"\"\n return self._do(self._get_base, \"positions\", run_id)\n\n @return_df()\n def portfolio(self, run_id):\n \"\"\"get all portfolio\"\"\"\n return self._do(self._get_base, \"portfolio\", run_id)\n\n @return_df(\"positions\")\n def current_positions(self, run_id):\n \"\"\"get current positions\"\"\"\n return self._do(self._get_base, \"pt/load_current_positions\", run_id)\n\n @return_df(\"trades\")\n def current_trades(self, run_id):\n \"\"\"get current 
positions\"\"\"\n return self._do(self._get_base, \"pt/load_day_trades\", run_id)\n\n def _get_base(self, api_path, run_id):\n resp = self.client.get(\"{}/{}/{}\".format(self.base_url, api_path, run_id), timeout=self.timeout)\n return resp.json()\n",
"step-ids": [
11,
12,
14,
15,
17
]
}
|
[
11,
12,
14,
15,
17
] |
PRECISAO = 3
MAX_ITER = 20
def gauss_jacobi(entrada,*valores_iniciais):
tamanho = len(entrada[0])
variaveis = [*valores_iniciais[:tamanho]]
variaveism1 = [None] * (tamanho-1)
for _ in range(0,MAX_ITER):
print(variaveis)
for linha in range(tamanho-1):
soma = 0
for coluna in range(tamanho-1):
if(linha!=coluna):
soma += -entrada[linha][coluna]*variaveis[coluna]
variaveism1[linha] = round((entrada[linha][tamanho-1]+soma)/entrada[linha][linha],PRECISAO)
if(all([variaveism1[i]==variaveis[i] for i in range(tamanho-1)])):
break
variaveis = [*variaveism1]
return variaveis
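# gauss_seidel below differs from gauss_jacobi only in updating variaveis in place,
# so each row already uses the freshest values from the same iteration, which
# typically makes it converge in fewer iterations.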
def gauss_seidel(entrada,*valores_iniciais):
tamanho = len(entrada[0])
variaveis = [*valores_iniciais[:tamanho]]
antigo = [None] * (tamanho-1)
for _ in range(0,MAX_ITER):
print(variaveis)
for linha in range(tamanho-1):
soma = 0
for coluna in range(tamanho-1):
if(linha!=coluna):
soma += -entrada[linha][coluna]*variaveis[coluna]
variaveis[linha] = round((entrada[linha][tamanho-1]+soma)/entrada[linha][linha],PRECISAO)
if(all([antigo[i]==variaveis[i] for i in range(tamanho-1)])):
break
antigo = [*variaveis]
return variaveis
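# Optional check (illustrative addition, not part of the original script):
# substituting a returned solution back into the system gives the residuals
# A*x - b; for the example in main() both methods converge to about (1, -2, 1).
def residuos(entrada, solucao):
    tamanho = len(entrada[0])
    res = []
    for linha in range(tamanho - 1):
        soma = 0
        for coluna in range(tamanho - 1):
            soma += entrada[linha][coluna] * solucao[coluna]
        res.append(round(soma - entrada[linha][tamanho - 1], PRECISAO))
    return res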
def main():
print()
entrada = [
[10,2,1,7],
[1,5,1,-8],
[2,3,10,6]
]
saida = gauss_jacobi(entrada,0,0,0)
print(saida)
print()
saida = gauss_seidel(entrada,0,0,0)
print(saida)
if __name__=="__main__":
main()
|
normal
|
{
"blob_id": "842f8b4de0378a2c83d22f3fd54ba4857d249597",
"index": 9323,
"step-1": "<mask token>\n\n\ndef gauss_jacobi(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n variaveism1 = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveism1[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(variaveism1[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n variaveis = [*variaveism1]\n return variaveis\n\n\ndef gauss_seidel(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n antigo = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveis[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(antigo[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n antigo = [*variaveis]\n return variaveis\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gauss_jacobi(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n variaveism1 = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveism1[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(variaveism1[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n variaveis = [*variaveism1]\n return variaveis\n\n\ndef gauss_seidel(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n antigo = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveis[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(antigo[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n antigo = [*variaveis]\n return variaveis\n\n\ndef main():\n print()\n entrada = [[10, 2, 1, 7], [1, 5, 1, -8], [2, 3, 10, 6]]\n saida = gauss_jacobi(entrada, 0, 0, 0)\n print(saida)\n print()\n saida = gauss_seidel(entrada, 0, 0, 0)\n print(saida)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gauss_jacobi(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n variaveism1 = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveism1[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(variaveism1[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n variaveis = [*variaveism1]\n return variaveis\n\n\ndef gauss_seidel(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n antigo = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveis[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(antigo[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n antigo = [*variaveis]\n return variaveis\n\n\ndef main():\n print()\n entrada = [[10, 2, 1, 7], [1, 5, 1, -8], [2, 3, 10, 6]]\n saida = gauss_jacobi(entrada, 0, 0, 0)\n print(saida)\n print()\n saida = gauss_seidel(entrada, 0, 0, 0)\n print(saida)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "PRECISAO = 3\nMAX_ITER = 20\n\n\ndef gauss_jacobi(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n variaveism1 = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveism1[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(variaveism1[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n variaveis = [*variaveism1]\n return variaveis\n\n\ndef gauss_seidel(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n antigo = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveis[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(antigo[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n antigo = [*variaveis]\n return variaveis\n\n\ndef main():\n print()\n entrada = [[10, 2, 1, 7], [1, 5, 1, -8], [2, 3, 10, 6]]\n saida = gauss_jacobi(entrada, 0, 0, 0)\n print(saida)\n print()\n saida = gauss_seidel(entrada, 0, 0, 0)\n print(saida)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "PRECISAO = 3\r\nMAX_ITER = 20\r\n\r\ndef gauss_jacobi(entrada,*valores_iniciais):\r\n tamanho = len(entrada[0])\r\n variaveis = [*valores_iniciais[:tamanho]]\r\n variaveism1 = [None] * (tamanho-1)\r\n for _ in range(0,MAX_ITER):\r\n print(variaveis)\r\n for linha in range(tamanho-1):\r\n soma = 0\r\n for coluna in range(tamanho-1):\r\n if(linha!=coluna):\r\n soma += -entrada[linha][coluna]*variaveis[coluna]\r\n variaveism1[linha] = round((entrada[linha][tamanho-1]+soma)/entrada[linha][linha],PRECISAO)\r\n if(all([variaveism1[i]==variaveis[i] for i in range(tamanho-1)])):\r\n break\r\n variaveis = [*variaveism1]\r\n return variaveis\r\n\r\ndef gauss_seidel(entrada,*valores_iniciais):\r\n tamanho = len(entrada[0])\r\n variaveis = [*valores_iniciais[:tamanho]]\r\n antigo = [None] * (tamanho-1)\r\n for _ in range(0,MAX_ITER):\r\n print(variaveis)\r\n for linha in range(tamanho-1):\r\n soma = 0\r\n for coluna in range(tamanho-1):\r\n if(linha!=coluna):\r\n soma += -entrada[linha][coluna]*variaveis[coluna]\r\n variaveis[linha] = round((entrada[linha][tamanho-1]+soma)/entrada[linha][linha],PRECISAO)\r\n if(all([antigo[i]==variaveis[i] for i in range(tamanho-1)])):\r\n break\r\n antigo = [*variaveis]\r\n return variaveis\r\n\r\n\r\ndef main():\r\n print()\r\n entrada = [\r\n [10,2,1,7],\r\n [1,5,1,-8],\r\n [2,3,10,6]\r\n ]\r\n saida = gauss_jacobi(entrada,0,0,0)\r\n print(saida)\r\n print()\r\n saida = gauss_seidel(entrada,0,0,0)\r\n print(saida)\r\n\r\nif __name__==\"__main__\":\r\n main()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from extras.plugins import PluginConfig
from .version import __version__
class QRCodeConfig(PluginConfig):
name = 'netbox_qrcode'
verbose_name = 'qrcode'
description = 'Generate QR codes for the objects'
version = __version__
author = 'Nikolay Yuzefovich'
author_email = '[email protected]'
required_settings = []
default_settings = {
'with_text': True,
'text_fields': ['name', 'serial'],
'font': 'TahomaBold',
'custom_text': None,
'text_location': 'right',
'qr_version': 1,
'qr_error_correction': 0,
'qr_box_size': 6,
'qr_border': 4,
'device': {
'text_fields': ['name', 'serial']
},
'rack': {
'text_fields': ['name']
},
'cable': {
'text_fields': [
'_termination_a_device',
'termination_a',
'_termination_b_device',
'termination_b',
]
}
}
config = QRCodeConfig # noqa E305
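# Deployment note (the values below are illustrative placeholders): any of the
# default_settings above can be overridden per plugin via PLUGINS_CONFIG in
# NetBox's configuration.py, e.g.
#
#   PLUGINS_CONFIG = {
#       'netbox_qrcode': {
#           'with_text': False,
#           'qr_box_size': 4,
#       },
#   }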
|
normal
|
{
"blob_id": "6306acd1508698687842ba6b55a839743af420cc",
"index": 5840,
"step-1": "<mask token>\n\n\nclass QRCodeConfig(PluginConfig):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = '[email protected]'\n required_settings = []\n default_settings = {'with_text': True, 'text_fields': ['name', 'serial'\n ], 'font': 'TahomaBold', 'custom_text': None, 'text_location':\n 'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size': \n 6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},\n 'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [\n '_termination_a_device', 'termination_a', '_termination_b_device',\n 'termination_b']}}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = '[email protected]'\n required_settings = []\n default_settings = {'with_text': True, 'text_fields': ['name', 'serial'\n ], 'font': 'TahomaBold', 'custom_text': None, 'text_location':\n 'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size': \n 6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},\n 'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [\n '_termination_a_device', 'termination_a', '_termination_b_device',\n 'termination_b']}}\n\n\nconfig = QRCodeConfig\n",
"step-4": "from extras.plugins import PluginConfig\nfrom .version import __version__\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = '[email protected]'\n required_settings = []\n default_settings = {'with_text': True, 'text_fields': ['name', 'serial'\n ], 'font': 'TahomaBold', 'custom_text': None, 'text_location':\n 'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size': \n 6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},\n 'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [\n '_termination_a_device', 'termination_a', '_termination_b_device',\n 'termination_b']}}\n\n\nconfig = QRCodeConfig\n",
"step-5": "from extras.plugins import PluginConfig\nfrom .version import __version__\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = '[email protected]'\n required_settings = []\n default_settings = {\n 'with_text': True,\n 'text_fields': ['name', 'serial'],\n 'font': 'TahomaBold',\n 'custom_text': None,\n 'text_location': 'right',\n 'qr_version': 1,\n 'qr_error_correction': 0,\n 'qr_box_size': 6,\n 'qr_border': 4,\n 'device': {\n 'text_fields': ['name', 'serial']\n },\n 'rack': {\n 'text_fields': ['name']\n },\n 'cable': {\n 'text_fields': [\n '_termination_a_device',\n 'termination_a',\n '_termination_b_device',\n 'termination_b',\n ]\n }\n }\n\nconfig = QRCodeConfig # noqa E305\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# PySNMP SMI module. Autogenerated from smidump -f python DS0BUNDLE-MIB
# by libsmi2pysnmp-0.1.3 at Thu May 22 11:57:37 2014,
# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)
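# Note: this module is not imported directly; pysnmp's MibBuilder executes it and
# supplies the `mibBuilder` symbol used below, e.g. (illustrative):
#   from pysnmp.smi import builder
#   builder.MibBuilder().loadModules('DS0BUNDLE-MIB')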
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( InterfaceIndex, ifIndex, ) = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
( ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup")
( Bits, Integer32, ModuleIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, transmission, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Integer32", "ModuleIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "transmission")
( DisplayString, RowStatus, TestAndIncr, ) = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TestAndIncr")
# Objects
ds0Bundle = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 82)).setRevisions(("1998-07-16 16:30","1998-05-24 20:10",))
if mibBuilder.loadTexts: ds0Bundle.setOrganization("IETF Trunk MIB Working Group")
if mibBuilder.loadTexts: ds0Bundle.setContactInfo(" David Fowler\n\nPostal: Newbridge Networks Corporation\n 600 March Road\n Kanata, Ontario, Canada K2K 2E6\n\n Tel: +1 613 591 3600\n Fax: +1 613 599 3619\n\nE-mail: [email protected]")
if mibBuilder.loadTexts: ds0Bundle.setDescription("The MIB module to describe\nDS0 Bundle interfaces objects.")
dsx0BondingTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 1))
if mibBuilder.loadTexts: dsx0BondingTable.setDescription("The DS0 Bonding table.")
dsx0BondingEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 1, 1)).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: dsx0BondingEntry.setDescription("An entry in the DS0 Bonding table. There is a\nrow in this table for each DS0Bundle interface.")
dsx0BondMode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 1), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,6,3,4,2,)).subtype(namedValues=NamedValues(("none", 1), ("other", 2), ("mode0", 3), ("mode1", 4), ("mode2", 5), ("mode3", 6), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BondMode.setDescription("This object indicates which BONDing mode is used,\nif any, for a ds0Bundle. Mode0 provides parameter\nand number exchange with no synchronization. Mode\n1 provides parameter and number exchange. Mode 1\nalso provides synchronization during\ninitialization but does not include inband\nmonitoring. Mode 2 provides all of the above plus\ninband monitoring. Mode 2 also steals 1/64th of\nthe bandwidth of each channel (thus not supporting\nn x 56/64 kbit/s data channels for most values of\nn). Mode 3 provides all of the above, but also\nprovides n x 56/64 kbit/s data channels. Most\ncommon implementations of Mode 3 add an extra\nchannel to support the inband monitoring overhead.\nModeNone should be used when the interface is not\nperforming bandwidth-on-demand.")
dsx0BondStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 2), Integer().subtype(subtypeSpec=SingleValueConstraint(1,3,2,)).subtype(namedValues=NamedValues(("idle", 1), ("callSetup", 2), ("dataTransfer", 3), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx0BondStatus.setDescription("This object indicates the current status of the\nbonding call using this ds0Bundle. idle(1) should\nbe used when the bonding mode is set to none(1).")
dsx0BondRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BondRowStatus.setDescription("This object is used to create new rows in this\ntable, modify existing rows, and to delete\nexisting rows.")
dsx0BundleNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 10, 82, 2), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx0BundleNextIndex.setDescription("This object is used to assist the manager in\nselecting a value for dsx0BundleIndex. Because\nthis object is of syntax TestAndIncr (see the\nSNMPv2-TC document, RFC 1903) it can also be used\nto avoid race conditions with multiple managers\ntrying to create rows in the table.\n\nIf the result of the SET for dsx0BundleNextIndex\nis not success, this means the value has been\nchanged from index (i.e. another manager used the\nvalue), so a new value is required.\n\nThe algorithm is:\ndone = false\nwhile done == false\n index = GET (dsx0BundleNextIndex.0)\n SET (dsx0BundleNextIndex.0=index)\n if (set failed)\n done = false\n else\n SET(dsx0BundleRowStatus.index=createAndGo)\n if (set failed)\n done = false\n else\n done = true\n other error handling")
dsx0BundleTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 3))
if mibBuilder.loadTexts: dsx0BundleTable.setDescription("There is an row in this table for each ds0Bundle\nin the system. This table can be used to\n(indirectly) create rows in the ifTable with\nifType = 'ds0Bundle(82)'.")
dsx0BundleEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 3, 1)).setIndexNames((0, "DS0BUNDLE-MIB", "dsx0BundleIndex"))
if mibBuilder.loadTexts: dsx0BundleEntry.setDescription("There is a row in entry in this table for each\nds0Bundle interface.")
dsx0BundleIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: dsx0BundleIndex.setDescription("A unique identifier for a ds0Bundle. This is not\nthe same value as ifIndex. This table is not\nindexed by ifIndex because the manager has to\nchoose the index in a createable row and the agent\nmust be allowed to select ifIndex values.")
dsx0BundleIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 2), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx0BundleIfIndex.setDescription("The ifIndex value the agent selected for the\n(new) ds0Bundle interface.")
dsx0BundleCircuitIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BundleCircuitIdentifier.setDescription("This variable contains the transmission vendor's\ncircuit identifier, for the purpose of\nfacilitating troubleshooting.")
dsx0BundleRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BundleRowStatus.setDescription("This object is used to create and delete rows in\nthis table.")
ds0BundleConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4))
ds0BundleGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 1))
ds0BundleCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 2))
# Augmentions
# Groups
ds0BondingGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 1)).setObjects(*(("DS0BUNDLE-MIB", "dsx0BondMode"), ("DS0BUNDLE-MIB", "dsx0BondStatus"), ("DS0BUNDLE-MIB", "dsx0BondRowStatus"), ) )
if mibBuilder.loadTexts: ds0BondingGroup.setDescription("A collection of objects providing\nconfiguration information applicable\nto all DS0 interfaces.")
ds0BundleConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 2)).setObjects(*(("DS0BUNDLE-MIB", "dsx0BundleIfIndex"), ("DS0BUNDLE-MIB", "dsx0BundleRowStatus"), ("DS0BUNDLE-MIB", "dsx0BundleCircuitIdentifier"), ("DS0BUNDLE-MIB", "dsx0BundleNextIndex"), ) )
if mibBuilder.loadTexts: ds0BundleConfigGroup.setDescription("A collection of objects providing the ability to\ncreate a new ds0Bundle in the ifTable as well as\nconfiguration information about the ds0Bundle.")
# Compliances
ds0BundleCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 10, 82, 4, 2, 1)).setObjects(*(("DS0BUNDLE-MIB", "ds0BundleConfigGroup"), ("DS0BUNDLE-MIB", "ds0BondingGroup"), ) )
if mibBuilder.loadTexts: ds0BundleCompliance.setDescription("The compliance statement for DS0Bundle\ninterfaces.")
# Exports
# Module identity
mibBuilder.exportSymbols("DS0BUNDLE-MIB", PYSNMP_MODULE_ID=ds0Bundle)
# Objects
mibBuilder.exportSymbols("DS0BUNDLE-MIB", ds0Bundle=ds0Bundle, dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry, dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus, dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus, ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=ds0BundleGroups, ds0BundleCompliances=ds0BundleCompliances)
# Groups
mibBuilder.exportSymbols("DS0BUNDLE-MIB", ds0BondingGroup=ds0BondingGroup, ds0BundleConfigGroup=ds0BundleConfigGroup)
# Compliances
mibBuilder.exportSymbols("DS0BUNDLE-MIB", ds0BundleCompliance=ds0BundleCompliance)
|
normal
|
{
"blob_id": "fab15d34d29301e53a26577725cdd66dca7507bc",
"index": 2330,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif mibBuilder.loadTexts:\n ds0Bundle.setOrganization('IETF Trunk MIB Working Group')\nif mibBuilder.loadTexts:\n ds0Bundle.setContactInfo(\n \"\"\" David Fowler\n\nPostal: Newbridge Networks Corporation\n 600 March Road\n Kanata, Ontario, Canada K2K 2E6\n\n Tel: +1 613 591 3600\n Fax: +1 613 599 3619\n\nE-mail: [email protected]\"\"\"\n )\nif mibBuilder.loadTexts:\n ds0Bundle.setDescription(\n 'The MIB module to describe\\nDS0 Bundle interfaces objects.')\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BondingTable.setDescription('The DS0 Bonding table.')\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BondingEntry.setDescription(\n \"\"\"An entry in the DS0 Bonding table. There is a\nrow in this table for each DS0Bundle interface.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BondMode.setDescription(\n \"\"\"This object indicates which BONDing mode is used,\nif any, for a ds0Bundle. Mode0 provides parameter\nand number exchange with no synchronization. Mode\n1 provides parameter and number exchange. Mode 1\nalso provides synchronization during\ninitialization but does not include inband\nmonitoring. Mode 2 provides all of the above plus\ninband monitoring. Mode 2 also steals 1/64th of\nthe bandwidth of each channel (thus not supporting\nn x 56/64 kbit/s data channels for most values of\nn). Mode 3 provides all of the above, but also\nprovides n x 56/64 kbit/s data channels. Most\ncommon implementations of Mode 3 add an extra\nchannel to support the inband monitoring overhead.\nModeNone should be used when the interface is not\nperforming bandwidth-on-demand.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BondStatus.setDescription(\n \"\"\"This object indicates the current status of the\nbonding call using this ds0Bundle. idle(1) should\nbe used when the bonding mode is set to none(1).\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BondRowStatus.setDescription(\n \"\"\"This object is used to create new rows in this\ntable, modify existing rows, and to delete\nexisting rows.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleNextIndex.setDescription(\n \"\"\"This object is used to assist the manager in\nselecting a value for dsx0BundleIndex. Because\nthis object is of syntax TestAndIncr (see the\nSNMPv2-TC document, RFC 1903) it can also be used\nto avoid race conditions with multiple managers\ntrying to create rows in the table.\n\nIf the result of the SET for dsx0BundleNextIndex\nis not success, this means the value has been\nchanged from index (i.e. another manager used the\nvalue), so a new value is required.\n\nThe algorithm is:\ndone = false\nwhile done == false\n index = GET (dsx0BundleNextIndex.0)\n SET (dsx0BundleNextIndex.0=index)\n if (set failed)\n done = false\n else\n SET(dsx0BundleRowStatus.index=createAndGo)\n if (set failed)\n done = false\n else\n done = true\n other error handling\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleTable.setDescription(\n \"\"\"There is an row in this table for each ds0Bundle\nin the system. This table can be used to\n(indirectly) create rows in the ifTable with\nifType = 'ds0Bundle(82)'.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleEntry.setDescription(\n \"\"\"There is a row in entry in this table for each\nds0Bundle interface.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleIndex.setDescription(\n \"\"\"A unique identifier for a ds0Bundle. This is not\nthe same value as ifIndex. 
This table is not\nindexed by ifIndex because the manager has to\nchoose the index in a createable row and the agent\nmust be allowed to select ifIndex values.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleIfIndex.setDescription(\n \"\"\"The ifIndex value the agent selected for the\n(new) ds0Bundle interface.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleCircuitIdentifier.setDescription(\n \"\"\"This variable contains the transmission vendor's\ncircuit identifier, for the purpose of\nfacilitating troubleshooting.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleRowStatus.setDescription(\n \"\"\"This object is used to create and delete rows in\nthis table.\"\"\")\n<mask token>\nif mibBuilder.loadTexts:\n ds0BondingGroup.setDescription(\n \"\"\"A collection of objects providing\nconfiguration information applicable\nto all DS0 interfaces.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n ds0BundleConfigGroup.setDescription(\n \"\"\"A collection of objects providing the ability to\ncreate a new ds0Bundle in the ifTable as well as\nconfiguration information about the ds0Bundle.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n ds0BundleCompliance.setDescription(\n 'The compliance statement for DS0Bundle\\ninterfaces.')\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', PYSNMP_MODULE_ID=ds0Bundle)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0Bundle=ds0Bundle,\n dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry,\n dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus,\n dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=\n dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=\n dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=\n dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=\n dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus,\n ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=\n ds0BundleGroups, ds0BundleCompliances=ds0BundleCompliances)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BondingGroup=ds0BondingGroup,\n ds0BundleConfigGroup=ds0BundleConfigGroup)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BundleCompliance=\n ds0BundleCompliance)\n",
"step-3": "Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols('ASN1',\n 'Integer', 'ObjectIdentifier', 'OctetString')\nNamedValues, = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')\n(ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint,\n ValueRangeConstraint, ValueSizeConstraint) = (mibBuilder.importSymbols(\n 'ASN1-REFINEMENT', 'ConstraintsIntersection', 'ConstraintsUnion',\n 'SingleValueConstraint', 'ValueRangeConstraint', 'ValueSizeConstraint'))\nInterfaceIndex, ifIndex = mibBuilder.importSymbols('IF-MIB',\n 'InterfaceIndex', 'ifIndex')\nModuleCompliance, ObjectGroup = mibBuilder.importSymbols('SNMPv2-CONF',\n 'ModuleCompliance', 'ObjectGroup')\n(Bits, Integer32, ModuleIdentity, MibIdentifier, MibScalar, MibTable,\n MibTableRow, MibTableColumn, TimeTicks, transmission) = (mibBuilder.\n importSymbols('SNMPv2-SMI', 'Bits', 'Integer32', 'ModuleIdentity',\n 'MibIdentifier', 'MibScalar', 'MibTable', 'MibTableRow',\n 'MibTableColumn', 'TimeTicks', 'transmission'))\nDisplayString, RowStatus, TestAndIncr = mibBuilder.importSymbols('SNMPv2-TC',\n 'DisplayString', 'RowStatus', 'TestAndIncr')\nds0Bundle = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 82)).setRevisions((\n '1998-07-16 16:30', '1998-05-24 20:10'))\nif mibBuilder.loadTexts:\n ds0Bundle.setOrganization('IETF Trunk MIB Working Group')\nif mibBuilder.loadTexts:\n ds0Bundle.setContactInfo(\n \"\"\" David Fowler\n\nPostal: Newbridge Networks Corporation\n 600 March Road\n Kanata, Ontario, Canada K2K 2E6\n\n Tel: +1 613 591 3600\n Fax: +1 613 599 3619\n\nE-mail: [email protected]\"\"\"\n )\nif mibBuilder.loadTexts:\n ds0Bundle.setDescription(\n 'The MIB module to describe\\nDS0 Bundle interfaces objects.')\ndsx0BondingTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 1))\nif mibBuilder.loadTexts:\n dsx0BondingTable.setDescription('The DS0 Bonding table.')\ndsx0BondingEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 1, 1)).setIndexNames(\n (0, 'IF-MIB', 'ifIndex'))\nif mibBuilder.loadTexts:\n dsx0BondingEntry.setDescription(\n \"\"\"An entry in the DS0 Bonding table. There is a\nrow in this table for each DS0Bundle interface.\"\"\"\n )\ndsx0BondMode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 1), Integer(\n ).subtype(subtypeSpec=SingleValueConstraint(1, 5, 6, 3, 4, 2)).subtype(\n namedValues=NamedValues(('none', 1), ('other', 2), ('mode0', 3), (\n 'mode1', 4), ('mode2', 5), ('mode3', 6)))).setMaxAccess('readcreate')\nif mibBuilder.loadTexts:\n dsx0BondMode.setDescription(\n \"\"\"This object indicates which BONDing mode is used,\nif any, for a ds0Bundle. Mode0 provides parameter\nand number exchange with no synchronization. Mode\n1 provides parameter and number exchange. Mode 1\nalso provides synchronization during\ninitialization but does not include inband\nmonitoring. Mode 2 provides all of the above plus\ninband monitoring. Mode 2 also steals 1/64th of\nthe bandwidth of each channel (thus not supporting\nn x 56/64 kbit/s data channels for most values of\nn). Mode 3 provides all of the above, but also\nprovides n x 56/64 kbit/s data channels. 
Most\ncommon implementations of Mode 3 add an extra\nchannel to support the inband monitoring overhead.\nModeNone should be used when the interface is not\nperforming bandwidth-on-demand.\"\"\"\n )\ndsx0BondStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 2),\n Integer().subtype(subtypeSpec=SingleValueConstraint(1, 3, 2)).subtype(\n namedValues=NamedValues(('idle', 1), ('callSetup', 2), ('dataTransfer',\n 3)))).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n dsx0BondStatus.setDescription(\n \"\"\"This object indicates the current status of the\nbonding call using this ds0Bundle. idle(1) should\nbe used when the bonding mode is set to none(1).\"\"\"\n )\ndsx0BondRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 3),\n RowStatus()).setMaxAccess('readcreate')\nif mibBuilder.loadTexts:\n dsx0BondRowStatus.setDescription(\n \"\"\"This object is used to create new rows in this\ntable, modify existing rows, and to delete\nexisting rows.\"\"\"\n )\ndsx0BundleNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 10, 82, 2), TestAndIncr()\n ).setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n dsx0BundleNextIndex.setDescription(\n \"\"\"This object is used to assist the manager in\nselecting a value for dsx0BundleIndex. Because\nthis object is of syntax TestAndIncr (see the\nSNMPv2-TC document, RFC 1903) it can also be used\nto avoid race conditions with multiple managers\ntrying to create rows in the table.\n\nIf the result of the SET for dsx0BundleNextIndex\nis not success, this means the value has been\nchanged from index (i.e. another manager used the\nvalue), so a new value is required.\n\nThe algorithm is:\ndone = false\nwhile done == false\n index = GET (dsx0BundleNextIndex.0)\n SET (dsx0BundleNextIndex.0=index)\n if (set failed)\n done = false\n else\n SET(dsx0BundleRowStatus.index=createAndGo)\n if (set failed)\n done = false\n else\n done = true\n other error handling\"\"\"\n )\ndsx0BundleTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 3))\nif mibBuilder.loadTexts:\n dsx0BundleTable.setDescription(\n \"\"\"There is an row in this table for each ds0Bundle\nin the system. This table can be used to\n(indirectly) create rows in the ifTable with\nifType = 'ds0Bundle(82)'.\"\"\"\n )\ndsx0BundleEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 3, 1)).setIndexNames((\n 0, 'DS0BUNDLE-MIB', 'dsx0BundleIndex'))\nif mibBuilder.loadTexts:\n dsx0BundleEntry.setDescription(\n \"\"\"There is a row in entry in this table for each\nds0Bundle interface.\"\"\"\n )\ndsx0BundleIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 1),\n Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))\n ).setMaxAccess('noaccess')\nif mibBuilder.loadTexts:\n dsx0BundleIndex.setDescription(\n \"\"\"A unique identifier for a ds0Bundle. This is not\nthe same value as ifIndex. 
This table is not\nindexed by ifIndex because the manager has to\nchoose the index in a createable row and the agent\nmust be allowed to select ifIndex values.\"\"\"\n )\ndsx0BundleIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 2),\n InterfaceIndex()).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n dsx0BundleIfIndex.setDescription(\n \"\"\"The ifIndex value the agent selected for the\n(new) ds0Bundle interface.\"\"\"\n )\ndsx0BundleCircuitIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, \n 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))\n ).setMaxAccess('readcreate')\nif mibBuilder.loadTexts:\n dsx0BundleCircuitIdentifier.setDescription(\n \"\"\"This variable contains the transmission vendor's\ncircuit identifier, for the purpose of\nfacilitating troubleshooting.\"\"\"\n )\ndsx0BundleRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 4),\n RowStatus()).setMaxAccess('readcreate')\nif mibBuilder.loadTexts:\n dsx0BundleRowStatus.setDescription(\n \"\"\"This object is used to create and delete rows in\nthis table.\"\"\")\nds0BundleConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4))\nds0BundleGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 1))\nds0BundleCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 2))\nds0BondingGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 1)).setObjects(*\n (('DS0BUNDLE-MIB', 'dsx0BondMode'), ('DS0BUNDLE-MIB', 'dsx0BondStatus'),\n ('DS0BUNDLE-MIB', 'dsx0BondRowStatus')))\nif mibBuilder.loadTexts:\n ds0BondingGroup.setDescription(\n \"\"\"A collection of objects providing\nconfiguration information applicable\nto all DS0 interfaces.\"\"\"\n )\nds0BundleConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 2)\n ).setObjects(*(('DS0BUNDLE-MIB', 'dsx0BundleIfIndex'), ('DS0BUNDLE-MIB',\n 'dsx0BundleRowStatus'), ('DS0BUNDLE-MIB', 'dsx0BundleCircuitIdentifier'\n ), ('DS0BUNDLE-MIB', 'dsx0BundleNextIndex')))\nif mibBuilder.loadTexts:\n ds0BundleConfigGroup.setDescription(\n \"\"\"A collection of objects providing the ability to\ncreate a new ds0Bundle in the ifTable as well as\nconfiguration information about the ds0Bundle.\"\"\"\n )\nds0BundleCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 10, 82, 4, 2, 1)\n ).setObjects(*(('DS0BUNDLE-MIB', 'ds0BundleConfigGroup'), (\n 'DS0BUNDLE-MIB', 'ds0BondingGroup')))\nif mibBuilder.loadTexts:\n ds0BundleCompliance.setDescription(\n 'The compliance statement for DS0Bundle\\ninterfaces.')\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', PYSNMP_MODULE_ID=ds0Bundle)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0Bundle=ds0Bundle,\n dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry,\n dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus,\n dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=\n dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=\n dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=\n dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=\n dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus,\n ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=\n ds0BundleGroups, ds0BundleCompliances=ds0BundleCompliances)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BondingGroup=ds0BondingGroup,\n ds0BundleConfigGroup=ds0BundleConfigGroup)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BundleCompliance=\n ds0BundleCompliance)\n",
"step-4": "# PySNMP SMI module. Autogenerated from smidump -f python DS0BUNDLE-MIB\n# by libsmi2pysnmp-0.1.3 at Thu May 22 11:57:37 2014,\n# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)\n\n# Imports\n\n( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols(\"ASN1\", \"Integer\", \"ObjectIdentifier\", \"OctetString\")\n( NamedValues, ) = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\")\n( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"SingleValueConstraint\", \"ValueRangeConstraint\", \"ValueSizeConstraint\")\n( InterfaceIndex, ifIndex, ) = mibBuilder.importSymbols(\"IF-MIB\", \"InterfaceIndex\", \"ifIndex\")\n( ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"ModuleCompliance\", \"ObjectGroup\")\n( Bits, Integer32, ModuleIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, transmission, ) = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"Bits\", \"Integer32\", \"ModuleIdentity\", \"MibIdentifier\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"TimeTicks\", \"transmission\")\n( DisplayString, RowStatus, TestAndIncr, ) = mibBuilder.importSymbols(\"SNMPv2-TC\", \"DisplayString\", \"RowStatus\", \"TestAndIncr\")\n\n# Objects\n\nds0Bundle = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 82)).setRevisions((\"1998-07-16 16:30\",\"1998-05-24 20:10\",))\nif mibBuilder.loadTexts: ds0Bundle.setOrganization(\"IETF Trunk MIB Working Group\")\nif mibBuilder.loadTexts: ds0Bundle.setContactInfo(\" David Fowler\\n\\nPostal: Newbridge Networks Corporation\\n 600 March Road\\n Kanata, Ontario, Canada K2K 2E6\\n\\n Tel: +1 613 591 3600\\n Fax: +1 613 599 3619\\n\\nE-mail: [email protected]\")\nif mibBuilder.loadTexts: ds0Bundle.setDescription(\"The MIB module to describe\\nDS0 Bundle interfaces objects.\")\ndsx0BondingTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 1))\nif mibBuilder.loadTexts: dsx0BondingTable.setDescription(\"The DS0 Bonding table.\")\ndsx0BondingEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 1, 1)).setIndexNames((0, \"IF-MIB\", \"ifIndex\"))\nif mibBuilder.loadTexts: dsx0BondingEntry.setDescription(\"An entry in the DS0 Bonding table. There is a\\nrow in this table for each DS0Bundle interface.\")\ndsx0BondMode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 1), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,6,3,4,2,)).subtype(namedValues=NamedValues((\"none\", 1), (\"other\", 2), (\"mode0\", 3), (\"mode1\", 4), (\"mode2\", 5), (\"mode3\", 6), ))).setMaxAccess(\"readcreate\")\nif mibBuilder.loadTexts: dsx0BondMode.setDescription(\"This object indicates which BONDing mode is used,\\nif any, for a ds0Bundle. Mode0 provides parameter\\nand number exchange with no synchronization. Mode\\n1 provides parameter and number exchange. Mode 1\\nalso provides synchronization during\\ninitialization but does not include inband\\nmonitoring. Mode 2 provides all of the above plus\\ninband monitoring. Mode 2 also steals 1/64th of\\nthe bandwidth of each channel (thus not supporting\\nn x 56/64 kbit/s data channels for most values of\\nn). Mode 3 provides all of the above, but also\\nprovides n x 56/64 kbit/s data channels. 
Most\\ncommon implementations of Mode 3 add an extra\\nchannel to support the inband monitoring overhead.\\nModeNone should be used when the interface is not\\nperforming bandwidth-on-demand.\")\ndsx0BondStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 2), Integer().subtype(subtypeSpec=SingleValueConstraint(1,3,2,)).subtype(namedValues=NamedValues((\"idle\", 1), (\"callSetup\", 2), (\"dataTransfer\", 3), ))).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: dsx0BondStatus.setDescription(\"This object indicates the current status of the\\nbonding call using this ds0Bundle. idle(1) should\\nbe used when the bonding mode is set to none(1).\")\ndsx0BondRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 3), RowStatus()).setMaxAccess(\"readcreate\")\nif mibBuilder.loadTexts: dsx0BondRowStatus.setDescription(\"This object is used to create new rows in this\\ntable, modify existing rows, and to delete\\nexisting rows.\")\ndsx0BundleNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 10, 82, 2), TestAndIncr()).setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: dsx0BundleNextIndex.setDescription(\"This object is used to assist the manager in\\nselecting a value for dsx0BundleIndex. Because\\nthis object is of syntax TestAndIncr (see the\\nSNMPv2-TC document, RFC 1903) it can also be used\\nto avoid race conditions with multiple managers\\ntrying to create rows in the table.\\n\\nIf the result of the SET for dsx0BundleNextIndex\\nis not success, this means the value has been\\nchanged from index (i.e. another manager used the\\nvalue), so a new value is required.\\n\\nThe algorithm is:\\ndone = false\\nwhile done == false\\n index = GET (dsx0BundleNextIndex.0)\\n SET (dsx0BundleNextIndex.0=index)\\n if (set failed)\\n done = false\\n else\\n SET(dsx0BundleRowStatus.index=createAndGo)\\n if (set failed)\\n done = false\\n else\\n done = true\\n other error handling\")\ndsx0BundleTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 3))\nif mibBuilder.loadTexts: dsx0BundleTable.setDescription(\"There is an row in this table for each ds0Bundle\\nin the system. This table can be used to\\n(indirectly) create rows in the ifTable with\\nifType = 'ds0Bundle(82)'.\")\ndsx0BundleEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 3, 1)).setIndexNames((0, \"DS0BUNDLE-MIB\", \"dsx0BundleIndex\"))\nif mibBuilder.loadTexts: dsx0BundleEntry.setDescription(\"There is a row in entry in this table for each\\nds0Bundle interface.\")\ndsx0BundleIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess(\"noaccess\")\nif mibBuilder.loadTexts: dsx0BundleIndex.setDescription(\"A unique identifier for a ds0Bundle. This is not\\nthe same value as ifIndex. 
This table is not\\nindexed by ifIndex because the manager has to\\nchoose the index in a createable row and the agent\\nmust be allowed to select ifIndex values.\")\ndsx0BundleIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 2), InterfaceIndex()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: dsx0BundleIfIndex.setDescription(\"The ifIndex value the agent selected for the\\n(new) ds0Bundle interface.\")\ndsx0BundleCircuitIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess(\"readcreate\")\nif mibBuilder.loadTexts: dsx0BundleCircuitIdentifier.setDescription(\"This variable contains the transmission vendor's\\ncircuit identifier, for the purpose of\\nfacilitating troubleshooting.\")\ndsx0BundleRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 4), RowStatus()).setMaxAccess(\"readcreate\")\nif mibBuilder.loadTexts: dsx0BundleRowStatus.setDescription(\"This object is used to create and delete rows in\\nthis table.\")\nds0BundleConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4))\nds0BundleGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 1))\nds0BundleCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 2))\n\n# Augmentions\n\n# Groups\n\nds0BondingGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 1)).setObjects(*((\"DS0BUNDLE-MIB\", \"dsx0BondMode\"), (\"DS0BUNDLE-MIB\", \"dsx0BondStatus\"), (\"DS0BUNDLE-MIB\", \"dsx0BondRowStatus\"), ) )\nif mibBuilder.loadTexts: ds0BondingGroup.setDescription(\"A collection of objects providing\\nconfiguration information applicable\\nto all DS0 interfaces.\")\nds0BundleConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 2)).setObjects(*((\"DS0BUNDLE-MIB\", \"dsx0BundleIfIndex\"), (\"DS0BUNDLE-MIB\", \"dsx0BundleRowStatus\"), (\"DS0BUNDLE-MIB\", \"dsx0BundleCircuitIdentifier\"), (\"DS0BUNDLE-MIB\", \"dsx0BundleNextIndex\"), ) )\nif mibBuilder.loadTexts: ds0BundleConfigGroup.setDescription(\"A collection of objects providing the ability to\\ncreate a new ds0Bundle in the ifTable as well as\\nconfiguration information about the ds0Bundle.\")\n\n# Compliances\n\nds0BundleCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 10, 82, 4, 2, 1)).setObjects(*((\"DS0BUNDLE-MIB\", \"ds0BundleConfigGroup\"), (\"DS0BUNDLE-MIB\", \"ds0BondingGroup\"), ) )\nif mibBuilder.loadTexts: ds0BundleCompliance.setDescription(\"The compliance statement for DS0Bundle\\ninterfaces.\")\n\n# Exports\n\n# Module identity\nmibBuilder.exportSymbols(\"DS0BUNDLE-MIB\", PYSNMP_MODULE_ID=ds0Bundle)\n\n# Objects\nmibBuilder.exportSymbols(\"DS0BUNDLE-MIB\", ds0Bundle=ds0Bundle, dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry, dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus, dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus, ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=ds0BundleGroups, ds0BundleCompliances=ds0BundleCompliances)\n\n# Groups\nmibBuilder.exportSymbols(\"DS0BUNDLE-MIB\", ds0BondingGroup=ds0BondingGroup, ds0BundleConfigGroup=ds0BundleConfigGroup)\n\n# Compliances\nmibBuilder.exportSymbols(\"DS0BUNDLE-MIB\", ds0BundleCompliance=ds0BundleCompliance)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.apps import AppConfig
class Sharem8Config(AppConfig):
name = 'ShareM8'
|
normal
|
{
"blob_id": "fd4d785d933c3a200f4aba094ecfe1e1c76737a5",
"index": 7629,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Sharem8Config(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Sharem8Config(AppConfig):\n name = 'ShareM8'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass Sharem8Config(AppConfig):\n name = 'ShareM8'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.conf.urls import url
from basket import views
urlpatterns = [
url(r'^$', views.view_basket, name='basket'),
url(r'^add/(?P<product_pk>\d+)$', views.add_to_basket, name='add_to_basket'),
url(r'^remove/(?P<basketitem_pk>\d+)$', views.remove_from_basket, name='remove_from_basket'),
]
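# Illustrative resolution of these patterns (hypothetical pks), assuming this urls.py is
# included under a 'basket/' prefix in the project URLconf:
#   /basket/          -> views.view_basket(request)
#   /basket/add/5     -> views.add_to_basket(request, product_pk='5')
#   /basket/remove/7  -> views.remove_from_basket(request, basketitem_pk='7')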
|
normal
|
{
"blob_id": "19d5e9db142237d1cb2276ccaf083ca4a96109fc",
"index": 3670,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', views.view_basket, name='basket'), url(\n '^add/(?P<product_pk>\\\\d+)$', views.add_to_basket, name='add_to_basket'\n ), url('^remove/(?P<basketitem_pk>\\\\d+)$', views.remove_from_basket,\n name='remove_from_basket')]\n",
"step-3": "from django.conf.urls import url\nfrom basket import views\nurlpatterns = [url('^$', views.view_basket, name='basket'), url(\n '^add/(?P<product_pk>\\\\d+)$', views.add_to_basket, name='add_to_basket'\n ), url('^remove/(?P<basketitem_pk>\\\\d+)$', views.remove_from_basket,\n name='remove_from_basket')]\n",
"step-4": "from django.conf.urls import url\n\nfrom basket import views\n\n\nurlpatterns = [\n url(r'^$', views.view_basket, name='basket'),\n url(r'^add/(?P<product_pk>\\d+)$', views.add_to_basket, name='add_to_basket'),\n url(r'^remove/(?P<basketitem_pk>\\d+)$', views.remove_from_basket, name='remove_from_basket'),\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import unittest
import calla.test
TestCase = calla.test.TestCase
from math import pi
from calla.TB.RC_strength import *
class test(TestCase):
def test1(self):
"""
标准验证:铁路混凝土结构设计原理(容许应力计算法).ppt 例1
"""
b = 200
h0 = 411
As = 763
n = 15
M = 31.5
r = beam_strength.cal_σ1(b,h0,As,n,M)
print('σc,σs,x = ',r)
        # keep the relative error within 1%
assert abs(r[0]-5.26)/5.26<0.01
assert abs(r[1]-115.3)/115.3<0.01
assert abs(r[2]-167.1)/167.1<0.01
def test2(self):
"""
标准验证:混凝土结构基本原理答案吕晓寅版第12章
"""
b = 250
h = 350
l0 = 5
a = 40
a_ = 40
Ec = 3.0E4 #MPa
As = 1017
As_ = 1017
n = 10
M = 20 #kN
N = 450
r = column_strength.solve_stress(b,h,l0,a,a_,Ec,As,As_,n,M,N,0)
print('σc,σs,σs\'\n',r)
assert abs(r[0]-7.56)/7.56<0.01
assert abs(r[2]-67.8)/67.8<0.01
    def test3(self):  # ad-hoc test, modify freely
b = 600
h0 = 937.5
As = 3434.375
n = 10
M = 700
V = 300
r = beam_strength.cal_σ1(b,h0,As,n,M)
s = beam_strength.shear_stress(b,h0,As,n,V)
print('σc,σs,x = \n',r)
print('τ = ',s)
M1 = 10
M2 = 10
σs = r[1]
Es = 2.0E5
d = 28
a = 62.5
n1 = As/(pi/4*d**2)
wf = crack_width.solve_wf(M1,M2,M,σs,Es,d,a,b,n1)
print('wf = ',wf)
    def test_column_strength(self):  # ad-hoc test, modify freely
b = 1200
h = 1200
l0 = 5
a = 90
a_ = 90
Ec = 3.45E4 #MPa
As = 12316
As_ = 12316
n = 10
M = 2800 #kN
N = 14000
r = column_strength.solve_stress(b,h,l0,a,a_,Ec,As,As_,n,M,N,0)
print('σc,σs,σs\'\n',r)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "acb9b6128a3432aecf3498e1d27bdff204fee0f4",
"index": 8110,
"step-1": "<mask token>\n\n\nclass test(TestCase):\n <mask token>\n\n def test2(self):\n \"\"\"\n 标准验证:混凝土结构基本原理答案吕晓寅版第12章\n \"\"\"\n b = 250\n h = 350\n l0 = 5\n a = 40\n a_ = 40\n Ec = 30000.0\n As = 1017\n As_ = 1017\n n = 10\n M = 20\n N = 450\n r = column_strength.solve_stress(b, h, l0, a, a_, Ec, As, As_, n, M,\n N, 0)\n print(\"σc,σs,σs'\\n\", r)\n assert abs(r[0] - 7.56) / 7.56 < 0.01\n assert abs(r[2] - 67.8) / 67.8 < 0.01\n\n def test3(self):\n b = 600\n h0 = 937.5\n As = 3434.375\n n = 10\n M = 700\n V = 300\n r = beam_strength.cal_σ1(b, h0, As, n, M)\n s = beam_strength.shear_stress(b, h0, As, n, V)\n print('σc,σs,x = \\n', r)\n print('τ = ', s)\n M1 = 10\n M2 = 10\n σs = r[1]\n Es = 200000.0\n d = 28\n a = 62.5\n n1 = As / (pi / 4 * d ** 2)\n wf = crack_width.solve_wf(M1, M2, M, σs, Es, d, a, b, n1)\n print('wf = ', wf)\n\n def test_column_strength(self):\n b = 1200\n h = 1200\n l0 = 5\n a = 90\n a_ = 90\n Ec = 34500.0\n As = 12316\n As_ = 12316\n n = 10\n M = 2800\n N = 14000\n r = column_strength.solve_stress(b, h, l0, a, a_, Ec, As, As_, n, M,\n N, 0)\n print(\"σc,σs,σs'\\n\", r)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass test(TestCase):\n\n def test1(self):\n \"\"\"\n 标准验证:铁路混凝土结构设计原理(容许应力计算法).ppt 例1\n \"\"\"\n b = 200\n h0 = 411\n As = 763\n n = 15\n M = 31.5\n r = beam_strength.cal_σ1(b, h0, As, n, M)\n print('σc,σs,x = ', r)\n assert abs(r[0] - 5.26) / 5.26 < 0.01\n assert abs(r[1] - 115.3) / 115.3 < 0.01\n assert abs(r[2] - 167.1) / 167.1 < 0.01\n\n def test2(self):\n \"\"\"\n 标准验证:混凝土结构基本原理答案吕晓寅版第12章\n \"\"\"\n b = 250\n h = 350\n l0 = 5\n a = 40\n a_ = 40\n Ec = 30000.0\n As = 1017\n As_ = 1017\n n = 10\n M = 20\n N = 450\n r = column_strength.solve_stress(b, h, l0, a, a_, Ec, As, As_, n, M,\n N, 0)\n print(\"σc,σs,σs'\\n\", r)\n assert abs(r[0] - 7.56) / 7.56 < 0.01\n assert abs(r[2] - 67.8) / 67.8 < 0.01\n\n def test3(self):\n b = 600\n h0 = 937.5\n As = 3434.375\n n = 10\n M = 700\n V = 300\n r = beam_strength.cal_σ1(b, h0, As, n, M)\n s = beam_strength.shear_stress(b, h0, As, n, V)\n print('σc,σs,x = \\n', r)\n print('τ = ', s)\n M1 = 10\n M2 = 10\n σs = r[1]\n Es = 200000.0\n d = 28\n a = 62.5\n n1 = As / (pi / 4 * d ** 2)\n wf = crack_width.solve_wf(M1, M2, M, σs, Es, d, a, b, n1)\n print('wf = ', wf)\n\n def test_column_strength(self):\n b = 1200\n h = 1200\n l0 = 5\n a = 90\n a_ = 90\n Ec = 34500.0\n As = 12316\n As_ = 12316\n n = 10\n M = 2800\n N = 14000\n r = column_strength.solve_stress(b, h, l0, a, a_, Ec, As, As_, n, M,\n N, 0)\n print(\"σc,σs,σs'\\n\", r)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass test(TestCase):\n\n def test1(self):\n \"\"\"\n 标准验证:铁路混凝土结构设计原理(容许应力计算法).ppt 例1\n \"\"\"\n b = 200\n h0 = 411\n As = 763\n n = 15\n M = 31.5\n r = beam_strength.cal_σ1(b, h0, As, n, M)\n print('σc,σs,x = ', r)\n assert abs(r[0] - 5.26) / 5.26 < 0.01\n assert abs(r[1] - 115.3) / 115.3 < 0.01\n assert abs(r[2] - 167.1) / 167.1 < 0.01\n\n def test2(self):\n \"\"\"\n 标准验证:混凝土结构基本原理答案吕晓寅版第12章\n \"\"\"\n b = 250\n h = 350\n l0 = 5\n a = 40\n a_ = 40\n Ec = 30000.0\n As = 1017\n As_ = 1017\n n = 10\n M = 20\n N = 450\n r = column_strength.solve_stress(b, h, l0, a, a_, Ec, As, As_, n, M,\n N, 0)\n print(\"σc,σs,σs'\\n\", r)\n assert abs(r[0] - 7.56) / 7.56 < 0.01\n assert abs(r[2] - 67.8) / 67.8 < 0.01\n\n def test3(self):\n b = 600\n h0 = 937.5\n As = 3434.375\n n = 10\n M = 700\n V = 300\n r = beam_strength.cal_σ1(b, h0, As, n, M)\n s = beam_strength.shear_stress(b, h0, As, n, V)\n print('σc,σs,x = \\n', r)\n print('τ = ', s)\n M1 = 10\n M2 = 10\n σs = r[1]\n Es = 200000.0\n d = 28\n a = 62.5\n n1 = As / (pi / 4 * d ** 2)\n wf = crack_width.solve_wf(M1, M2, M, σs, Es, d, a, b, n1)\n print('wf = ', wf)\n\n def test_column_strength(self):\n b = 1200\n h = 1200\n l0 = 5\n a = 90\n a_ = 90\n Ec = 34500.0\n As = 12316\n As_ = 12316\n n = 10\n M = 2800\n N = 14000\n r = column_strength.solve_stress(b, h, l0, a, a_, Ec, As, As_, n, M,\n N, 0)\n print(\"σc,σs,σs'\\n\", r)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport calla.test\nTestCase = calla.test.TestCase\nfrom math import pi\nfrom calla.TB.RC_strength import *\n\n\nclass test(TestCase):\n\n def test1(self):\n \"\"\"\n 标准验证:铁路混凝土结构设计原理(容许应力计算法).ppt 例1\n \"\"\"\n b = 200\n h0 = 411\n As = 763\n n = 15\n M = 31.5\n r = beam_strength.cal_σ1(b, h0, As, n, M)\n print('σc,σs,x = ', r)\n assert abs(r[0] - 5.26) / 5.26 < 0.01\n assert abs(r[1] - 115.3) / 115.3 < 0.01\n assert abs(r[2] - 167.1) / 167.1 < 0.01\n\n def test2(self):\n \"\"\"\n 标准验证:混凝土结构基本原理答案吕晓寅版第12章\n \"\"\"\n b = 250\n h = 350\n l0 = 5\n a = 40\n a_ = 40\n Ec = 30000.0\n As = 1017\n As_ = 1017\n n = 10\n M = 20\n N = 450\n r = column_strength.solve_stress(b, h, l0, a, a_, Ec, As, As_, n, M,\n N, 0)\n print(\"σc,σs,σs'\\n\", r)\n assert abs(r[0] - 7.56) / 7.56 < 0.01\n assert abs(r[2] - 67.8) / 67.8 < 0.01\n\n def test3(self):\n b = 600\n h0 = 937.5\n As = 3434.375\n n = 10\n M = 700\n V = 300\n r = beam_strength.cal_σ1(b, h0, As, n, M)\n s = beam_strength.shear_stress(b, h0, As, n, V)\n print('σc,σs,x = \\n', r)\n print('τ = ', s)\n M1 = 10\n M2 = 10\n σs = r[1]\n Es = 200000.0\n d = 28\n a = 62.5\n n1 = As / (pi / 4 * d ** 2)\n wf = crack_width.solve_wf(M1, M2, M, σs, Es, d, a, b, n1)\n print('wf = ', wf)\n\n def test_column_strength(self):\n b = 1200\n h = 1200\n l0 = 5\n a = 90\n a_ = 90\n Ec = 34500.0\n As = 12316\n As_ = 12316\n n = 10\n M = 2800\n N = 14000\n r = column_strength.solve_stress(b, h, l0, a, a_, Ec, As, As_, n, M,\n N, 0)\n print(\"σc,σs,σs'\\n\", r)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport calla.test\nTestCase = calla.test.TestCase\nfrom math import pi\nfrom calla.TB.RC_strength import *\n\nclass test(TestCase):\n def test1(self):\n \"\"\"\n 标准验证:铁路混凝土结构设计原理(容许应力计算法).ppt 例1\n \"\"\"\n b = 200\n h0 = 411\n As = 763\n n = 15\n M = 31.5\n r = beam_strength.cal_σ1(b,h0,As,n,M)\n print('σc,σs,x = ',r)\n # 控制误差范围1%\n assert abs(r[0]-5.26)/5.26<0.01\n assert abs(r[1]-115.3)/115.3<0.01\n assert abs(r[2]-167.1)/167.1<0.01\n\n def test2(self):\n \"\"\"\n 标准验证:混凝土结构基本原理答案吕晓寅版第12章\n \"\"\"\n b = 250\n h = 350\n l0 = 5\n a = 40\n a_ = 40\n Ec = 3.0E4 #MPa\n As = 1017\n As_ = 1017\n n = 10\n M = 20 #kN\n N = 450\n r = column_strength.solve_stress(b,h,l0,a,a_,Ec,As,As_,n,M,N,0)\n print('σc,σs,σs\\'\\n',r)\n assert abs(r[0]-7.56)/7.56<0.01\n assert abs(r[2]-67.8)/67.8<0.01\n\n def test3(self): #随意修改测试\n b = 600\n h0 = 937.5\n As = 3434.375\n n = 10\n M = 700\n V = 300\n r = beam_strength.cal_σ1(b,h0,As,n,M)\n s = beam_strength.shear_stress(b,h0,As,n,V)\n print('σc,σs,x = \\n',r)\n print('τ = ',s)\n M1 = 10\n M2 = 10\n σs = r[1]\n Es = 2.0E5\n d = 28\n a = 62.5\n n1 = As/(pi/4*d**2)\n wf = crack_width.solve_wf(M1,M2,M,σs,Es,d,a,b,n1)\n print('wf = ',wf)\n\n def test_column_strength(self): #随意修改测试\n b = 1200\n h = 1200\n l0 = 5\n a = 90\n a_ = 90\n Ec = 3.45E4 #MPa\n As = 12316\n As_ = 12316\n n = 10\n M = 2800 #kN\n N = 14000\n r = column_strength.solve_stress(b,h,l0,a,a_,Ec,As,As_,n,M,N,0)\n print('σc,σs,σs\\'\\n',r)\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
...
...
model = Sequential()
model.add(Conv2D(32, kernel_size=3, input_shape=(256, 256, 3)))  # 32 filters, 3x3 kernel, 256x256 RGB input
...
...
|
normal
|
{
"blob_id": "ad054febac3a04c625653a2f3864506eeb672d9e",
"index": 6273,
"step-1": "...\n...\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=3, input_shape=(256, 256, 3))\n...\n...\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# required !!!
# pip install selenium
# pip install webdriver-manager
from theMachine import loops
# fill in the phone number and the message
# (the number can also be given as a list/array of numbers)
phoneNumber = "fill the number"
message = "fill with ur message"
loop = 1  # how many times you want to loop
loops(loop, phoneNumber, message)  # run: repetition count, target number, message text
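# Illustrative sketch (hypothetical numbers; assumes loops() takes one number per call,
# as in the line above). Several recipients could then be handled like this:
#
# numbers = ["+1555000001", "+1555000002"]
# for number in numbers:
#     loops(loop, number, message)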
|
normal
|
{
"blob_id": "81dfdf0479fc1f136fa5153840d8c7015f9db676",
"index": 32,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nloops(loop, phoneNumber, message)\n",
"step-3": "<mask token>\nphoneNumber = 'fill the number'\nmessage = 'fill with ur message'\nloop = 1\nloops(loop, phoneNumber, message)\n",
"step-4": "from theMachine import loops\nphoneNumber = 'fill the number'\nmessage = 'fill with ur message'\nloop = 1\nloops(loop, phoneNumber, message)\n",
"step-5": "# required !!!\r\n# pip install selenium\r\n# pip install webdriver-manager\r\n\r\nfrom theMachine import loops\r\n\r\n# fill the number and message\r\n# you can fill the number with array\r\nphoneNumber = \"fill the number\"\r\nmessage = \"fill with ur message\"\r\nloop = 1 # this how many u want to loop\r\n\r\nloops(loop, phoneNumber, message) # input how many u want to loop\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#! /usr/bin/env python2
############################################################
# Program is part of PySAR v1.2 #
# Copyright(c) 2015, Heresh Fattahi, Zhang Yunjun #
# Author: Heresh Fattahi, Zhang Yunjun #
############################################################
import os
import sys
import argparse
import re
try:
import pyaps as pa
except:
sys.exit('Cannot import pyaps into Python!')
import h5py
import numpy as np
import pysar._datetime as ptime
import pysar._pysar_utilities as ut
import pysar._readfile as readfile
import pysar._writefile as writefile
###############################################################
def get_delay(grib_file, atr, inps_dict):
'''Get delay matrix using PyAPS for one acquisition
Inputs:
grib_file - strng, grib file path
atr - dict, including the following attributes:
dem_file - string, DEM file path
grib_source - string, Weather re-analysis data source
delay_type - string, comb/dry/wet
ref_y/x - string, reference pixel row/col number
inc_angle - np.array, 0/1/2 D
Output:
phs - 2D np.array, absolute tropospheric phase delay relative to ref_y/x
'''
if 'X_FIRST' in atr.keys():
aps = pa.PyAPS_geo(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
else:
aps = pa.PyAPS_rdr(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
phs = np.zeros((aps.ny, aps.nx), dtype=np.float32)
aps.getdelay(phs, inc=0.0)
# Get relative phase delay in space
yref = int(atr['ref_y'])
xref = int(atr['ref_x'])
phs -= phs[yref, xref]
# project into LOS direction
phs /= np.cos(inps_dict['inc_angle'])
# reverse the sign for consistency between different phase correction steps/methods
phs *= -1
return phs
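# Illustrative call (hypothetical values; keys follow the docstring above, radar coordinates
# assumed since 'X_FIRST' is absent from atr):
#   phs = get_delay('./WEATHER/ECMWF/ERA-Int_20100101_06.grb',
#                   {'ref_y': '100', 'ref_x': '200'},
#                   {'dem_file': 'radar_4rlks.hgt', 'grib_source': 'ECMWF',
#                    'delay_type': 'comb', 'inc_angle': 30*np.pi/180.0})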
def date_list2grib_file(date_list, hour, grib_source, grib_dir):
grib_file_list = []
for d in date_list:
grib_file = grib_dir+'/'
if grib_source == 'ECMWF' : grib_file += 'ERA-Int_%s_%s.grb' % (d, hour)
elif grib_source == 'ERA' : grib_file += 'ERA_%s_%s.grb' % (d, hour)
elif grib_source == 'NARR' : grib_file += 'narr-a_221_%s_%s00_000.grb' % (d, hour)
elif grib_source == 'MERRA' : grib_file += 'merra-%s-%s.nc4' % (d, hour)
elif grib_source == 'MERRA1': grib_file += 'merra-%s-%s.hdf' % (d, hour)
grib_file_list.append(grib_file)
return grib_file_list
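# Illustrative mapping (hypothetical inputs):
#   date_list2grib_file(['20100101'], '06', 'ECMWF', './WEATHER/ECMWF')
#   returns ['./WEATHER/ECMWF/ERA-Int_20100101_06.grb']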
def dload_grib(date_list, hour, grib_source='ECMWF', weather_dir='./'):
'''Download weather re-analysis grib files using PyAPS
Inputs:
date_list : list of string in YYYYMMDD format
hour : string in HH:MM or HH format
grib_source : string,
weather_dir : string,
Output:
grib_file_list : list of string
'''
## Grib data directory
weather_dir = os.path.abspath(weather_dir)
grib_dir = weather_dir+'/'+grib_source
if not os.path.isdir(grib_dir):
print 'making directory: '+grib_dir
os.makedirs(grib_dir)
## Date list to grib file list
grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)
## Get date list to download (skip already downloaded files)
grib_file_existed = ut.get_file_list(grib_file_list)
if grib_file_existed:
grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])
grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])
grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]
print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)
print 'number of grib files existed : %d' % len(grib_file_existed)
if grib_file_corrupted:
print '------------------------------------------------------------------------------'
print 'corrupted grib files detected! Delete them and re-download...'
print 'number of grib files corrupted : %d' % len(grib_file_corrupted)
for i in grib_file_corrupted:
rmCmd = 'rm '+i
print rmCmd
os.system(rmCmd)
grib_file_existed.remove(i)
print '------------------------------------------------------------------------------'
grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))
date_list2download = [str(re.findall('\d{8}', i)[0]) for i in grib_file2download]
print 'number of grib files to download: %d' % len(date_list2download)
print '------------------------------------------------------------------------------\n'
## Download grib file using PyAPS
if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)
elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)
elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)
return grib_file_existed
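# Illustrative usage (hypothetical dates): download any missing ECMWF grib files
# for two epochs at 06:00 UTC into ./../WEATHER/ECMWF:
#   grib_files = dload_grib(['20100101', '20100113'], '06', grib_source='ECMWF', weather_dir='./../WEATHER')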
###############################################################
EXAMPLE='''example:
tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5
tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER
tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23
tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5
tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download
tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download
'''
REFERENCE='''reference:
Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric
phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,
doi:10.1029/2011GL048757
'''
TEMPLATE='''
## 7. Tropospheric Delay Correction (optional and recommended)
## correct tropospheric delay using the following methods:
## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)
## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)
## c. base_trop_cor - (not recommended) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)
pysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps
pysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method
pysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method
pysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram
'''
DATA_INFO='''
re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis
------------------------------------------------------------------------------------------------------------
ERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var
MERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var
To download MERRA2, you need an Earthdata account, and pre-authorize the "NASA GESDISC DATA ARCHIVE" application, following https://disc.gsfc.nasa.gov/earthdata-login.
'''
def cmdLineParse():
parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\n'+\
' PyAPS is used to download and calculate the delay for each time-series epoch.',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=REFERENCE+'\n'+DATA_INFO+'\n'+EXAMPLE)
parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')
parser.add_argument('-d','--dem', dest='dem_file',\
help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')
parser.add_argument('-i', dest='inc_angle', default='30',\
                        help='a file containing all incidence angles, or a single number used for the whole image.')
parser.add_argument('--weather-dir', dest='weather_dir', \
help='directory to put downloaded weather data, i.e. ./../WEATHER\n'+\
'use directory of input timeseries_file if not specified.')
parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\
help='Delay type to calculate, comb contains both wet and dry delays')
parser.add_argument('--download', action='store_true', help='Download weather data only.')
parser.add_argument('--date-list', dest='date_list_file',\
help='Read the first column of text file as list of date to download data\n'+\
'in YYYYMMDD or YYMMDD format')
parser.add_argument('--ref-yx', dest='ref_yx', type=int, nargs=2, help='reference pixel in y/x')
parser.add_argument('-s', dest='weather_model',\
default='ECMWF', choices={'ECMWF','ERA-Interim','ERA','MERRA','MERRA1','NARR'},\
help='source of the atmospheric data.\n'+\
'By the time of 2018-Mar-06, ERA and ECMWF data download link is working.\n'+\
'NARR is working for 1979-Jan to 2014-Oct.\n'+\
'MERRA(2) is not working.')
parser.add_argument('--hour', help='time of data in HH, e.g. 12, 06')
parser.add_argument('--template', dest='template_file',\
help='template file with input options below:\n'+TEMPLATE)
    parser.add_argument('-o', dest='out_file', help='Output file name for tropospheric corrected timeseries.')
inps = parser.parse_args()
# Calculate DELAY or DOWNLOAD DATA ONLY, required one of them
if not inps.download and not inps.dem_file and ( not inps.timeseries_file or not inps.date_list_file ):
parser.print_help()
sys.exit(1)
return inps
###############################################################
def main(argv):
inps = cmdLineParse()
k = None
atr = dict()
if inps.timeseries_file:
inps.timeseries_file = ut.get_file_list([inps.timeseries_file])[0]
atr = readfile.read_attribute(inps.timeseries_file)
k = atr['FILE_TYPE']
elif inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
atr = readfile.read_attribute(inps.dem_file)
if 'ref_y' not in atr.keys() and inps.ref_yx:
print 'No reference info found in input file, use input ref_yx: '+str(inps.ref_yx)
atr['ref_y'] = inps.ref_yx[0]
atr['ref_x'] = inps.ref_yx[1]
##Read Incidence angle: to map the zenith delay to the slant delay
if os.path.isfile(inps.inc_angle):
inps.inc_angle = readfile.read(inps.inc_angle, epoch='incidenceAngle')[0]
else:
inps.inc_angle = float(inps.inc_angle)
print 'incidence angle: '+str(inps.inc_angle)
inps.inc_angle = inps.inc_angle*np.pi/180.0
##Prepare DEM file in ROI_PAC format for PyAPS to read
if inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
if os.path.splitext(inps.dem_file)[1] in ['.h5']:
print 'convert DEM file to ROIPAC format'
dem, atr_dem = readfile.read(inps.dem_file, epoch='height')
if 'Y_FIRST' in atr.keys():
atr_dem['FILE_TYPE'] = '.dem'
else:
atr_dem['FILE_TYPE'] = '.hgt'
outname = os.path.splitext(inps.dem_file)[0]+'4pyaps'+atr_dem['FILE_TYPE']
inps.dem_file = writefile.write(dem, atr_dem, outname)
print '*******************************************************************************'
print 'Downloading weather model data ...'
## Get Grib Source
if inps.weather_model in ['ECMWF','ERA-Interim']: inps.grib_source = 'ECMWF'
elif inps.weather_model == 'ERA' : inps.grib_source = 'ERA'
elif inps.weather_model == 'MERRA': inps.grib_source = 'MERRA'
elif inps.weather_model == 'NARR' : inps.grib_source = 'NARR'
    else: raise Exception('Unrecognized weather model: '+inps.weather_model)
print 'grib source: '+inps.grib_source
# Get weather directory
if not inps.weather_dir:
if inps.timeseries_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.timeseries_file))+'/../WEATHER'
elif inps.dem_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.dem_file))+'/../WEATHER'
else:
inps.weather_dir = os.path.abspath(os.getcwd())
print 'Store weather data into directory: '+inps.weather_dir
# Get date list to download
if not inps.date_list_file:
print 'read date list info from: '+inps.timeseries_file
h5 = h5py.File(inps.timeseries_file, 'r')
if 'timeseries' in h5.keys():
date_list = sorted(h5[k].keys())
elif k in ['interferograms','coherence','wrapped']:
ifgram_list = sorted(h5[k].keys())
date12_list = ptime.list_ifgram2date12(ifgram_list)
m_dates = [i.split('-')[0] for i in date12_list]
s_dates = [i.split('-')[1] for i in date12_list]
date_list = ptime.yyyymmdd(sorted(list(set(m_dates + s_dates))))
else:
raise ValueError('Un-support input file type:'+k)
h5.close()
else:
date_list = ptime.yyyymmdd(np.loadtxt(inps.date_list_file, dtype=str, usecols=(0,)).tolist())
print 'read date list info from: '+inps.date_list_file
# Get Acquisition time - hour
if not inps.hour:
inps.hour = ptime.closest_weather_product_time(atr['CENTER_LINE_UTC'], inps.grib_source)
    print 'Time of closest available product: '+inps.hour
## Download data using PyAPS
inps.grib_file_list = dload_grib(date_list, inps.hour, inps.weather_model, inps.weather_dir)
if inps.download:
print 'Download completed, exit as planned.'
return
print '*******************************************************************************'
    print 'Calculating delay for each epoch.'
## Calculate tropo delay using pyaps
length = int(atr['FILE_LENGTH'])
width = int(atr['WIDTH'])
date_num = len(date_list)
trop_ts = np.zeros((date_num, length, width), np.float32)
for i in range(date_num):
grib_file = inps.grib_file_list[i]
date = date_list[i]
print 'calculate phase delay on %s from file %s' % (date, os.path.basename(grib_file))
trop_ts[i] = get_delay(grib_file, atr, vars(inps))
## Convert relative phase delay on reference date
try: ref_date = atr['ref_date']
except: ref_date = date_list[0]
print 'convert to relative phase delay with reference date: '+ref_date
ref_idx = date_list.index(ref_date)
trop_ts -= np.tile(trop_ts[ref_idx,:,:], (date_num, 1, 1))
## Write tropospheric delay to HDF5
tropFile = inps.grib_source+'.h5'
print 'writing >>> %s' % (tropFile)
h5trop = h5py.File(tropFile, 'w')
group_trop = h5trop.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
group_trop.create_dataset(date, data=trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
# Write Attributes
for key,value in atr.iteritems():
group_trop.attrs[key] = value
h5trop.close()
## Write corrected Time series to HDF5
if k == 'timeseries':
if not inps.out_file:
inps.out_file = os.path.splitext(inps.timeseries_file)[0]+'_'+inps.grib_source+'.h5'
print 'writing >>> %s' % (inps.out_file)
h5ts = h5py.File(inps.timeseries_file, 'r')
h5tsCor = h5py.File(inps.out_file, 'w')
group_tsCor = h5tsCor.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
ts = h5ts['timeseries'].get(date)[:]
group_tsCor.create_dataset(date, data=ts-trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
h5ts.close()
# Write Attributes
for key,value in atr.iteritems():
group_tsCor.attrs[key] = value
h5tsCor.close()
# Delete temporary DEM file in ROI_PAC format
if '4pyaps' in inps.dem_file:
rmCmd = 'rm %s %s.rsc' % (inps.dem_file, inps.dem_file)
print rmCmd
os.system(rmCmd)
print 'Done.'
return inps.out_file
###############################################################
if __name__ == '__main__':
main(sys.argv[1:])
|
normal
|
{
"blob_id": "9515dcdfc0ece1a6740d6e7075bbcd1c20977590",
"index": 9157,
"step-1": "#! /usr/bin/env python2\n############################################################\n# Program is part of PySAR v1.2 #\n# Copyright(c) 2015, Heresh Fattahi, Zhang Yunjun #\n# Author: Heresh Fattahi, Zhang Yunjun #\n############################################################\n\n\nimport os\nimport sys\nimport argparse\nimport re\n\ntry:\n import pyaps as pa\nexcept:\n sys.exit('Cannot import pyaps into Python!')\n\nimport h5py\nimport numpy as np\n\nimport pysar._datetime as ptime\nimport pysar._pysar_utilities as ut\nimport pysar._readfile as readfile\nimport pysar._writefile as writefile\n\n\n###############################################################\ndef get_delay(grib_file, atr, inps_dict):\n '''Get delay matrix using PyAPS for one acquisition\n Inputs:\n grib_file - strng, grib file path\n atr - dict, including the following attributes:\n dem_file - string, DEM file path\n grib_source - string, Weather re-analysis data source\n delay_type - string, comb/dry/wet\n ref_y/x - string, reference pixel row/col number\n inc_angle - np.array, 0/1/2 D\n Output:\n phs - 2D np.array, absolute tropospheric phase delay relative to ref_y/x\n '''\n if 'X_FIRST' in atr.keys():\n aps = pa.PyAPS_geo(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\\\n verb=True, Del=inps_dict['delay_type'])\n else:\n aps = pa.PyAPS_rdr(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\\\n verb=True, Del=inps_dict['delay_type'])\n phs = np.zeros((aps.ny, aps.nx), dtype=np.float32)\n aps.getdelay(phs, inc=0.0)\n\n # Get relative phase delay in space\n yref = int(atr['ref_y'])\n xref = int(atr['ref_x'])\n phs -= phs[yref, xref]\n\n # project into LOS direction\n phs /= np.cos(inps_dict['inc_angle'])\n \n # reverse the sign for consistency between different phase correction steps/methods\n phs *= -1\n \n return phs\n\n\ndef date_list2grib_file(date_list, hour, grib_source, grib_dir):\n grib_file_list = []\n for d in date_list:\n grib_file = grib_dir+'/'\n if grib_source == 'ECMWF' : grib_file += 'ERA-Int_%s_%s.grb' % (d, hour)\n elif grib_source == 'ERA' : grib_file += 'ERA_%s_%s.grb' % (d, hour)\n elif grib_source == 'NARR' : grib_file += 'narr-a_221_%s_%s00_000.grb' % (d, hour)\n elif grib_source == 'MERRA' : grib_file += 'merra-%s-%s.nc4' % (d, hour)\n elif grib_source == 'MERRA1': grib_file += 'merra-%s-%s.hdf' % (d, hour)\n grib_file_list.append(grib_file)\n return grib_file_list\n\n\ndef dload_grib(date_list, hour, grib_source='ECMWF', weather_dir='./'):\n '''Download weather re-analysis grib files using PyAPS\n Inputs:\n date_list : list of string in YYYYMMDD format\n hour : string in HH:MM or HH format\n grib_source : string, \n weather_dir : string,\n Output:\n grib_file_list : list of string\n '''\n ## Grib data directory\n weather_dir = os.path.abspath(weather_dir)\n grib_dir = weather_dir+'/'+grib_source\n if not os.path.isdir(grib_dir):\n print 'making directory: '+grib_dir\n os.makedirs(grib_dir)\n\n ## Date list to grib file list\n grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)\n\n ## Get date list to download (skip already downloaded files)\n grib_file_existed = ut.get_file_list(grib_file_list)\n if grib_file_existed:\n grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])\n grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])\n grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\\\n 
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]\n print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)\n print 'number of grib files existed : %d' % len(grib_file_existed)\n if grib_file_corrupted:\n print '------------------------------------------------------------------------------'\n print 'corrupted grib files detected! Delete them and re-download...'\n print 'number of grib files corrupted : %d' % len(grib_file_corrupted)\n for i in grib_file_corrupted:\n rmCmd = 'rm '+i\n print rmCmd\n os.system(rmCmd)\n grib_file_existed.remove(i)\n print '------------------------------------------------------------------------------'\n grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))\n date_list2download = [str(re.findall('\\d{8}', i)[0]) for i in grib_file2download]\n print 'number of grib files to download: %d' % len(date_list2download)\n print '------------------------------------------------------------------------------\\n'\n\n ## Download grib file using PyAPS\n if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)\n elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)\n elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)\n elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)\n elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)\n\n return grib_file_existed\n\n\n###############################################################\nEXAMPLE='''example:\n tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5\n tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER\n tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt\n\n tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR\n tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23\n tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5\n\n tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download\n tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download\n'''\n\nREFERENCE='''reference:\n Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric\n phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,\n doi:10.1029/2011GL048757\n'''\n\nTEMPLATE='''\n## 7. Tropospheric Delay Correction (optional and recommended)\n## correct tropospheric delay using the following methods:\n## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)\n## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)\n## c. 
base_trop_cor - (not recommend) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)\npysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps\npysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method\npysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method\npysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram \n'''\n\nDATA_INFO='''\n re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis\n------------------------------------------------------------------------------------------------------------\nERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var\nMERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var\n\nTo download MERRA2, you need an Earthdata account, and pre-authorize the \"NASA GESDISC DATA ARCHIVE\" application, following https://disc.gsfc.nasa.gov/earthdata-login.\n'''\n\n\ndef cmdLineParse():\n parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\\n'+\\\n ' PyAPS is used to download and calculate the delay for each time-series epoch.',\\\n formatter_class=argparse.RawTextHelpFormatter,\\\n epilog=REFERENCE+'\\n'+DATA_INFO+'\\n'+EXAMPLE)\n\n parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')\n parser.add_argument('-d','--dem', dest='dem_file',\\\n help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')\n parser.add_argument('-i', dest='inc_angle', default='30',\\\n help='a file containing all incidence angles, or a number representing for the whole image.')\n parser.add_argument('--weather-dir', dest='weather_dir', \\\n help='directory to put downloaded weather data, i.e. ./../WEATHER\\n'+\\\n 'use directory of input timeseries_file if not specified.')\n parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\\\n help='Delay type to calculate, comb contains both wet and dry delays')\n parser.add_argument('--download', action='store_true', help='Download weather data only.')\n parser.add_argument('--date-list', dest='date_list_file',\\\n help='Read the first column of text file as list of date to download data\\n'+\\\n 'in YYYYMMDD or YYMMDD format')\n parser.add_argument('--ref-yx', dest='ref_yx', type=int, nargs=2, help='reference pixel in y/x')\n\n parser.add_argument('-s', dest='weather_model',\\\n default='ECMWF', choices={'ECMWF','ERA-Interim','ERA','MERRA','MERRA1','NARR'},\\\n help='source of the atmospheric data.\\n'+\\\n 'By the time of 2018-Mar-06, ERA and ECMWF data download link is working.\\n'+\\\n 'NARR is working for 1979-Jan to 2014-Oct.\\n'+\\\n 'MERRA(2) is not working.')\n parser.add_argument('--hour', help='time of data in HH, e.g. 
12, 06')\n\n parser.add_argument('--template', dest='template_file',\\\n help='template file with input options below:\\n'+TEMPLATE)\n parser.add_argument('-o', dest='out_file', help='Output file name for trospheric corrected timeseries.')\n\n inps = parser.parse_args()\n\n # Calculate DELAY or DOWNLOAD DATA ONLY, required one of them\n if not inps.download and not inps.dem_file and ( not inps.timeseries_file or not inps.date_list_file ):\n parser.print_help()\n sys.exit(1)\n return inps\n\n\n###############################################################\ndef main(argv):\n inps = cmdLineParse()\n\n k = None\n atr = dict()\n if inps.timeseries_file:\n inps.timeseries_file = ut.get_file_list([inps.timeseries_file])[0]\n atr = readfile.read_attribute(inps.timeseries_file)\n k = atr['FILE_TYPE']\n elif inps.dem_file:\n inps.dem_file = ut.get_file_list([inps.dem_file])[0]\n atr = readfile.read_attribute(inps.dem_file)\n if 'ref_y' not in atr.keys() and inps.ref_yx:\n print 'No reference info found in input file, use input ref_yx: '+str(inps.ref_yx)\n atr['ref_y'] = inps.ref_yx[0]\n atr['ref_x'] = inps.ref_yx[1]\n\n ##Read Incidence angle: to map the zenith delay to the slant delay\n if os.path.isfile(inps.inc_angle):\n inps.inc_angle = readfile.read(inps.inc_angle, epoch='incidenceAngle')[0]\n else:\n inps.inc_angle = float(inps.inc_angle)\n print 'incidence angle: '+str(inps.inc_angle)\n inps.inc_angle = inps.inc_angle*np.pi/180.0\n\n ##Prepare DEM file in ROI_PAC format for PyAPS to read\n if inps.dem_file:\n inps.dem_file = ut.get_file_list([inps.dem_file])[0]\n if os.path.splitext(inps.dem_file)[1] in ['.h5']:\n print 'convert DEM file to ROIPAC format'\n dem, atr_dem = readfile.read(inps.dem_file, epoch='height')\n if 'Y_FIRST' in atr.keys():\n atr_dem['FILE_TYPE'] = '.dem'\n else:\n atr_dem['FILE_TYPE'] = '.hgt'\n outname = os.path.splitext(inps.dem_file)[0]+'4pyaps'+atr_dem['FILE_TYPE']\n inps.dem_file = writefile.write(dem, atr_dem, outname)\n\n print '*******************************************************************************'\n print 'Downloading weather model data ...'\n\n ## Get Grib Source\n if inps.weather_model in ['ECMWF','ERA-Interim']: inps.grib_source = 'ECMWF'\n elif inps.weather_model == 'ERA' : inps.grib_source = 'ERA'\n elif inps.weather_model == 'MERRA': inps.grib_source = 'MERRA'\n elif inps.weather_model == 'NARR' : inps.grib_source = 'NARR'\n else: raise Reception('Unrecognized weather model: '+inps.weather_model)\n print 'grib source: '+inps.grib_source\n\n # Get weather directory\n if not inps.weather_dir:\n if inps.timeseries_file:\n inps.weather_dir = os.path.dirname(os.path.abspath(inps.timeseries_file))+'/../WEATHER'\n elif inps.dem_file:\n inps.weather_dir = os.path.dirname(os.path.abspath(inps.dem_file))+'/../WEATHER'\n else:\n inps.weather_dir = os.path.abspath(os.getcwd())\n print 'Store weather data into directory: '+inps.weather_dir\n\n # Get date list to download\n if not inps.date_list_file:\n print 'read date list info from: '+inps.timeseries_file\n h5 = h5py.File(inps.timeseries_file, 'r')\n if 'timeseries' in h5.keys():\n date_list = sorted(h5[k].keys())\n elif k in ['interferograms','coherence','wrapped']:\n ifgram_list = sorted(h5[k].keys())\n date12_list = ptime.list_ifgram2date12(ifgram_list)\n m_dates = [i.split('-')[0] for i in date12_list]\n s_dates = [i.split('-')[1] for i in date12_list]\n date_list = ptime.yyyymmdd(sorted(list(set(m_dates + s_dates))))\n else:\n raise ValueError('Un-support input file type:'+k)\n h5.close()\n else:\n 
date_list = ptime.yyyymmdd(np.loadtxt(inps.date_list_file, dtype=str, usecols=(0,)).tolist())\n print 'read date list info from: '+inps.date_list_file\n\n # Get Acquisition time - hour\n if not inps.hour:\n inps.hour = ptime.closest_weather_product_time(atr['CENTER_LINE_UTC'], inps.grib_source)\n print 'Time of cloest available product: '+inps.hour\n\n ## Download data using PyAPS\n inps.grib_file_list = dload_grib(date_list, inps.hour, inps.weather_model, inps.weather_dir)\n\n if inps.download:\n print 'Download completed, exit as planned.'\n return\n\n print '*******************************************************************************'\n print 'Calcualting delay for each epoch.'\n\n ## Calculate tropo delay using pyaps\n length = int(atr['FILE_LENGTH'])\n width = int(atr['WIDTH'])\n date_num = len(date_list)\n trop_ts = np.zeros((date_num, length, width), np.float32)\n for i in range(date_num):\n grib_file = inps.grib_file_list[i] \n date = date_list[i]\n print 'calculate phase delay on %s from file %s' % (date, os.path.basename(grib_file))\n trop_ts[i] = get_delay(grib_file, atr, vars(inps))\n\n ## Convert relative phase delay on reference date\n try: ref_date = atr['ref_date']\n except: ref_date = date_list[0]\n print 'convert to relative phase delay with reference date: '+ref_date\n ref_idx = date_list.index(ref_date)\n trop_ts -= np.tile(trop_ts[ref_idx,:,:], (date_num, 1, 1))\n\n ## Write tropospheric delay to HDF5\n tropFile = inps.grib_source+'.h5'\n print 'writing >>> %s' % (tropFile)\n h5trop = h5py.File(tropFile, 'w')\n group_trop = h5trop.create_group('timeseries')\n print 'number of acquisitions: '+str(date_num)\n prog_bar = ptime.progress_bar(maxValue=date_num)\n for i in range(date_num):\n date = date_list[i]\n group_trop.create_dataset(date, data=trop_ts[i], compression='gzip')\n prog_bar.update(i+1, suffix=date)\n prog_bar.close()\n # Write Attributes\n for key,value in atr.iteritems():\n group_trop.attrs[key] = value\n h5trop.close()\n\n ## Write corrected Time series to HDF5\n if k == 'timeseries':\n if not inps.out_file:\n inps.out_file = os.path.splitext(inps.timeseries_file)[0]+'_'+inps.grib_source+'.h5'\n print 'writing >>> %s' % (inps.out_file)\n h5ts = h5py.File(inps.timeseries_file, 'r')\n h5tsCor = h5py.File(inps.out_file, 'w') \n group_tsCor = h5tsCor.create_group('timeseries')\n print 'number of acquisitions: '+str(date_num)\n prog_bar = ptime.progress_bar(maxValue=date_num)\n for i in range(date_num):\n date = date_list[i]\n ts = h5ts['timeseries'].get(date)[:]\n group_tsCor.create_dataset(date, data=ts-trop_ts[i], compression='gzip')\n prog_bar.update(i+1, suffix=date)\n prog_bar.close()\n h5ts.close()\n # Write Attributes\n for key,value in atr.iteritems():\n group_tsCor.attrs[key] = value\n h5tsCor.close()\n\n # Delete temporary DEM file in ROI_PAC format\n if '4pyaps' in inps.dem_file:\n rmCmd = 'rm %s %s.rsc' % (inps.dem_file, inps.dem_file)\n print rmCmd\n os.system(rmCmd)\n print 'Done.'\n return inps.out_file\n\n\n###############################################################\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def phi(n):
    # Euler's totient via trial division: start from n and, for each distinct
    # prime factor d, multiply by (1 - 1/d), i.e. subtract p // d.
    r = n
    d = 2
    p = n
    while r > 1:
        if r % d == 0:
            p -= p // d
            while r % d == 0:
                r //= d
        d += 1
    return p
m = (0, 1)
for n in range(2, 1000000):
p = phi(n)
m = max(m, (n/p, n))
if n % 10000 == 0:
print(n)
print(m)
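# A hedged aside, not part of the original solution: when phi is needed for every n up to a
# limit (as in the search above), a sieve is much faster than per-n trial division, because
# each prime p only touches about limit/p entries in total. The name phi_sieve is illustrative.
def phi_sieve(limit):
    # totients[n] starts at n and is reduced by totients[n] // p once per distinct prime p | n
    totients = list(range(limit))
    for p in range(2, limit):
        if totients[p] == p:  # still untouched => p is prime
            for multiple in range(p, limit, p):
                totients[multiple] -= totients[multiple] // p
    return totients
# Example use (same search as above):
#   best = max((n / t, n) for n, t in enumerate(phi_sieve(1000000)) if n >= 2)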
|
normal
|
{
"blob_id": "e4f97018567559fc2714b75654974fb7c51f770f",
"index": 5266,
"step-1": "<mask token>\n",
"step-2": "def phi(n):\n r = n\n d = 2\n p = n\n while r > 1:\n if r % d == 0:\n p -= int(r / d)\n while r % d == 0:\n r = int(r / d)\n d += 1\n return p\n\n\n<mask token>\n",
"step-3": "def phi(n):\n r = n\n d = 2\n p = n\n while r > 1:\n if r % d == 0:\n p -= int(r / d)\n while r % d == 0:\n r = int(r / d)\n d += 1\n return p\n\n\n<mask token>\nfor n in range(2, 1000000):\n p = phi(n)\n m = max(m, (n / p, n))\n if n % 10000 == 0:\n print(n)\nprint(m)\n",
"step-4": "def phi(n):\n r = n\n d = 2\n p = n\n while r > 1:\n if r % d == 0:\n p -= int(r / d)\n while r % d == 0:\n r = int(r / d)\n d += 1\n return p\n\n\nm = 0, 1\nfor n in range(2, 1000000):\n p = phi(n)\n m = max(m, (n / p, n))\n if n % 10000 == 0:\n print(n)\nprint(m)\n",
"step-5": "def phi(n):\n r = n\n d = 2\n p = n\n while r > 1:\n if r % d == 0:\n p -= int(r/d)\n while r % d == 0:\n r = int(r/d)\n d += 1\n return p\n\nm = (0, 1)\nfor n in range(2, 1000000):\n p = phi(n)\n m = max(m, (n/p, n))\n if n % 10000 == 0:\n print(n)\n\nprint(m)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 27 18:34:40 2017
@author: Peiyong Jiang :[email protected]
Wangsheng Wang : [email protected]
Chi Feng : [email protected]
supervised by
Zhijun Wang & Yuan He
"""
import os
from win32com.client import Dispatch
folderDealTmp=input('Please input the absolute path of the parent folder:\n')
folderDeal=folderDealTmp.replace('\\','\\\\')
def GetPage5Docx(fileNameWithPath):
    #open Word and keep a separate handle on the document so it can be closed afterwards
    word = Dispatch('Word.Application')
    word.Visible = False
    doc = word.Documents.Open(fileNameWithPath)
    #get the page count (2 == wdStatisticPages)
    doc.Repaginate()
    num_of_pages = doc.ComputeStatistics(2)
    doc.Close(False)
    return num_of_pages
def GetPage5PPT(fileNameWithPath):
Application = Dispatch("PowerPoint.Application")
Presentation = Application.Presentations.Open(fileNameWithPath, WithWindow=False)
slide_count = len(Presentation.Slides)
Presentation.Close()
return slide_count
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile=root+'\\Counter.txt'
with open(StatisticFile,'w') as fid:
pass
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile=root+'\\Counter.txt'
with open(StatisticFile,'a+') as fid:
pagesTotal=0
for name in files:
nameFile=os.path.join(root, name)
mainFile,appdFile=os.path.splitext(nameFile)
mainFolder,fullFile=os.path.split(nameFile)
if (appdFile=='.docx') and (fullFile[0:2]!='~$'):
pagesThis=GetPage5Docx(nameFile)
fid.writelines(fullFile+' '+str(pagesThis)+'\n')
pagesTotal+=pagesThis
fid.writelines('All Docx files in this folder have the pages: '+str(pagesTotal)+'\n\n\n\n\n\n')
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile=root+'\\Counter.txt'
with open(StatisticFile,'a+') as fid:
pagesTotal=0
for name in files:
nameFile=os.path.join(root, name)
mainFile,appdFile=os.path.splitext(nameFile)
mainFolder,fullFile=os.path.split(nameFile)
if ((appdFile=='.pptx') or (appdFile=='.ppt')) and (fullFile[0:2]!='~$'):
pagesThis=GetPage5PPT(nameFile)
fid.writelines(fullFile+' '+str(pagesThis)+'\n')
pagesTotal+=pagesThis
fid.writelines('All PPT/PPTX files in this folder have the pages: '+str(pagesTotal)+'\n\n\n\n\n\n')
print('Done. Please check it!')
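# Hedged sketch, not part of the original script: the literal 2 passed to ComputeStatistics
# above is Word's wdStatisticPages constant. With gencache.EnsureDispatch the named constants
# become available via win32com.client.constants, and try/finally guarantees the document is
# closed even if counting fails. The function name below is illustrative.
def CountDocxPagesSafely(fileNameWithPath):
    from win32com.client import gencache, constants
    word = gencache.EnsureDispatch('Word.Application')
    word.Visible = False
    doc = word.Documents.Open(fileNameWithPath)
    try:
        doc.Repaginate()
        return doc.ComputeStatistics(constants.wdStatisticPages)
    finally:
        doc.Close(False)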
|
normal
|
{
"blob_id": "67f09cd8b41c7a4fe457766dfed916aaf71cc20d",
"index": 9489,
"step-1": "<mask token>\n\n\ndef GetPage5Docx(fileNameWithPath):\n word = Dispatch('Word.Application')\n word.Visible = False\n word = word.Documents.Open(fileNameWithPath)\n word.Repaginate()\n num_of_sheets = word.ComputeStatistics(2)\n return num_of_sheets\n\n\ndef GetPage5PPT(fileNameWithPath):\n Application = Dispatch('PowerPoint.Application')\n Presentation = Application.Presentations.Open(fileNameWithPath,\n WithWindow=False)\n slide_count = len(Presentation.Slides)\n Presentation.Close()\n return slide_count\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef GetPage5Docx(fileNameWithPath):\n word = Dispatch('Word.Application')\n word.Visible = False\n word = word.Documents.Open(fileNameWithPath)\n word.Repaginate()\n num_of_sheets = word.ComputeStatistics(2)\n return num_of_sheets\n\n\ndef GetPage5PPT(fileNameWithPath):\n Application = Dispatch('PowerPoint.Application')\n Presentation = Application.Presentations.Open(fileNameWithPath,\n WithWindow=False)\n slide_count = len(Presentation.Slides)\n Presentation.Close()\n return slide_count\n\n\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'w') as fid:\n pass\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if appdFile == '.docx' and fullFile[0:2] != '~$':\n pagesThis = GetPage5Docx(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines('All Docx files in this folder have the pages: ' +\n str(pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if (appdFile == '.pptx' or appdFile == '.ppt') and fullFile[0:2\n ] != '~$':\n pagesThis = GetPage5PPT(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines(\n 'All PPT/PPTX files in this folder have the pages: ' + str(\n pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nprint('Done. Please check it!')\n",
"step-3": "<mask token>\nfolderDealTmp = input('Please input the absolute path of the father-folder:\\n')\nfolderDeal = folderDealTmp.replace('\\\\', '\\\\\\\\')\n\n\ndef GetPage5Docx(fileNameWithPath):\n word = Dispatch('Word.Application')\n word.Visible = False\n word = word.Documents.Open(fileNameWithPath)\n word.Repaginate()\n num_of_sheets = word.ComputeStatistics(2)\n return num_of_sheets\n\n\ndef GetPage5PPT(fileNameWithPath):\n Application = Dispatch('PowerPoint.Application')\n Presentation = Application.Presentations.Open(fileNameWithPath,\n WithWindow=False)\n slide_count = len(Presentation.Slides)\n Presentation.Close()\n return slide_count\n\n\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'w') as fid:\n pass\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if appdFile == '.docx' and fullFile[0:2] != '~$':\n pagesThis = GetPage5Docx(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines('All Docx files in this folder have the pages: ' +\n str(pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if (appdFile == '.pptx' or appdFile == '.ppt') and fullFile[0:2\n ] != '~$':\n pagesThis = GetPage5PPT(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines(\n 'All PPT/PPTX files in this folder have the pages: ' + str(\n pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nprint('Done. Please check it!')\n",
"step-4": "<mask token>\nimport os\nfrom win32com.client import Dispatch\nfolderDealTmp = input('Please input the absolute path of the father-folder:\\n')\nfolderDeal = folderDealTmp.replace('\\\\', '\\\\\\\\')\n\n\ndef GetPage5Docx(fileNameWithPath):\n word = Dispatch('Word.Application')\n word.Visible = False\n word = word.Documents.Open(fileNameWithPath)\n word.Repaginate()\n num_of_sheets = word.ComputeStatistics(2)\n return num_of_sheets\n\n\ndef GetPage5PPT(fileNameWithPath):\n Application = Dispatch('PowerPoint.Application')\n Presentation = Application.Presentations.Open(fileNameWithPath,\n WithWindow=False)\n slide_count = len(Presentation.Slides)\n Presentation.Close()\n return slide_count\n\n\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'w') as fid:\n pass\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if appdFile == '.docx' and fullFile[0:2] != '~$':\n pagesThis = GetPage5Docx(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines('All Docx files in this folder have the pages: ' +\n str(pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if (appdFile == '.pptx' or appdFile == '.ppt') and fullFile[0:2\n ] != '~$':\n pagesThis = GetPage5PPT(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines(\n 'All PPT/PPTX files in this folder have the pages: ' + str(\n pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nprint('Done. Please check it!')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 27 18:34:40 2017\r\n\r\n@author: Peiyong Jiang :[email protected]\r\n Wangsheng Wang : [email protected]\r\n Chi Feng : [email protected]\r\n \r\n supervised by\r\n Zhijun Wang & Yuan He\r\n\r\n \r\n\"\"\"\r\n\r\nimport os\r\nfrom win32com.client import Dispatch\r\n\r\n\r\nfolderDealTmp=input('Please input the absolute path of the father-folder:\\n')\r\n\r\nfolderDeal=folderDealTmp.replace('\\\\','\\\\\\\\')\r\n\r\ndef GetPage5Docx(fileNameWithPath):\r\n #open Word\r\n word = Dispatch('Word.Application')\r\n word.Visible = False\r\n word = word.Documents.Open(fileNameWithPath)\r\n \r\n #get number of sheets\r\n word.Repaginate()\r\n num_of_sheets = word.ComputeStatistics(2)\r\n \r\n return num_of_sheets\r\n\r\ndef GetPage5PPT(fileNameWithPath):\r\n Application = Dispatch(\"PowerPoint.Application\")\r\n Presentation = Application.Presentations.Open(fileNameWithPath, WithWindow=False)\r\n slide_count = len(Presentation.Slides)\r\n Presentation.Close()\r\n return slide_count\r\n\r\n\r\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\r\n StatisticFile=root+'\\\\Counter.txt'\r\n with open(StatisticFile,'w') as fid:\r\n pass\r\n\r\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\r\n StatisticFile=root+'\\\\Counter.txt'\r\n with open(StatisticFile,'a+') as fid:\r\n pagesTotal=0\r\n for name in files:\r\n nameFile=os.path.join(root, name)\r\n \r\n mainFile,appdFile=os.path.splitext(nameFile)\r\n mainFolder,fullFile=os.path.split(nameFile)\r\n if (appdFile=='.docx') and (fullFile[0:2]!='~$'): \r\n pagesThis=GetPage5Docx(nameFile)\r\n fid.writelines(fullFile+' '+str(pagesThis)+'\\n')\r\n pagesTotal+=pagesThis\r\n \r\n \r\n fid.writelines('All Docx files in this folder have the pages: '+str(pagesTotal)+'\\n\\n\\n\\n\\n\\n')\r\n \r\n\r\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\r\n \r\n StatisticFile=root+'\\\\Counter.txt'\r\n with open(StatisticFile,'a+') as fid:\r\n pagesTotal=0\r\n for name in files:\r\n nameFile=os.path.join(root, name)\r\n \r\n mainFile,appdFile=os.path.splitext(nameFile)\r\n mainFolder,fullFile=os.path.split(nameFile)\r\n if ((appdFile=='.pptx') or (appdFile=='.ppt')) and (fullFile[0:2]!='~$'): \r\n pagesThis=GetPage5PPT(nameFile)\r\n fid.writelines(fullFile+' '+str(pagesThis)+'\\n')\r\n pagesTotal+=pagesThis\r\n \r\n \r\n fid.writelines('All PPT/PPTX files in this folder have the pages: '+str(pagesTotal)+'\\n\\n\\n\\n\\n\\n')\r\n\r\n\r\n\r\nprint('Done. Please check it!')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, uuid, re, sys
from decimal import Decimal
from datetime import date, time, datetime
from functools import lru_cache
from typing import Iterator
import pyodbc, pytest
# WARNING: Wow Microsoft always manages to do the stupidest thing possible always trying to be
# smarter than everyone. I have worked with their APIs since before "OLE" and they have always
# been a nanny state. They won't read the UID and PWD from odbc.ini because it isn't secure.
# Really? Less secure than what? The next hack someone is going to use. Do the straightforward
# thing and explain how to secure it. It isn't their business how I deploy and secure.
#
# For every other DB we use a single default DSN but you can pass your own via an environment
# variable. For SS, we can't just use a default DSN unless you want to go trusted. (Which is
# more secure? No.) It'll be put into .bashrc most likely. Way to go. Now I'll go rename
# all of the others to DB specific names instead of PYODBC_CNXNSTR. Hot garbage as usual.
CNXNSTR = os.environ.get('PYODBC_SQLSERVER', 'DSN=pyodbc-sqlserver')
def connect(autocommit=False, attrs_before=None):
return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before)
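# Illustrative only (placeholder values, not part of the test suite): PYODBC_SQLSERVER may be
# a DSN reference or a full connection string, e.g.
#   export PYODBC_SQLSERVER='DSN=pyodbc-sqlserver;UID=test;PWD=secret'
#   export PYODBC_SQLSERVER='Driver={ODBC Driver 18 for SQL Server};Server=localhost,1433;Database=test;UID=test;PWD=secret;TrustServerCertificate=yes'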
DRIVER = connect().getinfo(pyodbc.SQL_DRIVER_NAME)
IS_FREEDTS = bool(re.search('tsodbc', DRIVER, flags=re.IGNORECASE))
IS_MSODBCSQL = bool(re.search(r'(msodbcsql|sqlncli|sqlsrv32\.dll)', DRIVER, re.IGNORECASE))
def _get_sqlserver_year():
"""
Returns the release year of the current version of SQL Server, used to skip tests for
features that are not supported. If the current DB is not SQL Server, 0 is returned.
"""
# We used to use the major version, but most documentation on the web refers to the year
# (e.g. SQL Server 2019) so we'll use that for skipping tests that do not apply.
if not IS_MSODBCSQL:
return 0
cnxn = connect()
cursor = cnxn.cursor()
row = cursor.execute("exec master..xp_msver 'ProductVersion'").fetchone()
major = row.Character_Value.split('.', 1)[0]
return {
# https://sqlserverbuilds.blogspot.com/
'8': 2000, '9': 2005, '10': 2008, '11': 2012, '12': 2014,
'13': 2016, '14': 2017, '15': 2019, '16': 2022
}[major]
SQLSERVER_YEAR = _get_sqlserver_year()
@pytest.fixture()
def cursor() -> Iterator[pyodbc.Cursor]:
cnxn = connect()
cur = cnxn.cursor()
cur.execute("drop table if exists t1")
cur.execute("drop table if exists t2")
cur.execute("drop table if exists t3")
cnxn.commit()
yield cur
if not cnxn.closed:
cur.close()
cnxn.close()
def test_text(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'text')
def test_varchar(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'varchar')
def test_nvarchar(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'nvarchar')
def test_varbinary(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'varbinary')
@pytest.mark.skipif(SQLSERVER_YEAR < 2005, reason='(max) not supported until 2005')
def test_unicode_longmax(cursor: pyodbc.Cursor):
# Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes
cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))")
def test_char(cursor: pyodbc.Cursor):
value = "testing"
cursor.execute("create table t1(s char(7))")
cursor.execute("insert into t1 values(?)", "testing")
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def test_int(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])
def test_bigint(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, 0x7FFFFFFF, 0xFFFFFFFF,
0x123456789])
def test_overflow_int(cursor: pyodbc.Cursor):
# python allows integers of any size, bigger than an 8 byte int can contain
input = 9999999999999999999999999999999999999
cursor.execute("create table t1(d bigint)")
with pytest.raises(OverflowError):
cursor.execute("insert into t1 values (?)", input)
result = cursor.execute("select * from t1").fetchall()
assert result == []
def test_float(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, .00012345])
def test_non_numeric_float(cursor: pyodbc.Cursor):
cursor.execute("create table t1(d float)")
for input in (float('+Infinity'), float('-Infinity'), float('NaN')):
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute("insert into t1 values (?)", input)
def test_drivers():
p = pyodbc.drivers()
assert isinstance(p, list)
def test_datasources():
p = pyodbc.dataSources()
assert isinstance(p, dict)
def test_getinfo_string():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)
assert isinstance(value, str)
def test_getinfo_bool():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
assert isinstance(value, bool)
def test_getinfo_int():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
assert isinstance(value, int)
def test_getinfo_smallint():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
assert isinstance(value, int)
def test_no_fetch(cursor: pyodbc.Cursor):
# Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without
# fetches seem to confuse the driver.
cursor.execute('select 1')
cursor.execute('select 1')
cursor.execute('select 1')
def test_decode_meta(cursor: pyodbc.Cursor):
"""
Ensure column names with non-ASCII characters are converted using the configured encodings.
"""
# This is from GitHub issue #190
cursor.execute("create table t1(a int)")
cursor.execute("insert into t1 values (1)")
cursor.execute('select a as "Tipología" from t1')
assert cursor.description[0][0] == "Tipología"
def test_exc_integrity(cursor: pyodbc.Cursor):
"Make sure an IntegretyError is raised"
# This is really making sure we are properly encoding and comparing the SQLSTATEs.
cursor.execute("create table t1(s1 varchar(10) primary key)")
cursor.execute("insert into t1 values ('one')")
with pytest.raises(pyodbc.IntegrityError):
cursor.execute("insert into t1 values ('one')")
def test_multiple_bindings(cursor: pyodbc.Cursor):
"More than one bind and select on a cursor"
cursor.execute("create table t1(n int)")
cursor.execute("insert into t1 values (?)", 1)
cursor.execute("insert into t1 values (?)", 2)
cursor.execute("insert into t1 values (?)", 3)
for _ in range(3):
cursor.execute("select n from t1 where n < ?", 10)
cursor.execute("select n from t1 where n < 3")
def test_different_bindings(cursor: pyodbc.Cursor):
cursor.execute("create table t1(n int)")
cursor.execute("create table t2(d datetime)")
cursor.execute("insert into t1 values (?)", 1)
cursor.execute("insert into t2 values (?)", datetime.now())
SMALL_FENCEPOST_SIZES = [None, 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000]
LARGE_FENCEPOST_SIZES = SMALL_FENCEPOST_SIZES + [4095, 4096, 4097, 10 * 1024, 20 * 1024]
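# These lengths deliberately straddle common buffer boundaries (255/256, 511/512, 1K, 2K, 4K, ...)
# so that off-by-one "fencepost" bugs in the driver's or pyodbc's buffering show up.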
def _test_vartype(cursor: pyodbc.Cursor, datatype):
if datatype == 'text':
lengths = LARGE_FENCEPOST_SIZES
else:
lengths = SMALL_FENCEPOST_SIZES
if datatype == 'text':
cursor.execute(f"create table t1(c1 {datatype})")
else:
maxlen = lengths[-1]
cursor.execute(f"create table t1(c1 {datatype}({maxlen}))")
for length in lengths:
cursor.execute("delete from t1")
encoding = (datatype in ('blob', 'varbinary')) and 'utf8' or None
value = _generate_str(length, encoding=encoding)
try:
cursor.execute("insert into t1 values(?)", value)
except pyodbc.Error as ex:
msg = f'{datatype} insert failed: length={length} len={len(value)}'
raise Exception(msg) from ex
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def _test_scalar(cursor: pyodbc.Cursor, datatype, values):
"""
A simple test wrapper for types that are identical when written and read.
"""
cursor.execute(f"create table t1(c1 {datatype})")
for value in values:
cursor.execute("delete from t1")
cursor.execute("insert into t1 values (?)", value)
v = cursor.execute("select c1 from t1").fetchone()[0]
assert v == value
def test_noscan(cursor: pyodbc.Cursor):
assert cursor.noscan is False
cursor.noscan = True
assert cursor.noscan is True
def test_nonnative_uuid(cursor: pyodbc.Cursor):
# The default is False meaning we should return a string. Note that
# SQL Server seems to always return uppercase.
value = uuid.uuid4()
cursor.execute("create table t1(n uniqueidentifier)")
cursor.execute("insert into t1 values (?)", value)
pyodbc.native_uuid = False
result = cursor.execute("select n from t1").fetchval()
assert isinstance(result, str)
assert result == str(value).upper()
pyodbc.native_uuid = True
def test_native_uuid(cursor: pyodbc.Cursor):
# When true, we should return a uuid.UUID object.
value = uuid.uuid4()
cursor.execute("create table t1(n uniqueidentifier)")
cursor.execute("insert into t1 values (?)", value)
pyodbc.native_uuid = True
result = cursor.execute("select n from t1").fetchval()
assert isinstance(result, uuid.UUID)
assert value == result
def test_nextset(cursor: pyodbc.Cursor):
cursor.execute("create table t1(i int)")
for i in range(4):
cursor.execute("insert into t1(i) values(?)", i)
cursor.execute(
"""
select i from t1 where i < 2 order by i;
select i from t1 where i >= 2 order by i
""")
for i, row in enumerate(cursor):
assert i == row.i
assert cursor.nextset()
for i, row in enumerate(cursor):
assert i + 2 == row.i
@pytest.mark.skipif(IS_FREEDTS, reason='https://github.com/FreeTDS/freetds/issues/230')
def test_nextset_with_raiserror(cursor: pyodbc.Cursor):
cursor.execute("select i = 1; RAISERROR('c', 16, 1);")
row = next(cursor)
assert 1 == row.i
with pytest.raises(pyodbc.ProgrammingError):
cursor.nextset()
def test_fixed_unicode(cursor: pyodbc.Cursor):
value = "t\xebsting"
cursor.execute("create table t1(s nchar(7))")
cursor.execute("insert into t1 values(?)", "t\xebsting")
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert len(v) == len(value)
# If we alloc'd wrong, the test below might work because of an embedded NULL
assert v == value
def test_chinese(cursor: pyodbc.Cursor):
v = '我的'
cursor.execute("SELECT N'我的' AS [Name]")
row = cursor.fetchone()
assert row[0] == v
cursor.execute("SELECT N'我的' AS [Name]")
rows = cursor.fetchall()
assert rows[0][0] == v
def test_bit(cursor: pyodbc.Cursor):
value = True
cursor.execute("create table t1(b bit)")
cursor.execute("insert into t1 values (?)", value)
v = cursor.execute("select b from t1").fetchone()[0]
assert isinstance(v, bool)
assert v == value
def test_decimal(cursor: pyodbc.Cursor):
# From test provided by planders (thanks!) in Issue 91
for (precision, scale, negative) in [
(1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True),
(6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True),
(38, 10, True), (38, 38, True)]:
try:
cursor.execute("drop table t1")
except:
pass
cursor.execute(f"create table t1(d decimal({precision}, {scale}))")
# Construct a decimal that uses the maximum precision and scale.
sign = negative and '-' or ''
before = '9' * (precision - scale)
after = scale and ('.' + '9' * scale) or ''
decStr = f'{sign}{before}{after}'
value = Decimal(decStr)
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select d from t1").fetchone()[0]
assert v == value
def test_decimal_e(cursor: pyodbc.Cursor):
"""Ensure exponential notation decimals are properly handled"""
value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7
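    # tuple form is (sign, digits, exponent): 123 * 10**5 == 12300000 == Decimal('1.23E+7')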
cursor.execute("create table t1(d decimal(10, 2))")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select * from t1").fetchone()[0]
assert result == value
def test_subquery_params(cursor: pyodbc.Cursor):
"""Ensure parameter markers work in a subquery"""
cursor.execute("create table t1(id integer, s varchar(20))")
cursor.execute("insert into t1 values (?,?)", 1, 'test')
row = cursor.execute("""
select x.id
from (
select id
from t1
where s = ?
and id between ? and ?
) x
""", 'test', 1, 10).fetchone()
assert row is not None
assert row[0] == 1
def test_close_cnxn():
"""Make sure using a Cursor after closing its connection doesn't crash."""
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("drop table if exists t1")
cursor.execute("create table t1(id integer, s varchar(20))")
cursor.execute("insert into t1 values (?,?)", 1, 'test')
cursor.execute("select * from t1")
cnxn.close()
# Now that the connection is closed, we expect an exception. (If the code attempts to use
# the HSTMT, we'll get an access violation instead.)
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute("select * from t1")
def test_empty_string(cursor: pyodbc.Cursor):
cursor.execute("create table t1(s varchar(20))")
cursor.execute("insert into t1 values(?)", "")
def test_empty_string_encoding():
cnxn = connect()
cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
value = ""
cursor = cnxn.cursor()
cursor.execute("create table t1(s varchar(20))")
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def test_fixed_str(cursor: pyodbc.Cursor):
value = "testing"
cursor.execute("create table t1(s char(7))")
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert len(v) == len(value)
# If we alloc'd wrong, the test below might work because of an embedded NULL
assert v == value
def test_empty_unicode(cursor: pyodbc.Cursor):
cursor.execute("create table t1(s nvarchar(20))")
cursor.execute("insert into t1 values(?)", "")
def test_empty_unicode_encoding():
cnxn = connect()
cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
value = ""
cursor = cnxn.cursor()
cursor.execute("create table t1(s nvarchar(20))")
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def test_negative_row_index(cursor: pyodbc.Cursor):
cursor.execute("create table t1(s varchar(20))")
cursor.execute("insert into t1 values(?)", "1")
row = cursor.execute("select * from t1").fetchone()
assert row[0] == "1"
assert row[-1] == "1"
def test_version():
assert 3 == len(pyodbc.version.split('.')) # 1.3.1 etc.
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008,
reason='Date not supported until 2008?')
def test_date(cursor: pyodbc.Cursor):
value = date.today()
cursor.execute("create table t1(d date)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select d from t1").fetchone()[0]
assert isinstance(result, date)
assert value == result
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008,
reason='Time not supported until 2008?')
def test_time(cursor: pyodbc.Cursor):
value = datetime.now().time()
# We aren't yet writing values using the new extended time type so the value written to the
# database is only down to the second.
value = value.replace(microsecond=0)
cursor.execute("create table t1(t time)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select t from t1").fetchone()[0]
assert isinstance(result, time)
assert value == result
def test_datetime(cursor: pyodbc.Cursor):
value = datetime(2007, 1, 15, 3, 4, 5)
cursor.execute("create table t1(dt datetime)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_datetime_fraction(cursor: pyodbc.Cursor):
    # SQL Server's datetime stores milliseconds, but Python's datetime supports microseconds, so the most
# granular datetime supported is xxx000.
value = datetime(2007, 1, 15, 3, 4, 5, 123000)
cursor.execute("create table t1(dt datetime)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_datetime_fraction_rounded(cursor: pyodbc.Cursor):
    # SQL Server supports milliseconds, but Python's datetime supports microseconds. pyodbc
# rounds down to what the database supports.
full = datetime(2007, 1, 15, 3, 4, 5, 123456)
rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)
cursor.execute("create table t1(dt datetime)")
cursor.execute("insert into t1 values (?)", full)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert rounded == result
def test_datetime2(cursor: pyodbc.Cursor):
value = datetime(2007, 1, 15, 3, 4, 5)
cursor.execute("create table t1(dt datetime2)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_sp_results(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
select top 10 name, id, xtype, refdate
from sysobjects
""")
rows = cursor.execute("exec proc1").fetchall()
assert isinstance(rows, list)
assert len(rows) == 10 # there has to be at least 10 items in sysobjects
assert isinstance(rows[0].refdate, datetime)
def test_sp_results_from_temp(cursor: pyodbc.Cursor):
# Note: I've used "set nocount on" so that we don't get the number of rows deleted from
# #tmptable. If you don't do this, you'd need to call nextset() once to skip it.
cursor.execute(
"""
Create procedure proc1
AS
set nocount on
select top 10 name, id, xtype, refdate
into #tmptable
from sysobjects
select * from #tmptable
""")
cursor.execute("exec proc1")
assert cursor.description is not None
assert len(cursor.description) == 4
rows = cursor.fetchall()
assert isinstance(rows, list)
assert len(rows) == 10 # there has to be at least 10 items in sysobjects
assert isinstance(rows[0].refdate, datetime)
def test_sp_results_from_vartbl(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
set nocount on
declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)
insert into @tmptbl
select top 10 name, id, xtype, refdate
from sysobjects
select * from @tmptbl
""")
cursor.execute("exec proc1")
rows = cursor.fetchall()
assert isinstance(rows, list)
assert len(rows) == 10 # there has to be at least 10 items in sysobjects
assert isinstance(rows[0].refdate, datetime)
def test_sp_with_dates(cursor: pyodbc.Cursor):
# Reported in the forums that passing two datetimes to a stored procedure doesn't work.
cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
""")
cursor.execute(
"""
create procedure test_sp(@d1 datetime, @d2 datetime)
AS
declare @d as int
set @d = datediff(year, @d1, @d2)
select @d
""")
cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now())
rows = cursor.fetchall()
assert rows is not None
assert rows[0][0] == 0 # 0 years apart
def test_sp_with_none(cursor: pyodbc.Cursor):
# Reported in the forums that passing None caused an error.
cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
""")
cursor.execute(
"""
create procedure test_sp(@x varchar(20))
AS
declare @y varchar(20)
set @y = @x
select @y
""")
cursor.execute("exec test_sp ?", None)
rows = cursor.fetchall()
assert rows is not None
    assert rows[0][0] is None  # the None parameter round-trips as None
#
# rowcount
#
def test_rowcount_delete(cursor: pyodbc.Cursor):
assert cursor.rowcount == -1
cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
cursor.execute("insert into t1 values (?)", i)
cursor.execute("delete from t1")
assert cursor.rowcount == count
def test_rowcount_nodata(cursor: pyodbc.Cursor):
"""
This represents a different code path than a delete that deleted something.
The return value is SQL_NO_DATA and code after it was causing an error. We could use
SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
code. On the other hand, we could hardcode a zero return value.
"""
cursor.execute("create table t1(i int)")
# This is a different code path internally.
cursor.execute("delete from t1")
assert cursor.rowcount == 0
def test_rowcount_select(cursor: pyodbc.Cursor):
"""
Ensure Cursor.rowcount is set properly after a select statement.
pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005
returns -1 after a select statement, so we'll test for that behavior. This is valid
behavior according to the DB API specification, but people don't seem to like it.
"""
cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
cursor.execute("insert into t1 values (?)", i)
cursor.execute("select * from t1")
assert cursor.rowcount == -1
rows = cursor.fetchall()
assert len(rows) == count
assert cursor.rowcount == -1
def test_rowcount_reset(cursor: pyodbc.Cursor):
"Ensure rowcount is reset after DDL"
cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
cursor.execute("insert into t1 values (?)", i)
assert cursor.rowcount == 1
cursor.execute("create table t2(i int)")
ddl_rowcount = (0 if IS_FREEDTS else -1)
assert cursor.rowcount == ddl_rowcount
def test_retcursor_delete(cursor: pyodbc.Cursor):
cursor.execute("create table t1(i int)")
cursor.execute("insert into t1 values (1)")
v = cursor.execute("delete from t1")
assert v == cursor
def test_retcursor_nodata(cursor: pyodbc.Cursor):
"""
This represents a different code path than a delete that deleted something.
The return value is SQL_NO_DATA and code after it was causing an error. We could use
SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
code.
"""
cursor.execute("create table t1(i int)")
# This is a different code path internally.
v = cursor.execute("delete from t1")
assert v == cursor
def test_retcursor_select(cursor: pyodbc.Cursor):
cursor.execute("create table t1(i int)")
cursor.execute("insert into t1 values (1)")
v = cursor.execute("select * from t1")
assert v == cursor
def table_with_spaces(cursor: pyodbc.Cursor):
"Ensure we can select using [x z] syntax"
try:
cursor.execute("create table [test one](int n)")
cursor.execute("insert into [test one] values(1)")
cursor.execute("select * from [test one]")
v = cursor.fetchone()[0]
assert v == 1
finally:
cursor.rollback()
def test_lower_case():
"Ensure pyodbc.lowercase forces returned column names to lowercase."
try:
pyodbc.lowercase = True
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("create table t1(Abc int, dEf int)")
cursor.execute("select * from t1")
names = [t[0] for t in cursor.description]
names.sort()
assert names == ["abc", "def"]
finally:
# Put it back so other tests don't fail.
pyodbc.lowercase = False
def test_row_description(cursor: pyodbc.Cursor):
"""
Ensure Cursor.description is accessible as Row.cursor_description.
"""
cursor.execute("create table t1(a int, b char(3))")
cursor.execute("insert into t1 values(1, 'abc')")
row = cursor.execute("select * from t1").fetchone()
assert cursor.description == row.cursor_description
def test_temp_select(cursor: pyodbc.Cursor):
# A project was failing to create temporary tables via select into.
cursor.execute("create table t1(s char(7))")
cursor.execute("insert into t1 values(?)", "testing")
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert v == "testing"
cursor.execute("select s into t2 from t1")
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert v == "testing"
def test_executemany(cursor: pyodbc.Cursor):
cursor.execute("create table t1(a int, b varchar(10))")
params = [(i, str(i)) for i in range(1, 6)]
cursor.executemany("insert into t1(a, b) values (?,?)", params)
count = cursor.execute("select count(*) from t1").fetchone()[0]
assert count == len(params)
cursor.execute("select a, b from t1 order by a")
rows = cursor.fetchall()
assert count == len(rows)
for param, row in zip(params, rows):
assert param[0] == row[0]
assert param[1] == row[1]
def test_executemany_one(cursor: pyodbc.Cursor):
"Pass executemany a single sequence"
cursor.execute("create table t1(a int, b varchar(10))")
params = [(1, "test")]
cursor.executemany("insert into t1(a, b) values (?,?)", params)
count = cursor.execute("select count(*) from t1").fetchone()[0]
assert count == len(params)
cursor.execute("select a, b from t1 order by a")
rows = cursor.fetchall()
assert count == len(rows)
for param, row in zip(params, rows):
assert param[0] == row[0]
assert param[1] == row[1]
def test_executemany_dae_0(cursor: pyodbc.Cursor):
"""
DAE for 0-length value
"""
cursor.execute("create table t1(a nvarchar(max))")
cursor.fast_executemany = True
cursor.executemany("insert into t1(a) values(?)", [['']])
assert cursor.execute("select a from t1").fetchone()[0] == ''
cursor.fast_executemany = False
def test_executemany_failure(cursor: pyodbc.Cursor):
"""
Ensure that an exception is raised if one query in an executemany fails.
"""
cursor.execute("create table t1(a int, b varchar(10))")
params = [(1, 'good'),
('error', 'not an int'),
(3, 'good')]
with pytest.raises(pyodbc.Error):
cursor.executemany("insert into t1(a, b) value (?, ?)", params)
def test_row_slicing(cursor: pyodbc.Cursor):
cursor.execute("create table t1(a int, b int, c int, d int)")
cursor.execute("insert into t1 values(1,2,3,4)")
row = cursor.execute("select * from t1").fetchone()
result = row[:]
assert result is row
result = row[:-1]
assert result == (1, 2, 3)
result = row[0:4]
assert result is row
def test_row_repr(cursor: pyodbc.Cursor):
cursor.execute("create table t1(a int, b int, c int, d varchar(50))")
cursor.execute("insert into t1 values(1,2,3,'four')")
row = cursor.execute("select * from t1").fetchone()
result = str(row)
assert result == "(1, 2, 3, 'four')"
result = str(row[:-1])
assert result == "(1, 2, 3)"
result = str(row[:1])
assert result == "(1,)"
def test_concatenation(cursor: pyodbc.Cursor):
v2 = '0123456789' * 30
v3 = '9876543210' * 30
cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))")
cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3)
row = cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone()
assert row.both == v2 + v3
def test_view_select(cursor: pyodbc.Cursor):
# Reported in forum: Can't select from a view? I think I do this a lot, but another test
# never hurts.
# Create a table (t1) with 3 rows and a view (t2) into it.
cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))")
for i in range(3):
cursor.execute("insert into t1(c2) values (?)", f"string{i}")
cursor.execute("create view t2 as select * from t1")
# Select from the view
cursor.execute("select * from t2")
rows = cursor.fetchall()
assert rows is not None
assert len(rows) == 3
def test_autocommit():
cnxn = connect()
assert cnxn.autocommit is False
cnxn = None
cnxn = connect(autocommit=True)
assert cnxn.autocommit is True
cnxn.autocommit = False
assert cnxn.autocommit is False
def test_sqlserver_callproc(cursor: pyodbc.Cursor):
try:
cursor.execute("drop procedure pyodbctest")
cursor.commit()
except:
pass
cursor.execute("create table t1(s varchar(10))")
cursor.execute("insert into t1 values(?)", "testing")
cursor.execute("""
create procedure pyodbctest @var1 varchar(32)
as
begin
select s from t1
return
end
""")
cursor.execute("exec pyodbctest 'hi'")
def test_skip(cursor: pyodbc.Cursor):
# Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3.
cursor.execute("create table t1(id int)")
for i in range(1, 5):
cursor.execute("insert into t1 values(?)", i)
cursor.execute("select id from t1 order by id")
assert cursor.fetchone()[0] == 1
cursor.skip(2)
assert cursor.fetchone()[0] == 4
def test_timeout():
cnxn = connect()
assert cnxn.timeout == 0 # defaults to zero (off)
cnxn.timeout = 30
assert cnxn.timeout == 30
cnxn.timeout = 0
assert cnxn.timeout == 0
def test_sets_execute(cursor: pyodbc.Cursor):
# Only lists and tuples are allowed.
cursor.execute("create table t1 (word varchar (100))")
words = {'a', 'b', 'c'}
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute("insert into t1 (word) values (?)", words)
with pytest.raises(pyodbc.ProgrammingError):
cursor.executemany("insert into t1 (word) values (?)", words)
def test_row_execute(cursor: pyodbc.Cursor):
"Ensure we can use a Row object as a parameter to execute"
cursor.execute("create table t1(n int, s varchar(10))")
cursor.execute("insert into t1 values (1, 'a')")
row = cursor.execute("select n, s from t1").fetchone()
assert row
cursor.execute("create table t2(n int, s varchar(10))")
cursor.execute("insert into t2 values (?, ?)", row)
def test_row_executemany(cursor: pyodbc.Cursor):
"Ensure we can use a Row object as a parameter to executemany"
cursor.execute("create table t1(n int, s varchar(10))")
for i in range(3):
cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a') + i))
rows = cursor.execute("select n, s from t1").fetchall()
assert len(rows) != 0
cursor.execute("create table t2(n int, s varchar(10))")
cursor.executemany("insert into t2 values (?, ?)", rows)
def test_description(cursor: pyodbc.Cursor):
"Ensure cursor.description is correct"
cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))")
cursor.execute("insert into t1 values (1, 'abc', '1.23')")
cursor.execute("select * from t1")
    # (I'm not sure the precision of an int is constant across different versions and bitnesses,
    # so I'm only hand-checking the items I do know.)
# int
t = cursor.description[0]
assert t[0] == 'n'
assert t[1] == int
assert t[5] == 0 # scale
assert t[6] is True # nullable
# varchar(8)
t = cursor.description[1]
assert t[0] == 's'
assert t[1] == str
assert t[4] == 8 # precision
assert t[5] == 0 # scale
assert t[6] is True # nullable
# decimal(5, 2)
t = cursor.description[2]
assert t[0] == 'd'
assert t[1] == Decimal
assert t[4] == 5 # precision
assert t[5] == 2 # scale
assert t[6] is True # nullable
def test_cursor_messages_with_print(cursor: pyodbc.Cursor):
"""
Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.
"""
assert not cursor.messages
# SQL Server PRINT statements are never more than 8000 characters
# https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks
for msg in ('hello world', 'ABCDEFGHIJ' * 800):
cursor.execute(f"PRINT '{msg}'")
messages = cursor.messages
assert isinstance(messages, list)
assert len(messages) == 1
assert isinstance(messages[0], tuple)
assert len(messages[0]) == 2
assert isinstance(messages[0][0], str)
assert isinstance(messages[0][1], str)
assert '[01000] (0)' == messages[0][0]
assert messages[0][1].endswith(msg)
def test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):
"""
Complex scenario to test the Cursor.messages attribute.
"""
cursor.execute("""
create or alter procedure test_cursor_messages as
begin
set nocount on;
print 'Message 1a';
print 'Message 1b';
select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';
select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';
print 'Message 2a';
print 'Message 2b';
end
""")
# The messages will look like:
#
# [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]Message 1a
# result set 1: messages, rows
cursor.execute("exec test_cursor_messages")
vals = [row[0] for row in cursor.fetchall()]
assert vals == ['Field 1a', 'Field 1b']
msgs = [
re.search(r'Message \d[ab]$', m[1]).group(0)
for m in cursor.messages
]
assert msgs == ['Message 1a', 'Message 1b']
# result set 2: rows, no messages
assert cursor.nextset()
vals = [row[0] for row in cursor.fetchall()]
assert vals == ['Field 2a', 'Field 2b']
assert not cursor.messages
# result set 3: messages, no rows
assert cursor.nextset()
with pytest.raises(pyodbc.ProgrammingError):
cursor.fetchall()
msgs = [
re.search(r'Message \d[ab]$', m[1]).group(0)
for m in cursor.messages
]
assert msgs == ['Message 2a', 'Message 2b']
# result set 4: no rows, no messages
assert not cursor.nextset()
with pytest.raises(pyodbc.ProgrammingError):
cursor.fetchall()
assert not cursor.messages
def test_none_param(cursor: pyodbc.Cursor):
"Ensure None can be used for params other than the first"
# Some driver/db versions would fail if NULL was not the first parameter because
# SQLDescribeParam (only used with NULL) could not be used after the first call to
# SQLBindParameter. This means None always worked for the first column, but did not work
# for later columns.
#
# If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked.
# However, binary/varbinary won't allow an implicit conversion.
cursor.execute("create table t1(n int, blob varbinary(max))")
cursor.execute("insert into t1 values (1, newid())")
row = cursor.execute("select * from t1").fetchone()
assert row.n == 1
assert isinstance(row.blob, bytes)
sql = "update t1 set n=?, blob=?"
try:
cursor.execute(sql, 2, None)
except pyodbc.DataError:
if IS_FREEDTS:
# cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so pyodbc
# can't call SQLDescribeParam to get the correct parameter type. This can lead to
# errors being returned from SQL Server when sp_prepexec is called, e.g., "Implicit
# conversion from data type varchar to varbinary(max) is not allowed."
#
# So at least verify that the user can manually specify the parameter type
cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])
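            # (setinputsizes takes one entry per parameter: an empty tuple means no explicit
            # sizing, while a (sql_type, column_size, decimal_digits) tuple forces the type,
            # as done here for the varbinary column.)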
cursor.execute(sql, 2, None)
else:
raise
row = cursor.execute("select * from t1").fetchone()
assert row.n == 2
assert row.blob is None
def test_output_conversion():
def convert1(value):
# The value is the raw bytes (as a bytes object) read from the
        # database. We'll simply add an X at the beginning and the end.
return 'X' + value.decode('latin1') + 'X'
def convert2(value):
        # Same as above, but add a Y at the beginning and the end.
return 'Y' + value.decode('latin1') + 'Y'
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("create table t1(n int, v varchar(10))")
cursor.execute("insert into t1 values (1, '123.45')")
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
# Clear all conversions and try again. There should be no Xs this time.
cnxn.clear_output_converters()
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
# Same but clear using remove_output_converter.
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
# Clear via add_output_converter, passing None for the converter function.
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
# retrieve and temporarily replace converter (get_output_converter)
#
# case_1: converter already registered
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
assert prev_converter is not None
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'Y123.45Y'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
#
# case_2: no converter already registered
cnxn.clear_output_converters()
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
assert prev_converter is None
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'Y123.45Y'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
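# Hedged aside, not part of the suite: the same output-converter hook is commonly used to read
# SQL Server's datetimeoffset type (ODBC type code -155), which pyodbc has historically not
# decoded natively. The struct layout below follows the widely circulated recipe from the pyodbc
# issue tracker and should be verified against your driver before relying on it.
def _example_handle_datetimeoffset(dto_value):
    import struct
    from datetime import timezone, timedelta
    # year, month, day, hour, minute, second (6 shorts), fraction in ns (uint), tz hour/minute (2 shorts)
    tup = struct.unpack("<6hI2h", dto_value)
    return datetime(tup[0], tup[1], tup[2], tup[3], tup[4], tup[5], tup[6] // 1000,
                    timezone(timedelta(hours=tup[7], minutes=tup[8])))
# usage sketch: cnxn.add_output_converter(-155, _example_handle_datetimeoffset)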
def test_too_large(cursor: pyodbc.Cursor):
"""Ensure error raised if insert fails due to truncation"""
value = 'x' * 1000
cursor.execute("create table t1(s varchar(800))")
with pytest.raises(pyodbc.Error):
cursor.execute("insert into t1 values (?)", value)
def test_row_equal(cursor: pyodbc.Cursor):
cursor.execute("create table t1(n int, s varchar(20))")
cursor.execute("insert into t1 values (1, 'test')")
row1 = cursor.execute("select n, s from t1").fetchone()
row2 = cursor.execute("select n, s from t1").fetchone()
assert row1 == row2
def test_row_gtlt(cursor: pyodbc.Cursor):
cursor.execute("create table t1(n int, s varchar(20))")
cursor.execute("insert into t1 values (1, 'test1')")
cursor.execute("insert into t1 values (1, 'test2')")
rows = cursor.execute("select n, s from t1 order by s").fetchall()
assert rows[0] < rows[1]
assert rows[0] <= rows[1]
assert rows[1] > rows[0]
assert rows[1] >= rows[0]
assert rows[0] != rows[1]
rows = list(rows)
rows.sort() # uses <
def test_context_manager_success():
"Ensure `with` commits if an exception is not raised"
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("create table t1(n int)")
cnxn.commit()
with cnxn:
cursor.execute("insert into t1 values (1)")
rows = cursor.execute("select n from t1").fetchall()
assert len(rows) == 1
assert rows[0][0] == 1
def test_context_manager_failure(cursor: pyodbc.Cursor):
"Ensure `with` rolls back if an exception is raised"
cnxn = connect()
cursor = cnxn.cursor()
# We'll insert a row and commit it. Then we'll insert another row followed by an
# exception.
cursor.execute("create table t1(n int)")
cursor.execute("insert into t1 values (1)")
cnxn.commit()
with pytest.raises(pyodbc.Error):
with cnxn:
cursor.execute("insert into t1 values (2)")
cursor.execute("delete from bogus")
cursor.execute("select max(n) from t1")
val = cursor.fetchval()
assert val == 1
def test_untyped_none(cursor: pyodbc.Cursor):
# From issue 129
value = cursor.execute("select ?", None).fetchone()[0]
assert value is None
def test_large_update_nodata(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a varbinary(max))')
hundredkb = b'x' * 100 * 1024
cursor.execute('update t1 set a=? where 1=0', (hundredkb,))
def test_func_param(cursor: pyodbc.Cursor):
try:
cursor.execute("drop function func1")
except:
pass
cursor.execute("""
create function func1 (@testparam varchar(4))
returns @rettest table (param varchar(4))
as
begin
insert @rettest
select @testparam
return
end
""")
cursor.commit()
value = cursor.execute("select * from func1(?)", 'test').fetchone()[0]
assert value == 'test'
def test_columns(cursor: pyodbc.Cursor):
# When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error
#
# Error: TypeError: argument 2 must be str, not None
#
# I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an
# optional string keyword when calling indirectly.
cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))")
cursor.columns('t1')
results = {row.column_name: row for row in cursor}
row = results['a']
assert row.type_name == 'int', row.type_name
row = results['b']
assert row.type_name == 'varchar'
assert row.column_size == 3
# Now do the same, but specifically pass in None to one of the keywords. Old versions
# were parsing arguments incorrectly and would raise an error. (This crops up when
# calling indirectly like columns(*args, **kwargs) which aiodbc does.)
cursor.columns('t1', schema=None, catalog=None)
results = {row.column_name: row for row in cursor}
row = results['a']
assert row.type_name == 'int', row.type_name
row = results['b']
assert row.type_name == 'varchar'
assert row.column_size == 3
row = results['xΏz']
assert row.type_name == 'varchar'
assert row.column_size == 4, row.column_size
for i in range(8, 16):
table_name = 'pyodbc_89abcdef'[:i]
cursor.execute(f"""
IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};
CREATE TABLE {table_name} (id INT PRIMARY KEY);
""")
col_count = len([col.column_name for col in cursor.columns(table_name)])
assert col_count == 1
cursor.execute(f"drop table {table_name}")
def test_cancel(cursor: pyodbc.Cursor):
# I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with
# making sure SQLCancel is called correctly.
cursor.execute("select 1")
cursor.cancel()
def test_emoticons_as_parameter(cursor: pyodbc.Cursor):
# https://github.com/mkleehammer/pyodbc/issues/423
#
# When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number
# of characters. Ensure it works even with 4-byte characters.
#
# http://www.fileformat.info/info/unicode/char/1f31c/index.htm
v = "x \U0001F31C z"
cursor.execute("create table t1(s nvarchar(100))")
cursor.execute("insert into t1 values (?)", v)
result = cursor.execute("select s from t1").fetchone()[0]
assert result == v
def test_emoticons_as_literal(cursor: pyodbc.Cursor):
# similar to `test_emoticons_as_parameter`, above, except for Unicode literal
#
# http://www.fileformat.info/info/unicode/char/1f31c/index.htm
# FreeTDS ODBC issue fixed in version 1.1.23
# https://github.com/FreeTDS/freetds/issues/317
v = "x \U0001F31C z"
cursor.execute("create table t1(s nvarchar(100))")
cursor.execute(f"insert into t1 values (N'{v}')")
result = cursor.execute("select s from t1").fetchone()[0]
assert result == v
def _test_tvp(cursor: pyodbc.Cursor, diff_schema):
# Test table value parameters (TVP). I like the explanation here:
#
# https://www.mssqltips.com/sqlservertip/1483/using-table-valued-parameters-tvp-in-sql-server/
#
# "At a high level the TVP allows you to populate a table declared as a T-SQL variable,
# then pass that table as a parameter to a stored procedure or function."
#
# "The TVP must be declared READONLY. You cannot perform any DML (i.e. INSERT, UPDATE,
# DELETE) against the TVP; you can only reference it in a SELECT statement."
#
# In this test we'll create a table, pass it to a stored procedure, and have the stored
# procedure simply return the rows from the TVP.
#
# Apparently the way pyodbc knows something is a TVP is because it is in a sequence. I'm
# not sure I like that as it is very generic and specific to SQL Server. It would be wiser
# to define a wrapper pyodbc.TVP or pyodbc.Table object, similar to the DB APIs `Binary`
# object.
pyodbc.native_uuid = True
# This is the default, but we'll reset it in case a previous test fails to.
procname = 'SelectTVP'
typename = 'TestTVP'
if diff_schema:
schemaname = 'myschema'
procname = schemaname + '.' + procname
typenameonly = typename
typename = schemaname + '.' + typename
# (Don't use "if exists" since older SQL Servers don't support it.)
try:
cursor.execute("drop procedure " + procname)
except:
pass
try:
cursor.execute("drop type " + typename)
except:
pass
if diff_schema:
try:
cursor.execute("drop schema " + schemaname)
except:
pass
cursor.commit()
if diff_schema:
cursor.execute("CREATE SCHEMA myschema")
cursor.commit()
cursor.execute(
f"""
CREATE TYPE {typename} AS TABLE(
c01 VARCHAR(255),
c02 VARCHAR(MAX),
c03 VARBINARY(255),
c04 VARBINARY(MAX),
c05 BIT,
c06 DATE,
c07 TIME,
c08 DATETIME2(5),
c09 BIGINT,
c10 FLOAT,
c11 NUMERIC(38, 24),
c12 UNIQUEIDENTIFIER)
""")
cursor.commit()
cursor.execute(
f"""
CREATE PROCEDURE {procname} @TVP {typename} READONLY
AS SELECT * FROM @TVP;
""")
cursor.commit()
# The values aren't exactly VERY_LONG_LEN but close enough and *significantly* faster than
# the loop we had before.
VERY_LONG_LEN = 2000000
long_string = ''.join(chr(i) for i in range(32, 127)) # printable characters
long_bytearray = bytes(list(range(255)))
very_long_string = long_string * (VERY_LONG_LEN // len(long_string))
very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(long_bytearray))
params = [
# Three rows with all of the types in the table defined above.
(
'abc', 'abc',
bytes([0xD1, 0xCE, 0xFA, 0xCE]),
bytes([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), True,
date(1997, 8, 29), time(9, 13, 39),
datetime(2018, 11, 13, 13, 33, 26, 298420),
1234567, 3.14, Decimal('31234567890123.141243449787580175325274'),
uuid.UUID('4fe34a93-e574-04cc-200a-353f0d1770b1'),
),
(
'', '',
bytes([0x00, 0x01, 0x02, 0x03, 0x04]),
bytes([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), False,
date(1, 1, 1), time(0, 0, 0),
datetime(1, 1, 1, 0, 0, 0, 0),
-9223372036854775808, -1.79E+308, Decimal('0.000000000000000000000001'),
uuid.UUID('33f7504c-2bac-1b83-01d1-7434a7ba6a17'),
),
(
long_string, very_long_string,
bytes(long_bytearray), bytes(very_long_bytearray), True,
date(9999, 12, 31), time(23, 59, 59),
datetime(9999, 12, 31, 23, 59, 59, 999990),
9223372036854775807, 1.79E+308, Decimal('99999999999999.999999999999999999999999'),
uuid.UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),
)
]
if diff_schema:
p1 = [[typenameonly, schemaname] + params]
else:
p1 = [params]
result_array = [tuple(row) for row in cursor.execute(f"exec {procname} ?", p1).fetchall()]
# The values make it very difficult to troubleshoot if something is wrong, so instead of
# asserting they are the same, we'll walk them if there is a problem to identify which is
# wrong.
for row, param in zip(result_array, params):
if row != param:
for r, p in zip(row, param):
assert r == p
# Now test with zero rows.
params = []
if diff_schema:
p1 = [[typenameonly, schemaname] + params]
else:
p1 = [params]
result_array = cursor.execute(f"exec {procname} ?", p1).fetchall()
assert result_array == params


@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp(cursor: pyodbc.Cursor):
_test_tvp(cursor, False)


@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp_diffschema(cursor: pyodbc.Cursor):
_test_tvp(cursor, True)


def get_sqlserver_version(cursor: pyodbc.Cursor):
"""
Returns the major version: 8-->2000, 9-->2005, 10-->2008
"""
cursor.execute("exec master..xp_msver 'ProductVersion'")
row = cursor.fetchone()
return int(row.Character_Value.split('.', 1)[0])


@lru_cache()
def _generate_str(length, encoding=None):
"""
Returns either a string or bytes, depending on whether encoding is provided,
that is `length` elements long.

    If length is None, None is returned. This simplifies the tests by letting us put None into
an array of other lengths and pass them here, moving the special case check into one place.
"""
if length is None:
return None
# Put non-ASCII characters at the front so we don't end up chopping one in half in a
# multi-byte encoding like UTF-8.
v = 'á'
remaining = max(0, length - len(v))
if remaining:
seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'
if remaining <= len(seed):
v += seed
else:
            # Repeat the seed enough times to cover `remaining` (ceiling division).
            c = (remaining + len(seed) - 1) // len(seed)
v += seed * c
if encoding:
v = v.encode(encoding)
# We chop *after* encoding because if we are encoding then we want bytes.
v = v[:length]
return v
test_tvp_diffschema(cursor: pyodbc.Cursor):\n _test_tvp(cursor, True)\n\n\ndef get_sqlserver_version(cursor: pyodbc.Cursor):\n \"\"\"\n Returns the major version: 8-->2000, 9-->2005, 10-->2008\n \"\"\"\n cursor.execute(\"exec master..xp_msver 'ProductVersion'\")\n row = cursor.fetchone()\n return int(row.Character_Value.split('.', 1)[0])\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n \"\"\"\n Returns either a string or bytes, depending on whether encoding is provided,\n that is `length` elements long.\n\n If length is None, None is returned. This simplifies the tests by letting us put None into\n an array of other lengths and pass them here, moving the special case check into one place.\n \"\"\"\n if length is None:\n return None\n v = 'á'\n remaining = max(0, length - len(v))\n if remaining:\n seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n if remaining <= len(seed):\n v += seed\n else:\n c = remaining + len(seed) - 1 // len(seed)\n v += seed * c\n if encoding:\n v = v.encode(encoding)\n v = v[:length]\n return v\n",
"step-4": "<mask token>\n\n\ndef connect(autocommit=False, attrs_before=None):\n return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=\n attrs_before)\n\n\n<mask token>\n\n\[email protected]()\ndef cursor() ->Iterator[pyodbc.Cursor]:\n cnxn = connect()\n cur = cnxn.cursor()\n cur.execute('drop table if exists t1')\n cur.execute('drop table if exists t2')\n cur.execute('drop table if exists t3')\n cnxn.commit()\n yield cur\n if not cnxn.closed:\n cur.close()\n cnxn.close()\n\n\ndef test_text(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'text')\n\n\ndef test_varchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varchar')\n\n\ndef test_nvarchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'nvarchar')\n\n\ndef test_varbinary(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varbinary')\n\n\[email protected](SQLSERVER_YEAR < 2005, reason=\n '(max) not supported until 2005')\ndef test_unicode_longmax(cursor: pyodbc.Cursor):\n cursor.execute(\"select cast(replicate(N'x', 512) as nvarchar(max))\")\n\n\ndef test_char(cursor: pyodbc.Cursor):\n value = 'testing'\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', 'testing')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef test_int(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])\n\n\ndef test_bigint(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 4886718345, 2147483647,\n 4294967295, 4886718345])\n\n\ndef test_overflow_int(cursor: pyodbc.Cursor):\n input = 9999999999999999999999999999999999999\n cursor.execute('create table t1(d bigint)')\n with pytest.raises(OverflowError):\n cursor.execute('insert into t1 values (?)', input)\n result = cursor.execute('select * from t1').fetchall()\n assert result == []\n\n\ndef test_float(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, \n 0.00012345])\n\n\ndef test_non_numeric_float(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(d float)')\n for input in (float('+Infinity'), float('-Infinity'), float('NaN')):\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('insert into t1 values (?)', input)\n\n\ndef test_drivers():\n p = pyodbc.drivers()\n assert isinstance(p, list)\n\n\ndef test_datasources():\n p = pyodbc.dataSources()\n assert isinstance(p, dict)\n\n\ndef test_getinfo_string():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)\n assert isinstance(value, str)\n\n\ndef test_getinfo_bool():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)\n assert isinstance(value, bool)\n\n\ndef test_getinfo_int():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)\n assert isinstance(value, int)\n\n\ndef test_getinfo_smallint():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)\n assert isinstance(value, int)\n\n\ndef test_no_fetch(cursor: pyodbc.Cursor):\n cursor.execute('select 1')\n cursor.execute('select 1')\n cursor.execute('select 1')\n\n\ndef test_decode_meta(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure column names with non-ASCII characters are converted using the configured encodings.\n \"\"\"\n cursor.execute('create table t1(a int)')\n cursor.execute('insert into t1 values (1)')\n cursor.execute('select a as \"Tipología\" from t1')\n assert cursor.description[0][0] == 'Tipología'\n\n\ndef test_exc_integrity(cursor: pyodbc.Cursor):\n \"\"\"Make sure an IntegretyError is raised\"\"\"\n 
cursor.execute('create table t1(s1 varchar(10) primary key)')\n cursor.execute(\"insert into t1 values ('one')\")\n with pytest.raises(pyodbc.IntegrityError):\n cursor.execute(\"insert into t1 values ('one')\")\n\n\ndef test_multiple_bindings(cursor: pyodbc.Cursor):\n \"\"\"More than one bind and select on a cursor\"\"\"\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (?)', 1)\n cursor.execute('insert into t1 values (?)', 2)\n cursor.execute('insert into t1 values (?)', 3)\n for _ in range(3):\n cursor.execute('select n from t1 where n < ?', 10)\n cursor.execute('select n from t1 where n < 3')\n\n\ndef test_different_bindings(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(n int)')\n cursor.execute('create table t2(d datetime)')\n cursor.execute('insert into t1 values (?)', 1)\n cursor.execute('insert into t2 values (?)', datetime.now())\n\n\n<mask token>\n\n\ndef _test_vartype(cursor: pyodbc.Cursor, datatype):\n if datatype == 'text':\n lengths = LARGE_FENCEPOST_SIZES\n else:\n lengths = SMALL_FENCEPOST_SIZES\n if datatype == 'text':\n cursor.execute(f'create table t1(c1 {datatype})')\n else:\n maxlen = lengths[-1]\n cursor.execute(f'create table t1(c1 {datatype}({maxlen}))')\n for length in lengths:\n cursor.execute('delete from t1')\n encoding = datatype in ('blob', 'varbinary') and 'utf8' or None\n value = _generate_str(length, encoding=encoding)\n try:\n cursor.execute('insert into t1 values(?)', value)\n except pyodbc.Error as ex:\n msg = f'{datatype} insert failed: length={length} len={len(value)}'\n raise Exception(msg) from ex\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef _test_scalar(cursor: pyodbc.Cursor, datatype, values):\n \"\"\"\n A simple test wrapper for types that are identical when written and read.\n \"\"\"\n cursor.execute(f'create table t1(c1 {datatype})')\n for value in values:\n cursor.execute('delete from t1')\n cursor.execute('insert into t1 values (?)', value)\n v = cursor.execute('select c1 from t1').fetchone()[0]\n assert v == value\n\n\ndef test_noscan(cursor: pyodbc.Cursor):\n assert cursor.noscan is False\n cursor.noscan = True\n assert cursor.noscan is True\n\n\ndef test_nonnative_uuid(cursor: pyodbc.Cursor):\n value = uuid.uuid4()\n cursor.execute('create table t1(n uniqueidentifier)')\n cursor.execute('insert into t1 values (?)', value)\n pyodbc.native_uuid = False\n result = cursor.execute('select n from t1').fetchval()\n assert isinstance(result, str)\n assert result == str(value).upper()\n pyodbc.native_uuid = True\n\n\ndef test_native_uuid(cursor: pyodbc.Cursor):\n value = uuid.uuid4()\n cursor.execute('create table t1(n uniqueidentifier)')\n cursor.execute('insert into t1 values (?)', value)\n pyodbc.native_uuid = True\n result = cursor.execute('select n from t1').fetchval()\n assert isinstance(result, uuid.UUID)\n assert value == result\n\n\n<mask token>\n\n\[email protected](IS_FREEDTS, reason=\n 'https://github.com/FreeTDS/freetds/issues/230')\ndef test_nextset_with_raiserror(cursor: pyodbc.Cursor):\n cursor.execute(\"select i = 1; RAISERROR('c', 16, 1);\")\n row = next(cursor)\n assert 1 == row.i\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.nextset()\n\n\ndef test_fixed_unicode(cursor: pyodbc.Cursor):\n value = 'tësting'\n cursor.execute('create table t1(s nchar(7))')\n cursor.execute('insert into t1 values(?)', 'tësting')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n assert v == 
value\n\n\ndef test_chinese(cursor: pyodbc.Cursor):\n v = '我的'\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n row = cursor.fetchone()\n assert row[0] == v\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n rows = cursor.fetchall()\n assert rows[0][0] == v\n\n\ndef test_bit(cursor: pyodbc.Cursor):\n value = True\n cursor.execute('create table t1(b bit)')\n cursor.execute('insert into t1 values (?)', value)\n v = cursor.execute('select b from t1').fetchone()[0]\n assert isinstance(v, bool)\n assert v == value\n\n\ndef test_decimal(cursor: pyodbc.Cursor):\n for precision, scale, negative in [(1, 0, False), (1, 0, True), (6, 0, \n False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False),\n (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (\n 38, 38, True)]:\n try:\n cursor.execute('drop table t1')\n except:\n pass\n cursor.execute(f'create table t1(d decimal({precision}, {scale}))')\n sign = negative and '-' or ''\n before = '9' * (precision - scale)\n after = scale and '.' + '9' * scale or ''\n decStr = f'{sign}{before}{after}'\n value = Decimal(decStr)\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select d from t1').fetchone()[0]\n assert v == value\n\n\ndef test_decimal_e(cursor: pyodbc.Cursor):\n \"\"\"Ensure exponential notation decimals are properly handled\"\"\"\n value = Decimal((0, (1, 2, 3), 5))\n cursor.execute('create table t1(d decimal(10, 2))')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select * from t1').fetchone()[0]\n assert result == value\n\n\n<mask token>\n\n\ndef test_close_cnxn():\n \"\"\"Make sure using a Cursor after closing its connection doesn't crash.\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('drop table if exists t1')\n cursor.execute('create table t1(id integer, s varchar(20))')\n cursor.execute('insert into t1 values (?,?)', 1, 'test')\n cursor.execute('select * from t1')\n cnxn.close()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('select * from t1')\n\n\ndef test_empty_string(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '')\n\n\ndef test_empty_string_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = ''\n cursor = cnxn.cursor()\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef test_fixed_str(cursor: pyodbc.Cursor):\n value = 'testing'\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n assert v == value\n\n\n<mask token>\n\n\ndef test_empty_unicode_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = ''\n cursor = cnxn.cursor()\n cursor.execute('create table t1(s nvarchar(20))')\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef test_negative_row_index(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '1')\n row = cursor.execute('select * from t1').fetchone()\n assert row[0] == '1'\n assert row[-1] == '1'\n\n\ndef test_version():\n assert 3 == len(pyodbc.version.split('.'))\n\n\[email protected](IS_MSODBCSQL and 
SQLSERVER_YEAR < 2008, reason=\n 'Date not supported until 2008?')\ndef test_date(cursor: pyodbc.Cursor):\n value = date.today()\n cursor.execute('create table t1(d date)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select d from t1').fetchone()[0]\n assert isinstance(result, date)\n assert value == result\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason=\n 'Time not supported until 2008?')\ndef test_time(cursor: pyodbc.Cursor):\n value = datetime.now().time()\n value = value.replace(microsecond=0)\n cursor.execute('create table t1(t time)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select t from t1').fetchone()[0]\n assert isinstance(result, time)\n assert value == result\n\n\ndef test_datetime(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n cursor.execute('create table t1(dt datetime)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5, 123000)\n cursor.execute('create table t1(dt datetime)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction_rounded(cursor: pyodbc.Cursor):\n full = datetime(2007, 1, 15, 3, 4, 5, 123456)\n rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)\n cursor.execute('create table t1(dt datetime)')\n cursor.execute('insert into t1 values (?)', full)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert rounded == result\n\n\ndef test_datetime2(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n cursor.execute('create table t1(dt datetime2)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_sp_results(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n select top 10 name, id, xtype, refdate\n from sysobjects\n \"\"\"\n )\n rows = cursor.execute('exec proc1').fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_temp(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n select top 10 name, id, xtype, refdate\n into #tmptable\n from sysobjects\n\n select * from #tmptable\n \"\"\"\n )\n cursor.execute('exec proc1')\n assert cursor.description is not None\n assert len(cursor.description) == 4\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_vartbl(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)\n\n insert into @tmptbl\n select top 10 name, id, xtype, refdate\n from sysobjects\n\n select * from @tmptbl\n \"\"\"\n )\n cursor.execute('exec proc1')\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_with_dates(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n if exists (select 
* from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\"\n )\n cursor.execute(\n \"\"\"\n create procedure test_sp(@d1 datetime, @d2 datetime)\n AS\n declare @d as int\n set @d = datediff(year, @d1, @d2)\n select @d\n \"\"\"\n )\n cursor.execute('exec test_sp ?, ?', datetime.now(), datetime.now())\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] == 0\n\n\ndef test_sp_with_none(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\"\n )\n cursor.execute(\n \"\"\"\n create procedure test_sp(@x varchar(20))\n AS\n declare @y varchar(20)\n set @y = @x\n select @y\n \"\"\"\n )\n cursor.execute('exec test_sp ?', None)\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] is None\n\n\ndef test_rowcount_delete(cursor: pyodbc.Cursor):\n assert cursor.rowcount == -1\n cursor.execute('create table t1(i int)')\n count = 4\n for i in range(count):\n cursor.execute('insert into t1 values (?)', i)\n cursor.execute('delete from t1')\n assert cursor.rowcount == count\n\n\n<mask token>\n\n\ndef test_rowcount_select(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.rowcount is set properly after a select statement.\n\n pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005\n returns -1 after a select statement, so we'll test for that behavior. This is valid\n behavior according to the DB API specification, but people don't seem to like it.\n \"\"\"\n cursor.execute('create table t1(i int)')\n count = 4\n for i in range(count):\n cursor.execute('insert into t1 values (?)', i)\n cursor.execute('select * from t1')\n assert cursor.rowcount == -1\n rows = cursor.fetchall()\n assert len(rows) == count\n assert cursor.rowcount == -1\n\n\n<mask token>\n\n\ndef test_retcursor_delete(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(i int)')\n cursor.execute('insert into t1 values (1)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\ndef test_retcursor_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. 
We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code.\n \"\"\"\n cursor.execute('create table t1(i int)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\ndef test_retcursor_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(i int)')\n cursor.execute('insert into t1 values (1)')\n v = cursor.execute('select * from t1')\n assert v == cursor\n\n\ndef table_with_spaces(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can select using [x z] syntax\"\"\"\n try:\n cursor.execute('create table [test one](int n)')\n cursor.execute('insert into [test one] values(1)')\n cursor.execute('select * from [test one]')\n v = cursor.fetchone()[0]\n assert v == 1\n finally:\n cursor.rollback()\n\n\ndef test_lower_case():\n \"\"\"Ensure pyodbc.lowercase forces returned column names to lowercase.\"\"\"\n try:\n pyodbc.lowercase = True\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(Abc int, dEf int)')\n cursor.execute('select * from t1')\n names = [t[0] for t in cursor.description]\n names.sort()\n assert names == ['abc', 'def']\n finally:\n pyodbc.lowercase = False\n\n\ndef test_row_description(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.description is accessible as Row.cursor_description.\n \"\"\"\n cursor.execute('create table t1(a int, b char(3))')\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute('select * from t1').fetchone()\n assert cursor.description == row.cursor_description\n\n\ndef test_temp_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', 'testing')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n cursor.execute('select s into t2 from t1')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n\n\ndef test_executemany(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(10))')\n params = [(i, str(i)) for i in range(1, 6)]\n cursor.executemany('insert into t1(a, b) values (?,?)', params)\n count = cursor.execute('select count(*) from t1').fetchone()[0]\n assert count == len(params)\n cursor.execute('select a, b from t1 order by a')\n rows = cursor.fetchall()\n assert count == len(rows)\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_one(cursor: pyodbc.Cursor):\n \"\"\"Pass executemany a single sequence\"\"\"\n cursor.execute('create table t1(a int, b varchar(10))')\n params = [(1, 'test')]\n cursor.executemany('insert into t1(a, b) values (?,?)', params)\n count = cursor.execute('select count(*) from t1').fetchone()[0]\n assert count == len(params)\n cursor.execute('select a, b from t1 order by a')\n rows = cursor.fetchall()\n assert count == len(rows)\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_dae_0(cursor: pyodbc.Cursor):\n \"\"\"\n DAE for 0-length value\n \"\"\"\n cursor.execute('create table t1(a nvarchar(max))')\n cursor.fast_executemany = True\n cursor.executemany('insert into t1(a) values(?)', [['']])\n assert cursor.execute('select a from t1').fetchone()[0] == ''\n cursor.fast_executemany = False\n\n\n<mask token>\n\n\ndef test_row_slicing(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d int)')\n cursor.execute('insert into t1 values(1,2,3,4)')\n row = 
cursor.execute('select * from t1').fetchone()\n result = row[:]\n assert result is row\n result = row[:-1]\n assert result == (1, 2, 3)\n result = row[0:4]\n assert result is row\n\n\ndef test_row_repr(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d varchar(50))')\n cursor.execute(\"insert into t1 values(1,2,3,'four')\")\n row = cursor.execute('select * from t1').fetchone()\n result = str(row)\n assert result == \"(1, 2, 3, 'four')\"\n result = str(row[:-1])\n assert result == '(1, 2, 3)'\n result = str(row[:1])\n assert result == '(1,)'\n\n\ndef test_concatenation(cursor: pyodbc.Cursor):\n v2 = '0123456789' * 30\n v3 = '9876543210' * 30\n cursor.execute(\n 'create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))'\n )\n cursor.execute('insert into t1(c2, c3) values (?,?)', v2, v3)\n row = cursor.execute('select c2, c3, c2 + c3 as both from t1').fetchone()\n assert row.both == v2 + v3\n\n\ndef test_view_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(c1 int identity(1, 1), c2 varchar(50))')\n for i in range(3):\n cursor.execute('insert into t1(c2) values (?)', f'string{i}')\n cursor.execute('create view t2 as select * from t1')\n cursor.execute('select * from t2')\n rows = cursor.fetchall()\n assert rows is not None\n assert len(rows) == 3\n\n\ndef test_autocommit():\n cnxn = connect()\n assert cnxn.autocommit is False\n cnxn = None\n cnxn = connect(autocommit=True)\n assert cnxn.autocommit is True\n cnxn.autocommit = False\n assert cnxn.autocommit is False\n\n\ndef test_sqlserver_callproc(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop procedure pyodbctest')\n cursor.commit()\n except:\n pass\n cursor.execute('create table t1(s varchar(10))')\n cursor.execute('insert into t1 values(?)', 'testing')\n cursor.execute(\n \"\"\"\n create procedure pyodbctest @var1 varchar(32)\n as\n begin\n select s from t1\n return\n end\n \"\"\"\n )\n cursor.execute(\"exec pyodbctest 'hi'\")\n\n\ndef test_skip(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(id int)')\n for i in range(1, 5):\n cursor.execute('insert into t1 values(?)', i)\n cursor.execute('select id from t1 order by id')\n assert cursor.fetchone()[0] == 1\n cursor.skip(2)\n assert cursor.fetchone()[0] == 4\n\n\ndef test_timeout():\n cnxn = connect()\n assert cnxn.timeout == 0\n cnxn.timeout = 30\n assert cnxn.timeout == 30\n cnxn.timeout = 0\n assert cnxn.timeout == 0\n\n\ndef test_sets_execute(cursor: pyodbc.Cursor):\n cursor.execute('create table t1 (word varchar (100))')\n words = {'a', 'b', 'c'}\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('insert into t1 (word) values (?)', words)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.executemany('insert into t1 (word) values (?)', words)\n\n\ndef test_row_execute(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to execute\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n cursor.execute(\"insert into t1 values (1, 'a')\")\n row = cursor.execute('select n, s from t1').fetchone()\n assert row\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.execute('insert into t2 values (?, ?)', row)\n\n\ndef test_row_executemany(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to executemany\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n for i in range(3):\n cursor.execute('insert into t1 values (?, ?)', i, chr(ord('a') + i))\n rows = cursor.execute('select n, s from t1').fetchall()\n assert 
len(rows) != 0\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.executemany('insert into t2 values (?, ?)', rows)\n\n\ndef test_description(cursor: pyodbc.Cursor):\n \"\"\"Ensure cursor.description is correct\"\"\"\n cursor.execute('create table t1(n int, s varchar(8), d decimal(5,2))')\n cursor.execute(\"insert into t1 values (1, 'abc', '1.23')\")\n cursor.execute('select * from t1')\n t = cursor.description[0]\n assert t[0] == 'n'\n assert t[1] == int\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[1]\n assert t[0] == 's'\n assert t[1] == str\n assert t[4] == 8\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[2]\n assert t[0] == 'd'\n assert t[1] == Decimal\n assert t[4] == 5\n assert t[5] == 2\n assert t[6] is True\n\n\ndef test_cursor_messages_with_print(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.\n \"\"\"\n assert not cursor.messages\n for msg in ('hello world', 'ABCDEFGHIJ' * 800):\n cursor.execute(f\"PRINT '{msg}'\")\n messages = cursor.messages\n assert isinstance(messages, list)\n assert len(messages) == 1\n assert isinstance(messages[0], tuple)\n assert len(messages[0]) == 2\n assert isinstance(messages[0][0], str)\n assert isinstance(messages[0][1], str)\n assert '[01000] (0)' == messages[0][0]\n assert messages[0][1].endswith(msg)\n\n\ndef test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):\n \"\"\"\n Complex scenario to test the Cursor.messages attribute.\n \"\"\"\n cursor.execute(\n \"\"\"\n create or alter procedure test_cursor_messages as\n begin\n set nocount on;\n print 'Message 1a';\n print 'Message 1b';\n select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';\n select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';\n print 'Message 2a';\n print 'Message 2b';\n end\n \"\"\"\n )\n cursor.execute('exec test_cursor_messages')\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 1a', 'Field 1b']\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 1a', 'Message 1b']\n assert cursor.nextset()\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 2a', 'Field 2b']\n assert not cursor.messages\n assert cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 2a', 'Message 2b']\n assert not cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n assert not cursor.messages\n\n\ndef test_none_param(cursor: pyodbc.Cursor):\n \"\"\"Ensure None can be used for params other than the first\"\"\"\n cursor.execute('create table t1(n int, blob varbinary(max))')\n cursor.execute('insert into t1 values (1, newid())')\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 1\n assert isinstance(row.blob, bytes)\n sql = 'update t1 set n=?, blob=?'\n try:\n cursor.execute(sql, 2, None)\n except pyodbc.DataError:\n if IS_FREEDTS:\n cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])\n cursor.execute(sql, 2, None)\n else:\n raise\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 2\n assert row.blob is None\n\n\ndef test_output_conversion():\n\n def convert1(value):\n return 'X' + value.decode('latin1') + 'X'\n\n def convert2(value):\n return 'Y' + value.decode('latin1') + 'Y'\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create 
table t1(n int, v varchar(10))')\n cursor.execute(\"insert into t1 values (1, '123.45')\")\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is not None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n\n\ndef test_too_large(cursor: pyodbc.Cursor):\n \"\"\"Ensure error raised if insert fails due to truncation\"\"\"\n value = 'x' * 1000\n cursor.execute('create table t1(s varchar(800))')\n with pytest.raises(pyodbc.Error):\n cursor.execute('insert into t1 values (?)', value)\n\n\ndef test_row_equal(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(n int, s varchar(20))')\n cursor.execute(\"insert into t1 values (1, 'test')\")\n row1 = cursor.execute('select n, s from t1').fetchone()\n row2 = cursor.execute('select n, s from t1').fetchone()\n assert row1 == row2\n\n\ndef test_row_gtlt(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(n int, s varchar(20))')\n cursor.execute(\"insert into t1 values (1, 'test1')\")\n cursor.execute(\"insert into t1 values (1, 'test2')\")\n rows = cursor.execute('select n, s from t1 order by s').fetchall()\n assert rows[0] < rows[1]\n assert rows[0] <= rows[1]\n assert rows[1] > rows[0]\n assert rows[1] >= rows[0]\n assert rows[0] != rows[1]\n rows = list(rows)\n rows.sort()\n\n\ndef test_context_manager_success():\n \"\"\"Ensure `with` commits if an exception is not raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cnxn.commit()\n with cnxn:\n cursor.execute('insert into t1 values (1)')\n rows = cursor.execute('select n from t1').fetchall()\n assert len(rows) == 1\n assert rows[0][0] == 1\n\n\ndef test_context_manager_failure(cursor: pyodbc.Cursor):\n \"\"\"Ensure `with` rolls back if an exception is raised\"\"\"\n cnxn = connect()\n cursor = 
cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (1)')\n cnxn.commit()\n with pytest.raises(pyodbc.Error):\n with cnxn:\n cursor.execute('insert into t1 values (2)')\n cursor.execute('delete from bogus')\n cursor.execute('select max(n) from t1')\n val = cursor.fetchval()\n assert val == 1\n\n\ndef test_untyped_none(cursor: pyodbc.Cursor):\n value = cursor.execute('select ?', None).fetchone()[0]\n assert value is None\n\n\ndef test_large_update_nodata(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a varbinary(max))')\n hundredkb = b'x' * 100 * 1024\n cursor.execute('update t1 set a=? where 1=0', (hundredkb,))\n\n\ndef test_func_param(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop function func1')\n except:\n pass\n cursor.execute(\n \"\"\"\n create function func1 (@testparam varchar(4))\n returns @rettest table (param varchar(4))\n as\n begin\n insert @rettest\n select @testparam\n return\n end\n \"\"\"\n )\n cursor.commit()\n value = cursor.execute('select * from func1(?)', 'test').fetchone()[0]\n assert value == 'test'\n\n\ndef test_columns(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(3), xΏz varchar(4))')\n cursor.columns('t1')\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n cursor.columns('t1', schema=None, catalog=None)\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n row = results['xΏz']\n assert row.type_name == 'varchar'\n assert row.column_size == 4, row.column_size\n for i in range(8, 16):\n table_name = 'pyodbc_89abcdef'[:i]\n cursor.execute(\n f\"\"\"\n IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};\n CREATE TABLE {table_name} (id INT PRIMARY KEY);\n \"\"\"\n )\n col_count = len([col.column_name for col in cursor.columns(table_name)]\n )\n assert col_count == 1\n cursor.execute(f'drop table {table_name}')\n\n\n<mask token>\n\n\ndef test_emoticons_as_parameter(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute('insert into t1 values (?)', v)\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\ndef test_emoticons_as_literal(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute(f\"insert into t1 values (N'{v}')\")\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\ndef _test_tvp(cursor: pyodbc.Cursor, diff_schema):\n pyodbc.native_uuid = True\n procname = 'SelectTVP'\n typename = 'TestTVP'\n if diff_schema:\n schemaname = 'myschema'\n procname = schemaname + '.' + procname\n typenameonly = typename\n typename = schemaname + '.' 
+ typename\n try:\n cursor.execute('drop procedure ' + procname)\n except:\n pass\n try:\n cursor.execute('drop type ' + typename)\n except:\n pass\n if diff_schema:\n try:\n cursor.execute('drop schema ' + schemaname)\n except:\n pass\n cursor.commit()\n if diff_schema:\n cursor.execute('CREATE SCHEMA myschema')\n cursor.commit()\n cursor.execute(\n f\"\"\"\n CREATE TYPE {typename} AS TABLE(\n c01 VARCHAR(255),\n c02 VARCHAR(MAX),\n c03 VARBINARY(255),\n c04 VARBINARY(MAX),\n c05 BIT,\n c06 DATE,\n c07 TIME,\n c08 DATETIME2(5),\n c09 BIGINT,\n c10 FLOAT,\n c11 NUMERIC(38, 24),\n c12 UNIQUEIDENTIFIER)\n \"\"\"\n )\n cursor.commit()\n cursor.execute(\n f\"\"\"\n CREATE PROCEDURE {procname} @TVP {typename} READONLY\n AS SELECT * FROM @TVP;\n \"\"\"\n )\n cursor.commit()\n VERY_LONG_LEN = 2000000\n long_string = ''.join(chr(i) for i in range(32, 127))\n long_bytearray = bytes(list(range(255)))\n very_long_string = long_string * (VERY_LONG_LEN // len(long_string))\n very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(\n long_bytearray))\n params = [('abc', 'abc', bytes([209, 206, 250, 206]), bytes([15, 241, \n 206, 202, 254]), True, date(1997, 8, 29), time(9, 13, 39), datetime\n (2018, 11, 13, 13, 33, 26, 298420), 1234567, 3.14, Decimal(\n '31234567890123.141243449787580175325274'), uuid.UUID(\n '4fe34a93-e574-04cc-200a-353f0d1770b1')), ('', '', bytes([0, 1, 2, \n 3, 4]), bytes([0, 1, 2, 3, 4, 5]), False, date(1, 1, 1), time(0, 0,\n 0), datetime(1, 1, 1, 0, 0, 0, 0), -9223372036854775808, -1.79e+308,\n Decimal('0.000000000000000000000001'), uuid.UUID(\n '33f7504c-2bac-1b83-01d1-7434a7ba6a17')), (long_string,\n very_long_string, bytes(long_bytearray), bytes(very_long_bytearray),\n True, date(9999, 12, 31), time(23, 59, 59), datetime(9999, 12, 31, \n 23, 59, 59, 999990), 9223372036854775807, 1.79e+308, Decimal(\n '99999999999999.999999999999999999999999'), uuid.UUID(\n 'ffffffff-ffff-ffff-ffff-ffffffffffff'))]\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = [tuple(row) for row in cursor.execute(\n f'exec {procname} ?', p1).fetchall()]\n for row, param in zip(result_array, params):\n if row != param:\n for r, p in zip(row, param):\n assert r == p\n params = []\n p1 = [params]\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = cursor.execute(f'exec {procname} ?', p1).fetchall()\n assert result_array == params\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp(cursor: pyodbc.Cursor):\n _test_tvp(cursor, False)\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp_diffschema(cursor: pyodbc.Cursor):\n _test_tvp(cursor, True)\n\n\ndef get_sqlserver_version(cursor: pyodbc.Cursor):\n \"\"\"\n Returns the major version: 8-->2000, 9-->2005, 10-->2008\n \"\"\"\n cursor.execute(\"exec master..xp_msver 'ProductVersion'\")\n row = cursor.fetchone()\n return int(row.Character_Value.split('.', 1)[0])\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n \"\"\"\n Returns either a string or bytes, depending on whether encoding is provided,\n that is `length` elements long.\n\n If length is None, None is returned. 
This simplifies the tests by letting us put None into\n an array of other lengths and pass them here, moving the special case check into one place.\n \"\"\"\n if length is None:\n return None\n v = 'á'\n remaining = max(0, length - len(v))\n if remaining:\n seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n if remaining <= len(seed):\n v += seed\n else:\n c = remaining + len(seed) - 1 // len(seed)\n v += seed * c\n if encoding:\n v = v.encode(encoding)\n v = v[:length]\n return v\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os, uuid, re, sys\nfrom decimal import Decimal\nfrom datetime import date, time, datetime\nfrom functools import lru_cache\nfrom typing import Iterator\n\nimport pyodbc, pytest\n\n\n# WARNING: Wow Microsoft always manages to do the stupidest thing possible always trying to be\n# smarter than everyone. I worked with their APIs for since before \"OLE\" and it has always\n# been a nanny state. They won't read the UID and PWD from odbc.ini because it isn't secure.\n# Really? Less secure than what? The next hack someone is going to use. Do the straight\n# forward thing and explain how to secure it. it isn't their business how I deploy and secure.\n#\n# For every other DB we use a single default DSN but you can pass your own via an environment\n# variable. For SS, we can't just use a default DSN unless you want to go trusted. (Which is\n# more secure? No.) It'll be put into .bashrc most likely. Way to go. Now I'll go rename\n# all of the others to DB specific names instead of PYODBC_CNXNSTR. Hot garbage as usual.\n\nCNXNSTR = os.environ.get('PYODBC_SQLSERVER', 'DSN=pyodbc-sqlserver')\n\n\ndef connect(autocommit=False, attrs_before=None):\n return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before)\n\n\nDRIVER = connect().getinfo(pyodbc.SQL_DRIVER_NAME)\n\nIS_FREEDTS = bool(re.search('tsodbc', DRIVER, flags=re.IGNORECASE))\nIS_MSODBCSQL = bool(re.search(r'(msodbcsql|sqlncli|sqlsrv32\\.dll)', DRIVER, re.IGNORECASE))\n\n\ndef _get_sqlserver_year():\n \"\"\"\n Returns the release year of the current version of SQL Server, used to skip tests for\n features that are not supported. If the current DB is not SQL Server, 0 is returned.\n \"\"\"\n # We used to use the major version, but most documentation on the web refers to the year\n # (e.g. 
SQL Server 2019) so we'll use that for skipping tests that do not apply.\n if not IS_MSODBCSQL:\n return 0\n cnxn = connect()\n cursor = cnxn.cursor()\n row = cursor.execute(\"exec master..xp_msver 'ProductVersion'\").fetchone()\n major = row.Character_Value.split('.', 1)[0]\n return {\n # https://sqlserverbuilds.blogspot.com/\n '8': 2000, '9': 2005, '10': 2008, '11': 2012, '12': 2014,\n '13': 2016, '14': 2017, '15': 2019, '16': 2022\n }[major]\n\n\nSQLSERVER_YEAR = _get_sqlserver_year()\n\n\[email protected]()\ndef cursor() -> Iterator[pyodbc.Cursor]:\n cnxn = connect()\n cur = cnxn.cursor()\n\n cur.execute(\"drop table if exists t1\")\n cur.execute(\"drop table if exists t2\")\n cur.execute(\"drop table if exists t3\")\n cnxn.commit()\n\n yield cur\n\n if not cnxn.closed:\n cur.close()\n cnxn.close()\n\n\ndef test_text(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'text')\n\n\ndef test_varchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varchar')\n\n\ndef test_nvarchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'nvarchar')\n\n\ndef test_varbinary(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varbinary')\n\n\[email protected](SQLSERVER_YEAR < 2005, reason='(max) not supported until 2005')\ndef test_unicode_longmax(cursor: pyodbc.Cursor):\n # Issue 188:\tSegfault when fetching NVARCHAR(MAX) data over 511 bytes\n cursor.execute(\"select cast(replicate(N'x', 512) as nvarchar(max))\")\n\n\ndef test_char(cursor: pyodbc.Cursor):\n value = \"testing\"\n cursor.execute(\"create table t1(s char(7))\")\n cursor.execute(\"insert into t1 values(?)\", \"testing\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_int(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])\n\n\ndef test_bigint(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, 0x7FFFFFFF, 0xFFFFFFFF,\n 0x123456789])\n\n\ndef test_overflow_int(cursor: pyodbc.Cursor):\n # python allows integers of any size, bigger than an 8 byte int can contain\n input = 9999999999999999999999999999999999999\n cursor.execute(\"create table t1(d bigint)\")\n with pytest.raises(OverflowError):\n cursor.execute(\"insert into t1 values (?)\", input)\n result = cursor.execute(\"select * from t1\").fetchall()\n assert result == []\n\n\ndef test_float(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, .00012345])\n\n\ndef test_non_numeric_float(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(d float)\")\n for input in (float('+Infinity'), float('-Infinity'), float('NaN')):\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute(\"insert into t1 values (?)\", input)\n\n\ndef test_drivers():\n p = pyodbc.drivers()\n assert isinstance(p, list)\n\n\ndef test_datasources():\n p = pyodbc.dataSources()\n assert isinstance(p, dict)\n\n\ndef test_getinfo_string():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)\n assert isinstance(value, str)\n\n\ndef test_getinfo_bool():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)\n assert isinstance(value, bool)\n\n\ndef test_getinfo_int():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)\n assert isinstance(value, int)\n\n\ndef test_getinfo_smallint():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)\n assert isinstance(value, int)\n\n\ndef test_no_fetch(cursor: pyodbc.Cursor):\n # Issue 89 with FreeTDS: Multiple selects (or catalog functions that 
issue selects) without\n # fetches seem to confuse the driver.\n cursor.execute('select 1')\n cursor.execute('select 1')\n cursor.execute('select 1')\n\n\ndef test_decode_meta(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure column names with non-ASCII characters are converted using the configured encodings.\n \"\"\"\n # This is from GitHub issue #190\n cursor.execute(\"create table t1(a int)\")\n cursor.execute(\"insert into t1 values (1)\")\n cursor.execute('select a as \"Tipología\" from t1')\n assert cursor.description[0][0] == \"Tipología\"\n\n\ndef test_exc_integrity(cursor: pyodbc.Cursor):\n \"Make sure an IntegretyError is raised\"\n # This is really making sure we are properly encoding and comparing the SQLSTATEs.\n cursor.execute(\"create table t1(s1 varchar(10) primary key)\")\n cursor.execute(\"insert into t1 values ('one')\")\n with pytest.raises(pyodbc.IntegrityError):\n cursor.execute(\"insert into t1 values ('one')\")\n\n\ndef test_multiple_bindings(cursor: pyodbc.Cursor):\n \"More than one bind and select on a cursor\"\n cursor.execute(\"create table t1(n int)\")\n cursor.execute(\"insert into t1 values (?)\", 1)\n cursor.execute(\"insert into t1 values (?)\", 2)\n cursor.execute(\"insert into t1 values (?)\", 3)\n for _ in range(3):\n cursor.execute(\"select n from t1 where n < ?\", 10)\n cursor.execute(\"select n from t1 where n < 3\")\n\n\ndef test_different_bindings(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(n int)\")\n cursor.execute(\"create table t2(d datetime)\")\n cursor.execute(\"insert into t1 values (?)\", 1)\n cursor.execute(\"insert into t2 values (?)\", datetime.now())\n\n\nSMALL_FENCEPOST_SIZES = [None, 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000]\nLARGE_FENCEPOST_SIZES = SMALL_FENCEPOST_SIZES + [4095, 4096, 4097, 10 * 1024, 20 * 1024]\n\n\ndef _test_vartype(cursor: pyodbc.Cursor, datatype):\n\n if datatype == 'text':\n lengths = LARGE_FENCEPOST_SIZES\n else:\n lengths = SMALL_FENCEPOST_SIZES\n\n if datatype == 'text':\n cursor.execute(f\"create table t1(c1 {datatype})\")\n else:\n maxlen = lengths[-1]\n cursor.execute(f\"create table t1(c1 {datatype}({maxlen}))\")\n\n for length in lengths:\n cursor.execute(\"delete from t1\")\n\n encoding = (datatype in ('blob', 'varbinary')) and 'utf8' or None\n value = _generate_str(length, encoding=encoding)\n\n try:\n cursor.execute(\"insert into t1 values(?)\", value)\n except pyodbc.Error as ex:\n msg = f'{datatype} insert failed: length={length} len={len(value)}'\n raise Exception(msg) from ex\n\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef _test_scalar(cursor: pyodbc.Cursor, datatype, values):\n \"\"\"\n A simple test wrapper for types that are identical when written and read.\n \"\"\"\n cursor.execute(f\"create table t1(c1 {datatype})\")\n for value in values:\n cursor.execute(\"delete from t1\")\n cursor.execute(\"insert into t1 values (?)\", value)\n v = cursor.execute(\"select c1 from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_noscan(cursor: pyodbc.Cursor):\n assert cursor.noscan is False\n cursor.noscan = True\n assert cursor.noscan is True\n\n\ndef test_nonnative_uuid(cursor: pyodbc.Cursor):\n # The default is False meaning we should return a string. 
Note that\n # SQL Server seems to always return uppercase.\n value = uuid.uuid4()\n cursor.execute(\"create table t1(n uniqueidentifier)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n pyodbc.native_uuid = False\n result = cursor.execute(\"select n from t1\").fetchval()\n assert isinstance(result, str)\n assert result == str(value).upper()\n pyodbc.native_uuid = True\n\n\ndef test_native_uuid(cursor: pyodbc.Cursor):\n # When true, we should return a uuid.UUID object.\n value = uuid.uuid4()\n cursor.execute(\"create table t1(n uniqueidentifier)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n pyodbc.native_uuid = True\n result = cursor.execute(\"select n from t1\").fetchval()\n assert isinstance(result, uuid.UUID)\n assert value == result\n\n\ndef test_nextset(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(i int)\")\n for i in range(4):\n cursor.execute(\"insert into t1(i) values(?)\", i)\n\n cursor.execute(\n \"\"\"\n select i from t1 where i < 2 order by i;\n select i from t1 where i >= 2 order by i\n \"\"\")\n\n for i, row in enumerate(cursor):\n assert i == row.i\n\n assert cursor.nextset()\n\n for i, row in enumerate(cursor):\n assert i + 2 == row.i\n\n\[email protected](IS_FREEDTS, reason='https://github.com/FreeTDS/freetds/issues/230')\ndef test_nextset_with_raiserror(cursor: pyodbc.Cursor):\n cursor.execute(\"select i = 1; RAISERROR('c', 16, 1);\")\n row = next(cursor)\n assert 1 == row.i\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.nextset()\n\n\ndef test_fixed_unicode(cursor: pyodbc.Cursor):\n value = \"t\\xebsting\"\n cursor.execute(\"create table t1(s nchar(7))\")\n cursor.execute(\"insert into t1 values(?)\", \"t\\xebsting\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n # If we alloc'd wrong, the test below might work because of an embedded NULL\n assert v == value\n\n\ndef test_chinese(cursor: pyodbc.Cursor):\n v = '我的'\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n row = cursor.fetchone()\n assert row[0] == v\n\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n rows = cursor.fetchall()\n assert rows[0][0] == v\n\n\ndef test_bit(cursor: pyodbc.Cursor):\n value = True\n cursor.execute(\"create table t1(b bit)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n v = cursor.execute(\"select b from t1\").fetchone()[0]\n assert isinstance(v, bool)\n assert v == value\n\n\ndef test_decimal(cursor: pyodbc.Cursor):\n # From test provided by planders (thanks!) in Issue 91\n\n for (precision, scale, negative) in [\n (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True),\n (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True),\n (38, 10, True), (38, 38, True)]:\n\n try:\n cursor.execute(\"drop table t1\")\n except:\n pass\n\n cursor.execute(f\"create table t1(d decimal({precision}, {scale}))\")\n\n # Construct a decimal that uses the maximum precision and scale.\n sign = negative and '-' or ''\n before = '9' * (precision - scale)\n after = scale and ('.' 
+ '9' * scale) or ''\n decStr = f'{sign}{before}{after}'\n value = Decimal(decStr)\n\n cursor.execute(\"insert into t1 values(?)\", value)\n\n v = cursor.execute(\"select d from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_decimal_e(cursor: pyodbc.Cursor):\n \"\"\"Ensure exponential notation decimals are properly handled\"\"\"\n value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7\n cursor.execute(\"create table t1(d decimal(10, 2))\")\n cursor.execute(\"insert into t1 values (?)\", value)\n result = cursor.execute(\"select * from t1\").fetchone()[0]\n assert result == value\n\n\ndef test_subquery_params(cursor: pyodbc.Cursor):\n \"\"\"Ensure parameter markers work in a subquery\"\"\"\n cursor.execute(\"create table t1(id integer, s varchar(20))\")\n cursor.execute(\"insert into t1 values (?,?)\", 1, 'test')\n row = cursor.execute(\"\"\"\n select x.id\n from (\n select id\n from t1\n where s = ?\n and id between ? and ?\n ) x\n \"\"\", 'test', 1, 10).fetchone()\n assert row is not None\n assert row[0] == 1\n\n\ndef test_close_cnxn():\n \"\"\"Make sure using a Cursor after closing its connection doesn't crash.\"\"\"\n\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"drop table if exists t1\")\n cursor.execute(\"create table t1(id integer, s varchar(20))\")\n cursor.execute(\"insert into t1 values (?,?)\", 1, 'test')\n cursor.execute(\"select * from t1\")\n\n cnxn.close()\n\n # Now that the connection is closed, we expect an exception. (If the code attempts to use\n # the HSTMT, we'll get an access violation instead.)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute(\"select * from t1\")\n\n\ndef test_empty_string(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(s varchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", \"\")\n\n\ndef test_empty_string_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = \"\"\n cursor = cnxn.cursor()\n cursor.execute(\"create table t1(s varchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", value)\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_fixed_str(cursor: pyodbc.Cursor):\n value = \"testing\"\n cursor.execute(\"create table t1(s char(7))\")\n cursor.execute(\"insert into t1 values(?)\", value)\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n # If we alloc'd wrong, the test below might work because of an embedded NULL\n assert v == value\n\n\ndef test_empty_unicode(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(s nvarchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", \"\")\n\n\ndef test_empty_unicode_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = \"\"\n cursor = cnxn.cursor()\n cursor.execute(\"create table t1(s nvarchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", value)\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_negative_row_index(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(s varchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", \"1\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert row[0] == \"1\"\n assert row[-1] == \"1\"\n\n\ndef test_version():\n assert 3 == len(pyodbc.version.split('.')) # 1.3.1 etc.\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008,\n reason='Date not supported until 2008?')\ndef test_date(cursor: pyodbc.Cursor):\n value = 
date.today()\n\n cursor.execute(\"create table t1(d date)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select d from t1\").fetchone()[0]\n assert isinstance(result, date)\n assert value == result\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008,\n reason='Time not supported until 2008?')\ndef test_time(cursor: pyodbc.Cursor):\n value = datetime.now().time()\n\n # We aren't yet writing values using the new extended time type so the value written to the\n # database is only down to the second.\n value = value.replace(microsecond=0)\n\n cursor.execute(\"create table t1(t time)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select t from t1\").fetchone()[0]\n assert isinstance(result, time)\n assert value == result\n\n\ndef test_datetime(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n\n cursor.execute(\"create table t1(dt datetime)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction(cursor: pyodbc.Cursor):\n # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most\n # granular datetime supported is xxx000.\n\n value = datetime(2007, 1, 15, 3, 4, 5, 123000)\n\n cursor.execute(\"create table t1(dt datetime)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction_rounded(cursor: pyodbc.Cursor):\n # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc\n # rounds down to what the database supports.\n\n full = datetime(2007, 1, 15, 3, 4, 5, 123456)\n rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)\n\n cursor.execute(\"create table t1(dt datetime)\")\n cursor.execute(\"insert into t1 values (?)\", full)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert rounded == result\n\n\ndef test_datetime2(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n\n cursor.execute(\"create table t1(dt datetime2)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_sp_results(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n select top 10 name, id, xtype, refdate\n from sysobjects\n \"\"\")\n rows = cursor.execute(\"exec proc1\").fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10 # there has to be at least 10 items in sysobjects\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_temp(cursor: pyodbc.Cursor):\n\n # Note: I've used \"set nocount on\" so that we don't get the number of rows deleted from\n # #tmptable. 
If you don't do this, you'd need to call nextset() once to skip it.\n\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n select top 10 name, id, xtype, refdate\n into #tmptable\n from sysobjects\n\n select * from #tmptable\n \"\"\")\n cursor.execute(\"exec proc1\")\n assert cursor.description is not None\n assert len(cursor.description) == 4\n\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10 # there has to be at least 10 items in sysobjects\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_vartbl(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)\n\n insert into @tmptbl\n select top 10 name, id, xtype, refdate\n from sysobjects\n\n select * from @tmptbl\n \"\"\")\n cursor.execute(\"exec proc1\")\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10 # there has to be at least 10 items in sysobjects\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_with_dates(cursor: pyodbc.Cursor):\n # Reported in the forums that passing two datetimes to a stored procedure doesn't work.\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\")\n cursor.execute(\n \"\"\"\n create procedure test_sp(@d1 datetime, @d2 datetime)\n AS\n declare @d as int\n set @d = datediff(year, @d1, @d2)\n select @d\n \"\"\")\n cursor.execute(\"exec test_sp ?, ?\", datetime.now(), datetime.now())\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] == 0 # 0 years apart\n\n\ndef test_sp_with_none(cursor: pyodbc.Cursor):\n # Reported in the forums that passing None caused an error.\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\")\n cursor.execute(\n \"\"\"\n create procedure test_sp(@x varchar(20))\n AS\n declare @y varchar(20)\n set @y = @x\n select @y\n \"\"\")\n cursor.execute(\"exec test_sp ?\", None)\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] is None # 0 years apart\n\n\n#\n# rowcount\n#\n\n\ndef test_rowcount_delete(cursor: pyodbc.Cursor):\n assert cursor.rowcount == -1\n cursor.execute(\"create table t1(i int)\")\n count = 4\n for i in range(count):\n cursor.execute(\"insert into t1 values (?)\", i)\n cursor.execute(\"delete from t1\")\n assert cursor.rowcount == count\n\n\ndef test_rowcount_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code. On the other hand, we could hardcode a zero return value.\n \"\"\"\n cursor.execute(\"create table t1(i int)\")\n # This is a different code path internally.\n cursor.execute(\"delete from t1\")\n assert cursor.rowcount == 0\n\n\ndef test_rowcount_select(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.rowcount is set properly after a select statement.\n\n pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005\n returns -1 after a select statement, so we'll test for that behavior. 
This is valid\n behavior according to the DB API specification, but people don't seem to like it.\n \"\"\"\n cursor.execute(\"create table t1(i int)\")\n count = 4\n for i in range(count):\n cursor.execute(\"insert into t1 values (?)\", i)\n cursor.execute(\"select * from t1\")\n assert cursor.rowcount == -1\n\n rows = cursor.fetchall()\n assert len(rows) == count\n assert cursor.rowcount == -1\n\n\ndef test_rowcount_reset(cursor: pyodbc.Cursor):\n \"Ensure rowcount is reset after DDL\"\n cursor.execute(\"create table t1(i int)\")\n count = 4\n for i in range(count):\n cursor.execute(\"insert into t1 values (?)\", i)\n assert cursor.rowcount == 1\n\n cursor.execute(\"create table t2(i int)\")\n ddl_rowcount = (0 if IS_FREEDTS else -1)\n assert cursor.rowcount == ddl_rowcount\n\n\ndef test_retcursor_delete(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(i int)\")\n cursor.execute(\"insert into t1 values (1)\")\n v = cursor.execute(\"delete from t1\")\n assert v == cursor\n\n\ndef test_retcursor_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code.\n \"\"\"\n cursor.execute(\"create table t1(i int)\")\n # This is a different code path internally.\n v = cursor.execute(\"delete from t1\")\n assert v == cursor\n\n\ndef test_retcursor_select(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(i int)\")\n cursor.execute(\"insert into t1 values (1)\")\n v = cursor.execute(\"select * from t1\")\n assert v == cursor\n\n\ndef table_with_spaces(cursor: pyodbc.Cursor):\n \"Ensure we can select using [x z] syntax\"\n\n try:\n cursor.execute(\"create table [test one](int n)\")\n cursor.execute(\"insert into [test one] values(1)\")\n cursor.execute(\"select * from [test one]\")\n v = cursor.fetchone()[0]\n assert v == 1\n finally:\n cursor.rollback()\n\n\ndef test_lower_case():\n \"Ensure pyodbc.lowercase forces returned column names to lowercase.\"\n try:\n pyodbc.lowercase = True\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"create table t1(Abc int, dEf int)\")\n cursor.execute(\"select * from t1\")\n\n names = [t[0] for t in cursor.description]\n names.sort()\n\n assert names == [\"abc\", \"def\"]\n finally:\n # Put it back so other tests don't fail.\n pyodbc.lowercase = False\n\n\ndef test_row_description(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.description is accessible as Row.cursor_description.\n \"\"\"\n cursor.execute(\"create table t1(a int, b char(3))\")\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert cursor.description == row.cursor_description\n\n\ndef test_temp_select(cursor: pyodbc.Cursor):\n # A project was failing to create temporary tables via select into.\n cursor.execute(\"create table t1(s char(7))\")\n cursor.execute(\"insert into t1 values(?)\", \"testing\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert v == \"testing\"\n\n cursor.execute(\"select s into t2 from t1\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert v == \"testing\"\n\n\ndef test_executemany(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b varchar(10))\")\n\n params = [(i, str(i)) for i in range(1, 6)]\n\n cursor.executemany(\"insert into t1(a, 
b) values (?,?)\", params)\n\n count = cursor.execute(\"select count(*) from t1\").fetchone()[0]\n assert count == len(params)\n\n cursor.execute(\"select a, b from t1 order by a\")\n rows = cursor.fetchall()\n assert count == len(rows)\n\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_one(cursor: pyodbc.Cursor):\n \"Pass executemany a single sequence\"\n cursor.execute(\"create table t1(a int, b varchar(10))\")\n\n params = [(1, \"test\")]\n\n cursor.executemany(\"insert into t1(a, b) values (?,?)\", params)\n\n count = cursor.execute(\"select count(*) from t1\").fetchone()[0]\n assert count == len(params)\n\n cursor.execute(\"select a, b from t1 order by a\")\n rows = cursor.fetchall()\n assert count == len(rows)\n\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_dae_0(cursor: pyodbc.Cursor):\n \"\"\"\n DAE for 0-length value\n \"\"\"\n cursor.execute(\"create table t1(a nvarchar(max))\")\n\n cursor.fast_executemany = True\n cursor.executemany(\"insert into t1(a) values(?)\", [['']])\n\n assert cursor.execute(\"select a from t1\").fetchone()[0] == ''\n\n cursor.fast_executemany = False\n\n\ndef test_executemany_failure(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure that an exception is raised if one query in an executemany fails.\n \"\"\"\n cursor.execute(\"create table t1(a int, b varchar(10))\")\n\n params = [(1, 'good'),\n ('error', 'not an int'),\n (3, 'good')]\n\n with pytest.raises(pyodbc.Error):\n cursor.executemany(\"insert into t1(a, b) value (?, ?)\", params)\n\n\ndef test_row_slicing(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b int, c int, d int)\")\n cursor.execute(\"insert into t1 values(1,2,3,4)\")\n\n row = cursor.execute(\"select * from t1\").fetchone()\n\n result = row[:]\n assert result is row\n\n result = row[:-1]\n assert result == (1, 2, 3)\n\n result = row[0:4]\n assert result is row\n\n\ndef test_row_repr(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b int, c int, d varchar(50))\")\n cursor.execute(\"insert into t1 values(1,2,3,'four')\")\n\n row = cursor.execute(\"select * from t1\").fetchone()\n\n result = str(row)\n assert result == \"(1, 2, 3, 'four')\"\n\n result = str(row[:-1])\n assert result == \"(1, 2, 3)\"\n\n result = str(row[:1])\n assert result == \"(1,)\"\n\n\ndef test_concatenation(cursor: pyodbc.Cursor):\n v2 = '0123456789' * 30\n v3 = '9876543210' * 30\n\n cursor.execute(\"create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))\")\n cursor.execute(\"insert into t1(c2, c3) values (?,?)\", v2, v3)\n\n row = cursor.execute(\"select c2, c3, c2 + c3 as both from t1\").fetchone()\n\n assert row.both == v2 + v3\n\n\ndef test_view_select(cursor: pyodbc.Cursor):\n # Reported in forum: Can't select from a view? 
I think I do this a lot, but another test\n # never hurts.\n\n # Create a table (t1) with 3 rows and a view (t2) into it.\n cursor.execute(\"create table t1(c1 int identity(1, 1), c2 varchar(50))\")\n for i in range(3):\n cursor.execute(\"insert into t1(c2) values (?)\", f\"string{i}\")\n cursor.execute(\"create view t2 as select * from t1\")\n\n # Select from the view\n cursor.execute(\"select * from t2\")\n rows = cursor.fetchall()\n assert rows is not None\n assert len(rows) == 3\n\n\ndef test_autocommit():\n cnxn = connect()\n assert cnxn.autocommit is False\n cnxn = None\n\n cnxn = connect(autocommit=True)\n assert cnxn.autocommit is True\n cnxn.autocommit = False\n assert cnxn.autocommit is False\n\n\ndef test_sqlserver_callproc(cursor: pyodbc.Cursor):\n try:\n cursor.execute(\"drop procedure pyodbctest\")\n cursor.commit()\n except:\n pass\n\n cursor.execute(\"create table t1(s varchar(10))\")\n cursor.execute(\"insert into t1 values(?)\", \"testing\")\n\n cursor.execute(\"\"\"\n create procedure pyodbctest @var1 varchar(32)\n as\n begin\n select s from t1\n return\n end\n \"\"\")\n\n cursor.execute(\"exec pyodbctest 'hi'\")\n\n\ndef test_skip(cursor: pyodbc.Cursor):\n # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3.\n\n cursor.execute(\"create table t1(id int)\")\n for i in range(1, 5):\n cursor.execute(\"insert into t1 values(?)\", i)\n cursor.execute(\"select id from t1 order by id\")\n assert cursor.fetchone()[0] == 1\n cursor.skip(2)\n assert cursor.fetchone()[0] == 4\n\n\ndef test_timeout():\n cnxn = connect()\n assert cnxn.timeout == 0 # defaults to zero (off)\n\n cnxn.timeout = 30\n assert cnxn.timeout == 30\n\n cnxn.timeout = 0\n assert cnxn.timeout == 0\n\n\ndef test_sets_execute(cursor: pyodbc.Cursor):\n # Only lists and tuples are allowed.\n cursor.execute(\"create table t1 (word varchar (100))\")\n\n words = {'a', 'b', 'c'}\n\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute(\"insert into t1 (word) values (?)\", words)\n\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.executemany(\"insert into t1 (word) values (?)\", words)\n\n\ndef test_row_execute(cursor: pyodbc.Cursor):\n \"Ensure we can use a Row object as a parameter to execute\"\n cursor.execute(\"create table t1(n int, s varchar(10))\")\n cursor.execute(\"insert into t1 values (1, 'a')\")\n row = cursor.execute(\"select n, s from t1\").fetchone()\n assert row\n\n cursor.execute(\"create table t2(n int, s varchar(10))\")\n cursor.execute(\"insert into t2 values (?, ?)\", row)\n\n\ndef test_row_executemany(cursor: pyodbc.Cursor):\n \"Ensure we can use a Row object as a parameter to executemany\"\n cursor.execute(\"create table t1(n int, s varchar(10))\")\n\n for i in range(3):\n cursor.execute(\"insert into t1 values (?, ?)\", i, chr(ord('a') + i))\n\n rows = cursor.execute(\"select n, s from t1\").fetchall()\n assert len(rows) != 0\n\n cursor.execute(\"create table t2(n int, s varchar(10))\")\n cursor.executemany(\"insert into t2 values (?, ?)\", rows)\n\n\ndef test_description(cursor: pyodbc.Cursor):\n \"Ensure cursor.description is correct\"\n\n cursor.execute(\"create table t1(n int, s varchar(8), d decimal(5,2))\")\n cursor.execute(\"insert into t1 values (1, 'abc', '1.23')\")\n cursor.execute(\"select * from t1\")\n\n # (I'm not sure the precision of an int is constant across different versions, bits, so I'm\n # hand checking the items I do know.\n\n # int\n t = cursor.description[0]\n assert t[0] == 'n'\n assert t[1] == int\n assert t[5] == 0 # scale\n assert t[6] is True # nullable\n\n 
# varchar(8)\n t = cursor.description[1]\n assert t[0] == 's'\n assert t[1] == str\n assert t[4] == 8 # precision\n assert t[5] == 0 # scale\n assert t[6] is True # nullable\n\n # decimal(5, 2)\n t = cursor.description[2]\n assert t[0] == 'd'\n assert t[1] == Decimal\n assert t[4] == 5 # precision\n assert t[5] == 2 # scale\n assert t[6] is True # nullable\n\n\ndef test_cursor_messages_with_print(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.\n \"\"\"\n assert not cursor.messages\n\n # SQL Server PRINT statements are never more than 8000 characters\n # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks\n for msg in ('hello world', 'ABCDEFGHIJ' * 800):\n cursor.execute(f\"PRINT '{msg}'\")\n messages = cursor.messages\n assert isinstance(messages, list)\n assert len(messages) == 1\n assert isinstance(messages[0], tuple)\n assert len(messages[0]) == 2\n assert isinstance(messages[0][0], str)\n assert isinstance(messages[0][1], str)\n assert '[01000] (0)' == messages[0][0]\n assert messages[0][1].endswith(msg)\n\n\ndef test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):\n \"\"\"\n Complex scenario to test the Cursor.messages attribute.\n \"\"\"\n cursor.execute(\"\"\"\n create or alter procedure test_cursor_messages as\n begin\n set nocount on;\n print 'Message 1a';\n print 'Message 1b';\n select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';\n select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';\n print 'Message 2a';\n print 'Message 2b';\n end\n \"\"\")\n\n # The messages will look like:\n #\n # [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]Message 1a\n\n # result set 1: messages, rows\n cursor.execute(\"exec test_cursor_messages\")\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 1a', 'Field 1b']\n msgs = [\n re.search(r'Message \\d[ab]$', m[1]).group(0)\n for m in cursor.messages\n ]\n assert msgs == ['Message 1a', 'Message 1b']\n\n # result set 2: rows, no messages\n assert cursor.nextset()\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 2a', 'Field 2b']\n assert not cursor.messages\n\n # result set 3: messages, no rows\n assert cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n msgs = [\n re.search(r'Message \\d[ab]$', m[1]).group(0)\n for m in cursor.messages\n ]\n assert msgs == ['Message 2a', 'Message 2b']\n\n # result set 4: no rows, no messages\n assert not cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n assert not cursor.messages\n\n\ndef test_none_param(cursor: pyodbc.Cursor):\n \"Ensure None can be used for params other than the first\"\n # Some driver/db versions would fail if NULL was not the first parameter because\n # SQLDescribeParam (only used with NULL) could not be used after the first call to\n # SQLBindParameter. 
This means None always worked for the first column, but did not work\n # for later columns.\n #\n # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked.\n # However, binary/varbinary won't allow an implicit conversion.\n\n cursor.execute(\"create table t1(n int, blob varbinary(max))\")\n cursor.execute(\"insert into t1 values (1, newid())\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert row.n == 1\n assert isinstance(row.blob, bytes)\n\n sql = \"update t1 set n=?, blob=?\"\n try:\n cursor.execute(sql, 2, None)\n except pyodbc.DataError:\n if IS_FREEDTS:\n # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so pyodbc\n # can't call SQLDescribeParam to get the correct parameter type. This can lead to\n # errors being returned from SQL Server when sp_prepexec is called, e.g., \"Implicit\n # conversion from data type varchar to varbinary(max) is not allowed.\"\n #\n # So at least verify that the user can manually specify the parameter type\n cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])\n cursor.execute(sql, 2, None)\n else:\n raise\n row = cursor.execute(\"select * from t1\").fetchone()\n assert row.n == 2\n assert row.blob is None\n\n\ndef test_output_conversion():\n def convert1(value):\n # The value is the raw bytes (as a bytes object) read from the\n # database. We'll simply add an X at the beginning at the end.\n return 'X' + value.decode('latin1') + 'X'\n\n def convert2(value):\n # Same as above, but add a Y at the beginning at the end.\n return 'Y' + value.decode('latin1') + 'Y'\n\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"create table t1(n int, v varchar(10))\")\n cursor.execute(\"insert into t1 values (1, '123.45')\")\n\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n\n # Clear all conversions and try again. 
There should be no Xs this time.\n cnxn.clear_output_converters()\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n # Same but clear using remove_output_converter.\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n\n cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n # Clear via add_output_converter, passing None for the converter function.\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n # retrieve and temporarily replace converter (get_output_converter)\n #\n # case_1: converter already registered\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is not None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n #\n # case_2: no converter already registered\n cnxn.clear_output_converters()\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n\ndef test_too_large(cursor: pyodbc.Cursor):\n \"\"\"Ensure error raised if insert fails due to truncation\"\"\"\n value = 'x' * 1000\n cursor.execute(\"create table t1(s varchar(800))\")\n\n with pytest.raises(pyodbc.Error):\n cursor.execute(\"insert into t1 values (?)\", value)\n\n\ndef test_row_equal(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(n int, s varchar(20))\")\n cursor.execute(\"insert into t1 values (1, 'test')\")\n row1 = cursor.execute(\"select n, s from t1\").fetchone()\n row2 = cursor.execute(\"select n, s from t1\").fetchone()\n assert row1 == row2\n\n\ndef test_row_gtlt(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(n int, s varchar(20))\")\n cursor.execute(\"insert into t1 values (1, 'test1')\")\n cursor.execute(\"insert into t1 values (1, 'test2')\")\n rows = cursor.execute(\"select n, s from t1 order by s\").fetchall()\n assert rows[0] < rows[1]\n assert rows[0] <= rows[1]\n assert rows[1] > rows[0]\n assert rows[1] >= rows[0]\n assert rows[0] != rows[1]\n\n rows = list(rows)\n rows.sort() # uses <\n\n\ndef test_context_manager_success():\n \"Ensure `with` commits if an exception is not raised\"\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"create table t1(n int)\")\n cnxn.commit()\n\n with cnxn:\n cursor.execute(\"insert into t1 values (1)\")\n\n rows = cursor.execute(\"select n from t1\").fetchall()\n assert len(rows) == 1\n assert rows[0][0] == 1\n\n\ndef 
test_context_manager_failure(cursor: pyodbc.Cursor):\n \"Ensure `with` rolls back if an exception is raised\"\n cnxn = connect()\n cursor = cnxn.cursor()\n\n # We'll insert a row and commit it. Then we'll insert another row followed by an\n # exception.\n\n cursor.execute(\"create table t1(n int)\")\n cursor.execute(\"insert into t1 values (1)\")\n cnxn.commit()\n\n with pytest.raises(pyodbc.Error):\n with cnxn:\n cursor.execute(\"insert into t1 values (2)\")\n cursor.execute(\"delete from bogus\")\n\n cursor.execute(\"select max(n) from t1\")\n val = cursor.fetchval()\n assert val == 1\n\n\ndef test_untyped_none(cursor: pyodbc.Cursor):\n # From issue 129\n value = cursor.execute(\"select ?\", None).fetchone()[0]\n assert value is None\n\n\ndef test_large_update_nodata(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a varbinary(max))')\n hundredkb = b'x' * 100 * 1024\n cursor.execute('update t1 set a=? where 1=0', (hundredkb,))\n\n\ndef test_func_param(cursor: pyodbc.Cursor):\n try:\n cursor.execute(\"drop function func1\")\n except:\n pass\n cursor.execute(\"\"\"\n create function func1 (@testparam varchar(4))\n returns @rettest table (param varchar(4))\n as\n begin\n insert @rettest\n select @testparam\n return\n end\n \"\"\")\n cursor.commit()\n value = cursor.execute(\"select * from func1(?)\", 'test').fetchone()[0]\n assert value == 'test'\n\n\ndef test_columns(cursor: pyodbc.Cursor):\n # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error\n #\n # Error: TypeError: argument 2 must be str, not None\n #\n # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use \"|s\" for an\n # optional string keyword when calling indirectly.\n\n cursor.execute(\"create table t1(a int, b varchar(3), xΏz varchar(4))\")\n\n cursor.columns('t1')\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n\n # Now do the same, but specifically pass in None to one of the keywords. Old versions\n # were parsing arguments incorrectly and would raise an error. (This crops up when\n # calling indirectly like columns(*args, **kwargs) which aiodbc does.)\n\n cursor.columns('t1', schema=None, catalog=None)\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n row = results['xΏz']\n assert row.type_name == 'varchar'\n assert row.column_size == 4, row.column_size\n\n for i in range(8, 16):\n table_name = 'pyodbc_89abcdef'[:i]\n\n cursor.execute(f\"\"\"\n IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};\n CREATE TABLE {table_name} (id INT PRIMARY KEY);\n \"\"\")\n\n col_count = len([col.column_name for col in cursor.columns(table_name)])\n assert col_count == 1\n\n cursor.execute(f\"drop table {table_name}\")\n\n\ndef test_cancel(cursor: pyodbc.Cursor):\n # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with\n # making sure SQLCancel is called correctly.\n cursor.execute(\"select 1\")\n cursor.cancel()\n\n\ndef test_emoticons_as_parameter(cursor: pyodbc.Cursor):\n # https://github.com/mkleehammer/pyodbc/issues/423\n #\n # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number\n # of characters. 
Ensure it works even with 4-byte characters.\n #\n # http://www.fileformat.info/info/unicode/char/1f31c/index.htm\n\n v = \"x \\U0001F31C z\"\n\n cursor.execute(\"create table t1(s nvarchar(100))\")\n cursor.execute(\"insert into t1 values (?)\", v)\n\n result = cursor.execute(\"select s from t1\").fetchone()[0]\n\n assert result == v\n\n\ndef test_emoticons_as_literal(cursor: pyodbc.Cursor):\n # similar to `test_emoticons_as_parameter`, above, except for Unicode literal\n #\n # http://www.fileformat.info/info/unicode/char/1f31c/index.htm\n\n # FreeTDS ODBC issue fixed in version 1.1.23\n # https://github.com/FreeTDS/freetds/issues/317\n\n v = \"x \\U0001F31C z\"\n\n cursor.execute(\"create table t1(s nvarchar(100))\")\n cursor.execute(f\"insert into t1 values (N'{v}')\")\n\n result = cursor.execute(\"select s from t1\").fetchone()[0]\n\n assert result == v\n\n\ndef _test_tvp(cursor: pyodbc.Cursor, diff_schema):\n # Test table value parameters (TVP). I like the explanation here:\n #\n # https://www.mssqltips.com/sqlservertip/1483/using-table-valued-parameters-tvp-in-sql-server/\n #\n # \"At a high level the TVP allows you to populate a table declared as a T-SQL variable,\n # then pass that table as a parameter to a stored procedure or function.\"\n #\n # \"The TVP must be declared READONLY. You cannot perform any DML (i.e. INSERT, UPDATE,\n # DELETE) against the TVP; you can only reference it in a SELECT statement.\"\n #\n # In this test we'll create a table, pass it to a stored procedure, and have the stored\n # procedure simply return the rows from the TVP.\n #\n # Apparently the way pyodbc knows something is a TVP is because it is in a sequence. I'm\n # not sure I like that as it is very generic and specific to SQL Server. It would be wiser\n # to define a wrapper pyodbc.TVP or pyodbc.Table object, similar to the DB APIs `Binary`\n # object.\n\n pyodbc.native_uuid = True\n # This is the default, but we'll reset it in case a previous test fails to.\n\n procname = 'SelectTVP'\n typename = 'TestTVP'\n\n if diff_schema:\n schemaname = 'myschema'\n procname = schemaname + '.' + procname\n typenameonly = typename\n typename = schemaname + '.' 
+ typename\n\n # (Don't use \"if exists\" since older SQL Servers don't support it.)\n try:\n cursor.execute(\"drop procedure \" + procname)\n except:\n pass\n try:\n cursor.execute(\"drop type \" + typename)\n except:\n pass\n if diff_schema:\n try:\n cursor.execute(\"drop schema \" + schemaname)\n except:\n pass\n cursor.commit()\n\n if diff_schema:\n cursor.execute(\"CREATE SCHEMA myschema\")\n cursor.commit()\n\n cursor.execute(\n f\"\"\"\n CREATE TYPE {typename} AS TABLE(\n c01 VARCHAR(255),\n c02 VARCHAR(MAX),\n c03 VARBINARY(255),\n c04 VARBINARY(MAX),\n c05 BIT,\n c06 DATE,\n c07 TIME,\n c08 DATETIME2(5),\n c09 BIGINT,\n c10 FLOAT,\n c11 NUMERIC(38, 24),\n c12 UNIQUEIDENTIFIER)\n \"\"\")\n cursor.commit()\n cursor.execute(\n f\"\"\"\n CREATE PROCEDURE {procname} @TVP {typename} READONLY\n AS SELECT * FROM @TVP;\n \"\"\")\n cursor.commit()\n\n # The values aren't exactly VERY_LONG_LEN but close enough and *significantly* faster than\n # the loop we had before.\n VERY_LONG_LEN = 2000000\n long_string = ''.join(chr(i) for i in range(32, 127)) # printable characters\n long_bytearray = bytes(list(range(255)))\n very_long_string = long_string * (VERY_LONG_LEN // len(long_string))\n very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(long_bytearray))\n\n params = [\n # Three rows with all of the types in the table defined above.\n (\n 'abc', 'abc',\n bytes([0xD1, 0xCE, 0xFA, 0xCE]),\n bytes([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), True,\n date(1997, 8, 29), time(9, 13, 39),\n datetime(2018, 11, 13, 13, 33, 26, 298420),\n 1234567, 3.14, Decimal('31234567890123.141243449787580175325274'),\n uuid.UUID('4fe34a93-e574-04cc-200a-353f0d1770b1'),\n ),\n (\n '', '',\n bytes([0x00, 0x01, 0x02, 0x03, 0x04]),\n bytes([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), False,\n date(1, 1, 1), time(0, 0, 0),\n datetime(1, 1, 1, 0, 0, 0, 0),\n -9223372036854775808, -1.79E+308, Decimal('0.000000000000000000000001'),\n uuid.UUID('33f7504c-2bac-1b83-01d1-7434a7ba6a17'),\n ),\n (\n long_string, very_long_string,\n bytes(long_bytearray), bytes(very_long_bytearray), True,\n date(9999, 12, 31), time(23, 59, 59),\n datetime(9999, 12, 31, 23, 59, 59, 999990),\n 9223372036854775807, 1.79E+308, Decimal('99999999999999.999999999999999999999999'),\n uuid.UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),\n )\n ]\n\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = [tuple(row) for row in cursor.execute(f\"exec {procname} ?\", p1).fetchall()]\n\n # The values make it very difficult to troubleshoot if something is wrong, so instead of\n # asserting they are the same, we'll walk them if there is a problem to identify which is\n # wrong.\n for row, param in zip(result_array, params):\n if row != param:\n for r, p in zip(row, param):\n assert r == p\n\n # Now test with zero rows.\n\n params = []\n p1 = [params]\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = cursor.execute(f\"exec {procname} ?\", p1).fetchall()\n assert result_array == params\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp(cursor: pyodbc.Cursor):\n _test_tvp(cursor, False)\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp_diffschema(cursor: pyodbc.Cursor):\n _test_tvp(cursor, True)\n\n\ndef get_sqlserver_version(cursor: pyodbc.Cursor):\n\n \"\"\"\n Returns the major version: 8-->2000, 9-->2005, 10-->2008\n \"\"\"\n cursor.execute(\"exec master..xp_msver 'ProductVersion'\")\n row = 
cursor.fetchone()\n return int(row.Character_Value.split('.', 1)[0])\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n \"\"\"\n Returns either a string or bytes, depending on whether encoding is provided,\n that is `length` elements long.\n\n If length is None, None is returned. This simplifies the tests by letting us put None into\n an array of other lengths and pass them here, moving the special case check into one place.\n \"\"\"\n if length is None:\n return None\n\n # Put non-ASCII characters at the front so we don't end up chopping one in half in a\n # multi-byte encoding like UTF-8.\n\n v = 'á'\n\n remaining = max(0, length - len(v))\n if remaining:\n seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n\n if remaining <= len(seed):\n v += seed\n else:\n c = (remaining + len(seed) - 1 // len(seed))\n v += seed * c\n\n if encoding:\n v = v.encode(encoding)\n\n # We chop *after* encoding because if we are encoding then we want bytes.\n v = v[:length]\n\n return v\n",
"step-ids": [
47,
56,
70,
97,
108
]
}
|
[
47,
56,
70,
97,
108
] |
#Adds states to the dictionary
states = {
'Oregon' : 'OR' ,
    'Florida': 'FL',
'California':'CA',
'New York':'NY',
'Michigan': 'MI',
}
#Adds cities to the dictionary
cities = {
    'CA': 'San Francisco',
'MI': 'Detroit',
'FL': 'Jacksonville'
}
cities['NY'] = 'New York'
cities['OR'] = 'Portland'
#Prints cities
print('-' * 10)
print("NY State has:", cities['NY'])
print("OR State has : ",cities['OR'])
#Prints states
print('-' * 10)
print("Michigan's abbreviation is: " , states['Michigan'])
print("Flordia's abreviation is :" , states['Flordia'])
print('-' * 10)
print("Michigan has : ", cities[states['Michigan']])
print("Flordia has: " , cities[states['Flordia']])
print('-' * 10)
for state, abbrev in list(states.items()):
print(f"{state} is abbreviated {abbrev}")
print('-'* 10)
for abbrev, city in list(cities.items()):
print(f"{abbrev} has the city {city} ")
print('-' * 10)
for state, abbrev in list(states.items()):
print(f"{state}state is abbreviated {abbrev}")
print(f"and has city {cities[abbrev]}")
#Carefully acquires a state that may not be there
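#Illustrative sketch of the safe lookup the comment above describes (not part of the
#original code): dict.get() returns None, or a supplied default, instead of raising KeyError.
state = states.get('Texas')
if not state:
    print("Sorry, no Texas.")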
print('-' * 10)
|
normal
|
{
"blob_id": "1bdc1274cceba994524442c7a0065498a9c1d7bc",
"index": 8919,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('-' * 10)\nprint('NY State has:', cities['NY'])\nprint('OR State has : ', cities['OR'])\nprint('-' * 10)\nprint(\"Michigan's abbreviation is: \", states['Michigan'])\nprint(\"Flordia's abreviation is :\", states['Flordia'])\nprint('-' * 10)\nprint('Michigan has : ', cities[states['Michigan']])\nprint('Flordia has: ', cities[states['Flordia']])\nprint('-' * 10)\nfor state, abbrev in list(states.items()):\n print(f'{state} is abbreviated {abbrev}')\nprint('-' * 10)\nfor abbrev, city in list(cities.items()):\n print(f'{abbrev} has the city {city} ')\nprint('-' * 10)\nfor state, abbrev in list(states.items()):\n print(f'{state}state is abbreviated {abbrev}')\n print(f'and has city {cities[abbrev]}')\nprint('-' * 10)\n",
"step-3": "states = {'Oregon': 'OR', 'Flordia': 'FL', 'California': 'CA', 'New York':\n 'NY', 'Michigan': 'MI'}\ncities = {'CA': 'San Fransisco', 'MI': 'Detroit', 'FL': 'Jacksonville'}\ncities['NY'] = 'New York'\ncities['OR'] = 'PortLand'\nprint('-' * 10)\nprint('NY State has:', cities['NY'])\nprint('OR State has : ', cities['OR'])\nprint('-' * 10)\nprint(\"Michigan's abbreviation is: \", states['Michigan'])\nprint(\"Flordia's abreviation is :\", states['Flordia'])\nprint('-' * 10)\nprint('Michigan has : ', cities[states['Michigan']])\nprint('Flordia has: ', cities[states['Flordia']])\nprint('-' * 10)\nfor state, abbrev in list(states.items()):\n print(f'{state} is abbreviated {abbrev}')\nprint('-' * 10)\nfor abbrev, city in list(cities.items()):\n print(f'{abbrev} has the city {city} ')\nprint('-' * 10)\nfor state, abbrev in list(states.items()):\n print(f'{state}state is abbreviated {abbrev}')\n print(f'and has city {cities[abbrev]}')\nprint('-' * 10)\n",
"step-4": "#Adds states to the list\nstates = {\n 'Oregon' : 'OR' ,\n 'Flordia': 'FL' ,\n 'California':'CA',\n 'New York':'NY',\n 'Michigan': 'MI',\n }\n \n#Adds cities to the list \ncities = {\n 'CA':'San Fransisco',\n 'MI': 'Detroit',\n 'FL': 'Jacksonville'\n}\n\ncities['NY'] = 'New York'\ncities['OR'] = 'PortLand'\n\n#Prints cities\nprint('-' * 10)\nprint(\"NY State has:\", cities['NY'])\nprint(\"OR State has : \",cities['OR'])\n#prints states\nprint('-' * 10)\nprint(\"Michigan's abbreviation is: \" , states['Michigan'])\nprint(\"Flordia's abreviation is :\" , states['Flordia'])\n\n\nprint('-' * 10)\nprint(\"Michigan has : \", cities[states['Michigan']])\nprint(\"Flordia has: \" , cities[states['Flordia']])\n\nprint('-' * 10)\nfor state , abbrev in list(states.items()):\n print(f\"{state} is abbreviated {abbrev}\")\n\nprint('-'* 10)\nfor abbrev, city in list(cities.items()):\n print(f\"{abbrev} has the city {city} \")\n\nprint('-' * 10)\nfor state, abbrev in list(states.items()):\n print(f\"{state}state is abbreviated {abbrev}\")\n print(f\"and has city {cities[abbrev]}\")\n#carefullly aquires state that may not be there \nprint('-' * 10)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import numpy as np
x = 2
y = 3
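# Build a small static TF1 graph: add_op = x + y, mul_op = x * y,
# output_1 = x * (x + y), output_2 = (x + y) ** (x * y)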
add_op = tf.add(x, y)
mul_op = tf.multiply(x, y)
output_1 = tf.multiply(x, add_op)
output_2 = tf.pow(add_op, mul_op)
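# Evaluate both tensors in a single session.run call (TF1 graph execution);
# with x=2 and y=3 this prints: 10 15625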
with tf.Session() as sess:
output_1, output_2 = sess.run([output_1, output_2])
print(output_1, output_2)
|
normal
|
{
"blob_id": "da2e388c64bbf65bcef7d09d7596c2869f51524a",
"index": 4025,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith tf.Session() as sess:\n output_1, output_2 = sess.run([output_1, output_2])\nprint(output_1, output_2)\n",
"step-3": "<mask token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n<mask token>\nx = 2\ny = 3\nadd_op = tf.add(x, y)\nmul_op = tf.multiply(x, y)\noutput_1 = tf.multiply(x, add_op)\noutput_2 = tf.pow(add_op, mul_op)\nwith tf.Session() as sess:\n output_1, output_2 = sess.run([output_1, output_2])\nprint(output_1, output_2)\n",
"step-4": "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport tensorflow as tf\nimport numpy as np\nx = 2\ny = 3\nadd_op = tf.add(x, y)\nmul_op = tf.multiply(x, y)\noutput_1 = tf.multiply(x, add_op)\noutput_2 = tf.pow(add_op, mul_op)\nwith tf.Session() as sess:\n output_1, output_2 = sess.run([output_1, output_2])\nprint(output_1, output_2)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.0.8 on 2020-08-11 13:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipe', '0006_recipe_description'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='portions',
field=models.FloatField(default=1),
),
]
|
normal
|
{
"blob_id": "43dc69c66d94d85337c11eb4cfed48d7fdef2074",
"index": 5770,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('recipe', '0006_recipe_description')]\n operations = [migrations.AddField(model_name='recipe', name='portions',\n field=models.FloatField(default=1))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('recipe', '0006_recipe_description')]\n operations = [migrations.AddField(model_name='recipe', name='portions',\n field=models.FloatField(default=1))]\n",
"step-5": "# Generated by Django 3.0.8 on 2020-08-11 13:43\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('recipe', '0006_recipe_description'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='recipe',\n name='portions',\n field=models.FloatField(default=1),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""TcEx Framework Key Value Redis Module"""
class KeyValueRedis:
"""TcEx Key Value Redis Module.
Args:
context (str): The Redis context (hash) for hashed based operations.
redis_client (redis.Client): An instance of redis client.
"""
def __init__(self, context, redis_client):
"""Initialize the Class properties."""
self._context = context
self._redis_client = redis_client
@property
def context(self):
"""Return the current context."""
return self._context
@context.setter
def context(self, context):
"""Set or update the current context."""
self._context = context
def create(self, key, value):
"""Create key/value pair in Redis.
Args:
key (str): The field name (key) for the kv pair in Redis.
value (any): The value for the kv pair in Redis.
Returns:
            int: The response from Redis.
"""
return self._redis_client.hset(self.context, key, value)
def delete(self, key):
"""Alias for hdel method.
Args:
key (str): The field name (key) for the kv pair in Redis.
Returns:
            int: The response from Redis.
"""
return self._redis_client.hdel(self.context, key)
def hgetall(self):
"""Read data from Redis for the current context.
Returns:
            dict: The response data from Redis.
"""
return self._redis_client.hgetall(self.context)
def read(self, key):
"""Read data from Redis for the provided key.
Returns:
str: The response data from Redis.
"""
value = self._redis_client.hget(self.context, key)
# convert retrieved bytes to string
if isinstance(value, bytes):
value = value.decode('utf-8')
return value
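

# Minimal usage sketch (not part of the TcEx module above). It assumes a Redis server is
# reachable on localhost and uses an arbitrary context name chosen for illustration.
if __name__ == '__main__':
    import redis

    client = redis.Redis(host='localhost', port=6379)
    kv = KeyValueRedis('my-context', client)
    kv.create('greeting', 'hello')
    print(kv.read('greeting'))  # -> 'hello'
    print(kv.hgetall())         # every field stored under the 'my-context' hash
    kv.delete('greeting')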
|
normal
|
{
"blob_id": "a5b74c31aed103b55404afc538af60c3eb18cb1b",
"index": 9738,
"step-1": "<mask token>\n\n\nclass KeyValueRedis:\n <mask token>\n\n def __init__(self, context, redis_client):\n \"\"\"Initialize the Class properties.\"\"\"\n self._context = context\n self._redis_client = redis_client\n <mask token>\n\n @context.setter\n def context(self, context):\n \"\"\"Set or update the current context.\"\"\"\n self._context = context\n\n def create(self, key, value):\n \"\"\"Create key/value pair in Redis.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n value (any): The value for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hset(self.context, key, value)\n\n def delete(self, key):\n \"\"\"Alias for hdel method.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hdel(self.context, key)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass KeyValueRedis:\n <mask token>\n\n def __init__(self, context, redis_client):\n \"\"\"Initialize the Class properties.\"\"\"\n self._context = context\n self._redis_client = redis_client\n <mask token>\n\n @context.setter\n def context(self, context):\n \"\"\"Set or update the current context.\"\"\"\n self._context = context\n\n def create(self, key, value):\n \"\"\"Create key/value pair in Redis.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n value (any): The value for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hset(self.context, key, value)\n\n def delete(self, key):\n \"\"\"Alias for hdel method.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hdel(self.context, key)\n\n def hgetall(self):\n \"\"\"Read data from Redis for the current context.\n\n Returns:\n list: The response data from Redis.\n \"\"\"\n return self._redis_client.hgetall(self.context)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass KeyValueRedis:\n <mask token>\n\n def __init__(self, context, redis_client):\n \"\"\"Initialize the Class properties.\"\"\"\n self._context = context\n self._redis_client = redis_client\n\n @property\n def context(self):\n \"\"\"Return the current context.\"\"\"\n return self._context\n\n @context.setter\n def context(self, context):\n \"\"\"Set or update the current context.\"\"\"\n self._context = context\n\n def create(self, key, value):\n \"\"\"Create key/value pair in Redis.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n value (any): The value for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hset(self.context, key, value)\n\n def delete(self, key):\n \"\"\"Alias for hdel method.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hdel(self.context, key)\n\n def hgetall(self):\n \"\"\"Read data from Redis for the current context.\n\n Returns:\n list: The response data from Redis.\n \"\"\"\n return self._redis_client.hgetall(self.context)\n\n def read(self, key):\n \"\"\"Read data from Redis for the provided key.\n\n Returns:\n str: The response data from Redis.\n \"\"\"\n value = self._redis_client.hget(self.context, key)\n if isinstance(value, bytes):\n value = value.decode('utf-8')\n return value\n",
"step-4": "<mask token>\n\n\nclass KeyValueRedis:\n \"\"\"TcEx Key Value Redis Module.\n\n Args:\n context (str): The Redis context (hash) for hashed based operations.\n redis_client (redis.Client): An instance of redis client.\n \"\"\"\n\n def __init__(self, context, redis_client):\n \"\"\"Initialize the Class properties.\"\"\"\n self._context = context\n self._redis_client = redis_client\n\n @property\n def context(self):\n \"\"\"Return the current context.\"\"\"\n return self._context\n\n @context.setter\n def context(self, context):\n \"\"\"Set or update the current context.\"\"\"\n self._context = context\n\n def create(self, key, value):\n \"\"\"Create key/value pair in Redis.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n value (any): The value for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hset(self.context, key, value)\n\n def delete(self, key):\n \"\"\"Alias for hdel method.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hdel(self.context, key)\n\n def hgetall(self):\n \"\"\"Read data from Redis for the current context.\n\n Returns:\n list: The response data from Redis.\n \"\"\"\n return self._redis_client.hgetall(self.context)\n\n def read(self, key):\n \"\"\"Read data from Redis for the provided key.\n\n Returns:\n str: The response data from Redis.\n \"\"\"\n value = self._redis_client.hget(self.context, key)\n if isinstance(value, bytes):\n value = value.decode('utf-8')\n return value\n",
"step-5": "\"\"\"TcEx Framework Key Value Redis Module\"\"\"\n\n\nclass KeyValueRedis:\n \"\"\"TcEx Key Value Redis Module.\n\n Args:\n context (str): The Redis context (hash) for hashed based operations.\n redis_client (redis.Client): An instance of redis client.\n \"\"\"\n\n def __init__(self, context, redis_client):\n \"\"\"Initialize the Class properties.\"\"\"\n self._context = context\n self._redis_client = redis_client\n\n @property\n def context(self):\n \"\"\"Return the current context.\"\"\"\n return self._context\n\n @context.setter\n def context(self, context):\n \"\"\"Set or update the current context.\"\"\"\n self._context = context\n\n def create(self, key, value):\n \"\"\"Create key/value pair in Redis.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n value (any): The value for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hset(self.context, key, value)\n\n def delete(self, key):\n \"\"\"Alias for hdel method.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hdel(self.context, key)\n\n def hgetall(self):\n \"\"\"Read data from Redis for the current context.\n\n Returns:\n list: The response data from Redis.\n \"\"\"\n return self._redis_client.hgetall(self.context)\n\n def read(self, key):\n \"\"\"Read data from Redis for the provided key.\n\n Returns:\n str: The response data from Redis.\n \"\"\"\n value = self._redis_client.hget(self.context, key)\n # convert retrieved bytes to string\n if isinstance(value, bytes):\n value = value.decode('utf-8')\n return value\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
#! /usr/bin/env python3
"""Publishes joint trajectory to move robot to given pose"""
import rospy
from trajectory_msgs.msg import JointTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from std_srvs.srv import Empty
import argparse
import time
def argumentParser(argument):
""" Argument parser """
parser = argparse.ArgumentParser(description='Drive robot joint to command position')
parser.add_argument('kinova_robotType', metavar='kinova_robotType', type=str, default='j2n6a300',
                        help='kinova_robotType is in the format [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. e.g. j2n6a300 refers to a Jaco v2, 6-DOF, assistive, 3-finger robot. Note that not all options are valid for every robot type.')
#args_ = parser.parse_args(argument)
argv = rospy.myargv()
args_ = parser.parse_args(argv[1:])
prefix = args_.kinova_robotType
nbJoints = int(args_.kinova_robotType[3])
nbfingers = int(args_.kinova_robotType[5])
return prefix, nbJoints, nbfingers
def moveJoint (jointcmds,prefix,nbJoints):
topic_name = '/' + prefix + '/effort_joint_trajectory_controller/command'
pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)
jointCmd = JointTrajectory()
point = JointTrajectoryPoint()
  jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)
point.time_from_start = rospy.Duration.from_sec(5.0)
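  # Fill in one trajectory point: a target position for each joint, with
  # velocity, acceleration and effort left at zero.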
for i in range(0, nbJoints):
jointCmd.joint_names.append(prefix +'_joint_'+str(i+1))
point.positions.append(jointcmds[i])
point.velocities.append(0)
point.accelerations.append(0)
point.effort.append(0)
jointCmd.points.append(point)
rate = rospy.Rate(100)
count = 0
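  # Republish the same command for 50 cycles at 100 Hz (roughly 0.5 s), a common
  # way to make sure the controller actually receives it.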
while (count < 50):
pub.publish(jointCmd)
count = count + 1
rate.sleep()
def moveFingers (jointcmds,prefix,nbJoints):
topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'
pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)
jointCmd = JointTrajectory()
point = JointTrajectoryPoint()
  jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)
point.time_from_start = rospy.Duration.from_sec(5.0)
for i in range(0, nbJoints):
jointCmd.joint_names.append(prefix +'_joint_finger_'+str(i+1))
point.positions.append(jointcmds[i])
point.velocities.append(0)
point.accelerations.append(0)
point.effort.append(0)
jointCmd.points.append(point)
rate = rospy.Rate(100)
count = 0
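  # Same republish pattern as moveJoint, but for 500 cycles (roughly 5 s).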
while (count < 500):
pub.publish(jointCmd)
count = count + 1
rate.sleep()
if __name__ == '__main__':
try:
rospy.init_node('move_robot_using_trajectory_msg')
prefix, nbJoints, nbfingers = argumentParser(None)
#allow gazebo to launch
time.sleep(5)
# Unpause the physics
rospy.wait_for_service('/gazebo/unpause_physics')
unpause_gazebo = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
resp = unpause_gazebo()
if (nbJoints==6):
#home robots
moveJoint ([0.0,2.9,1.3,4.2,1.4,0.0],prefix,nbJoints)
else:
moveJoint ([0.0,2.9,0.0,1.3,4.2,1.4,0.0],prefix,nbJoints)
moveFingers ([1,1,1],prefix,nbfingers)
except rospy.ROSInterruptException:
print("program interrupted before completion")
|
normal
|
{
"blob_id": "ee7c63f36b4720566389826680b90c6f68de85b2",
"index": 5200,
"step-1": "<mask token>\n\n\ndef moveFingers(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_finger_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 500:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef argumentParser(argument):\n \"\"\" Argument parser \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Drive robot joint to command position')\n parser.add_argument('kinova_robotType', metavar='kinova_robotType',\n type=str, default='j2n6a300', help=\n 'kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.'\n )\n argv = rospy.myargv()\n args_ = parser.parse_args(argv[1:])\n prefix = args_.kinova_robotType\n nbJoints = int(args_.kinova_robotType[3])\n nbfingers = int(args_.kinova_robotType[5])\n return prefix, nbJoints, nbfingers\n\n\n<mask token>\n\n\ndef moveFingers(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_finger_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 500:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef argumentParser(argument):\n \"\"\" Argument parser \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Drive robot joint to command position')\n parser.add_argument('kinova_robotType', metavar='kinova_robotType',\n type=str, default='j2n6a300', help=\n 'kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.'\n )\n argv = rospy.myargv()\n args_ = parser.parse_args(argv[1:])\n prefix = args_.kinova_robotType\n nbJoints = int(args_.kinova_robotType[3])\n nbfingers = int(args_.kinova_robotType[5])\n return prefix, nbJoints, nbfingers\n\n\ndef moveJoint(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_joint_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 50:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\ndef moveFingers(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_finger_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 500:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n rospy.init_node('move_robot_using_trajectory_msg')\n prefix, nbJoints, nbfingers = argumentParser(None)\n time.sleep(5)\n rospy.wait_for_service('/gazebo/unpause_physics')\n unpause_gazebo = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)\n resp = unpause_gazebo()\n if nbJoints == 6:\n moveJoint([0.0, 2.9, 1.3, 4.2, 1.4, 0.0], prefix, nbJoints)\n else:\n moveJoint([0.0, 2.9, 0.0, 1.3, 4.2, 1.4, 0.0], prefix, nbJoints)\n moveFingers([1, 1, 1], prefix, nbfingers)\n except rospy.ROSInterruptException:\n print('program interrupted before completion')\n",
"step-4": "<mask token>\nimport rospy\nfrom trajectory_msgs.msg import JointTrajectory\nfrom trajectory_msgs.msg import JointTrajectoryPoint\nfrom std_srvs.srv import Empty\nimport argparse\nimport time\n\n\ndef argumentParser(argument):\n \"\"\" Argument parser \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Drive robot joint to command position')\n parser.add_argument('kinova_robotType', metavar='kinova_robotType',\n type=str, default='j2n6a300', help=\n 'kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.'\n )\n argv = rospy.myargv()\n args_ = parser.parse_args(argv[1:])\n prefix = args_.kinova_robotType\n nbJoints = int(args_.kinova_robotType[3])\n nbfingers = int(args_.kinova_robotType[5])\n return prefix, nbJoints, nbfingers\n\n\ndef moveJoint(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_joint_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 50:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\ndef moveFingers(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_finger_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 500:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n rospy.init_node('move_robot_using_trajectory_msg')\n prefix, nbJoints, nbfingers = argumentParser(None)\n time.sleep(5)\n rospy.wait_for_service('/gazebo/unpause_physics')\n unpause_gazebo = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)\n resp = unpause_gazebo()\n if nbJoints == 6:\n moveJoint([0.0, 2.9, 1.3, 4.2, 1.4, 0.0], prefix, nbJoints)\n else:\n moveJoint([0.0, 2.9, 0.0, 1.3, 4.2, 1.4, 0.0], prefix, nbJoints)\n moveFingers([1, 1, 1], prefix, nbfingers)\n except rospy.ROSInterruptException:\n print('program interrupted before completion')\n",
"step-5": "#! /usr/bin/env python3\n\"\"\"Publishes joint trajectory to move robot to given pose\"\"\"\n\nimport rospy\nfrom trajectory_msgs.msg import JointTrajectory\nfrom trajectory_msgs.msg import JointTrajectoryPoint\nfrom std_srvs.srv import Empty\nimport argparse\nimport time\n\ndef argumentParser(argument):\n \"\"\" Argument parser \"\"\"\n parser = argparse.ArgumentParser(description='Drive robot joint to command position')\n parser.add_argument('kinova_robotType', metavar='kinova_robotType', type=str, default='j2n6a300',\n help='kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.')\n #args_ = parser.parse_args(argument)\n argv = rospy.myargv()\n args_ = parser.parse_args(argv[1:])\n prefix = args_.kinova_robotType\n nbJoints = int(args_.kinova_robotType[3])\t\n nbfingers = int(args_.kinova_robotType[5])\t\n return prefix, nbJoints, nbfingers\n\ndef moveJoint (jointcmds,prefix,nbJoints):\n topic_name = '/' + prefix + '/effort_joint_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory() \n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0); \n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix +'_joint_'+str(i+1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0) \n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while (count < 50):\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep() \n\ndef moveFingers (jointcmds,prefix,nbJoints):\n topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1) \n jointCmd = JointTrajectory() \n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0); \n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix +'_joint_finger_'+str(i+1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0) \n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while (count < 500):\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep() \n\nif __name__ == '__main__':\n try: \n rospy.init_node('move_robot_using_trajectory_msg')\t\t\n prefix, nbJoints, nbfingers = argumentParser(None) \n #allow gazebo to launch\n time.sleep(5)\n\n # Unpause the physics\n rospy.wait_for_service('/gazebo/unpause_physics')\n unpause_gazebo = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)\n resp = unpause_gazebo()\n\n if (nbJoints==6):\n #home robots\n moveJoint ([0.0,2.9,1.3,4.2,1.4,0.0],prefix,nbJoints)\n else:\n moveJoint ([0.0,2.9,0.0,1.3,4.2,1.4,0.0],prefix,nbJoints)\n\n moveFingers ([1,1,1],prefix,nbfingers)\n except rospy.ROSInterruptException:\n print(\"program interrupted before completion\")\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
from ..core.helpers import itemize
from ..core.files import backendRep, expandDir, prefixSlash, normpath
from .helpers import splitModRef
from .repo import checkoutRepo
from .links import provenanceLink
# GET DATA FOR MAIN SOURCE AND ALL MODULES
class AppData:
def __init__(
self, app, backend, moduleRefs, locations, modules, version, checkout, silent
):
"""Collects TF data according to specifications.
The specifications are passed as arguments when the object is initialized.
Parameters
----------
backend: string
`github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.
app: obj
The high-level API object
moduleRefs: tuple
Each member consists of a module ref, which is a tuple of information
that defines a module.
locations: string|tuple
One or more directory paths. They will be combined with the `modules`
argument and used as locations to search for TF data files.
modules: string|tuple
One or more directory path segments. They will be appended to the
paths given by the `locations` argument to form search locations
for TF data files.
version: string
The version of TF data that should be retrieved. Version is a directory
level just below the search locations.
checkout: string
A specifier to use a specific release or commit of a data repository.
silent: string, optional tf.core.timestamp.SILENT_D
See `tf.core.timestamp.Timestamp`
"""
self.backend = backend
self.app = app
self.moduleRefs = (
[]
if moduleRefs is None
else moduleRefs.split(",")
if type(moduleRefs) is str
else list(moduleRefs)
)
self.locationsArg = locations
self.modulesArg = modules
self.version = version
self.checkout = checkout
self.silent = silent
def getMain(self):
"""Get the main data of the corpus.
This is specified by the `org`, `repo` and `relative` settings under
`provenanceSpec` in `config.yaml`.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
checkout = self.checkout
aContext = app.context
org = aContext.org
repo = aContext.repo
relative = prefixSlash(aContext.relative)
appPath = aContext.appPath
appName = aContext.appName
if appName.startswith("app:"):
appParent = appPath.rsplit("/", 1)[0]
relative = f"{appParent}{relative}"
elif org is None or repo is None:
appPathRep = f"{appPath}/" if appPath else ""
relative = f"{appPathRep}{appName}"
self.checkout = "local"
if not self.getModule(org, repo, prefixSlash(relative), checkout, isBase=True):
self.good = False
def getStandard(self):
"""Get the data of the standard modules specified by the settings of the corpus.
These are specified in the `moduleSpecs` setting under
`provenanceSpec` in `config.yaml`.
They will be loaded *after* the extra modules specified in the **mod**
parameter, and only insofar as they have not been specified in the
**mod** parameter. In this way you can pass overriding
checkout specifiers to the standard modules.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
loadData = app.loadData
if not loadData or loadData == "core":
return
aContext = app.context
moduleSpecs = aContext.moduleSpecs
seen = self.seen
checkout = self.checkout
backend = self.backend
for m in moduleSpecs or []:
org = m["org"]
repo = m["repo"]
relative = m["relative"]
theCheckout = m.get("checkout", checkout)
theBackend = m.get("backend", backend)
bRep = backendRep(theBackend, "spec", default=backend)
ref = f"{bRep}{org}/{repo}{relative}"
if ref in seen:
continue
if not self.getModule(
org,
repo,
relative,
theCheckout,
backend=theBackend,
specs=m,
):
self.good = False
def getRefs(self):
"""Get data from additional modules.
These are specified in the `moduleRefs` parameter of `AppData`.
We store the set of special modules in order to skip them
later when we are loading the standard modules.
"""
backend = self.backend
refs = self.moduleRefs
for ref in refs:
refPure = ref.rsplit(":", 1)[0]
if refPure in self.seen:
continue
parts = splitModRef(ref)
if not parts:
self.good = False
continue
parts[2] = prefixSlash(normpath(parts[2])) # the relative bit
theBackend = (
None if parts[-1] is None or parts[-1] == backend else parts[-1]
)
if not self.getModule(*parts[0:-1], backend=theBackend):
self.good = False
def getModules(self):
"""Get data from additional local directories.
These are specified in the `locations` and `modules` parameters of `AppData`.
"""
self.provenance = []
provenance = self.provenance
self.mLocations = []
mLocations = self.mLocations
self.locations = None
self.modules = None
self.good = True
self.seen = set()
self.getMain()
self.getRefs()
self.getStandard()
version = self.version
good = self.good
app = self.app
if good:
app.mLocations = mLocations
app.provenance = provenance
else:
return
mModules = []
if mLocations:
mModules.append(version or "")
locations = self.locationsArg
modules = self.modulesArg
givenLocations = (
[]
if locations is None
else [expandDir(app, x.strip()) for x in itemize(locations, "\n")]
if type(locations) is str
else [str(x) for x in locations]
)
givenModules = (
[]
if modules is None
else [normpath(x.strip()) for x in itemize(modules, "\n")]
if type(modules) is str
else [normpath(str(x)) for x in modules]
)
self.locations = mLocations + givenLocations
self.modules = mModules + givenModules
def getModule(
self, org, repo, relative, checkout, backend=None, isBase=False, specs=None
):
"""Prepare to load a single module.
Eventually, all TF data will be downloaded from local directories, based
on a list of location paths and module paths.
This function computes the contribution of a single module to both the
location paths and the module paths.
Parameters
----------
org: string
GitHub organization or GitLab group of the module
repo: string:
GitHub repository or GitLab project of the module
relative: string
Path within the repository of the module
checkout: string
A specifier to use a specific release or commit of a data repository.
backend: string
The backend if different from the backend of the main module
isBase: boolean, optional False
Whether this module is the main data of the corpus.
specs: dict, optional False
Additional informational attributes of the module, e.g. a DOI
"""
backend = self.backend if backend is None else backendRep(backend, "norm")
bRep = backendRep(backend, "spec", default=self.backend)
version = self.version
silent = self.silent
mLocations = self.mLocations
provenance = self.provenance
seen = self.seen
app = self.app
_browse = app._browse
aContext = app.context
branch = aContext.provenanceSpec["branch"]
relative = prefixSlash(normpath(relative))
moduleRef = f"{bRep}{org}/{repo}{relative}"
if moduleRef in self.seen:
return True
if org is None or repo is None:
relativeBare = relative.removeprefix("/")
repoLocation = relativeBare
mLocations.append(relativeBare)
(commit, local, release) = (None, None, None)
else:
(commit, release, local, localBase, localDir) = checkoutRepo(
backend,
_browse=_browse,
org=org,
repo=repo,
folder=relative,
version=version,
checkout=checkout,
withPaths=False,
keep=False,
silent=silent,
)
if not localBase:
return False
repoLocation = f"{localBase}/{org}/{repo}"
mLocations.append(f"{localBase}/{localDir}")
seen.add(moduleRef)
if isBase:
app.repoLocation = repoLocation
info = {}
for item in (
("doi", None),
("corpus", f"{org}/{repo}{relative}"),
):
(key, default) = item
info[key] = (
getattr(aContext, key)
if isBase
else specs[key]
if specs and key in specs
else default
)
provenance.append(
(
("corpus", info["corpus"]),
("version", version),
("commit", commit or "??"),
("release", release or "none"),
(
"live",
provenanceLink(
backend, org, repo, version, branch, commit, local, release, relative
),
),
("doi", info["doi"]),
)
)
return True
def getModulesData(*args):
"""Retrieve all data for a corpus.
Parameters
----------
args: list
All parameters needed to retrieve all associated data.
They are the same as are needed to construct an `AppData` object.
"""
mData = AppData(*args)
mData.getModules()
if not mData.good or mData.locations is None:
return None
return (mData.locations, mData.modules)
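# Illustrative call (added for clarity; the argument values are hypothetical and
# not taken from the source). The positional arguments mirror AppData.__init__:
# (app, backend, moduleRefs, locations, modules, version, checkout, silent).
#
#   result = getModulesData(
#       app, "github", "annotation/banks/tf", None, None, "0.2", "clone", "auto"
#   )
#   if result is not None:
#       locations, modules = result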
|
normal
|
{
"blob_id": "7be54b2bd99680beed3e8e9cb14225756a71a4ea",
"index": 1135,
"step-1": "<mask token>\n\n\nclass AppData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AppData:\n\n def __init__(self, app, backend, moduleRefs, locations, modules,\n version, checkout, silent):\n \"\"\"Collects TF data according to specifications.\n\n The specifications are passed as arguments when the object is initialized.\n\n Parameters\n ----------\n backend: string\n `github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.\n app: obj\n The high-level API object\n moduleRefs: tuple\n Each member consists of a module ref, which is a tuple of information\n that defines a module.\n locations: string|tuple\n One or more directory paths. They will be combined with the `modules`\n argument and used as locations to search for TF data files.\n modules: string|tuple\n One or more directory path segments. They will be appended to the\n paths given by the `locations` argument to form search locations\n for TF data files.\n version: string\n The version of TF data that should be retrievend. Version is a directory\n level just below the search locations.\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n silent: string, optional tf.core.timestamp.SILENT_D\n See `tf.core.timestamp.Timestamp`\n\n \"\"\"\n self.backend = backend\n self.app = app\n self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','\n ) if type(moduleRefs) is str else list(moduleRefs)\n self.locationsArg = locations\n self.modulesArg = modules\n self.version = version\n self.checkout = checkout\n self.silent = silent\n\n def getMain(self):\n \"\"\"Get the main data of the corpus.\n\n This is specified by the `org`, `repo` and `relative` settings under\n `provenanceSpec` in `config.yaml`.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n app = self.app\n checkout = self.checkout\n aContext = app.context\n org = aContext.org\n repo = aContext.repo\n relative = prefixSlash(aContext.relative)\n appPath = aContext.appPath\n appName = aContext.appName\n if appName.startswith('app:'):\n appParent = appPath.rsplit('/', 1)[0]\n relative = f'{appParent}{relative}'\n elif org is None or repo is None:\n appPathRep = f'{appPath}/' if appPath else ''\n relative = f'{appPathRep}{appName}'\n self.checkout = 'local'\n if not self.getModule(org, repo, prefixSlash(relative), checkout,\n isBase=True):\n self.good = False\n <mask token>\n\n def getRefs(self):\n \"\"\"Get data from additional modules.\n\n These are specified in the `moduleRefs` parameter of `AppData`.\n We store the set of special modules in order to skip them\n later when we are loading the standard modules.\n \"\"\"\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(':', 1)[0]\n if refPure in self.seen:\n continue\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n parts[2] = prefixSlash(normpath(parts[2]))\n theBackend = None if parts[-1] is None or parts[-1\n ] == backend else parts[-1]\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False\n\n def getModules(self):\n \"\"\"Get data from additional local directories.\n\n These are specified in the `locations` and `modules` parameters of `AppData`.\n \"\"\"\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n self.locations = None\n self.modules = None\n self.good = True\n self.seen = set()\n self.getMain()\n self.getRefs()\n self.getStandard()\n version = self.version\n good = self.good\n app = self.app\n if good:\n 
app.mLocations = mLocations\n app.provenance = provenance\n else:\n return\n mModules = []\n if mLocations:\n mModules.append(version or '')\n locations = self.locationsArg\n modules = self.modulesArg\n givenLocations = [] if locations is None else [expandDir(app, x.\n strip()) for x in itemize(locations, '\\n')] if type(locations\n ) is str else [str(x) for x in locations]\n givenModules = [] if modules is None else [normpath(x.strip()) for\n x in itemize(modules, '\\n')] if type(modules) is str else [normpath\n (str(x)) for x in modules]\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AppData:\n\n def __init__(self, app, backend, moduleRefs, locations, modules,\n version, checkout, silent):\n \"\"\"Collects TF data according to specifications.\n\n The specifications are passed as arguments when the object is initialized.\n\n Parameters\n ----------\n backend: string\n `github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.\n app: obj\n The high-level API object\n moduleRefs: tuple\n Each member consists of a module ref, which is a tuple of information\n that defines a module.\n locations: string|tuple\n One or more directory paths. They will be combined with the `modules`\n argument and used as locations to search for TF data files.\n modules: string|tuple\n One or more directory path segments. They will be appended to the\n paths given by the `locations` argument to form search locations\n for TF data files.\n version: string\n The version of TF data that should be retrievend. Version is a directory\n level just below the search locations.\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n silent: string, optional tf.core.timestamp.SILENT_D\n See `tf.core.timestamp.Timestamp`\n\n \"\"\"\n self.backend = backend\n self.app = app\n self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','\n ) if type(moduleRefs) is str else list(moduleRefs)\n self.locationsArg = locations\n self.modulesArg = modules\n self.version = version\n self.checkout = checkout\n self.silent = silent\n\n def getMain(self):\n \"\"\"Get the main data of the corpus.\n\n This is specified by the `org`, `repo` and `relative` settings under\n `provenanceSpec` in `config.yaml`.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n app = self.app\n checkout = self.checkout\n aContext = app.context\n org = aContext.org\n repo = aContext.repo\n relative = prefixSlash(aContext.relative)\n appPath = aContext.appPath\n appName = aContext.appName\n if appName.startswith('app:'):\n appParent = appPath.rsplit('/', 1)[0]\n relative = f'{appParent}{relative}'\n elif org is None or repo is None:\n appPathRep = f'{appPath}/' if appPath else ''\n relative = f'{appPathRep}{appName}'\n self.checkout = 'local'\n if not self.getModule(org, repo, prefixSlash(relative), checkout,\n isBase=True):\n self.good = False\n\n def getStandard(self):\n \"\"\"Get the data of the standard modules specified by the settings of the corpus.\n\n These are specified in the `moduleSpecs` setting under\n `provenanceSpecs` in `config.yaml`.\n\n They will be loaded *after* the extra modules specified in the **mod**\n parameter, and only in as far they have not been specifief in the\n **mod** parameter. 
In this way you can pass overriding\n checkout specifiers to the standard modules.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n app = self.app\n loadData = app.loadData\n if not loadData or loadData == 'core':\n return\n aContext = app.context\n moduleSpecs = aContext.moduleSpecs\n seen = self.seen\n checkout = self.checkout\n backend = self.backend\n for m in (moduleSpecs or []):\n org = m['org']\n repo = m['repo']\n relative = m['relative']\n theCheckout = m.get('checkout', checkout)\n theBackend = m.get('backend', backend)\n bRep = backendRep(theBackend, 'spec', default=backend)\n ref = f'{bRep}{org}/{repo}{relative}'\n if ref in seen:\n continue\n if not self.getModule(org, repo, relative, theCheckout, backend\n =theBackend, specs=m):\n self.good = False\n\n def getRefs(self):\n \"\"\"Get data from additional modules.\n\n These are specified in the `moduleRefs` parameter of `AppData`.\n We store the set of special modules in order to skip them\n later when we are loading the standard modules.\n \"\"\"\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(':', 1)[0]\n if refPure in self.seen:\n continue\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n parts[2] = prefixSlash(normpath(parts[2]))\n theBackend = None if parts[-1] is None or parts[-1\n ] == backend else parts[-1]\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False\n\n def getModules(self):\n \"\"\"Get data from additional local directories.\n\n These are specified in the `locations` and `modules` parameters of `AppData`.\n \"\"\"\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n self.locations = None\n self.modules = None\n self.good = True\n self.seen = set()\n self.getMain()\n self.getRefs()\n self.getStandard()\n version = self.version\n good = self.good\n app = self.app\n if good:\n app.mLocations = mLocations\n app.provenance = provenance\n else:\n return\n mModules = []\n if mLocations:\n mModules.append(version or '')\n locations = self.locationsArg\n modules = self.modulesArg\n givenLocations = [] if locations is None else [expandDir(app, x.\n strip()) for x in itemize(locations, '\\n')] if type(locations\n ) is str else [str(x) for x in locations]\n givenModules = [] if modules is None else [normpath(x.strip()) for\n x in itemize(modules, '\\n')] if type(modules) is str else [normpath\n (str(x)) for x in modules]\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules\n\n def getModule(self, org, repo, relative, checkout, backend=None, isBase\n =False, specs=None):\n \"\"\"Prepare to load a single module.\n\n Eventually, all TF data will be downloaded from local directories, bases\n on a list of location paths and module paths.\n\n This function computes the contribution of a single module to both the\n location paths and the module paths.\n\n Parameters\n ----------\n org: string\n GitHub organization or GitLab group of the module\n repo: string:\n GitHub repository or GitLab project of the module\n relative: string\n Path within the repository of the module\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n backend: string\n The backend if different from the backend of the main module\n isBase: boolean, optional False\n Whether this module is the main data of the corpus.\n specs: dict, optional False\n Additional informational attributes of the 
module, e.g. a DOI\n \"\"\"\n backend = self.backend if backend is None else backendRep(backend,\n 'norm')\n bRep = backendRep(backend, 'spec', default=self.backend)\n version = self.version\n silent = self.silent\n mLocations = self.mLocations\n provenance = self.provenance\n seen = self.seen\n app = self.app\n _browse = app._browse\n aContext = app.context\n branch = aContext.provenanceSpec['branch']\n relative = prefixSlash(normpath(relative))\n moduleRef = f'{bRep}{org}/{repo}{relative}'\n if moduleRef in self.seen:\n return True\n if org is None or repo is None:\n relativeBare = relative.removeprefix('/')\n repoLocation = relativeBare\n mLocations.append(relativeBare)\n commit, local, release = None, None, None\n else:\n commit, release, local, localBase, localDir = checkoutRepo(backend,\n _browse=_browse, org=org, repo=repo, folder=relative,\n version=version, checkout=checkout, withPaths=False, keep=\n False, silent=silent)\n if not localBase:\n return False\n repoLocation = f'{localBase}/{org}/{repo}'\n mLocations.append(f'{localBase}/{localDir}')\n seen.add(moduleRef)\n if isBase:\n app.repoLocation = repoLocation\n info = {}\n for item in (('doi', None), ('corpus', f'{org}/{repo}{relative}')):\n key, default = item\n info[key] = getattr(aContext, key) if isBase else specs[key\n ] if specs and key in specs else default\n provenance.append((('corpus', info['corpus']), ('version', version),\n ('commit', commit or '??'), ('release', release or 'none'), (\n 'live', provenanceLink(backend, org, repo, version, branch,\n commit, local, release, relative)), ('doi', info['doi'])))\n return True\n\n\ndef getModulesData(*args):\n \"\"\"Retrieve all data for a corpus.\n\n Parameters\n ----------\n args: list\n All parameters needed to retrieve all associated data.\n They are the same as are needed to construct an `AppData` object.\n \"\"\"\n mData = AppData(*args)\n mData.getModules()\n if not mData.good or mData.locations is None:\n return None\n return mData.locations, mData.modules\n",
"step-4": "from ..core.helpers import itemize\nfrom ..core.files import backendRep, expandDir, prefixSlash, normpath\nfrom .helpers import splitModRef\nfrom .repo import checkoutRepo\nfrom .links import provenanceLink\n\n\nclass AppData:\n\n def __init__(self, app, backend, moduleRefs, locations, modules,\n version, checkout, silent):\n \"\"\"Collects TF data according to specifications.\n\n The specifications are passed as arguments when the object is initialized.\n\n Parameters\n ----------\n backend: string\n `github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.\n app: obj\n The high-level API object\n moduleRefs: tuple\n Each member consists of a module ref, which is a tuple of information\n that defines a module.\n locations: string|tuple\n One or more directory paths. They will be combined with the `modules`\n argument and used as locations to search for TF data files.\n modules: string|tuple\n One or more directory path segments. They will be appended to the\n paths given by the `locations` argument to form search locations\n for TF data files.\n version: string\n The version of TF data that should be retrievend. Version is a directory\n level just below the search locations.\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n silent: string, optional tf.core.timestamp.SILENT_D\n See `tf.core.timestamp.Timestamp`\n\n \"\"\"\n self.backend = backend\n self.app = app\n self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','\n ) if type(moduleRefs) is str else list(moduleRefs)\n self.locationsArg = locations\n self.modulesArg = modules\n self.version = version\n self.checkout = checkout\n self.silent = silent\n\n def getMain(self):\n \"\"\"Get the main data of the corpus.\n\n This is specified by the `org`, `repo` and `relative` settings under\n `provenanceSpec` in `config.yaml`.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n app = self.app\n checkout = self.checkout\n aContext = app.context\n org = aContext.org\n repo = aContext.repo\n relative = prefixSlash(aContext.relative)\n appPath = aContext.appPath\n appName = aContext.appName\n if appName.startswith('app:'):\n appParent = appPath.rsplit('/', 1)[0]\n relative = f'{appParent}{relative}'\n elif org is None or repo is None:\n appPathRep = f'{appPath}/' if appPath else ''\n relative = f'{appPathRep}{appName}'\n self.checkout = 'local'\n if not self.getModule(org, repo, prefixSlash(relative), checkout,\n isBase=True):\n self.good = False\n\n def getStandard(self):\n \"\"\"Get the data of the standard modules specified by the settings of the corpus.\n\n These are specified in the `moduleSpecs` setting under\n `provenanceSpecs` in `config.yaml`.\n\n They will be loaded *after* the extra modules specified in the **mod**\n parameter, and only in as far they have not been specifief in the\n **mod** parameter. 
In this way you can pass overriding\n checkout specifiers to the standard modules.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n app = self.app\n loadData = app.loadData\n if not loadData or loadData == 'core':\n return\n aContext = app.context\n moduleSpecs = aContext.moduleSpecs\n seen = self.seen\n checkout = self.checkout\n backend = self.backend\n for m in (moduleSpecs or []):\n org = m['org']\n repo = m['repo']\n relative = m['relative']\n theCheckout = m.get('checkout', checkout)\n theBackend = m.get('backend', backend)\n bRep = backendRep(theBackend, 'spec', default=backend)\n ref = f'{bRep}{org}/{repo}{relative}'\n if ref in seen:\n continue\n if not self.getModule(org, repo, relative, theCheckout, backend\n =theBackend, specs=m):\n self.good = False\n\n def getRefs(self):\n \"\"\"Get data from additional modules.\n\n These are specified in the `moduleRefs` parameter of `AppData`.\n We store the set of special modules in order to skip them\n later when we are loading the standard modules.\n \"\"\"\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(':', 1)[0]\n if refPure in self.seen:\n continue\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n parts[2] = prefixSlash(normpath(parts[2]))\n theBackend = None if parts[-1] is None or parts[-1\n ] == backend else parts[-1]\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False\n\n def getModules(self):\n \"\"\"Get data from additional local directories.\n\n These are specified in the `locations` and `modules` parameters of `AppData`.\n \"\"\"\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n self.locations = None\n self.modules = None\n self.good = True\n self.seen = set()\n self.getMain()\n self.getRefs()\n self.getStandard()\n version = self.version\n good = self.good\n app = self.app\n if good:\n app.mLocations = mLocations\n app.provenance = provenance\n else:\n return\n mModules = []\n if mLocations:\n mModules.append(version or '')\n locations = self.locationsArg\n modules = self.modulesArg\n givenLocations = [] if locations is None else [expandDir(app, x.\n strip()) for x in itemize(locations, '\\n')] if type(locations\n ) is str else [str(x) for x in locations]\n givenModules = [] if modules is None else [normpath(x.strip()) for\n x in itemize(modules, '\\n')] if type(modules) is str else [normpath\n (str(x)) for x in modules]\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules\n\n def getModule(self, org, repo, relative, checkout, backend=None, isBase\n =False, specs=None):\n \"\"\"Prepare to load a single module.\n\n Eventually, all TF data will be downloaded from local directories, bases\n on a list of location paths and module paths.\n\n This function computes the contribution of a single module to both the\n location paths and the module paths.\n\n Parameters\n ----------\n org: string\n GitHub organization or GitLab group of the module\n repo: string:\n GitHub repository or GitLab project of the module\n relative: string\n Path within the repository of the module\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n backend: string\n The backend if different from the backend of the main module\n isBase: boolean, optional False\n Whether this module is the main data of the corpus.\n specs: dict, optional False\n Additional informational attributes of the 
module, e.g. a DOI\n \"\"\"\n backend = self.backend if backend is None else backendRep(backend,\n 'norm')\n bRep = backendRep(backend, 'spec', default=self.backend)\n version = self.version\n silent = self.silent\n mLocations = self.mLocations\n provenance = self.provenance\n seen = self.seen\n app = self.app\n _browse = app._browse\n aContext = app.context\n branch = aContext.provenanceSpec['branch']\n relative = prefixSlash(normpath(relative))\n moduleRef = f'{bRep}{org}/{repo}{relative}'\n if moduleRef in self.seen:\n return True\n if org is None or repo is None:\n relativeBare = relative.removeprefix('/')\n repoLocation = relativeBare\n mLocations.append(relativeBare)\n commit, local, release = None, None, None\n else:\n commit, release, local, localBase, localDir = checkoutRepo(backend,\n _browse=_browse, org=org, repo=repo, folder=relative,\n version=version, checkout=checkout, withPaths=False, keep=\n False, silent=silent)\n if not localBase:\n return False\n repoLocation = f'{localBase}/{org}/{repo}'\n mLocations.append(f'{localBase}/{localDir}')\n seen.add(moduleRef)\n if isBase:\n app.repoLocation = repoLocation\n info = {}\n for item in (('doi', None), ('corpus', f'{org}/{repo}{relative}')):\n key, default = item\n info[key] = getattr(aContext, key) if isBase else specs[key\n ] if specs and key in specs else default\n provenance.append((('corpus', info['corpus']), ('version', version),\n ('commit', commit or '??'), ('release', release or 'none'), (\n 'live', provenanceLink(backend, org, repo, version, branch,\n commit, local, release, relative)), ('doi', info['doi'])))\n return True\n\n\ndef getModulesData(*args):\n \"\"\"Retrieve all data for a corpus.\n\n Parameters\n ----------\n args: list\n All parameters needed to retrieve all associated data.\n They are the same as are needed to construct an `AppData` object.\n \"\"\"\n mData = AppData(*args)\n mData.getModules()\n if not mData.good or mData.locations is None:\n return None\n return mData.locations, mData.modules\n",
"step-5": "from ..core.helpers import itemize\nfrom ..core.files import backendRep, expandDir, prefixSlash, normpath\nfrom .helpers import splitModRef\nfrom .repo import checkoutRepo\nfrom .links import provenanceLink\n\n\n# GET DATA FOR MAIN SOURCE AND ALL MODULES\n\n\nclass AppData:\n def __init__(\n self, app, backend, moduleRefs, locations, modules, version, checkout, silent\n ):\n \"\"\"Collects TF data according to specifications.\n\n The specifications are passed as arguments when the object is initialized.\n\n Parameters\n ----------\n backend: string\n `github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.\n app: obj\n The high-level API object\n moduleRefs: tuple\n Each member consists of a module ref, which is a tuple of information\n that defines a module.\n locations: string|tuple\n One or more directory paths. They will be combined with the `modules`\n argument and used as locations to search for TF data files.\n modules: string|tuple\n One or more directory path segments. They will be appended to the\n paths given by the `locations` argument to form search locations\n for TF data files.\n version: string\n The version of TF data that should be retrievend. Version is a directory\n level just below the search locations.\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n silent: string, optional tf.core.timestamp.SILENT_D\n See `tf.core.timestamp.Timestamp`\n\n \"\"\"\n self.backend = backend\n self.app = app\n self.moduleRefs = (\n []\n if moduleRefs is None\n else moduleRefs.split(\",\")\n if type(moduleRefs) is str\n else list(moduleRefs)\n )\n self.locationsArg = locations\n self.modulesArg = modules\n self.version = version\n self.checkout = checkout\n self.silent = silent\n\n def getMain(self):\n \"\"\"Get the main data of the corpus.\n\n This is specified by the `org`, `repo` and `relative` settings under\n `provenanceSpec` in `config.yaml`.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n\n app = self.app\n checkout = self.checkout\n aContext = app.context\n org = aContext.org\n repo = aContext.repo\n relative = prefixSlash(aContext.relative)\n appPath = aContext.appPath\n appName = aContext.appName\n\n if appName.startswith(\"app:\"):\n appParent = appPath.rsplit(\"/\", 1)[0]\n relative = f\"{appParent}{relative}\"\n elif org is None or repo is None:\n appPathRep = f\"{appPath}/\" if appPath else \"\"\n relative = f\"{appPathRep}{appName}\"\n self.checkout = \"local\"\n\n if not self.getModule(org, repo, prefixSlash(relative), checkout, isBase=True):\n self.good = False\n\n def getStandard(self):\n \"\"\"Get the data of the standard modules specified by the settings of the corpus.\n\n These are specified in the `moduleSpecs` setting under\n `provenanceSpecs` in `config.yaml`.\n\n They will be loaded *after* the extra modules specified in the **mod**\n parameter, and only in as far they have not been specifief in the\n **mod** parameter. 
In this way you can pass overriding\n checkout specifiers to the standard modules.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n\n app = self.app\n loadData = app.loadData\n\n if not loadData or loadData == \"core\":\n return\n\n aContext = app.context\n moduleSpecs = aContext.moduleSpecs\n seen = self.seen\n checkout = self.checkout\n backend = self.backend\n\n for m in moduleSpecs or []:\n org = m[\"org\"]\n repo = m[\"repo\"]\n relative = m[\"relative\"]\n theCheckout = m.get(\"checkout\", checkout)\n theBackend = m.get(\"backend\", backend)\n bRep = backendRep(theBackend, \"spec\", default=backend)\n\n ref = f\"{bRep}{org}/{repo}{relative}\"\n if ref in seen:\n continue\n\n if not self.getModule(\n org,\n repo,\n relative,\n theCheckout,\n backend=theBackend,\n specs=m,\n ):\n self.good = False\n\n def getRefs(self):\n \"\"\"Get data from additional modules.\n\n These are specified in the `moduleRefs` parameter of `AppData`.\n We store the set of special modules in order to skip them\n later when we are loading the standard modules.\n \"\"\"\n\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(\":\", 1)[0]\n if refPure in self.seen:\n continue\n\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n\n parts[2] = prefixSlash(normpath(parts[2])) # the relative bit\n theBackend = (\n None if parts[-1] is None or parts[-1] == backend else parts[-1]\n )\n\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False\n\n def getModules(self):\n \"\"\"Get data from additional local directories.\n\n These are specified in the `locations` and `modules` parameters of `AppData`.\n \"\"\"\n\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n\n self.locations = None\n self.modules = None\n\n self.good = True\n self.seen = set()\n\n self.getMain()\n self.getRefs()\n self.getStandard()\n\n version = self.version\n good = self.good\n app = self.app\n\n if good:\n app.mLocations = mLocations\n app.provenance = provenance\n else:\n return\n\n mModules = []\n if mLocations:\n mModules.append(version or \"\")\n\n locations = self.locationsArg\n modules = self.modulesArg\n\n givenLocations = (\n []\n if locations is None\n else [expandDir(app, x.strip()) for x in itemize(locations, \"\\n\")]\n if type(locations) is str\n else [str(x) for x in locations]\n )\n givenModules = (\n []\n if modules is None\n else [normpath(x.strip()) for x in itemize(modules, \"\\n\")]\n if type(modules) is str\n else [normpath(str(x)) for x in modules]\n )\n\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules\n\n def getModule(\n self, org, repo, relative, checkout, backend=None, isBase=False, specs=None\n ):\n \"\"\"Prepare to load a single module.\n\n Eventually, all TF data will be downloaded from local directories, bases\n on a list of location paths and module paths.\n\n This function computes the contribution of a single module to both the\n location paths and the module paths.\n\n Parameters\n ----------\n org: string\n GitHub organization or GitLab group of the module\n repo: string:\n GitHub repository or GitLab project of the module\n relative: string\n Path within the repository of the module\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n backend: string\n The backend if different from the backend of the main module\n isBase: boolean, optional False\n 
Whether this module is the main data of the corpus.\n specs: dict, optional False\n Additional informational attributes of the module, e.g. a DOI\n \"\"\"\n\n backend = self.backend if backend is None else backendRep(backend, \"norm\")\n bRep = backendRep(backend, \"spec\", default=self.backend)\n version = self.version\n silent = self.silent\n mLocations = self.mLocations\n provenance = self.provenance\n seen = self.seen\n app = self.app\n _browse = app._browse\n aContext = app.context\n branch = aContext.provenanceSpec[\"branch\"]\n\n relative = prefixSlash(normpath(relative))\n\n moduleRef = f\"{bRep}{org}/{repo}{relative}\"\n if moduleRef in self.seen:\n return True\n\n if org is None or repo is None:\n relativeBare = relative.removeprefix(\"/\")\n repoLocation = relativeBare\n mLocations.append(relativeBare)\n (commit, local, release) = (None, None, None)\n else:\n (commit, release, local, localBase, localDir) = checkoutRepo(\n backend,\n _browse=_browse,\n org=org,\n repo=repo,\n folder=relative,\n version=version,\n checkout=checkout,\n withPaths=False,\n keep=False,\n silent=silent,\n )\n if not localBase:\n return False\n\n repoLocation = f\"{localBase}/{org}/{repo}\"\n mLocations.append(f\"{localBase}/{localDir}\")\n\n seen.add(moduleRef)\n if isBase:\n app.repoLocation = repoLocation\n\n info = {}\n for item in (\n (\"doi\", None),\n (\"corpus\", f\"{org}/{repo}{relative}\"),\n ):\n (key, default) = item\n info[key] = (\n getattr(aContext, key)\n if isBase\n else specs[key]\n if specs and key in specs\n else default\n )\n provenance.append(\n (\n (\"corpus\", info[\"corpus\"]),\n (\"version\", version),\n (\"commit\", commit or \"??\"),\n (\"release\", release or \"none\"),\n (\n \"live\",\n provenanceLink(\n backend, org, repo, version, branch, commit, local, release, relative\n ),\n ),\n (\"doi\", info[\"doi\"]),\n )\n )\n return True\n\n\ndef getModulesData(*args):\n \"\"\"Retrieve all data for a corpus.\n\n Parameters\n ----------\n args: list\n All parameters needed to retrieve all associated data.\n They are the same as are needed to construct an `AppData` object.\n \"\"\"\n\n mData = AppData(*args)\n mData.getModules()\n\n if not mData.good or mData.locations is None:\n return None\n\n return (mData.locations, mData.modules)\n",
"step-ids": [
1,
5,
8,
9,
10
]
}
|
[
1,
5,
8,
9,
10
] |
def to_bitmask(n, bits):
# [2:] to chop off the "0b" part
mask = [int(digit) for digit in bin(n)[2:]]
# pad to fixed length
return [0] * (bits - len(mask)) + mask
def invert_mask(mask):
return [int(not bit) for bit in mask]
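# Quick usage sketch (added for illustration; not part of the original helpers).
# bin(5) is "0b101", so an 8-bit mask is left-padded with five zeros.
example_mask = to_bitmask(5, 8)              # [0, 0, 0, 0, 0, 1, 0, 1]
example_inverse = invert_mask(example_mask)  # [1, 1, 1, 1, 1, 1, 0, 1]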
|
normal
|
{
"blob_id": "98f36b216e718fc4fe42d1717ff9ba82cc24c2ff",
"index": 7752,
"step-1": "<mask token>\n",
"step-2": "def to_bitmask(n, bits):\n mask = [int(digit) for digit in bin(n)[2:]]\n return [0] * (bits - len(mask)) + mask\n\n\n<mask token>\n",
"step-3": "def to_bitmask(n, bits):\n mask = [int(digit) for digit in bin(n)[2:]]\n return [0] * (bits - len(mask)) + mask\n\n\ndef invert_mask(mask):\n return [int(not bit) for bit in mask]\n",
"step-4": "def to_bitmask(n, bits):\n # [2:] to chop off the \"0b\" part\n mask = [int(digit) for digit in bin(n)[2:]] \n # pad to fixed length\n return [0] * (bits - len(mask)) + mask \n \ndef invert_mask(mask):\n return [int(not bit) for bit in mask]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import asyncio
import sys
import aioredis
import msgpack
async def main(host: str, endpoint: str, message: str):
msg = msgpack.packb(
{
"endpoint": endpoint,
"headers": {"Content-Type": "text/json"},
"payload": message.encode("utf-8"),
},
)
redis = await aioredis.create_redis_pool(host)
await redis.rpush("acapy.outbound_transport", msg)
if __name__ == "__main__":
args = sys.argv
if len(args) <= 1:
raise SystemExit("Pass redis host URL as the first parameter")
if len(args) <= 2:
raise SystemExit("Pass endpoint as the second parameter")
if len(args) <= 3:
raise SystemExit("Pass message contents as the third parameter")
asyncio.get_event_loop().run_until_complete(main(args[1], args[2], args[3]))
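# Example invocation (added note; the URL, endpoint and payload below are
# placeholders, not taken from the original project):
#   python <this script> redis://localhost:6379 http://example.com/agent '{"ping": "pong"}'
# The message is msgpack-encoded and RPUSHed onto the "acapy.outbound_transport"
# list, where a separate consumer is expected to pop and deliver it.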
|
normal
|
{
"blob_id": "e94d66732a172286814bc0b0051a52c1374a4de5",
"index": 3168,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nasync def main(host: str, endpoint: str, message: str):\n msg = msgpack.packb({'endpoint': endpoint, 'headers': {'Content-Type':\n 'text/json'}, 'payload': message.encode('utf-8')})\n redis = await aioredis.create_redis_pool(host)\n await redis.rpush('acapy.outbound_transport', msg)\n\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) <= 1:\n raise SystemExit('Pass redis host URL as the first parameter')\n if len(args) <= 2:\n raise SystemExit('Pass endpoint as the second parameter')\n if len(args) <= 3:\n raise SystemExit('Pass message contents as the third parameter')\n asyncio.get_event_loop().run_until_complete(main(args[1], args[2], args[3])\n )\n",
"step-3": "import asyncio\nimport sys\nimport aioredis\nimport msgpack\n\n\nasync def main(host: str, endpoint: str, message: str):\n msg = msgpack.packb({'endpoint': endpoint, 'headers': {'Content-Type':\n 'text/json'}, 'payload': message.encode('utf-8')})\n redis = await aioredis.create_redis_pool(host)\n await redis.rpush('acapy.outbound_transport', msg)\n\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) <= 1:\n raise SystemExit('Pass redis host URL as the first parameter')\n if len(args) <= 2:\n raise SystemExit('Pass endpoint as the second parameter')\n if len(args) <= 3:\n raise SystemExit('Pass message contents as the third parameter')\n asyncio.get_event_loop().run_until_complete(main(args[1], args[2], args[3])\n )\n",
"step-4": "import asyncio\nimport sys\n\nimport aioredis\nimport msgpack\n\n\nasync def main(host: str, endpoint: str, message: str):\n msg = msgpack.packb(\n {\n \"endpoint\": endpoint,\n \"headers\": {\"Content-Type\": \"text/json\"},\n \"payload\": message.encode(\"utf-8\"),\n },\n )\n redis = await aioredis.create_redis_pool(host)\n await redis.rpush(\"acapy.outbound_transport\", msg)\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n if len(args) <= 1:\n raise SystemExit(\"Pass redis host URL as the first parameter\")\n if len(args) <= 2:\n raise SystemExit(\"Pass endpoint as the second parameter\")\n if len(args) <= 3:\n raise SystemExit(\"Pass message contents as the third parameter\")\n asyncio.get_event_loop().run_until_complete(main(args[1], args[2], args[3]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""2520 is the smallest number that can be divided by each of the
numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all
of the numbers from 1 to 20?
"""
from fractions import gcd
def smallest_divisible(nmax=20):
smallest = 1
for i in range(1, nmax+1):
if smallest % i:
smallest *= i/gcd(i, smallest)
return smallest
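# Worked example (added for illustration): the loop keeps a running least common
# multiple, multiplying in only the factor of i that is not already present, so
# smallest_divisible(10) == 2520 and smallest_divisible(20) == 232792560.
# Portability note: `fractions.gcd` was removed in Python 3.9 (use `math.gcd`),
# and under Python 3 the `/` above yields a float; `i // gcd(i, smallest)`
# keeps the result an integer.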
|
normal
|
{
"blob_id": "1cc696410a5d2eaf294d032c04a96974d5ef5db0",
"index": 2831,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef smallest_divisible(nmax=20):\n smallest = 1\n for i in range(1, nmax + 1):\n if smallest % i:\n smallest *= i / gcd(i, smallest)\n return smallest\n",
"step-3": "<mask token>\nfrom fractions import gcd\n\n\ndef smallest_divisible(nmax=20):\n smallest = 1\n for i in range(1, nmax + 1):\n if smallest % i:\n smallest *= i / gcd(i, smallest)\n return smallest\n",
"step-4": "\"\"\"2520 is the smallest number that can be divided by each of the\nnumbers from 1 to 10 without any remainder.\n\nWhat is the smallest positive number that is evenly divisible by all\nof the numbers from 1 to 20?\n\"\"\"\nfrom fractions import gcd\n\ndef smallest_divisible(nmax=20):\n smallest = 1\n for i in range(1, nmax+1):\n if smallest % i:\n smallest *= i/gcd(i, smallest)\n\n return smallest\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import sys
import typer
from cupcake import version_callback
from cupcake.sequence import GFF
app = typer.Typer(
name="cupcake.sequence.get_gffs_from_list",
help="Get records from a GFF file from a list",
)
def get_gff_from_list(gff_filename, listfile, partial_ok=False):
seqs = [line.strip() for line in open(listfile)]
for r in GFF.collapseGFFReader(gff_filename):
if (
r.seqid in seqs
or r.seqid.split("|")[0] in seqs
or (partial_ok and any(r.seqid.startswith(x) for x in seqs))
):
GFF.write_collapseGFF_format(sys.stdout, r)
@app.command(name="")
def main(
gff_filename: str = typer.Argument(
..., help="Input gff filename to extract sequences from"
),
list_filename: str = typer.Argument(..., help="List of sequence IDs to extract"),
partial: bool = typer.Option(
False,
help="OK if seq IDs only match the beginning",
),
version: bool = typer.Option(
None,
"--version",
callback=version_callback,
is_eager=True,
help="Prints the version of the SQANTI3 package.",
),
) -> None:
get_gff_from_list(gff_filename, list_filename, partial)
if __name__ == "__main__":
typer.run(main)
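# Example command line (added note; file names are placeholders):
#   python get_gffs_from_list.py collapsed.gff wanted_ids.txt --partial
# Every collapsed GFF record whose seqid matches an entry in wanted_ids.txt
# (exactly, before the first "|", or, with --partial, when a listed ID is a
# prefix of the seqid) is written to stdout.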
|
normal
|
{
"blob_id": "166520ab5b9fd5a55dd2aa30b4d62f55096ce6cb",
"index": 2105,
"step-1": "<mask token>\n\n\ndef get_gff_from_list(gff_filename, listfile, partial_ok=False):\n seqs = [line.strip() for line in open(listfile)]\n for r in GFF.collapseGFFReader(gff_filename):\n if r.seqid in seqs or r.seqid.split('|')[0\n ] in seqs or partial_ok and any(r.seqid.startswith(x) for x in seqs\n ):\n GFF.write_collapseGFF_format(sys.stdout, r)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_gff_from_list(gff_filename, listfile, partial_ok=False):\n seqs = [line.strip() for line in open(listfile)]\n for r in GFF.collapseGFFReader(gff_filename):\n if r.seqid in seqs or r.seqid.split('|')[0\n ] in seqs or partial_ok and any(r.seqid.startswith(x) for x in seqs\n ):\n GFF.write_collapseGFF_format(sys.stdout, r)\n\n\[email protected](name='')\ndef main(gff_filename: str=typer.Argument(..., help=\n 'Input gff filename to extract sequences from'), list_filename: str=\n typer.Argument(..., help='List of sequence IDs to extract'), partial:\n bool=typer.Option(False, help='OK if seq IDs only match the beginning'),\n version: bool=typer.Option(None, '--version', callback=version_callback,\n is_eager=True, help='Prints the version of the SQANTI3 package.')) ->None:\n get_gff_from_list(gff_filename, list_filename, partial)\n\n\nif __name__ == '__main__':\n typer.run(main)\n",
"step-3": "<mask token>\napp = typer.Typer(name='cupcake.sequence.get_gffs_from_list', help=\n 'Get records from a GFF file from a list')\n\n\ndef get_gff_from_list(gff_filename, listfile, partial_ok=False):\n seqs = [line.strip() for line in open(listfile)]\n for r in GFF.collapseGFFReader(gff_filename):\n if r.seqid in seqs or r.seqid.split('|')[0\n ] in seqs or partial_ok and any(r.seqid.startswith(x) for x in seqs\n ):\n GFF.write_collapseGFF_format(sys.stdout, r)\n\n\[email protected](name='')\ndef main(gff_filename: str=typer.Argument(..., help=\n 'Input gff filename to extract sequences from'), list_filename: str=\n typer.Argument(..., help='List of sequence IDs to extract'), partial:\n bool=typer.Option(False, help='OK if seq IDs only match the beginning'),\n version: bool=typer.Option(None, '--version', callback=version_callback,\n is_eager=True, help='Prints the version of the SQANTI3 package.')) ->None:\n get_gff_from_list(gff_filename, list_filename, partial)\n\n\nif __name__ == '__main__':\n typer.run(main)\n",
"step-4": "import sys\nimport typer\nfrom cupcake import version_callback\nfrom cupcake.sequence import GFF\napp = typer.Typer(name='cupcake.sequence.get_gffs_from_list', help=\n 'Get records from a GFF file from a list')\n\n\ndef get_gff_from_list(gff_filename, listfile, partial_ok=False):\n seqs = [line.strip() for line in open(listfile)]\n for r in GFF.collapseGFFReader(gff_filename):\n if r.seqid in seqs or r.seqid.split('|')[0\n ] in seqs or partial_ok and any(r.seqid.startswith(x) for x in seqs\n ):\n GFF.write_collapseGFF_format(sys.stdout, r)\n\n\[email protected](name='')\ndef main(gff_filename: str=typer.Argument(..., help=\n 'Input gff filename to extract sequences from'), list_filename: str=\n typer.Argument(..., help='List of sequence IDs to extract'), partial:\n bool=typer.Option(False, help='OK if seq IDs only match the beginning'),\n version: bool=typer.Option(None, '--version', callback=version_callback,\n is_eager=True, help='Prints the version of the SQANTI3 package.')) ->None:\n get_gff_from_list(gff_filename, list_filename, partial)\n\n\nif __name__ == '__main__':\n typer.run(main)\n",
"step-5": "#!/usr/bin/env python\nimport sys\n\nimport typer\n\nfrom cupcake import version_callback\nfrom cupcake.sequence import GFF\n\napp = typer.Typer(\n name=\"cupcake.sequence.get_gffs_from_list\",\n help=\"Get records from a GFF file from a list\",\n)\n\n\ndef get_gff_from_list(gff_filename, listfile, partial_ok=False):\n seqs = [line.strip() for line in open(listfile)]\n for r in GFF.collapseGFFReader(gff_filename):\n if (\n r.seqid in seqs\n or r.seqid.split(\"|\")[0] in seqs\n or (partial_ok and any(r.seqid.startswith(x) for x in seqs))\n ):\n GFF.write_collapseGFF_format(sys.stdout, r)\n\n\[email protected](name=\"\")\ndef main(\n gff_filename: str = typer.Argument(\n ..., help=\"Input gff filename to extract sequences from\"\n ),\n list_filename: str = typer.Argument(..., help=\"List of sequence IDs to extract\"),\n partial: bool = typer.Option(\n False,\n help=\"OK if seq IDs only match the beginning\",\n ),\n version: bool = typer.Option(\n None,\n \"--version\",\n callback=version_callback,\n is_eager=True,\n help=\"Prints the version of the SQANTI3 package.\",\n ),\n) -> None:\n\n get_gff_from_list(gff_filename, list_filename, partial)\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class VirtualChassis(Base):
"""Virtual Chassis is used to get and to manage a Virtual Chassis topology and get the list of discovered appliances
The VirtualChassis class encapsulates a required virtualChassis resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'virtualChassis'
def __init__(self, parent):
super(VirtualChassis, self).__init__(parent)
@property
def DiscoveredAppliance(self):
"""An instance of the DiscoveredAppliance class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance
return DiscoveredAppliance(self)
@property
def Hypervisor(self):
"""An instance of the Hypervisor class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor
return Hypervisor(self)
@property
def IxVmCard(self):
"""An instance of the IxVmCard class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard
return IxVmCard(self)
@property
def EnableLicenseCheck(self):
"""Enables license check on port connect
Returns:
bool
"""
return self._get_attribute('enableLicenseCheck')
@EnableLicenseCheck.setter
def EnableLicenseCheck(self, value):
self._set_attribute('enableLicenseCheck', value)
@property
def Hostname(self):
"""Virtual Chassis hostname or IP
Returns:
str
"""
return self._get_attribute('hostname')
@property
def LicenseServer(self):
"""The address of the license server
Returns:
str
"""
return self._get_attribute('licenseServer')
@LicenseServer.setter
def LicenseServer(self, value):
self._set_attribute('licenseServer', value)
@property
def NtpServer(self):
"""The address of the NTP server
Returns:
str
"""
return self._get_attribute('ntpServer')
@NtpServer.setter
def NtpServer(self, value):
self._set_attribute('ntpServer', value)
@property
def StartTxDelay(self):
"""The delay amount for transmit
Returns:
str
"""
return self._get_attribute('startTxDelay')
@StartTxDelay.setter
def StartTxDelay(self, value):
self._set_attribute('startTxDelay', value)
def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer=None, StartTxDelay=None):
"""Updates a child instance of virtualChassis on the server.
Args:
EnableLicenseCheck (bool): Enables license check on port connect
LicenseServer (str): The address of the license server
NtpServer (str): The address of the NTP server
StartTxDelay (str): The delay amount for transmit
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
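
# Illustrative usage sketch (not part of the original module). It assumes `ixnetwork` is an
# already-connected ixnetwork_restpy root object and that it exposes this resource as
# AvailableHardware.VirtualChassis (the accessor path is an assumption inferred from the
# module paths above); the addresses are placeholders.
#
#     vc = ixnetwork.AvailableHardware.VirtualChassis
#     vc.LicenseServer = '10.0.0.5'        # LicenseServer setter defined above
#     vc.NtpServer = 'pool.ntp.org'        # NtpServer setter defined above
#     vc.update(EnableLicenseCheck=True, StartTxDelay='5')
#     for card in vc.IxVmCard.find():      # assuming the usual restpy find() helper
#         print(card)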
|
normal
|
{
"blob_id": "4b78c99dd6156afe960effcacb25804446310f7c",
"index": 9708,
"step-1": "<mask token>\n\n\nclass VirtualChassis(Base):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, parent):\n super(VirtualChassis, self).__init__(parent)\n <mask token>\n\n @property\n def Hypervisor(self):\n \"\"\"An instance of the Hypervisor class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)\n <mask token>\n <mask token>\n\n @EnableLicenseCheck.setter\n def EnableLicenseCheck(self, value):\n self._set_attribute('enableLicenseCheck', value)\n\n @property\n def Hostname(self):\n \"\"\"Virtual Chassis hostname or IP\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('hostname')\n\n @property\n def LicenseServer(self):\n \"\"\"The address of the license server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('licenseServer')\n\n @LicenseServer.setter\n def LicenseServer(self, value):\n self._set_attribute('licenseServer', value)\n\n @property\n def NtpServer(self):\n \"\"\"The address of the NTP server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('ntpServer')\n\n @NtpServer.setter\n def NtpServer(self, value):\n self._set_attribute('ntpServer', value)\n\n @property\n def StartTxDelay(self):\n \"\"\"The delay amount for transmit\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('startTxDelay')\n\n @StartTxDelay.setter\n def StartTxDelay(self, value):\n self._set_attribute('startTxDelay', value)\n\n def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer\n =None, StartTxDelay=None):\n \"\"\"Updates a child instance of virtualChassis on the server.\n\n Args:\n EnableLicenseCheck (bool): Enables license check on port connect\n LicenseServer (str): The address of the license server\n NtpServer (str): The address of the NTP server\n StartTxDelay (str): The delay amount for transmit\n\n Raises:\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._update(locals())\n",
"step-2": "<mask token>\n\n\nclass VirtualChassis(Base):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, parent):\n super(VirtualChassis, self).__init__(parent)\n\n @property\n def DiscoveredAppliance(self):\n \"\"\"An instance of the DiscoveredAppliance class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance\n return DiscoveredAppliance(self)\n\n @property\n def Hypervisor(self):\n \"\"\"An instance of the Hypervisor class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)\n <mask token>\n <mask token>\n\n @EnableLicenseCheck.setter\n def EnableLicenseCheck(self, value):\n self._set_attribute('enableLicenseCheck', value)\n\n @property\n def Hostname(self):\n \"\"\"Virtual Chassis hostname or IP\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('hostname')\n\n @property\n def LicenseServer(self):\n \"\"\"The address of the license server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('licenseServer')\n\n @LicenseServer.setter\n def LicenseServer(self, value):\n self._set_attribute('licenseServer', value)\n\n @property\n def NtpServer(self):\n \"\"\"The address of the NTP server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('ntpServer')\n\n @NtpServer.setter\n def NtpServer(self, value):\n self._set_attribute('ntpServer', value)\n\n @property\n def StartTxDelay(self):\n \"\"\"The delay amount for transmit\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('startTxDelay')\n\n @StartTxDelay.setter\n def StartTxDelay(self, value):\n self._set_attribute('startTxDelay', value)\n\n def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer\n =None, StartTxDelay=None):\n \"\"\"Updates a child instance of virtualChassis on the server.\n\n Args:\n EnableLicenseCheck (bool): Enables license check on port connect\n LicenseServer (str): The address of the license server\n NtpServer (str): The address of the NTP server\n StartTxDelay (str): The delay amount for transmit\n\n Raises:\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._update(locals())\n",
"step-3": "<mask token>\n\n\nclass VirtualChassis(Base):\n <mask token>\n __slots__ = ()\n _SDM_NAME = 'virtualChassis'\n\n def __init__(self, parent):\n super(VirtualChassis, self).__init__(parent)\n\n @property\n def DiscoveredAppliance(self):\n \"\"\"An instance of the DiscoveredAppliance class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance\n return DiscoveredAppliance(self)\n\n @property\n def Hypervisor(self):\n \"\"\"An instance of the Hypervisor class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)\n\n @property\n def IxVmCard(self):\n \"\"\"An instance of the IxVmCard class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard\n return IxVmCard(self)\n\n @property\n def EnableLicenseCheck(self):\n \"\"\"Enables license check on port connect\n\n Returns:\n bool\n \"\"\"\n return self._get_attribute('enableLicenseCheck')\n\n @EnableLicenseCheck.setter\n def EnableLicenseCheck(self, value):\n self._set_attribute('enableLicenseCheck', value)\n\n @property\n def Hostname(self):\n \"\"\"Virtual Chassis hostname or IP\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('hostname')\n\n @property\n def LicenseServer(self):\n \"\"\"The address of the license server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('licenseServer')\n\n @LicenseServer.setter\n def LicenseServer(self, value):\n self._set_attribute('licenseServer', value)\n\n @property\n def NtpServer(self):\n \"\"\"The address of the NTP server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('ntpServer')\n\n @NtpServer.setter\n def NtpServer(self, value):\n self._set_attribute('ntpServer', value)\n\n @property\n def StartTxDelay(self):\n \"\"\"The delay amount for transmit\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('startTxDelay')\n\n @StartTxDelay.setter\n def StartTxDelay(self, value):\n self._set_attribute('startTxDelay', value)\n\n def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer\n =None, StartTxDelay=None):\n \"\"\"Updates a child instance of virtualChassis on the server.\n\n Args:\n EnableLicenseCheck (bool): Enables license check on port connect\n LicenseServer (str): The address of the license server\n NtpServer (str): The address of the NTP server\n StartTxDelay (str): The delay amount for transmit\n\n Raises:\n ServerError: The server has encountered an uncategorized error 
condition\n \"\"\"\n self._update(locals())\n",
"step-4": "<mask token>\n\n\nclass VirtualChassis(Base):\n \"\"\"Virtual Chassis is used to get and to manage a Virtual Chassis topology and get the list of discovered appliances\n The VirtualChassis class encapsulates a required virtualChassis resource which will be retrieved from the server every time the property is accessed.\n \"\"\"\n __slots__ = ()\n _SDM_NAME = 'virtualChassis'\n\n def __init__(self, parent):\n super(VirtualChassis, self).__init__(parent)\n\n @property\n def DiscoveredAppliance(self):\n \"\"\"An instance of the DiscoveredAppliance class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance\n return DiscoveredAppliance(self)\n\n @property\n def Hypervisor(self):\n \"\"\"An instance of the Hypervisor class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)\n\n @property\n def IxVmCard(self):\n \"\"\"An instance of the IxVmCard class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard\n return IxVmCard(self)\n\n @property\n def EnableLicenseCheck(self):\n \"\"\"Enables license check on port connect\n\n Returns:\n bool\n \"\"\"\n return self._get_attribute('enableLicenseCheck')\n\n @EnableLicenseCheck.setter\n def EnableLicenseCheck(self, value):\n self._set_attribute('enableLicenseCheck', value)\n\n @property\n def Hostname(self):\n \"\"\"Virtual Chassis hostname or IP\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('hostname')\n\n @property\n def LicenseServer(self):\n \"\"\"The address of the license server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('licenseServer')\n\n @LicenseServer.setter\n def LicenseServer(self, value):\n self._set_attribute('licenseServer', value)\n\n @property\n def NtpServer(self):\n \"\"\"The address of the NTP server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('ntpServer')\n\n @NtpServer.setter\n def NtpServer(self, value):\n self._set_attribute('ntpServer', value)\n\n @property\n def StartTxDelay(self):\n \"\"\"The delay amount for transmit\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('startTxDelay')\n\n @StartTxDelay.setter\n def StartTxDelay(self, value):\n self._set_attribute('startTxDelay', value)\n\n def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer\n =None, StartTxDelay=None):\n \"\"\"Updates a child instance of virtualChassis on the server.\n\n Args:\n EnableLicenseCheck (bool): Enables 
license check on port connect\n LicenseServer (str): The address of the license server\n NtpServer (str): The address of the NTP server\n StartTxDelay (str): The delay amount for transmit\n\n Raises:\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._update(locals())\n",
"step-5": "# MIT LICENSE\n#\n# Copyright 1997 - 2019 by IXIA Keysight\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE. \nfrom ixnetwork_restpy.base import Base\nfrom ixnetwork_restpy.files import Files\n\n\nclass VirtualChassis(Base):\n \"\"\"Virtual Chassis is used to get and to manage a Virtual Chassis topology and get the list of discovered appliances\n The VirtualChassis class encapsulates a required virtualChassis resource which will be retrieved from the server every time the property is accessed.\n \"\"\"\n\n __slots__ = ()\n _SDM_NAME = 'virtualChassis'\n\n def __init__(self, parent):\n super(VirtualChassis, self).__init__(parent)\n\n @property\n def DiscoveredAppliance(self):\n \"\"\"An instance of the DiscoveredAppliance class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance\n return DiscoveredAppliance(self)\n\n @property\n def Hypervisor(self):\n \"\"\"An instance of the Hypervisor class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)\n\n @property\n def IxVmCard(self):\n \"\"\"An instance of the IxVmCard class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard\n return IxVmCard(self)\n\n @property\n def EnableLicenseCheck(self):\n \"\"\"Enables license check on port connect\n\n Returns:\n bool\n \"\"\"\n return self._get_attribute('enableLicenseCheck')\n 
@EnableLicenseCheck.setter\n def EnableLicenseCheck(self, value):\n self._set_attribute('enableLicenseCheck', value)\n\n @property\n def Hostname(self):\n \"\"\"Virtual Chassis hostname or IP\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('hostname')\n\n @property\n def LicenseServer(self):\n \"\"\"The address of the license server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('licenseServer')\n @LicenseServer.setter\n def LicenseServer(self, value):\n self._set_attribute('licenseServer', value)\n\n @property\n def NtpServer(self):\n \"\"\"The address of the NTP server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('ntpServer')\n @NtpServer.setter\n def NtpServer(self, value):\n self._set_attribute('ntpServer', value)\n\n @property\n def StartTxDelay(self):\n \"\"\"The delay amount for transmit\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('startTxDelay')\n @StartTxDelay.setter\n def StartTxDelay(self, value):\n self._set_attribute('startTxDelay', value)\n\n def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer=None, StartTxDelay=None):\n \"\"\"Updates a child instance of virtualChassis on the server.\n\n Args:\n EnableLicenseCheck (bool): Enables license check on port connect\n LicenseServer (str): The address of the license server\n NtpServer (str): The address of the NTP server\n StartTxDelay (str): The delay amount for transmit\n\n Raises:\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._update(locals())\n",
"step-ids": [
12,
13,
16,
17,
19
]
}
|
[
12,
13,
16,
17,
19
] |
import sqlite3
def to_string(pessoa):
for linha in pessoa:
print('id: {}\nNome: {}'.format(linha[0], linha[1]))
if __name__ == '__main__':
con = sqlite3.connect('lab05-ex01.sqlite')
cursor = con.cursor()
cursor.execute("SELECT * FROM Pessoa")
print(cursor.fetchall())
nome = input("Nome da pessoa: ")
clausula = (nome,)
cursor.execute("SELECT * FROM Pessoa WHERE nome = ?", clausula)
pessoa = cursor.fetchall()
to_string(pessoa)
cursor.close()
con.close()
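
# Illustrative note (not part of the original script): the code above assumes an existing
# lab05-ex01.sqlite database with a Pessoa table. A minimal setup that would make the
# script runnable might look like the following; the id/nome columns are assumptions
# inferred from how to_string() indexes each row.
#
#     con = sqlite3.connect('lab05-ex01.sqlite')
#     con.execute('CREATE TABLE IF NOT EXISTS Pessoa (id INTEGER PRIMARY KEY, nome TEXT)')
#     con.execute("INSERT INTO Pessoa (nome) VALUES ('Maria')")
#     con.commit()
#     con.close()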
|
normal
|
{
"blob_id": "4246773a8da61ff21d5faa8ab8ad2d7e75fafb60",
"index": 3058,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\n\nif __name__ == '__main__':\n con = sqlite3.connect('lab05-ex01.sqlite')\n cursor = con.cursor()\n cursor.execute('SELECT * FROM Pessoa')\n print(cursor.fetchall())\n nome = input('Nome da pessoa: ')\n clausula = nome,\n cursor.execute('SELECT * FROM Pessoa WHERE nome = ?', clausula)\n pessoa = cursor.fetchall()\n to_string(pessoa)\n cursor.close()\n con.close()\n",
"step-4": "import sqlite3\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\n\nif __name__ == '__main__':\n con = sqlite3.connect('lab05-ex01.sqlite')\n cursor = con.cursor()\n cursor.execute('SELECT * FROM Pessoa')\n print(cursor.fetchall())\n nome = input('Nome da pessoa: ')\n clausula = nome,\n cursor.execute('SELECT * FROM Pessoa WHERE nome = ?', clausula)\n pessoa = cursor.fetchall()\n to_string(pessoa)\n cursor.close()\n con.close()\n",
"step-5": "import sqlite3\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\nif __name__ == '__main__':\n\n con = sqlite3.connect('lab05-ex01.sqlite')\n\n cursor = con.cursor()\n\n cursor.execute(\"SELECT * FROM Pessoa\")\n print(cursor.fetchall())\n\n nome = input(\"Nome da pessoa: \")\n clausula = (nome,)\n\n cursor.execute(\"SELECT * FROM Pessoa WHERE nome = ?\", clausula)\n pessoa = cursor.fetchall()\n to_string(pessoa)\n\n\n cursor.close()\n con.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#coding=utf-8
from django.contrib import admin
from models import *
# Add an extra admin action method
def make_published(modeladmin, request, queryset):
queryset.update(state=1)
class OrderInfoAdmin(admin.ModelAdmin):
list_display = ('ordernum', 'total', 'state')
search_fields = ('total', )
list_filter = ('bpub_date',)
actions = [make_published]
class address_infoAdmin(admin.ModelAdmin):
exclude = ('isDelete',)
# 2017/1/05: register models with the admin site
admin.site.register(cart)
admin.site.register(address_info,address_infoAdmin)
admin.site.register(OrderInfo,OrderInfoAdmin)
admin.site.register(OrderDetailInfo)
admin.site.register(GoodsInfo)
|
normal
|
{
"blob_id": "74a0282495bf4bbd34b397e0922074659a66d6ff",
"index": 4809,
"step-1": "<mask token>\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\nadmin.site.register(cart)\nadmin.site.register(address_info, address_infoAdmin)\nadmin.site.register(OrderInfo, OrderInfoAdmin)\nadmin.site.register(OrderDetailInfo)\nadmin.site.register(GoodsInfo)\n",
"step-4": "from django.contrib import admin\nfrom models import *\n\n\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\nadmin.site.register(cart)\nadmin.site.register(address_info, address_infoAdmin)\nadmin.site.register(OrderInfo, OrderInfoAdmin)\nadmin.site.register(OrderDetailInfo)\nadmin.site.register(GoodsInfo)\n",
"step-5": "#coding=utf-8\nfrom django.contrib import admin\nfrom models import *\n\n#增加额外的方法\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = ('ordernum', 'total', 'state')\n search_fields = ('total', )\n list_filter = ('bpub_date',)\n actions = [make_published]\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = ('isDelete',)\n\n\n#2017/1/05注册admin站点\nadmin.site.register(cart)\nadmin.site.register(address_info,address_infoAdmin)\nadmin.site.register(OrderInfo,OrderInfoAdmin)\nadmin.site.register(OrderDetailInfo)\nadmin.site.register(GoodsInfo)\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import plotly.figure_factory as ff
import pandas as pd
import csv
df=pd.read_csv("phone.csv")
fig=ff.create_distplot([df["Avg Rating"].tolist()],["Samsung"],show_hist=False)
fig.show()
|
normal
|
{
"blob_id": "5ae4f489da7b4f0913c9b16c86cc60537cc51234",
"index": 9858,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfig.show()\n",
"step-3": "<mask token>\ndf = pd.read_csv('phone.csv')\nfig = ff.create_distplot([df['Avg Rating'].tolist()], ['Samsung'],\n show_hist=False)\nfig.show()\n",
"step-4": "import plotly.figure_factory as ff\nimport pandas as pd\nimport csv\ndf = pd.read_csv('phone.csv')\nfig = ff.create_distplot([df['Avg Rating'].tolist()], ['Samsung'],\n show_hist=False)\nfig.show()\n",
"step-5": "import plotly.figure_factory as ff\r\nimport pandas as pd\r\nimport csv\r\n\r\ndf=pd.read_csv(\"phone.csv\")\r\nfig=ff.create_distplot([df[\"Avg Rating\"].tolist()],[\"Samsung\"],show_hist=False)\r\nfig.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def digitSum(x):
if x < 10:
return x
return x % 10 + digitSum(x // 10)
def solve(S, n):
Discriminante = S * S + 4 * n
r = int(Discriminante ** 0.5)
if r * r == Discriminante:
if r % 2 == S % 2:
return (r - S) // 2
else:
return -1
else:
return -1
n = int(input())
ans = -1
for S in range(1, 163):
x = solve(S, n)
if x > 0 and digitSum(x) == S:
if ans == -1:
ans = x
else:
ans = min(ans, x)
print(ans)
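
# Illustrative explanation added for clarity (not part of the original submission):
# solve(S, n) fixes a candidate digit sum S and solves the quadratic
#     x**2 + S*x - n = 0   =>   x = (-S + sqrt(S*S + 4*n)) / 2,
# accepting x only when the discriminant is a perfect square and (r - S) is even,
# i.e. when x is a positive integer. The outer loop tries every feasible digit sum
# (1..162, since a number of up to 18 digits has digit sum at most 9 * 18 = 162)
# and keeps the smallest x whose actual digit sum equals S.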
|
normal
|
{
"blob_id": "f89800e0d8d4026c167381f275ca86c2cf7f011e",
"index": 4066,
"step-1": "<mask token>\n\n\ndef solve(S, n):\n Discriminante = S * S + 4 * n\n r = int(Discriminante ** 0.5)\n if r * r == Discriminante:\n if r % 2 == S % 2:\n return (r - S) // 2\n else:\n return -1\n else:\n return -1\n\n\n<mask token>\n",
"step-2": "def digitSum(x):\n if x < 10:\n return x\n return x % 10 + digitSum(x // 10)\n\n\ndef solve(S, n):\n Discriminante = S * S + 4 * n\n r = int(Discriminante ** 0.5)\n if r * r == Discriminante:\n if r % 2 == S % 2:\n return (r - S) // 2\n else:\n return -1\n else:\n return -1\n\n\n<mask token>\n",
"step-3": "def digitSum(x):\n if x < 10:\n return x\n return x % 10 + digitSum(x // 10)\n\n\ndef solve(S, n):\n Discriminante = S * S + 4 * n\n r = int(Discriminante ** 0.5)\n if r * r == Discriminante:\n if r % 2 == S % 2:\n return (r - S) // 2\n else:\n return -1\n else:\n return -1\n\n\n<mask token>\nfor S in range(1, 163):\n x = solve(S, n)\n if x > 0 and digitSum(x) == S:\n if ans == -1:\n ans = x\n else:\n ans = min(ans, x)\nprint(ans)\n",
"step-4": "def digitSum(x):\n if x < 10:\n return x\n return x % 10 + digitSum(x // 10)\n\n\ndef solve(S, n):\n Discriminante = S * S + 4 * n\n r = int(Discriminante ** 0.5)\n if r * r == Discriminante:\n if r % 2 == S % 2:\n return (r - S) // 2\n else:\n return -1\n else:\n return -1\n\n\nn = int(input())\nans = -1\nfor S in range(1, 163):\n x = solve(S, n)\n if x > 0 and digitSum(x) == S:\n if ans == -1:\n ans = x\n else:\n ans = min(ans, x)\nprint(ans)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2016 Matt Menzenski
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import unicode_literals
def fuzzy_match_simple(pattern, instring):
"""Return True if each character in pattern is found in order in instring.
:param pattern: the pattern to be matched
:type pattern: ``str``
:param instring: the containing string to search against
:type instring: ``str``
:return: True if there is a match, False otherwise
:rtype: ``bool``
"""
p_idx, s_idx, p_len, s_len = 0, 0, len(pattern), len(instring)
while (p_idx != p_len) and (s_idx != s_len):
if pattern[p_idx].lower() == instring[s_idx].lower():
p_idx += 1
s_idx += 1
return p_len != 0 and s_len != 0 and p_idx == p_len
def fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=10,
lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):
"""Return match boolean and match score.
:param pattern: the pattern to be matched
:type pattern: ``str``
:param instring: the containing string to search against
:type instring: ``str``
:param int adj_bonus: bonus for adjacent matches
:param int sep_bonus: bonus if match occurs after a separator
:param int camel_bonus: bonus if match is uppercase
:param int lead_penalty: penalty applied for each letter before 1st match
:param int max_lead_penalty: maximum total ``lead_penalty``
:param int unmatched_penalty: penalty for each unmatched letter
:return: 2-tuple with match truthiness at idx 0 and score at idx 1
:rtype: ``tuple``
"""
score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)
prev_match, prev_lower = False, False
prev_sep = True # so that matching first letter gets sep_bonus
best_letter, best_lower, best_letter_idx = None, None, None
best_letter_score = 0
matched_indices = []
while s_idx != s_len:
p_char = pattern[p_idx] if (p_idx != p_len) else None
s_char = instring[s_idx]
p_lower = p_char.lower() if p_char else None
s_lower, s_upper = s_char.lower(), s_char.upper()
next_match = p_char and p_lower == s_lower
rematch = best_letter and best_lower == s_lower
advanced = next_match and best_letter
p_repeat = best_letter and p_char and best_lower == p_lower
if advanced or p_repeat:
score += best_letter_score
matched_indices.append(best_letter_idx)
best_letter, best_lower, best_letter_idx = None, None, None
best_letter_score = 0
if next_match or rematch:
new_score = 0
# apply penalty for each letter before the first match
# using max because penalties are negative (so max = smallest)
if p_idx == 0:
score += max(s_idx * lead_penalty, max_lead_penalty)
# apply bonus for consecutive matches
if prev_match:
new_score += adj_bonus
# apply bonus for matches after a separator
if prev_sep:
new_score += sep_bonus
# apply bonus across camelCase boundaries
if prev_lower and s_char == s_upper and s_lower != s_upper:
new_score += camel_bonus
# update pattern index iff the next pattern letter was matched
if next_match:
p_idx += 1
# update best letter match (may be next or rematch)
if new_score >= best_letter_score:
# apply penalty for now-skipped letter
if best_letter is not None:
score += unmatched_penalty
best_letter = s_char
best_lower = best_letter.lower()
best_letter_idx = s_idx
best_letter_score = new_score
prev_match = True
else:
score += unmatched_penalty
prev_match = False
prev_lower = s_char == s_lower and s_lower != s_upper
prev_sep = s_char in '_ '
s_idx += 1
if best_letter:
score += best_letter_score
matched_indices.append(best_letter_idx)
return p_idx == p_len, score
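

# Illustrative usage (not part of the original module) exercising both entry points:
if __name__ == '__main__':
    # ordered-subsequence check only
    print(fuzzy_match_simple('ftw', 'fuzzy_match_test_word'))   # True
    print(fuzzy_match_simple('xyz', 'fuzzy_match_test_word'))   # False
    # scored variant: returns (matched, score); separator and camelCase bonuses apply
    print(fuzzy_match('fmt', 'fuzzy_match_test'))
    print(fuzzy_match('gff', 'GetFuzzyFinder'))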
|
normal
|
{
"blob_id": "576bb15ad081cd368265c98875be5d032cdafd22",
"index": 4789,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=\n 10, lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):\n \"\"\"Return match boolean and match score.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n :param int adj_bonus: bonus for adjacent matches\n :param int sep_bonus: bonus if match occurs after a separator\n :param int camel_bonus: bonus if match is uppercase\n :param int lead_penalty: penalty applied for each letter before 1st match\n :param int max_lead_penalty: maximum total ``lead_penalty``\n :param int unmatched_penalty: penalty for each unmatched letter\n\n :return: 2-tuple with match truthiness at idx 0 and score at idx 1\n :rtype: ``tuple``\n \"\"\"\n score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)\n prev_match, prev_lower = False, False\n prev_sep = True\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n matched_indices = []\n while s_idx != s_len:\n p_char = pattern[p_idx] if p_idx != p_len else None\n s_char = instring[s_idx]\n p_lower = p_char.lower() if p_char else None\n s_lower, s_upper = s_char.lower(), s_char.upper()\n next_match = p_char and p_lower == s_lower\n rematch = best_letter and best_lower == s_lower\n advanced = next_match and best_letter\n p_repeat = best_letter and p_char and best_lower == p_lower\n if advanced or p_repeat:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n if next_match or rematch:\n new_score = 0\n if p_idx == 0:\n score += max(s_idx * lead_penalty, max_lead_penalty)\n if prev_match:\n new_score += adj_bonus\n if prev_sep:\n new_score += sep_bonus\n if prev_lower and s_char == s_upper and s_lower != s_upper:\n new_score += camel_bonus\n if next_match:\n p_idx += 1\n if new_score >= best_letter_score:\n if best_letter is not None:\n score += unmatched_penalty\n best_letter = s_char\n best_lower = best_letter.lower()\n best_letter_idx = s_idx\n best_letter_score = new_score\n prev_match = True\n else:\n score += unmatched_penalty\n prev_match = False\n prev_lower = s_char == s_lower and s_lower != s_upper\n prev_sep = s_char in '_ '\n s_idx += 1\n if best_letter:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n return p_idx == p_len, score\n",
"step-3": "<mask token>\n\n\ndef fuzzy_match_simple(pattern, instring):\n \"\"\"Return True if each character in pattern is found in order in instring.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n\n :return: True if there is a match, False otherwise\n :rtype: ``bool``\n \"\"\"\n p_idx, s_idx, p_len, s_len = 0, 0, len(pattern), len(instring)\n while p_idx != p_len and s_idx != s_len:\n if pattern[p_idx].lower() == instring[s_idx].lower():\n p_idx += 1\n s_idx += 1\n return p_len != 0 and s_len != 0 and p_idx == p_len\n\n\ndef fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=\n 10, lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):\n \"\"\"Return match boolean and match score.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n :param int adj_bonus: bonus for adjacent matches\n :param int sep_bonus: bonus if match occurs after a separator\n :param int camel_bonus: bonus if match is uppercase\n :param int lead_penalty: penalty applied for each letter before 1st match\n :param int max_lead_penalty: maximum total ``lead_penalty``\n :param int unmatched_penalty: penalty for each unmatched letter\n\n :return: 2-tuple with match truthiness at idx 0 and score at idx 1\n :rtype: ``tuple``\n \"\"\"\n score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)\n prev_match, prev_lower = False, False\n prev_sep = True\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n matched_indices = []\n while s_idx != s_len:\n p_char = pattern[p_idx] if p_idx != p_len else None\n s_char = instring[s_idx]\n p_lower = p_char.lower() if p_char else None\n s_lower, s_upper = s_char.lower(), s_char.upper()\n next_match = p_char and p_lower == s_lower\n rematch = best_letter and best_lower == s_lower\n advanced = next_match and best_letter\n p_repeat = best_letter and p_char and best_lower == p_lower\n if advanced or p_repeat:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n if next_match or rematch:\n new_score = 0\n if p_idx == 0:\n score += max(s_idx * lead_penalty, max_lead_penalty)\n if prev_match:\n new_score += adj_bonus\n if prev_sep:\n new_score += sep_bonus\n if prev_lower and s_char == s_upper and s_lower != s_upper:\n new_score += camel_bonus\n if next_match:\n p_idx += 1\n if new_score >= best_letter_score:\n if best_letter is not None:\n score += unmatched_penalty\n best_letter = s_char\n best_lower = best_letter.lower()\n best_letter_idx = s_idx\n best_letter_score = new_score\n prev_match = True\n else:\n score += unmatched_penalty\n prev_match = False\n prev_lower = s_char == s_lower and s_lower != s_upper\n prev_sep = s_char in '_ '\n s_idx += 1\n if best_letter:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n return p_idx == p_len, score\n",
"step-4": "<mask token>\nfrom __future__ import unicode_literals\n\n\ndef fuzzy_match_simple(pattern, instring):\n \"\"\"Return True if each character in pattern is found in order in instring.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n\n :return: True if there is a match, False otherwise\n :rtype: ``bool``\n \"\"\"\n p_idx, s_idx, p_len, s_len = 0, 0, len(pattern), len(instring)\n while p_idx != p_len and s_idx != s_len:\n if pattern[p_idx].lower() == instring[s_idx].lower():\n p_idx += 1\n s_idx += 1\n return p_len != 0 and s_len != 0 and p_idx == p_len\n\n\ndef fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=\n 10, lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):\n \"\"\"Return match boolean and match score.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n :param int adj_bonus: bonus for adjacent matches\n :param int sep_bonus: bonus if match occurs after a separator\n :param int camel_bonus: bonus if match is uppercase\n :param int lead_penalty: penalty applied for each letter before 1st match\n :param int max_lead_penalty: maximum total ``lead_penalty``\n :param int unmatched_penalty: penalty for each unmatched letter\n\n :return: 2-tuple with match truthiness at idx 0 and score at idx 1\n :rtype: ``tuple``\n \"\"\"\n score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)\n prev_match, prev_lower = False, False\n prev_sep = True\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n matched_indices = []\n while s_idx != s_len:\n p_char = pattern[p_idx] if p_idx != p_len else None\n s_char = instring[s_idx]\n p_lower = p_char.lower() if p_char else None\n s_lower, s_upper = s_char.lower(), s_char.upper()\n next_match = p_char and p_lower == s_lower\n rematch = best_letter and best_lower == s_lower\n advanced = next_match and best_letter\n p_repeat = best_letter and p_char and best_lower == p_lower\n if advanced or p_repeat:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n if next_match or rematch:\n new_score = 0\n if p_idx == 0:\n score += max(s_idx * lead_penalty, max_lead_penalty)\n if prev_match:\n new_score += adj_bonus\n if prev_sep:\n new_score += sep_bonus\n if prev_lower and s_char == s_upper and s_lower != s_upper:\n new_score += camel_bonus\n if next_match:\n p_idx += 1\n if new_score >= best_letter_score:\n if best_letter is not None:\n score += unmatched_penalty\n best_letter = s_char\n best_lower = best_letter.lower()\n best_letter_idx = s_idx\n best_letter_score = new_score\n prev_match = True\n else:\n score += unmatched_penalty\n prev_match = False\n prev_lower = s_char == s_lower and s_lower != s_upper\n prev_sep = s_char in '_ '\n s_idx += 1\n if best_letter:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n return p_idx == p_len, score\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nMIT License\n\nCopyright (c) 2016 Matt Menzenski\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\n\ndef fuzzy_match_simple(pattern, instring):\n \"\"\"Return True if each character in pattern is found in order in instring.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n\n :return: True if there is a match, False otherwise\n :rtype: ``bool``\n \"\"\"\n p_idx, s_idx, p_len, s_len = 0, 0, len(pattern), len(instring)\n while (p_idx != p_len) and (s_idx != s_len):\n if pattern[p_idx].lower() == instring[s_idx].lower():\n p_idx += 1\n s_idx += 1\n return p_len != 0 and s_len != 0 and p_idx == p_len\n\n\ndef fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=10,\n lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):\n \"\"\"Return match boolean and match score.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n :param int adj_bonus: bonus for adjacent matches\n :param int sep_bonus: bonus if match occurs after a separator\n :param int camel_bonus: bonus if match is uppercase\n :param int lead_penalty: penalty applied for each letter before 1st match\n :param int max_lead_penalty: maximum total ``lead_penalty``\n :param int unmatched_penalty: penalty for each unmatched letter\n\n :return: 2-tuple with match truthiness at idx 0 and score at idx 1\n :rtype: ``tuple``\n \"\"\"\n score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)\n prev_match, prev_lower = False, False\n prev_sep = True # so that matching first letter gets sep_bonus\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n matched_indices = []\n\n while s_idx != s_len:\n p_char = pattern[p_idx] if (p_idx != p_len) else None\n s_char = instring[s_idx]\n p_lower = p_char.lower() if p_char else None\n s_lower, s_upper = s_char.lower(), s_char.upper()\n\n next_match = p_char and p_lower == s_lower\n rematch = best_letter and best_lower == s_lower\n\n advanced = next_match and best_letter\n p_repeat = best_letter and p_char and best_lower == p_lower\n\n if advanced or p_repeat:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n\n if next_match 
or rematch:\n new_score = 0\n\n # apply penalty for each letter before the first match\n # using max because penalties are negative (so max = smallest)\n if p_idx == 0:\n score += max(s_idx * lead_penalty, max_lead_penalty)\n\n # apply bonus for consecutive matches\n if prev_match:\n new_score += adj_bonus\n\n # apply bonus for matches after a separator\n if prev_sep:\n new_score += sep_bonus\n\n # apply bonus across camelCase boundaries\n if prev_lower and s_char == s_upper and s_lower != s_upper:\n new_score += camel_bonus\n\n # update pattern index iff the next pattern letter was matched\n if next_match:\n p_idx += 1\n\n # update best letter match (may be next or rematch)\n if new_score >= best_letter_score:\n # apply penalty for now-skipped letter\n if best_letter is not None:\n score += unmatched_penalty\n best_letter = s_char\n best_lower = best_letter.lower()\n best_letter_idx = s_idx\n best_letter_score = new_score\n\n prev_match = True\n\n else:\n score += unmatched_penalty\n prev_match = False\n\n prev_lower = s_char == s_lower and s_lower != s_upper\n prev_sep = s_char in '_ '\n\n s_idx += 1\n\n if best_letter:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n\n return p_idx == p_len, score",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# #Create a function that takes a text file and returns the number of words
# ___ count_words filepath
# w.. o.. ? ? __ file # read
# strng = ?.r..
# strng_list = ?.s.. " "
# r.. l.. ?
#
# print ? "words1.txt"
|
normal
|
{
"blob_id": "b83310c18294def950cef6710c7644c7e8a3208f",
"index": 5219,
"step-1": "# #Create a function that takes a text file and returns the number of words\n# ___ count_words filepath\n# w.. o.. ? ? __ file # read\n# strng = ?.r..\n# strng_list = ?.s.. \" \"\n# r.. l.. ?\n#\n# print ? \"words1.txt\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
# -*- coding: UTF-8 -*-
import os.path
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
EMOTICONS = {
"O:)": "angel",
"o:)": "angel",
"O:-)": "angel",
"o:-)": "angel",
"o:-3": "angel",
"o:3": "angel",
"O;^)": "angel",
">:[": "annoyed/disappointed",
":-(": "annoyed/disappointed",
":(": "annoyed/disappointed",
":((": "annoyed/disappointed",
":-((": "annoyed/disappointed",
":-c": "annoyed/disappointed",
":-<": "annoyed/disappointed",
":?C": "annoyed/disappointed",
":<": "annoyed/disappointed",
":[": "annoyed/disappointed",
":{": "annoyed/disappointed",
":=||": "annoyed/disappointed",
":@": "annoyed/disappointed",
">:(": "annoyed/disappointed",
":/": "annoyed/disappointed",
":\\": "annoyed/disappointed",
"=/": "annoyed/disappointed",
"=\\": "annoyed/disappointed",
">:/": "annoyed/disappointed",
">:\\": "annoyed/disappointed",
":S": "annoyed/disappointed",
":s": "annoyed/disappointed",
":-S": "annoyed/disappointed",
":-s": "annoyed/disappointed",
":|": "annoyed/disappointed",
":-|": "annoyed/disappointed",
":$": "annoyed/disappointed",
"?_?": "annoyed/disappointed",
"(>_<)": "annoyed/disappointed",
">_<": "annoyed/disappointed",
">__<": "annoyed/disappointed",
"(>__<)": "annoyed/disappointed",
"(-.-)": "annoyed/disappointed",
"(-_-)": "annoyed/disappointed",
"(._.)": "annoyed/disappointed",
"/:)": "annoyed/disappointed",
":-$": "annoyed/disappointed",
">:P": "annoyed/disappointed",
"K": "annoyed/disappointed",
"3:)": "devilish",
"3:-)": "devilish",
"}:-)": "devilish",
"}:)": "devilish",
">:)": "devilish",
"B-)": "happy",
":-)": "happy",
":)": "happy",
":o)": "happy",
":]": "happy",
":3": "happy",
":c)": "happy",
":>": "happy",
"=]": "happy",
"8)": "happy",
"=)": "happy",
":}": "happy",
":^)": "happy",
":?)": "happy",
":-))": "happy",
"<:-P": "happy",
"<:P": "happy",
"<:-p": "happy",
"<:p": "happy",
";;)": "happy",
"J": "happy",
"<3": "heart",
"^5": "high-five",
">_>^": "high-five",
"^<_<": "high-five",
":*": "kiss",
":*)": "kiss",
":^*": "kiss",
"}{": "kiss",
"('}{')": "kiss",
":-D": "laughing",
":D": "laughing",
"8-D": "laughing",
"8D": "laughing",
"x-D": "laughing",
"xD": "laughing",
"X-D": "laughing",
"XD": "laughing",
"=-D": "laughing",
"=D": "laughing",
";D": "laughing",
"-3": "laughing",
"3": "laughing",
"B^D": "laughing",
"D:<": "laughing",
"D:": "laughing",
"D8": "laughing",
"D;": "laughing",
"D=": "laughing",
"DX": "laughing",
":-B": "nerd",
"8-)": "nerd",
"8)": "nerd",
"</3": "sad",
":'(": "sad",
":'-(": "sad",
"QQ": "sad",
"L": "sad",
":#": "sealed mouth",
":-#": "sealed mouth",
":-X": "sealed mouth",
":-x": "sealed mouth",
":X": "sealed mouth",
":x": "sealed mouth",
"??": "shooting star",
"??": "shooting star",
"~?": "shooting star",
">:O": "suprprised/shocked",
">:o": "suprprised/shocked",
":-O": "suprprised/shocked",
":-o": "suprprised/shocked",
":O": "suprprised/shocked",
":o": "suprprised/shocked",
"O_o": "suprprised/shocked",
"o_O": "suprprised/shocked",
"O.o": "suprprised/shocked",
"o.O": "suprprised/shocked",
"(O_o)": "suprprised/shocked",
"(o_O)": "suprprised/shocked",
"(O.o)": "suprprised/shocked",
"(o.O)": "suprprised/shocked",
":'-)": "tears of happines",
":')": "tears of happines",
":P": "teasing/playful",
":p": "teasing/playful",
">:P": "teasing/playful",
">:p": "teasing/playful",
"X-P": "teasing/playful",
"x-p": "teasing/playful",
"xp": "teasing/playful",
"XP": "teasing/playful",
":-P": "teasing/playful",
":-p": "teasing/playful",
"=P": "teasing/playful",
"=P": "teasing/playful",
":-?": "teasing/playful",
":-b": "teasing/playful",
":b": "teasing/playful",
";)": "wink",
u"º)": "wink",
";-)": "wink",
";]": "wink",
u"^Ü^": "happy",
}
special_tokens = EMOTICONS
from DAPOS.data.variation import Prefix, Suffix
EASY_WORDS = {
u"ليا": [(Prefix(u"ل"), u"يا", Suffix(u""))],
u"لي": [(Prefix(u"ل"), u"ي", Suffix(u""))],
u"لكم": [(Prefix(u"ل"), u"كم", Suffix(u""))],
u"لكما": [(Prefix(u"ل"), u"كما", Suffix(u""))],
u"له": [(Prefix(u"ل"), u"ه", Suffix(u""))],
u"لها": [(Prefix(u"ل"), u"ها", Suffix(u""))],
u"لهم": [(Prefix(u"ل"), u"هم", Suffix(u""))],
u"لهما": [(Prefix(u"ل"), u"هما", Suffix(u""))],
u"لهن": [(Prefix(u"ل"), u"هم", Suffix(u""))],
u"بيا": [(Prefix(u"ب"), u"يا", Suffix(u""))],
u"بي": [(Prefix(u"ب"), u"ي", Suffix(u""))],
u"بك": [(Prefix(u"ب"), u"ك", Suffix(u""))],
u"بكم": [(Prefix(u"ب"), u"كم", Suffix(u""))],
u"بكما": [(Prefix(u"ب"), u"كما", Suffix(u""))],
u"به": [(Prefix(u"ب"), u"ه", Suffix(u""))],
u"بها": [(Prefix(u"ب"), u"ها", Suffix(u""))],
u"بهما": [(Prefix(u"ب"), u"هما", Suffix(u""))],
u"بهم": [(Prefix(u"ب"), u"هم", Suffix(u""))],
u"بهن": [(Prefix(u"ب"), u"هن", Suffix(u""))],
u"عليا": [(Prefix(u""), u"على", Suffix(u"يا"))],
u"فيا": [(Prefix(u"ف"), u"يا", Suffix(u""))],
}
EMOTICONS_TAG = 'EMO'
PUNCTUATION_TAG = 'PUNC'
DIGIT_TAG = 'CD'
NOTDEFINED_TAG = 'NN'
|
normal
|
{
"blob_id": "3f3ed0165120dc135a4ce1f282dbdf9dad57adf8",
"index": 980,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\nEMOTICONS = {'O:)': 'angel', 'o:)': 'angel', 'O:-)': 'angel', 'o:-)':\n 'angel', 'o:-3': 'angel', 'o:3': 'angel', 'O;^)': 'angel', '>:[':\n 'annoyed/disappointed', ':-(': 'annoyed/disappointed', ':(':\n 'annoyed/disappointed', ':((': 'annoyed/disappointed', ':-((':\n 'annoyed/disappointed', ':-c': 'annoyed/disappointed', ':-<':\n 'annoyed/disappointed', ':?C': 'annoyed/disappointed', ':<':\n 'annoyed/disappointed', ':[': 'annoyed/disappointed', ':{':\n 'annoyed/disappointed', ':=||': 'annoyed/disappointed', ':@':\n 'annoyed/disappointed', '>:(': 'annoyed/disappointed', ':/':\n 'annoyed/disappointed', ':\\\\': 'annoyed/disappointed', '=/':\n 'annoyed/disappointed', '=\\\\': 'annoyed/disappointed', '>:/':\n 'annoyed/disappointed', '>:\\\\': 'annoyed/disappointed', ':S':\n 'annoyed/disappointed', ':s': 'annoyed/disappointed', ':-S':\n 'annoyed/disappointed', ':-s': 'annoyed/disappointed', ':|':\n 'annoyed/disappointed', ':-|': 'annoyed/disappointed', ':$':\n 'annoyed/disappointed', '?_?': 'annoyed/disappointed', '(>_<)':\n 'annoyed/disappointed', '>_<': 'annoyed/disappointed', '>__<':\n 'annoyed/disappointed', '(>__<)': 'annoyed/disappointed', '(-.-)':\n 'annoyed/disappointed', '(-_-)': 'annoyed/disappointed', '(._.)':\n 'annoyed/disappointed', '/:)': 'annoyed/disappointed', ':-$':\n 'annoyed/disappointed', '>:P': 'annoyed/disappointed', 'K':\n 'annoyed/disappointed', '3:)': 'devilish', '3:-)': 'devilish', '}:-)':\n 'devilish', '}:)': 'devilish', '>:)': 'devilish', 'B-)': 'happy', ':-)':\n 'happy', ':)': 'happy', ':o)': 'happy', ':]': 'happy', ':3': 'happy',\n ':c)': 'happy', ':>': 'happy', '=]': 'happy', '8)': 'happy', '=)':\n 'happy', ':}': 'happy', ':^)': 'happy', ':?)': 'happy', ':-))': 'happy',\n '<:-P': 'happy', '<:P': 'happy', '<:-p': 'happy', '<:p': 'happy', ';;)':\n 'happy', 'J': 'happy', '<3': 'heart', '^5': 'high-five', '>_>^':\n 'high-five', '^<_<': 'high-five', ':*': 'kiss', ':*)': 'kiss', ':^*':\n 'kiss', '}{': 'kiss', \"('}{')\": 'kiss', ':-D': 'laughing', ':D':\n 'laughing', '8-D': 'laughing', '8D': 'laughing', 'x-D': 'laughing',\n 'xD': 'laughing', 'X-D': 'laughing', 'XD': 'laughing', '=-D':\n 'laughing', '=D': 'laughing', ';D': 'laughing', '-3': 'laughing', '3':\n 'laughing', 'B^D': 'laughing', 'D:<': 'laughing', 'D:': 'laughing',\n 'D8': 'laughing', 'D;': 'laughing', 'D=': 'laughing', 'DX': 'laughing',\n ':-B': 'nerd', '8-)': 'nerd', '8)': 'nerd', '</3': 'sad', \":'(\": 'sad',\n \":'-(\": 'sad', 'QQ': 'sad', 'L': 'sad', ':#': 'sealed mouth', ':-#':\n 'sealed mouth', ':-X': 'sealed mouth', ':-x': 'sealed mouth', ':X':\n 'sealed mouth', ':x': 'sealed mouth', '??': 'shooting star', '??':\n 'shooting star', '~?': 'shooting star', '>:O': 'suprprised/shocked',\n '>:o': 'suprprised/shocked', ':-O': 'suprprised/shocked', ':-o':\n 'suprprised/shocked', ':O': 'suprprised/shocked', ':o':\n 'suprprised/shocked', 'O_o': 'suprprised/shocked', 'o_O':\n 'suprprised/shocked', 'O.o': 'suprprised/shocked', 'o.O':\n 'suprprised/shocked', '(O_o)': 'suprprised/shocked', '(o_O)':\n 'suprprised/shocked', '(O.o)': 'suprprised/shocked', '(o.O)':\n 'suprprised/shocked', \":'-)\": 'tears of happines', \":')\":\n 'tears of happines', ':P': 'teasing/playful', ':p': 'teasing/playful',\n '>:P': 'teasing/playful', '>:p': 'teasing/playful', 'X-P':\n 'teasing/playful', 'x-p': 'teasing/playful', 'xp': 'teasing/playful',\n 'XP': 'teasing/playful', ':-P': 'teasing/playful', ':-p':\n 'teasing/playful', '=P': 'teasing/playful', 
'=P': 'teasing/playful',\n ':-?': 'teasing/playful', ':-b': 'teasing/playful', ':b':\n 'teasing/playful', ';)': 'wink', u'º)': 'wink', ';-)': 'wink', ';]':\n 'wink', u'^Ü^': 'happy'}\nspecial_tokens = EMOTICONS\n<mask token>\nEASY_WORDS = {u'ليا': [(Prefix(u'ل'), u'يا', Suffix(u''))], u'لي': [(Prefix\n (u'ل'), u'ي', Suffix(u''))], u'لكم': [(Prefix(u'ل'), u'كم', Suffix(u'')\n )], u'لكما': [(Prefix(u'ل'), u'كما', Suffix(u''))], u'له': [(Prefix(\n u'ل'), u'ه', Suffix(u''))], u'لها': [(Prefix(u'ل'), u'ها', Suffix(u''))\n ], u'لهم': [(Prefix(u'ل'), u'هم', Suffix(u''))], u'لهما': [(Prefix(u'ل'\n ), u'هما', Suffix(u''))], u'لهن': [(Prefix(u'ل'), u'هم', Suffix(u''))],\n u'بيا': [(Prefix(u'ب'), u'يا', Suffix(u''))], u'بي': [(Prefix(u'ب'),\n u'ي', Suffix(u''))], u'بك': [(Prefix(u'ب'), u'ك', Suffix(u''))], u'بكم':\n [(Prefix(u'ب'), u'كم', Suffix(u''))], u'بكما': [(Prefix(u'ب'), u'كما',\n Suffix(u''))], u'به': [(Prefix(u'ب'), u'ه', Suffix(u''))], u'بها': [(\n Prefix(u'ب'), u'ها', Suffix(u''))], u'بهما': [(Prefix(u'ب'), u'هما',\n Suffix(u''))], u'بهم': [(Prefix(u'ب'), u'هم', Suffix(u''))], u'بهن': [(\n Prefix(u'ب'), u'هن', Suffix(u''))], u'عليا': [(Prefix(u''), u'على',\n Suffix(u'يا'))], u'فيا': [(Prefix(u'ف'), u'يا', Suffix(u''))]}\nEMOTICONS_TAG = 'EMO'\nPUNCTUATION_TAG = 'PUNC'\nDIGIT_TAG = 'CD'\nNOTDEFINED_TAG = 'NN'\n",
"step-3": "import os.path\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\nEMOTICONS = {'O:)': 'angel', 'o:)': 'angel', 'O:-)': 'angel', 'o:-)':\n 'angel', 'o:-3': 'angel', 'o:3': 'angel', 'O;^)': 'angel', '>:[':\n 'annoyed/disappointed', ':-(': 'annoyed/disappointed', ':(':\n 'annoyed/disappointed', ':((': 'annoyed/disappointed', ':-((':\n 'annoyed/disappointed', ':-c': 'annoyed/disappointed', ':-<':\n 'annoyed/disappointed', ':?C': 'annoyed/disappointed', ':<':\n 'annoyed/disappointed', ':[': 'annoyed/disappointed', ':{':\n 'annoyed/disappointed', ':=||': 'annoyed/disappointed', ':@':\n 'annoyed/disappointed', '>:(': 'annoyed/disappointed', ':/':\n 'annoyed/disappointed', ':\\\\': 'annoyed/disappointed', '=/':\n 'annoyed/disappointed', '=\\\\': 'annoyed/disappointed', '>:/':\n 'annoyed/disappointed', '>:\\\\': 'annoyed/disappointed', ':S':\n 'annoyed/disappointed', ':s': 'annoyed/disappointed', ':-S':\n 'annoyed/disappointed', ':-s': 'annoyed/disappointed', ':|':\n 'annoyed/disappointed', ':-|': 'annoyed/disappointed', ':$':\n 'annoyed/disappointed', '?_?': 'annoyed/disappointed', '(>_<)':\n 'annoyed/disappointed', '>_<': 'annoyed/disappointed', '>__<':\n 'annoyed/disappointed', '(>__<)': 'annoyed/disappointed', '(-.-)':\n 'annoyed/disappointed', '(-_-)': 'annoyed/disappointed', '(._.)':\n 'annoyed/disappointed', '/:)': 'annoyed/disappointed', ':-$':\n 'annoyed/disappointed', '>:P': 'annoyed/disappointed', 'K':\n 'annoyed/disappointed', '3:)': 'devilish', '3:-)': 'devilish', '}:-)':\n 'devilish', '}:)': 'devilish', '>:)': 'devilish', 'B-)': 'happy', ':-)':\n 'happy', ':)': 'happy', ':o)': 'happy', ':]': 'happy', ':3': 'happy',\n ':c)': 'happy', ':>': 'happy', '=]': 'happy', '8)': 'happy', '=)':\n 'happy', ':}': 'happy', ':^)': 'happy', ':?)': 'happy', ':-))': 'happy',\n '<:-P': 'happy', '<:P': 'happy', '<:-p': 'happy', '<:p': 'happy', ';;)':\n 'happy', 'J': 'happy', '<3': 'heart', '^5': 'high-five', '>_>^':\n 'high-five', '^<_<': 'high-five', ':*': 'kiss', ':*)': 'kiss', ':^*':\n 'kiss', '}{': 'kiss', \"('}{')\": 'kiss', ':-D': 'laughing', ':D':\n 'laughing', '8-D': 'laughing', '8D': 'laughing', 'x-D': 'laughing',\n 'xD': 'laughing', 'X-D': 'laughing', 'XD': 'laughing', '=-D':\n 'laughing', '=D': 'laughing', ';D': 'laughing', '-3': 'laughing', '3':\n 'laughing', 'B^D': 'laughing', 'D:<': 'laughing', 'D:': 'laughing',\n 'D8': 'laughing', 'D;': 'laughing', 'D=': 'laughing', 'DX': 'laughing',\n ':-B': 'nerd', '8-)': 'nerd', '8)': 'nerd', '</3': 'sad', \":'(\": 'sad',\n \":'-(\": 'sad', 'QQ': 'sad', 'L': 'sad', ':#': 'sealed mouth', ':-#':\n 'sealed mouth', ':-X': 'sealed mouth', ':-x': 'sealed mouth', ':X':\n 'sealed mouth', ':x': 'sealed mouth', '??': 'shooting star', '??':\n 'shooting star', '~?': 'shooting star', '>:O': 'suprprised/shocked',\n '>:o': 'suprprised/shocked', ':-O': 'suprprised/shocked', ':-o':\n 'suprprised/shocked', ':O': 'suprprised/shocked', ':o':\n 'suprprised/shocked', 'O_o': 'suprprised/shocked', 'o_O':\n 'suprprised/shocked', 'O.o': 'suprprised/shocked', 'o.O':\n 'suprprised/shocked', '(O_o)': 'suprprised/shocked', '(o_O)':\n 'suprprised/shocked', '(O.o)': 'suprprised/shocked', '(o.O)':\n 'suprprised/shocked', \":'-)\": 'tears of happines', \":')\":\n 'tears of happines', ':P': 'teasing/playful', ':p': 'teasing/playful',\n '>:P': 'teasing/playful', '>:p': 'teasing/playful', 'X-P':\n 'teasing/playful', 'x-p': 'teasing/playful', 'xp': 'teasing/playful',\n 'XP': 'teasing/playful', ':-P': 'teasing/playful', ':-p':\n 'teasing/playful', '=P': 'teasing/playful', 
'=P': 'teasing/playful',\n ':-?': 'teasing/playful', ':-b': 'teasing/playful', ':b':\n 'teasing/playful', ';)': 'wink', u'º)': 'wink', ';-)': 'wink', ';]':\n 'wink', u'^Ü^': 'happy'}\nspecial_tokens = EMOTICONS\nfrom DAPOS.data.variation import Prefix, Suffix\nEASY_WORDS = {u'ليا': [(Prefix(u'ل'), u'يا', Suffix(u''))], u'لي': [(Prefix\n (u'ل'), u'ي', Suffix(u''))], u'لكم': [(Prefix(u'ل'), u'كم', Suffix(u'')\n )], u'لكما': [(Prefix(u'ل'), u'كما', Suffix(u''))], u'له': [(Prefix(\n u'ل'), u'ه', Suffix(u''))], u'لها': [(Prefix(u'ل'), u'ها', Suffix(u''))\n ], u'لهم': [(Prefix(u'ل'), u'هم', Suffix(u''))], u'لهما': [(Prefix(u'ل'\n ), u'هما', Suffix(u''))], u'لهن': [(Prefix(u'ل'), u'هم', Suffix(u''))],\n u'بيا': [(Prefix(u'ب'), u'يا', Suffix(u''))], u'بي': [(Prefix(u'ب'),\n u'ي', Suffix(u''))], u'بك': [(Prefix(u'ب'), u'ك', Suffix(u''))], u'بكم':\n [(Prefix(u'ب'), u'كم', Suffix(u''))], u'بكما': [(Prefix(u'ب'), u'كما',\n Suffix(u''))], u'به': [(Prefix(u'ب'), u'ه', Suffix(u''))], u'بها': [(\n Prefix(u'ب'), u'ها', Suffix(u''))], u'بهما': [(Prefix(u'ب'), u'هما',\n Suffix(u''))], u'بهم': [(Prefix(u'ب'), u'هم', Suffix(u''))], u'بهن': [(\n Prefix(u'ب'), u'هن', Suffix(u''))], u'عليا': [(Prefix(u''), u'على',\n Suffix(u'يا'))], u'فيا': [(Prefix(u'ف'), u'يا', Suffix(u''))]}\nEMOTICONS_TAG = 'EMO'\nPUNCTUATION_TAG = 'PUNC'\nDIGIT_TAG = 'CD'\nNOTDEFINED_TAG = 'NN'\n",
"step-4": "# coding: UTF-8 -*-\nimport os.path\n\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nEMOTICONS = {\n \"O:)\": \"angel\",\n \"o:)\": \"angel\",\n \"O:-)\": \"angel\",\n \"o:-)\": \"angel\",\n \"o:-3\": \"angel\",\n \"o:3\": \"angel\",\n \"O;^)\": \"angel\",\n \">:[\": \"annoyed/disappointed\",\n \":-(\": \"annoyed/disappointed\",\n \":(\": \"annoyed/disappointed\",\n \":((\": \"annoyed/disappointed\",\n \":-((\": \"annoyed/disappointed\",\n \":-c\": \"annoyed/disappointed\",\n \":-<\": \"annoyed/disappointed\",\n \":?C\": \"annoyed/disappointed\",\n \":<\": \"annoyed/disappointed\",\n \":[\": \"annoyed/disappointed\",\n \":{\": \"annoyed/disappointed\",\n \":=||\": \"annoyed/disappointed\",\n \":@\": \"annoyed/disappointed\",\n \">:(\": \"annoyed/disappointed\",\n \":/\": \"annoyed/disappointed\",\n \":\\\\\": \"annoyed/disappointed\",\n \"=/\": \"annoyed/disappointed\",\n \"=\\\\\": \"annoyed/disappointed\",\n \">:/\": \"annoyed/disappointed\",\n \">:\\\\\": \"annoyed/disappointed\",\n \":S\": \"annoyed/disappointed\",\n \":s\": \"annoyed/disappointed\",\n \":-S\": \"annoyed/disappointed\",\n \":-s\": \"annoyed/disappointed\",\n \":|\": \"annoyed/disappointed\",\n \":-|\": \"annoyed/disappointed\",\n \":$\": \"annoyed/disappointed\",\n \"?_?\": \"annoyed/disappointed\",\n \"(>_<)\": \"annoyed/disappointed\",\n \">_<\": \"annoyed/disappointed\",\n \">__<\": \"annoyed/disappointed\",\n \"(>__<)\": \"annoyed/disappointed\",\n \"(-.-)\": \"annoyed/disappointed\",\n \"(-_-)\": \"annoyed/disappointed\",\n \"(._.)\": \"annoyed/disappointed\",\n \"/:)\": \"annoyed/disappointed\",\n \":-$\": \"annoyed/disappointed\",\n \">:P\": \"annoyed/disappointed\",\n \"K\": \"annoyed/disappointed\",\n \"3:)\": \"devilish\",\n \"3:-)\": \"devilish\",\n \"}:-)\": \"devilish\",\n \"}:)\": \"devilish\",\n \">:)\": \"devilish\",\n \"B-)\": \"happy\",\n \":-)\": \"happy\",\n \":)\": \"happy\",\n \":o)\": \"happy\",\n \":]\": \"happy\",\n \":3\": \"happy\",\n \":c)\": \"happy\",\n \":>\": \"happy\",\n \"=]\": \"happy\",\n \"8)\": \"happy\",\n \"=)\": \"happy\",\n \":}\": \"happy\",\n \":^)\": \"happy\",\n \":?)\": \"happy\",\n \":-))\": \"happy\",\n \"<:-P\": \"happy\",\n \"<:P\": \"happy\",\n \"<:-p\": \"happy\",\n \"<:p\": \"happy\",\n \";;)\": \"happy\",\n \"J\": \"happy\",\n \"<3\": \"heart\",\n \"^5\": \"high-five\",\n \">_>^\": \"high-five\",\n \"^<_<\": \"high-five\",\n \":*\": \"kiss\",\n \":*)\": \"kiss\",\n \":^*\": \"kiss\",\n \"}{\": \"kiss\",\n \"('}{')\": \"kiss\",\n \":-D\": \"laughing\",\n \":D\": \"laughing\",\n \"8-D\": \"laughing\",\n \"8D\": \"laughing\",\n \"x-D\": \"laughing\",\n \"xD\": \"laughing\",\n \"X-D\": \"laughing\",\n \"XD\": \"laughing\",\n \"=-D\": \"laughing\",\n \"=D\": \"laughing\",\n \";D\": \"laughing\",\n \"-3\": \"laughing\",\n \"3\": \"laughing\",\n \"B^D\": \"laughing\",\n \"D:<\": \"laughing\",\n \"D:\": \"laughing\",\n \"D8\": \"laughing\",\n \"D;\": \"laughing\",\n \"D=\": \"laughing\",\n \"DX\": \"laughing\",\n \":-B\": \"nerd\",\n \"8-)\": \"nerd\",\n \"8)\": \"nerd\",\n \"</3\": \"sad\",\n \":'(\": \"sad\",\n \":'-(\": \"sad\",\n \"QQ\": \"sad\",\n \"L\": \"sad\",\n \":#\": \"sealed mouth\",\n \":-#\": \"sealed mouth\",\n \":-X\": \"sealed mouth\",\n \":-x\": \"sealed mouth\",\n \":X\": \"sealed mouth\",\n \":x\": \"sealed mouth\",\n \"??\": \"shooting star\",\n \"??\": \"shooting star\",\n \"~?\": \"shooting star\",\n \">:O\": \"suprprised/shocked\",\n \">:o\": \"suprprised/shocked\",\n \":-O\": \"suprprised/shocked\",\n \":-o\": 
\"suprprised/shocked\",\n \":O\": \"suprprised/shocked\",\n \":o\": \"suprprised/shocked\",\n \"O_o\": \"suprprised/shocked\",\n \"o_O\": \"suprprised/shocked\",\n \"O.o\": \"suprprised/shocked\",\n \"o.O\": \"suprprised/shocked\",\n \"(O_o)\": \"suprprised/shocked\",\n \"(o_O)\": \"suprprised/shocked\",\n \"(O.o)\": \"suprprised/shocked\",\n \"(o.O)\": \"suprprised/shocked\",\n \":'-)\": \"tears of happines\",\n \":')\": \"tears of happines\",\n \":P\": \"teasing/playful\",\n \":p\": \"teasing/playful\",\n \">:P\": \"teasing/playful\",\n \">:p\": \"teasing/playful\",\n \"X-P\": \"teasing/playful\",\n \"x-p\": \"teasing/playful\",\n \"xp\": \"teasing/playful\",\n \"XP\": \"teasing/playful\",\n \":-P\": \"teasing/playful\",\n \":-p\": \"teasing/playful\",\n \"=P\": \"teasing/playful\",\n \"=P\": \"teasing/playful\",\n \":-?\": \"teasing/playful\",\n \":-b\": \"teasing/playful\",\n \":b\": \"teasing/playful\",\n \";)\": \"wink\",\n u\"º)\": \"wink\",\n \";-)\": \"wink\",\n \";]\": \"wink\",\n u\"^Ü^\": \"happy\",\n}\n\nspecial_tokens = EMOTICONS\n\nfrom DAPOS.data.variation import Prefix, Suffix\n\nEASY_WORDS = {\n u\"ليا\": [(Prefix(u\"ل\"), u\"يا\", Suffix(u\"\"))],\n u\"لي\": [(Prefix(u\"ل\"), u\"ي\", Suffix(u\"\"))],\n u\"لكم\": [(Prefix(u\"ل\"), u\"كم\", Suffix(u\"\"))],\n u\"لكما\": [(Prefix(u\"ل\"), u\"كما\", Suffix(u\"\"))],\n u\"له\": [(Prefix(u\"ل\"), u\"ه\", Suffix(u\"\"))],\n u\"لها\": [(Prefix(u\"ل\"), u\"ها\", Suffix(u\"\"))],\n u\"لهم\": [(Prefix(u\"ل\"), u\"هم\", Suffix(u\"\"))],\n u\"لهما\": [(Prefix(u\"ل\"), u\"هما\", Suffix(u\"\"))],\n u\"لهن\": [(Prefix(u\"ل\"), u\"هم\", Suffix(u\"\"))],\n u\"بيا\": [(Prefix(u\"ب\"), u\"يا\", Suffix(u\"\"))],\n u\"بي\": [(Prefix(u\"ب\"), u\"ي\", Suffix(u\"\"))],\n u\"بك\": [(Prefix(u\"ب\"), u\"ك\", Suffix(u\"\"))],\n u\"بكم\": [(Prefix(u\"ب\"), u\"كم\", Suffix(u\"\"))],\n u\"بكما\": [(Prefix(u\"ب\"), u\"كما\", Suffix(u\"\"))],\n u\"به\": [(Prefix(u\"ب\"), u\"ه\", Suffix(u\"\"))],\n u\"بها\": [(Prefix(u\"ب\"), u\"ها\", Suffix(u\"\"))],\n u\"بهما\": [(Prefix(u\"ب\"), u\"هما\", Suffix(u\"\"))],\n u\"بهم\": [(Prefix(u\"ب\"), u\"هم\", Suffix(u\"\"))],\n u\"بهن\": [(Prefix(u\"ب\"), u\"هن\", Suffix(u\"\"))],\n u\"عليا\": [(Prefix(u\"\"), u\"على\", Suffix(u\"يا\"))],\n u\"فيا\": [(Prefix(u\"ف\"), u\"يا\", Suffix(u\"\"))],\n}\n\n\nEMOTICONS_TAG = 'EMO'\nPUNCTUATION_TAG = 'PUNC'\nDIGIT_TAG = 'CD'\nNOTDEFINED_TAG = 'NN'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
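The module in the record above only declares lookup tables and tag constants; a small sketch of how a tokenizer might consult them is shown below. The tag_token helper and the trimmed EMOTICONS subset are assumptions for illustration and are not part of the DAPOS package.

# Minimal illustration of emoticon/POS-tag lookups of the kind declared above.
EMOTICONS = {":)": "happy", ":(": "annoyed/disappointed", ";)": "wink"}  # trimmed subset
EMOTICONS_TAG = 'EMO'
PUNCTUATION_TAG = 'PUNC'
DIGIT_TAG = 'CD'
NOTDEFINED_TAG = 'NN'

def tag_token(token):
    # hypothetical helper: emoticons first, then digits and punctuation, else unknown
    if token in EMOTICONS:
        return EMOTICONS_TAG
    if token.isdigit():
        return DIGIT_TAG
    if token and all(not ch.isalnum() for ch in token):
        return PUNCTUATION_TAG
    return NOTDEFINED_TAG

print([tag_token(t) for t in "nice :) 42 ,".split()])  # ['NN', 'EMO', 'CD', 'PUNC']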
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
ACHAEA_ENDPOINT = 'https://api.achaea.com'
def _requires_auth(func):
def wrapper(self, *args, **kwargs):
if self.auth is not True:
raise APIError()
return func(self, *args, **kwargs)
return wrapper
class API:
auth = None
CHECKAUTH_RESOURCE = '/checkauth.json'
CHARACTERS_RESOURCE = '/characters.json'
SPECIFIC_CHARACTER_RESOURCE = '/characters/{}.json'
NEWS_RESOURCE = '/news.json'
SPECIFIC_NEWS_RESOURCE = '/news/{}.json'
SPECIFIC_NEWS_POST_RESOURCE = '/news/{}/{}.json'
def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):
self.endpoint = endpoint
if username is not None and password is not None:
self.username = username
self.password = password
self.checkauth()
def _get_endpoint(self, fmt_str, args):
return self.endpoint + fmt_str.format(*args)
def _make_request(self, resource, args=(), authed=False, params={}):
endpoint = self._get_endpoint(resource, args)
auth_params = {}
if authed:
if self.username is None or self.password is None:
raise APIError()
auth_params = {'character': self.username, 'password': self.password}
params = params.copy()
params.update(auth_params)
req = requests.get(endpoint, params=params)
return req
def checkauth(self):
if self.auth is not None:
return self.auth
req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)
if req.status_code == 200:
self.auth = True
else:
self.auth = False
return self.auth
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
@_requires_auth
def _character_authed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (character,), True)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def character(self, character=None):
if self.auth is True and (self.username == character or character is None):
return self._character_authed(character or self.username)
else:
return self._character_unauthed(character)
def sections(self):
req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)
if req.status_code != 200:
return None
result = req.json()
sections_list = map(NewsSection.parse, result)
return sections_list
def posts(self, section, page=None):
params = {}
if page is not None:
params['page'] = page
req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,), authed=self.auth,
params=params)
if req.status_code != 200:
return None
result = req.json()
return result
def post(self, section, number):
pass
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills, mob_kills,
explorer_rank, current_class, messages_total=None, messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank, player_kills, mob_kills,
explorer_rank, current_class, messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.read, self.total)
|
normal
|
{
"blob_id": "da66b254afb3a8fcd3783a38d8624caa917e58c3",
"index": 652,
"step-1": "<mask token>\n\n\nclass API:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def characters(self):\n req = self._make_request(self.CHARACTERS_RESOURCE)\n if req.status_code != 200:\n return None\n result = req.json()\n characters = []\n for character in result['characters']:\n characters.append(character['name'])\n return characters\n <mask token>\n\n def _character_unauthed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), False)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass APIError(Exception):\n pass\n\n\nclass Character:\n\n def __init__(self, name, fullname, level, house, xp_rank, player_kills,\n mob_kills, explorer_rank, current_class, messages_total=None,\n messages_unread=None):\n self.name = name\n self.fullname = fullname\n self.level = level\n self.house = house\n self.xp_rank = xp_rank\n self.player_kills = player_kills\n self.mob_kills = mob_kills\n self.explorer_rank = explorer_rank\n self.current_class = current_class\n self.messages_total = messages_total\n self.messages_unread = messages_unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n fullname = json_data['fullname']\n level = int(json_data['level'])\n house = json_data['house']\n xp_rank = json_data['xp_rank']\n player_kills = int(json_data['player_kills'])\n mob_kills = int(json_data['mob_kills'])\n explorer_rank = int(json_data['explorer_rank'])\n current_class = json_data['class']\n messages_total = None\n messages_unread = None\n if 'messages_total' in json_data and 'messages_unread' in json_data:\n messages_total = json_data['messages_total']\n messages_unread = json_data['messages_unread']\n return Character(name, fullname, level, house, xp_rank,\n player_kills, mob_kills, explorer_rank, current_class,\n messages_total, messages_unread)\n\n def __repr__(self):\n return '<Character: {} ({})>'.format(self.name, self.fullname)\n\n\nclass NewsSection:\n\n def __init__(self, name, read, total, unread):\n self.name = name\n self.read = read\n self.total = total\n self.unread = unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n read = int(json_data['read'])\n total = int(json_data['total'])\n unread = int(json_data['unread'])\n return NewsSection(name, read, total, unread)\n\n def __repr__(self):\n return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.\n read, self.total)\n",
"step-2": "<mask token>\n\n\nclass API:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):\n self.endpoint = endpoint\n if username is not None and password is not None:\n self.username = username\n self.password = password\n self.checkauth()\n <mask token>\n <mask token>\n <mask token>\n\n def characters(self):\n req = self._make_request(self.CHARACTERS_RESOURCE)\n if req.status_code != 200:\n return None\n result = req.json()\n characters = []\n for character in result['characters']:\n characters.append(character['name'])\n return characters\n\n @_requires_auth\n def _character_authed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), True)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def _character_unauthed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), False)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def character(self, character=None):\n if self.auth is True and (self.username == character or character is\n None):\n return self._character_authed(character or self.username)\n else:\n return self._character_unauthed(character)\n <mask token>\n <mask token>\n <mask token>\n\n\nclass APIError(Exception):\n pass\n\n\nclass Character:\n\n def __init__(self, name, fullname, level, house, xp_rank, player_kills,\n mob_kills, explorer_rank, current_class, messages_total=None,\n messages_unread=None):\n self.name = name\n self.fullname = fullname\n self.level = level\n self.house = house\n self.xp_rank = xp_rank\n self.player_kills = player_kills\n self.mob_kills = mob_kills\n self.explorer_rank = explorer_rank\n self.current_class = current_class\n self.messages_total = messages_total\n self.messages_unread = messages_unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n fullname = json_data['fullname']\n level = int(json_data['level'])\n house = json_data['house']\n xp_rank = json_data['xp_rank']\n player_kills = int(json_data['player_kills'])\n mob_kills = int(json_data['mob_kills'])\n explorer_rank = int(json_data['explorer_rank'])\n current_class = json_data['class']\n messages_total = None\n messages_unread = None\n if 'messages_total' in json_data and 'messages_unread' in json_data:\n messages_total = json_data['messages_total']\n messages_unread = json_data['messages_unread']\n return Character(name, fullname, level, house, xp_rank,\n player_kills, mob_kills, explorer_rank, current_class,\n messages_total, messages_unread)\n\n def __repr__(self):\n return '<Character: {} ({})>'.format(self.name, self.fullname)\n\n\nclass NewsSection:\n\n def __init__(self, name, read, total, unread):\n self.name = name\n self.read = read\n self.total = total\n self.unread = unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n read = int(json_data['read'])\n total = int(json_data['total'])\n unread = int(json_data['unread'])\n return NewsSection(name, read, total, unread)\n\n def __repr__(self):\n return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.\n read, self.total)\n",
"step-3": "<mask token>\n\n\nclass API:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):\n self.endpoint = endpoint\n if username is not None and password is not None:\n self.username = username\n self.password = password\n self.checkauth()\n\n def _get_endpoint(self, fmt_str, args):\n return self.endpoint + fmt_str.format(*args)\n\n def _make_request(self, resource, args=(), authed=False, params={}):\n endpoint = self._get_endpoint(resource, args)\n auth_params = {}\n if authed:\n if self.username is None or self.password is None:\n raise APIError()\n auth_params = {'character': self.username, 'password': self.\n password}\n params = params.copy()\n params.update(auth_params)\n req = requests.get(endpoint, params=params)\n return req\n\n def checkauth(self):\n if self.auth is not None:\n return self.auth\n req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)\n if req.status_code == 200:\n self.auth = True\n else:\n self.auth = False\n return self.auth\n\n def characters(self):\n req = self._make_request(self.CHARACTERS_RESOURCE)\n if req.status_code != 200:\n return None\n result = req.json()\n characters = []\n for character in result['characters']:\n characters.append(character['name'])\n return characters\n\n @_requires_auth\n def _character_authed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), True)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def _character_unauthed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), False)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def character(self, character=None):\n if self.auth is True and (self.username == character or character is\n None):\n return self._character_authed(character or self.username)\n else:\n return self._character_unauthed(character)\n\n def sections(self):\n req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)\n if req.status_code != 200:\n return None\n result = req.json()\n sections_list = map(NewsSection.parse, result)\n return sections_list\n\n def posts(self, section, page=None):\n params = {}\n if page is not None:\n params['page'] = page\n req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,),\n authed=self.auth, params=params)\n if req.status_code != 200:\n return None\n result = req.json()\n return result\n <mask token>\n\n\nclass APIError(Exception):\n pass\n\n\nclass Character:\n\n def __init__(self, name, fullname, level, house, xp_rank, player_kills,\n mob_kills, explorer_rank, current_class, messages_total=None,\n messages_unread=None):\n self.name = name\n self.fullname = fullname\n self.level = level\n self.house = house\n self.xp_rank = xp_rank\n self.player_kills = player_kills\n self.mob_kills = mob_kills\n self.explorer_rank = explorer_rank\n self.current_class = current_class\n self.messages_total = messages_total\n self.messages_unread = messages_unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n fullname = json_data['fullname']\n level = int(json_data['level'])\n house = json_data['house']\n xp_rank = json_data['xp_rank']\n player_kills = int(json_data['player_kills'])\n mob_kills = int(json_data['mob_kills'])\n explorer_rank = int(json_data['explorer_rank'])\n current_class = json_data['class']\n 
messages_total = None\n messages_unread = None\n if 'messages_total' in json_data and 'messages_unread' in json_data:\n messages_total = json_data['messages_total']\n messages_unread = json_data['messages_unread']\n return Character(name, fullname, level, house, xp_rank,\n player_kills, mob_kills, explorer_rank, current_class,\n messages_total, messages_unread)\n\n def __repr__(self):\n return '<Character: {} ({})>'.format(self.name, self.fullname)\n\n\nclass NewsSection:\n\n def __init__(self, name, read, total, unread):\n self.name = name\n self.read = read\n self.total = total\n self.unread = unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n read = int(json_data['read'])\n total = int(json_data['total'])\n unread = int(json_data['unread'])\n return NewsSection(name, read, total, unread)\n\n def __repr__(self):\n return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.\n read, self.total)\n",
"step-4": "<mask token>\n\n\ndef _requires_auth(func):\n\n def wrapper(self, *args, **kwargs):\n if self.auth is not True:\n raise APIError()\n return func(self, *args, **kwargs)\n return wrapper\n\n\nclass API:\n auth = None\n CHECKAUTH_RESOURCE = '/checkauth.json'\n CHARACTERS_RESOURCE = '/characters.json'\n SPECIFIC_CHARACTER_RESOURCE = '/characters/{}.json'\n NEWS_RESOURCE = '/news.json'\n SPECIFIC_NEWS_RESOURCE = '/news/{}.json'\n SPECIFIC_NEWS_POST_RESOURCE = '/news/{}/{}.json'\n\n def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):\n self.endpoint = endpoint\n if username is not None and password is not None:\n self.username = username\n self.password = password\n self.checkauth()\n\n def _get_endpoint(self, fmt_str, args):\n return self.endpoint + fmt_str.format(*args)\n\n def _make_request(self, resource, args=(), authed=False, params={}):\n endpoint = self._get_endpoint(resource, args)\n auth_params = {}\n if authed:\n if self.username is None or self.password is None:\n raise APIError()\n auth_params = {'character': self.username, 'password': self.\n password}\n params = params.copy()\n params.update(auth_params)\n req = requests.get(endpoint, params=params)\n return req\n\n def checkauth(self):\n if self.auth is not None:\n return self.auth\n req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)\n if req.status_code == 200:\n self.auth = True\n else:\n self.auth = False\n return self.auth\n\n def characters(self):\n req = self._make_request(self.CHARACTERS_RESOURCE)\n if req.status_code != 200:\n return None\n result = req.json()\n characters = []\n for character in result['characters']:\n characters.append(character['name'])\n return characters\n\n @_requires_auth\n def _character_authed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), True)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def _character_unauthed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), False)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def character(self, character=None):\n if self.auth is True and (self.username == character or character is\n None):\n return self._character_authed(character or self.username)\n else:\n return self._character_unauthed(character)\n\n def sections(self):\n req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)\n if req.status_code != 200:\n return None\n result = req.json()\n sections_list = map(NewsSection.parse, result)\n return sections_list\n\n def posts(self, section, page=None):\n params = {}\n if page is not None:\n params['page'] = page\n req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,),\n authed=self.auth, params=params)\n if req.status_code != 200:\n return None\n result = req.json()\n return result\n\n def post(self, section, number):\n pass\n\n\nclass APIError(Exception):\n pass\n\n\nclass Character:\n\n def __init__(self, name, fullname, level, house, xp_rank, player_kills,\n mob_kills, explorer_rank, current_class, messages_total=None,\n messages_unread=None):\n self.name = name\n self.fullname = fullname\n self.level = level\n self.house = house\n self.xp_rank = xp_rank\n self.player_kills = player_kills\n self.mob_kills = mob_kills\n self.explorer_rank = explorer_rank\n self.current_class = current_class\n self.messages_total = messages_total\n self.messages_unread = messages_unread\n\n 
@staticmethod\n def parse(json_data):\n name = json_data['name']\n fullname = json_data['fullname']\n level = int(json_data['level'])\n house = json_data['house']\n xp_rank = json_data['xp_rank']\n player_kills = int(json_data['player_kills'])\n mob_kills = int(json_data['mob_kills'])\n explorer_rank = int(json_data['explorer_rank'])\n current_class = json_data['class']\n messages_total = None\n messages_unread = None\n if 'messages_total' in json_data and 'messages_unread' in json_data:\n messages_total = json_data['messages_total']\n messages_unread = json_data['messages_unread']\n return Character(name, fullname, level, house, xp_rank,\n player_kills, mob_kills, explorer_rank, current_class,\n messages_total, messages_unread)\n\n def __repr__(self):\n return '<Character: {} ({})>'.format(self.name, self.fullname)\n\n\nclass NewsSection:\n\n def __init__(self, name, read, total, unread):\n self.name = name\n self.read = read\n self.total = total\n self.unread = unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n read = int(json_data['read'])\n total = int(json_data['total'])\n unread = int(json_data['unread'])\n return NewsSection(name, read, total, unread)\n\n def __repr__(self):\n return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.\n read, self.total)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\n\nACHAEA_ENDPOINT = 'https://api.achaea.com'\n\n\ndef _requires_auth(func):\n def wrapper(self, *args, **kwargs):\n if self.auth is not True:\n raise APIError()\n return func(self, *args, **kwargs)\n return wrapper\n\n\nclass API:\n\n auth = None\n\n CHECKAUTH_RESOURCE = '/checkauth.json'\n CHARACTERS_RESOURCE = '/characters.json'\n SPECIFIC_CHARACTER_RESOURCE = '/characters/{}.json'\n NEWS_RESOURCE = '/news.json'\n SPECIFIC_NEWS_RESOURCE = '/news/{}.json'\n SPECIFIC_NEWS_POST_RESOURCE = '/news/{}/{}.json'\n\n def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):\n self.endpoint = endpoint\n if username is not None and password is not None:\n self.username = username\n self.password = password\n self.checkauth()\n\n def _get_endpoint(self, fmt_str, args):\n return self.endpoint + fmt_str.format(*args)\n\n def _make_request(self, resource, args=(), authed=False, params={}):\n endpoint = self._get_endpoint(resource, args)\n auth_params = {}\n if authed:\n if self.username is None or self.password is None:\n raise APIError()\n auth_params = {'character': self.username, 'password': self.password}\n params = params.copy()\n params.update(auth_params)\n req = requests.get(endpoint, params=params)\n return req\n\n def checkauth(self):\n if self.auth is not None:\n return self.auth\n\n req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)\n\n if req.status_code == 200:\n self.auth = True\n else:\n self.auth = False\n\n return self.auth\n\n def characters(self):\n req = self._make_request(self.CHARACTERS_RESOURCE)\n if req.status_code != 200:\n return None\n\n result = req.json()\n characters = []\n for character in result['characters']:\n characters.append(character['name'])\n return characters\n\n @_requires_auth\n def _character_authed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (character,), True)\n if req.status_code != 200:\n return None\n\n result = req.json()\n return Character.parse(result)\n\n def _character_unauthed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (character,), False)\n if req.status_code != 200:\n return None\n\n result = req.json()\n return Character.parse(result)\n\n def character(self, character=None):\n if self.auth is True and (self.username == character or character is None):\n return self._character_authed(character or self.username)\n else:\n return self._character_unauthed(character)\n\n def sections(self):\n req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)\n if req.status_code != 200:\n return None\n\n result = req.json()\n sections_list = map(NewsSection.parse, result)\n return sections_list\n\n def posts(self, section, page=None):\n params = {}\n if page is not None:\n params['page'] = page\n req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,), authed=self.auth,\n params=params)\n if req.status_code != 200:\n return None\n\n result = req.json()\n return result\n\n def post(self, section, number):\n pass\n\n\nclass APIError(Exception):\n pass\n\n\nclass Character:\n\n def __init__(self, name, fullname, level, house, xp_rank, player_kills, mob_kills,\n explorer_rank, current_class, messages_total=None, messages_unread=None):\n self.name = name\n self.fullname = fullname\n self.level = level\n self.house = house\n self.xp_rank = xp_rank\n self.player_kills = player_kills\n self.mob_kills = mob_kills\n self.explorer_rank = explorer_rank\n self.current_class = 
current_class\n self.messages_total = messages_total\n self.messages_unread = messages_unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n fullname = json_data['fullname']\n level = int(json_data['level'])\n house = json_data['house']\n xp_rank = json_data['xp_rank']\n player_kills = int(json_data['player_kills'])\n mob_kills = int(json_data['mob_kills'])\n explorer_rank = int(json_data['explorer_rank'])\n current_class = json_data['class']\n messages_total = None\n messages_unread = None\n if 'messages_total' in json_data and 'messages_unread' in json_data:\n messages_total = json_data['messages_total']\n messages_unread = json_data['messages_unread']\n\n return Character(name, fullname, level, house, xp_rank, player_kills, mob_kills,\n explorer_rank, current_class, messages_total, messages_unread)\n\n def __repr__(self):\n return '<Character: {} ({})>'.format(self.name, self.fullname)\n\n\nclass NewsSection:\n\n def __init__(self, name, read, total, unread):\n self.name = name\n self.read = read\n self.total = total\n self.unread = unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n read = int(json_data['read'])\n total = int(json_data['total'])\n unread = int(json_data['unread'])\n return NewsSection(name, read, total, unread)\n\n def __repr__(self):\n return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.read, self.total)\n",
"step-ids": [
12,
15,
20,
23,
26
]
}
|
[
12,
15,
20,
23,
26
] |
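A brief usage sketch of the client in the record above; the character name and credentials are placeholders, and the calls only succeed against the live https://api.achaea.com endpoint.

# Unauthenticated usage: public character listing and a single public profile.
api = API()
names = api.characters()                 # list of character names, or None on HTTP error
print(names[:5] if names else names)

char = api.character('Sarapis')          # placeholder character name
if char is not None:
    print(char.name, char.level, char.current_class)

# Authenticated usage: credentials trigger checkauth() in __init__.
# api = API(username='MyCharacter', password='secret')
# for section in api.sections() or []:
#     print(section)                     # <NewsSection: ...> objects when authed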
# defining private variables
class Privacy:
def __init__(self, val):
self.__val = 900;
print("Private data member =",self.__val,"\n")
value = Privacy(800);
print("Value not changable\n")
value.__val;
|
normal
|
{
"blob_id": "b767519229058b50183d78bb97121f050e5b6bad",
"index": 423,
"step-1": "class Privacy:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Privacy:\n\n def __init__(self, val):\n self.__val = 900\n print('Private data member =', self.__val, '\\n')\n\n\n<mask token>\n",
"step-3": "class Privacy:\n\n def __init__(self, val):\n self.__val = 900\n print('Private data member =', self.__val, '\\n')\n\n\n<mask token>\nprint('Value not changable\\n')\nvalue.__val\n",
"step-4": "class Privacy:\n\n def __init__(self, val):\n self.__val = 900\n print('Private data member =', self.__val, '\\n')\n\n\nvalue = Privacy(800)\nprint('Value not changable\\n')\nvalue.__val\n",
"step-5": "# defining private variables\r\nclass Privacy:\r\n def __init__(self, val):\r\n self.__val = 900; \r\n print(\"Private data member =\",self.__val,\"\\n\")\r\nvalue = Privacy(800);\r\nprint(\"Value not changable\\n\")\r\nvalue.__val;\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
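For context on the record above: a double-underscore attribute is hidden by name mangling rather than made truly private, so accessing value.__val outside the class raises AttributeError while the mangled name remains reachable. A short demonstration:

class Privacy:
    def __init__(self, val):
        self.__val = val              # stored as _Privacy__val via name mangling

obj = Privacy(900)
try:
    obj.__val                         # the plain name does not exist outside the class
except AttributeError as exc:
    print("AttributeError:", exc)
print(obj._Privacy__val)              # 900 -- the mangled name is still accessible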
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os, shutil, time, pickle, warnings, logging
import yaml
from sklearn import preprocessing
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn import metrics
from scipy.special import erfinv
from scipy.stats import mode
warnings.filterwarnings('ignore')
def data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5, random_state=42):
folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(
np.arange(len(df)), y=df[col_stratified]))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_KFold(df, col_index, n_splits=5, random_state=42):
folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(
np.arange(len(df))))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42):
"""
:param df:
:param col_index:
:param col_group:
:param n_splits:
:param random_state:
:return:
"""
group = np.sort(df[col_group].unique())
print("num group: {}".format(len(group)))
np.random.seed(random_state)
group = group[np.random.permutation(len(group))]
fold_list = []
fold = 0
count = 0
fold_list.append([])
for i, item in enumerate(group):
count += (df[col_group] == item).sum()
fold_list[fold].append(item)
if count > len(df) / n_splits * (fold + 1):
fold_list.append([])
fold += 1
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(
lambda x: x not in fold_list[fold]).astype(np.int)
df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'.format(fold + 1)]
for i in range(n_splits):
print("fold: {}, valid: {}. group: {}".format(
i + 1,
(df_new['fold{}_valid'.format(i + 1)] == 1).sum(),
len(fold_list[i]))
)
return df_new
def main():
df = pd.read_csv("../input/melanoma/train.csv")
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "4d0b08f8ca77d188aa218442ac0689fd2c057a89",
"index": 8357,
"step-1": "<mask token>\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,\n random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,\n random_state=random_state).split(np.arange(len(df)), y=df[\n col_stratified]))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=\n random_state).split(np.arange(len(df))))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\n<mask token>\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,\n random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,\n random_state=random_state).split(np.arange(len(df)), y=df[\n col_stratified]))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=\n random_state).split(np.arange(len(df))))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os, shutil, time, pickle, warnings, logging\nimport yaml\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn import metrics\nfrom scipy.special import erfinv\nfrom scipy.stats import mode\nwarnings.filterwarnings('ignore')\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,\n random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,\n random_state=random_state).split(np.arange(len(df)), y=df[\n col_stratified]))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=\n random_state).split(np.arange(len(df))))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os, shutil, time, pickle, warnings, logging\nimport yaml\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn import metrics\nfrom scipy.special import erfinv\nfrom scipy.stats import mode\n\nwarnings.filterwarnings('ignore')\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5, random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(\n np.arange(len(df)), y=df[col_stratified]))\n\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(\n np.arange(len(df))))\n\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print(\"num group: {}\".format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(\n lambda x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'.format(fold + 1)]\n\n for i in range(n_splits):\n print(\"fold: {}, valid: {}. group: {}\".format(\n i + 1,\n (df_new['fold{}_valid'.format(i + 1)] == 1).sum(),\n len(fold_list[i]))\n )\n\n return df_new\n\n\ndef main():\n df = pd.read_csv(\"../input/melanoma/train.csv\")\n\n\nif __name__ == '__main__':\n main()",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from network.utility import *
from entities.message import Message, BroadcastMessage, GroupMessage
from entities.node import Node
from entities.group import GroupBroadcast
from entities.request import Request
import threading
import time
import logging
import random
import json
import socket
from services.user import UserService
class Sender:
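    """Builds a packed header plus JSON body for each message/request type and hands it to SenderWorker threads for UDP delivery."""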
def __init__(self, reverseMap, info):
self.reverseMap = reverseMap
self.info = info
def sendMessage(self, message):
data = {"timestamp": message.timestamp, "message": message.message}
body = json.dumps(data).encode('utf-8')
header = {
"srcUsername": message.fromUsername,
"srcGroup": self.info.get("groupID", ""),
"desGroup": "",
"admin": self.info.get("isAdmin", ""),
"member": self.info.get("isMember", ""),
"broadcast": False,
"groupBroadcast": False,
"memberRq": False,
"ackRq": False,
"denyRq": False,
"leaveRq": False,
"nodeRq": False,
"big": False,
"nodeRep": False,
"contentLength": len(body),
}
packedHeader = packHeader(header)
msg = packedHeader + body
addr = self.reverseMap.get(message.toUsername)
worker = SenderWorker(addr, msg)
worker.start()
def sendMessageBroadcast(self, message):
data = {"timestamp": message.timestamp, "message": message.message}
body = json.dumps(data).encode('utf-8')
header = {
"srcUsername": message.fromUsername,
"srcGroup": self.info.get("groupID", ""),
"desGroup": "",
"admin": self.info.get("isAdmin", ""),
"member": self.info.get("isMember", ""),
"broadcast": True,
"groupBroadcast": False,
"memberRq": False,
"ackRq": False,
"denyRq": False,
"leaveRq": False,
"nodeRq": False,
"big": False,
"nodeRep": False,
"contentLength": len(body),
}
packedHeader = packHeader(header)
msg = packedHeader + body
for addr in self.reverseMap.values():
worker = SenderWorker(addr, msg)
worker.start()
def sendMessageGroup(self, message):
data = {"timestamp": message.timestamp, "message": message.message}
body = json.dumps(data).encode('utf-8')
header = {
"srcUsername": message.fromUsername,
"srcGroup": message.groupID,
"desGroup": "",
"admin": self.info.get("isAdmin", ""),
"member": self.info.get("isMember", ""),
"broadcast": True,
"groupBroadcast": True,
"memberRq": False,
"ackRq": False,
"denyRq": False,
"leaveRq": False,
"nodeRq": False,
"big": False,
"nodeRep": False,
"contentLength": len(body),
}
packedHeader = packHeader(header)
msg = packedHeader + body
for addr in self.reverseMap.values():
worker = SenderWorker(addr, msg)
worker.start()
def sendGroupJoinRequest(self, request):
data = {"message": request.message}
body = json.dumps(data).encode('utf-8')
header = {
"srcUsername": request.fromUsername,
"srcGroup": self.info["groupID"],
"desGroup": request.groupID,
"admin": self.info.get("isAdmin", ""),
"member": self.info.get("isMember", ""),
"broadcast": True,
"groupBroadcast": False,
"memberRq": True,
"ackRq": False,
"denyRq": False,
"leaveRq": False,
"nodeRq": False,
"big": False,
"nodeRep": False,
"contentLength": len(body),
}
packedHeader = packHeader(header)
msg = packedHeader + body
for addr in self.reverseMap.values():
worker = SenderWorker(addr, msg)
worker.start()
def sendGroupAcknowledgeRequest(self, request):
body = b""
header = {
"srcUsername": self.info["username"],
"srcGroup": self.info["groupID"],
"desGroup": "",
"admin": self.info.get("isAdmin", ""),
"member": self.info.get("isMember", ""),
"broadcast": False,
"groupBroadcast": False,
"memberRq": False,
"ackRq": True,
"denyRq": False,
"leaveRq": False,
"nodeRq": False,
"big": False,
"nodeRep": False,
"contentLength": len(body),
}
packedHeader = packHeader(header)
msg = packedHeader + body
addr = self.reverseMap.get(request.fromUsername)
worker = SenderWorker(addr, msg)
worker.start()
def sendGroupDenyRequest(self, request):
body = b""
header = {
"srcUsername": self.info["username"],
"srcGroup": self.info["groupID"],
"desGroup": "",
"admin": self.info.get("isAdmin", ""),
"member": self.info.get("isMember", ""),
"broadcast": False,
"groupBroadcast": False,
"memberRq": False,
"ackRq": False,
"denyRq": True,
"leaveRq": False,
"nodeRq": False,
"big": False,
"nodeRep": False,
"contentLength": len(body),
}
packedHeader = packHeader(header)
msg = packedHeader + body
addr = self.reverseMap.get(request.fromUsername)
worker = SenderWorker(addr, msg)
worker.start()
def sendGroupBroadcast(self):
data = self.info
body = json.dumps(data).encode('utf-8')
header = {
"srcUsername": self.info["username"],
"srcGroup": self.info["groupID"],
"desGroup": "",
"admin": self.info.get("isAdmin", ""),
"member": self.info.get("isMember", ""),
"broadcast": True,
"groupBroadcast": False,
"memberRq": False,
"ackRq": False,
"denyRq": False,
"leaveRq": False,
"nodeRq": False,
"big": False,
"nodeRep": True,
"contentLength": len(body),
}
packedHeader = packHeader(header)
msg = packedHeader + body
for addr in self.reverseMap.values():
worker = SenderWorker(addr, msg)
worker.start()
class SenderWorker(threading.Thread):
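    """One-shot thread that prefixes the message with a random 32-byte package hash and sends it as UDP datagrams to port 8421."""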
def __init__(self, addr, msg):
threading.Thread.__init__(self)
self.addr = addr
self.packageHash = bytes.fromhex(
format(random.getrandbits(256), "x").zfill(64))
self.msg = self.packageHash+msg
self.sock = None
def run(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
start = time.time()
logger.debug(
f"On thread #{threading.get_ident()}, start connection attempt")
while True:
iStart = time.time()
if type(self.msg) not in [str, bytearray, bytes]:
print('Sender worker msg: ', self.msg)
if type(self.addr) not in [str, bytearray, bytes]:
print('SenderWorker addr: ', self.addr,
'type: ', type(self.addr))
self.addr = self.addr[0]
self.sock.sendto(self.msg, (self.addr, 8421,))
            if time.time() - start > 0.3:  # stop after ~0.3 s of resends; the original per-iteration iStart check could never expire for fast UDP sends
break
logger.debug(f"Send complete using {time.time()-start} seconds")
self.sock.close()
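
# Module-level logger: DEBUG output goes to both the console and applog.log.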
logger = logging.getLogger('Sender')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh = logging.FileHandler("applog.log")
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
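
# --- Usage sketch (added for illustration; not part of the original module) ---
# The exact shapes of `reverseMap`, `info`, and the message object below are
# assumptions inferred from the calls above: reverseMap maps usernames to UDP
# addresses, info carries the local node's metadata, and messages expose
# fromUsername/toUsername/message/timestamp attributes.
if __name__ == '__main__':
    class DemoMessage:  # hypothetical stand-in for entities.message.Message
        def __init__(self, fromUsername, toUsername, message, timestamp):
            self.fromUsername = fromUsername
            self.toUsername = toUsername
            self.message = message
            self.timestamp = timestamp

    sender = Sender(reverseMap={'bob': '127.0.0.1'},
                    info={'username': 'alice', 'groupID': 'demo-group'})
    sender.sendMessage(DemoMessage('alice', 'bob', 'hello over UDP', time.time()))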
|
normal
|
{
"blob_id": "67446f50d1c062eddcad282d3bf508967c5192fc",
"index": 4905,
"step-1": "<mask token>\n\n\nclass Sender:\n\n def __init__(self, reverseMap, info):\n self.reverseMap = reverseMap\n self.info = info\n\n def sendMessage(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(message.toUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n <mask token>\n\n def sendMessageGroup(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': message.\n groupID, 'desGroup': '', 'admin': self.info.get('isAdmin', ''),\n 'member': self.info.get('isMember', ''), 'broadcast': True,\n 'groupBroadcast': True, 'memberRq': False, 'ackRq': False,\n 'denyRq': False, 'leaveRq': False, 'nodeRq': False, 'big': \n False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n <mask token>\n\n def sendGroupAcknowledgeRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': True, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n <mask token>\n\n def sendGroupBroadcast(self):\n data = self.info\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': True, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n\nclass SenderWorker(threading.Thread):\n\n def __init__(self, addr, msg):\n threading.Thread.__init__(self)\n self.addr = addr\n self.packageHash = bytes.fromhex(format(random.getrandbits(256),\n 'x').zfill(64))\n self.msg = self.packageHash + msg\n self.sock = None\n\n def run(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n start = time.time()\n logger.debug(\n f'On thread #{threading.get_ident()}, start connection attempt')\n while True:\n iStart = time.time()\n if type(self.msg) not in [str, bytearray, bytes]:\n print('Sender worker msg: ', self.msg)\n if type(self.addr) not in [str, bytearray, bytes]:\n print('SenderWorker addr: ', self.addr, 'type: ', type(self\n .addr))\n self.addr = 
self.addr[0]\n self.sock.sendto(self.msg, (self.addr, 8421))\n if time.time() - iStart > 0.3:\n break\n logger.debug(f'Send complete using {time.time() - start} seconds')\n self.sock.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Sender:\n\n def __init__(self, reverseMap, info):\n self.reverseMap = reverseMap\n self.info = info\n\n def sendMessage(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(message.toUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageBroadcast(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageGroup(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': message.\n groupID, 'desGroup': '', 'admin': self.info.get('isAdmin', ''),\n 'member': self.info.get('isMember', ''), 'broadcast': True,\n 'groupBroadcast': True, 'memberRq': False, 'ackRq': False,\n 'denyRq': False, 'leaveRq': False, 'nodeRq': False, 'big': \n False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupJoinRequest(self, request):\n data = {'message': request.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': request.fromUsername, 'srcGroup': self.\n info['groupID'], 'desGroup': request.groupID, 'admin': self.\n info.get('isAdmin', ''), 'member': self.info.get('isMember', ''\n ), 'broadcast': True, 'groupBroadcast': False, 'memberRq': True,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupAcknowledgeRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': True, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def 
sendGroupDenyRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': True, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupBroadcast(self):\n data = self.info\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': True, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n\nclass SenderWorker(threading.Thread):\n\n def __init__(self, addr, msg):\n threading.Thread.__init__(self)\n self.addr = addr\n self.packageHash = bytes.fromhex(format(random.getrandbits(256),\n 'x').zfill(64))\n self.msg = self.packageHash + msg\n self.sock = None\n\n def run(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n start = time.time()\n logger.debug(\n f'On thread #{threading.get_ident()}, start connection attempt')\n while True:\n iStart = time.time()\n if type(self.msg) not in [str, bytearray, bytes]:\n print('Sender worker msg: ', self.msg)\n if type(self.addr) not in [str, bytearray, bytes]:\n print('SenderWorker addr: ', self.addr, 'type: ', type(self\n .addr))\n self.addr = self.addr[0]\n self.sock.sendto(self.msg, (self.addr, 8421))\n if time.time() - iStart > 0.3:\n break\n logger.debug(f'Send complete using {time.time() - start} seconds')\n self.sock.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Sender:\n\n def __init__(self, reverseMap, info):\n self.reverseMap = reverseMap\n self.info = info\n\n def sendMessage(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(message.toUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageBroadcast(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageGroup(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': message.\n groupID, 'desGroup': '', 'admin': self.info.get('isAdmin', ''),\n 'member': self.info.get('isMember', ''), 'broadcast': True,\n 'groupBroadcast': True, 'memberRq': False, 'ackRq': False,\n 'denyRq': False, 'leaveRq': False, 'nodeRq': False, 'big': \n False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupJoinRequest(self, request):\n data = {'message': request.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': request.fromUsername, 'srcGroup': self.\n info['groupID'], 'desGroup': request.groupID, 'admin': self.\n info.get('isAdmin', ''), 'member': self.info.get('isMember', ''\n ), 'broadcast': True, 'groupBroadcast': False, 'memberRq': True,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupAcknowledgeRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': True, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def 
sendGroupDenyRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': True, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupBroadcast(self):\n data = self.info\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': True, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n\nclass SenderWorker(threading.Thread):\n\n def __init__(self, addr, msg):\n threading.Thread.__init__(self)\n self.addr = addr\n self.packageHash = bytes.fromhex(format(random.getrandbits(256),\n 'x').zfill(64))\n self.msg = self.packageHash + msg\n self.sock = None\n\n def run(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n start = time.time()\n logger.debug(\n f'On thread #{threading.get_ident()}, start connection attempt')\n while True:\n iStart = time.time()\n if type(self.msg) not in [str, bytearray, bytes]:\n print('Sender worker msg: ', self.msg)\n if type(self.addr) not in [str, bytearray, bytes]:\n print('SenderWorker addr: ', self.addr, 'type: ', type(self\n .addr))\n self.addr = self.addr[0]\n self.sock.sendto(self.msg, (self.addr, 8421))\n if time.time() - iStart > 0.3:\n break\n logger.debug(f'Send complete using {time.time() - start} seconds')\n self.sock.close()\n\n\n<mask token>\nlogger.setLevel(logging.DEBUG)\n<mask token>\nch.setFormatter(formatter)\n<mask token>\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n",
"step-4": "from network.utility import *\nfrom entities.message import Message, BroadcastMessage, GroupMessage\nfrom entities.node import Node\nfrom entities.group import GroupBroadcast\nfrom entities.request import Request\nimport threading\nimport time\nimport logging\nimport random\nimport json\nimport socket\nfrom services.user import UserService\n\n\nclass Sender:\n\n def __init__(self, reverseMap, info):\n self.reverseMap = reverseMap\n self.info = info\n\n def sendMessage(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(message.toUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageBroadcast(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageGroup(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': message.\n groupID, 'desGroup': '', 'admin': self.info.get('isAdmin', ''),\n 'member': self.info.get('isMember', ''), 'broadcast': True,\n 'groupBroadcast': True, 'memberRq': False, 'ackRq': False,\n 'denyRq': False, 'leaveRq': False, 'nodeRq': False, 'big': \n False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupJoinRequest(self, request):\n data = {'message': request.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': request.fromUsername, 'srcGroup': self.\n info['groupID'], 'desGroup': request.groupID, 'admin': self.\n info.get('isAdmin', ''), 'member': self.info.get('isMember', ''\n ), 'broadcast': True, 'groupBroadcast': False, 'memberRq': True,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupAcknowledgeRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 
'ackRq': True, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupDenyRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': True, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupBroadcast(self):\n data = self.info\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': True, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n\nclass SenderWorker(threading.Thread):\n\n def __init__(self, addr, msg):\n threading.Thread.__init__(self)\n self.addr = addr\n self.packageHash = bytes.fromhex(format(random.getrandbits(256),\n 'x').zfill(64))\n self.msg = self.packageHash + msg\n self.sock = None\n\n def run(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n start = time.time()\n logger.debug(\n f'On thread #{threading.get_ident()}, start connection attempt')\n while True:\n iStart = time.time()\n if type(self.msg) not in [str, bytearray, bytes]:\n print('Sender worker msg: ', self.msg)\n if type(self.addr) not in [str, bytearray, bytes]:\n print('SenderWorker addr: ', self.addr, 'type: ', type(self\n .addr))\n self.addr = self.addr[0]\n self.sock.sendto(self.msg, (self.addr, 8421))\n if time.time() - iStart > 0.3:\n break\n logger.debug(f'Send complete using {time.time() - start} seconds')\n self.sock.close()\n\n\nlogger = logging.getLogger('Sender')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nfh = logging.FileHandler('applog.log')\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n",
"step-5": "from network.utility import *\nfrom entities.message import Message, BroadcastMessage, GroupMessage\nfrom entities.node import Node\nfrom entities.group import GroupBroadcast\nfrom entities.request import Request\nimport threading\nimport time\nimport logging\nimport random\nimport json\nimport socket\nfrom services.user import UserService\n\n\nclass Sender:\n\n def __init__(self, reverseMap, info):\n self.reverseMap = reverseMap\n self.info = info\n\n def sendMessage(self, message):\n data = {\"timestamp\": message.timestamp, \"message\": message.message}\n body = json.dumps(data).encode('utf-8')\n header = {\n \"srcUsername\": message.fromUsername,\n \"srcGroup\": self.info.get(\"groupID\", \"\"),\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": False,\n \"groupBroadcast\": False,\n \"memberRq\": False,\n \"ackRq\": False,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(message.toUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageBroadcast(self, message):\n data = {\"timestamp\": message.timestamp, \"message\": message.message}\n body = json.dumps(data).encode('utf-8')\n header = {\n \"srcUsername\": message.fromUsername,\n \"srcGroup\": self.info.get(\"groupID\", \"\"),\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": True,\n \"groupBroadcast\": False,\n \"memberRq\": False,\n \"ackRq\": False,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageGroup(self, message):\n data = {\"timestamp\": message.timestamp, \"message\": message.message}\n body = json.dumps(data).encode('utf-8')\n header = {\n \"srcUsername\": message.fromUsername,\n \"srcGroup\": message.groupID,\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": True,\n \"groupBroadcast\": True,\n \"memberRq\": False,\n \"ackRq\": False,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupJoinRequest(self, request):\n data = {\"message\": request.message}\n body = json.dumps(data).encode('utf-8')\n header = {\n \"srcUsername\": request.fromUsername,\n \"srcGroup\": self.info[\"groupID\"],\n \"desGroup\": request.groupID,\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": True,\n \"groupBroadcast\": False,\n \"memberRq\": True,\n \"ackRq\": False,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupAcknowledgeRequest(self, 
request):\n body = b\"\"\n header = {\n \"srcUsername\": self.info[\"username\"],\n \"srcGroup\": self.info[\"groupID\"],\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": False,\n \"groupBroadcast\": False,\n \"memberRq\": False,\n \"ackRq\": True,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupDenyRequest(self, request):\n body = b\"\"\n header = {\n \"srcUsername\": self.info[\"username\"],\n \"srcGroup\": self.info[\"groupID\"],\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": False,\n \"groupBroadcast\": False,\n \"memberRq\": False,\n \"ackRq\": False,\n \"denyRq\": True,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupBroadcast(self):\n data = self.info\n body = json.dumps(data).encode('utf-8')\n header = {\n \"srcUsername\": self.info[\"username\"],\n \"srcGroup\": self.info[\"groupID\"],\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": True,\n \"groupBroadcast\": False,\n \"memberRq\": False,\n \"ackRq\": False,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": True,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n\nclass SenderWorker(threading.Thread):\n\n def __init__(self, addr, msg):\n threading.Thread.__init__(self)\n self.addr = addr\n self.packageHash = bytes.fromhex(\n format(random.getrandbits(256), \"x\").zfill(64))\n self.msg = self.packageHash+msg\n self.sock = None\n\n def run(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n start = time.time()\n logger.debug(\n f\"On thread #{threading.get_ident()}, start connection attempt\")\n while True:\n iStart = time.time()\n if type(self.msg) not in [str, bytearray, bytes]:\n print('Sender worker msg: ', self.msg)\n if type(self.addr) not in [str, bytearray, bytes]:\n print('SenderWorker addr: ', self.addr,\n 'type: ', type(self.addr))\n self.addr = self.addr[0]\n self.sock.sendto(self.msg, (self.addr, 8421,))\n if time.time() - iStart > 0.3:\n break\n logger.debug(f\"Send complete using {time.time()-start} seconds\")\n self.sock.close()\n\n\nlogger = logging.getLogger('Sender')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nfh = logging.FileHandler(\"applog.log\")\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n",
"step-ids": [
9,
12,
13,
15,
16
]
}
|
[
9,
12,
13,
15,
16
] |
# Quicksort with a two-pointer partition that uses the first element as pivot.
data = [1, 4, 2, 3, 6, 8, 9, 7]

def partition(data, l, h):
    # Move items <= pivot left and items > pivot right, then place the pivot
    # at its final index j and return that index.
    i = l
    j = h
    pivot = data[l]
    while i < j:
        while data[i] <= pivot and i <= h - 1:
            i = i + 1
        while data[j] > pivot and j >= l + 1:
            j = j - 1
        if i < j:
            data[i], data[j] = data[j], data[i]
    data[l], data[j] = data[j], data[l]
    return j

def quickSort(data, l, h):
    # Recursively sort the partitions on either side of the pivot.
    if l < h:
        divider = partition(data, l, h)
        quickSort(data, l, divider - 1)
        quickSort(data, divider + 1, h)

quickSort(data, 0, len(data) - 1)
print(data)
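
# Optional sanity check (an addition, not part of the original script): compare
# the in-place quickSort against Python's built-in sorted() on a random list.
import random

sample = [random.randint(0, 100) for _ in range(50)]
expected = sorted(sample)
quickSort(sample, 0, len(sample) - 1)
assert sample == expected, "quickSort result does not match sorted()"
print("quickSort sanity check passed")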
|
normal
|
{
"blob_id": "1cd82883e9a73cfbe067d58c30659b9b2e5bf473",
"index": 9349,
"step-1": "<mask token>\n\n\ndef partition(data, l, h):\n i = l\n j = h\n pivot = data[l]\n while i < j:\n while data[i] <= pivot and i <= h - 1:\n i = i + 1\n while data[j] > pivot and j >= l + 1:\n j = j - 1\n if i < j:\n data[i], data[j] = data[j], data[i]\n data[l], data[j] = data[j], data[l]\n return j\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef partition(data, l, h):\n i = l\n j = h\n pivot = data[l]\n while i < j:\n while data[i] <= pivot and i <= h - 1:\n i = i + 1\n while data[j] > pivot and j >= l + 1:\n j = j - 1\n if i < j:\n data[i], data[j] = data[j], data[i]\n data[l], data[j] = data[j], data[l]\n return j\n\n\ndef quickSort(data, l, h):\n if l < h:\n divider = partition(data, l, h)\n quickSort(data, l, divider - 1)\n quickSort(data, divider + 1, h)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef partition(data, l, h):\n i = l\n j = h\n pivot = data[l]\n while i < j:\n while data[i] <= pivot and i <= h - 1:\n i = i + 1\n while data[j] > pivot and j >= l + 1:\n j = j - 1\n if i < j:\n data[i], data[j] = data[j], data[i]\n data[l], data[j] = data[j], data[l]\n return j\n\n\ndef quickSort(data, l, h):\n if l < h:\n divider = partition(data, l, h)\n quickSort(data, l, divider - 1)\n quickSort(data, divider + 1, h)\n\n\nquickSort(data, 0, len(data) - 1)\nprint(data)\n",
"step-4": "data = [1, 4, 2, 3, 6, 8, 9, 7]\n\n\ndef partition(data, l, h):\n i = l\n j = h\n pivot = data[l]\n while i < j:\n while data[i] <= pivot and i <= h - 1:\n i = i + 1\n while data[j] > pivot and j >= l + 1:\n j = j - 1\n if i < j:\n data[i], data[j] = data[j], data[i]\n data[l], data[j] = data[j], data[l]\n return j\n\n\ndef quickSort(data, l, h):\n if l < h:\n divider = partition(data, l, h)\n quickSort(data, l, divider - 1)\n quickSort(data, divider + 1, h)\n\n\nquickSort(data, 0, len(data) - 1)\nprint(data)\n",
"step-5": "data=[1,4,2,3,6,8,9,7]\r\n\r\ndef partition(data,l,h):\r\n i=l\r\n j=h\r\n pivot=data[l]\r\n\r\n while(i<j):\r\n while(data[i]<=pivot and i<=h-1):\r\n i=i+1\r\n \r\n\r\n while(data[j]>pivot and j>=l+1):\r\n\r\n j=j-1\r\n\r\n \r\n \r\n if(i<j):\r\n data[i],data[j]=data[j],data[i]\r\n\r\n\r\n data[l],data[j]=data[j],data[l]\r\n return j\r\n\r\ndef quickSort(data,l,h):\r\n if(l<h):\r\n divider=partition(data,l,h)\r\n quickSort(data,l,divider-1)\r\n quickSort(data,divider+1,h)\r\n\r\n \r\n\r\n\r\nquickSort(data,0,len(data)-1)\r\nprint(data)\r\n \r\n \r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from eums.test.api.api_test_helpers import create_option
from eums.test.factories.question_factory import MultipleChoiceQuestionFactory
from eums.test.api.authenticated_api_test_case import AuthenticatedAPITestCase
from eums.test.config import BACKEND_URL
from eums.models.question import MultipleChoiceQuestion
ENDPOINT_URL = BACKEND_URL + 'option/'
RECEIVED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'received-options/'
QUALITY_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'quality-options/'
SATISFIED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'satisfied-options/'
class OptionsEndPointTest(AuthenticatedAPITestCase):
def test_should_create_item(self):
question = MultipleChoiceQuestionFactory()
option_details = {'text': "Bad", 'question': question.id}
response = self.client.post(ENDPOINT_URL, option_details, format='json')
self.assertEqual(response.status_code, 201)
self.assertDictContainsSubset(option_details, response.data)
def test_should_get_options_sorted_by_text(self):
question = MultipleChoiceQuestionFactory()
option_one_details = {'text': "B Option", 'question': question.id}
option_two_details = {'text': "A Option", 'question': question.id}
create_option(self, option_one_details)
create_option(self, option_two_details)
get_response = self.client.get(ENDPOINT_URL)
self.assertEqual(get_response.status_code, 200)
self.assertDictContainsSubset(option_two_details, get_response.data[0])
self.assertDictContainsSubset(option_one_details, get_response.data[1])
class ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):
def test_should_only_get_received_options(self):
received_question,_ = MultipleChoiceQuestion.objects.get_or_create(
uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'],
text='Was product received?', label='productReceived'
)
other_question = MultipleChoiceQuestionFactory()
option_one_details = {'text': "Yes", 'question': received_question.id}
option_two_details = {'text': "No", 'question': received_question.id}
option_three_details = {'text': "Other", 'question': other_question.id}
create_option(self, option_one_details)
create_option(self, option_two_details)
create_option(self, option_three_details)
get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)
self.assertEqual(get_response.status_code, 200)
self.assertDictContainsSubset(option_one_details, get_response.data[0])
self.assertDictContainsSubset(option_two_details, get_response.data[2])
self.assertNotIn(option_three_details, get_response.data)
class QualityOptionsEndPointTest(AuthenticatedAPITestCase):
def test_should_only_get_quality_options_sorted_by_text(self):
quality_question,_ = MultipleChoiceQuestion.objects.get_or_create(
uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'],
text='What is the quality of the product?', label='qualityOfProduct'
)
other_question = MultipleChoiceQuestionFactory()
option_one_details = {'text': "B Option", 'question': quality_question.id}
option_two_details = {'text': "A Option", 'question': quality_question.id}
option_three_details = {'text': "C Option", 'question': other_question.id}
create_option(self, option_one_details)
create_option(self, option_two_details)
create_option(self, option_three_details)
get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)
self.assertEqual(get_response.status_code, 200)
self.assertDictContainsSubset(option_two_details, get_response.data[0])
self.assertDictContainsSubset(option_one_details, get_response.data[1])
self.assertNotIn(option_three_details, get_response.data)
class SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):
def test_should_only_get_satisfied_options(self):
satisfied_question,_ = MultipleChoiceQuestion.objects.get_or_create(
uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'],
text='Are you satisfied with the product?', label='satisfiedWithProduct'
)
other_question = MultipleChoiceQuestionFactory()
option_one_details = {'text': "Yes", 'question': satisfied_question.id}
option_two_details = {'text': "No", 'question': satisfied_question.id}
option_three_details = {'text': "Other", 'question': other_question.id}
create_option(self, option_one_details)
create_option(self, option_two_details)
create_option(self, option_three_details)
get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)
self.assertEqual(get_response.status_code, 200)
self.assertDictContainsSubset(option_one_details, get_response.data[0])
self.assertDictContainsSubset(option_two_details, get_response.data[2])
self.assertNotIn(option_three_details, get_response.data)
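
# A plausible sketch of the create_option helper imported above (an assumption:
# the real eums.test.api.api_test_helpers version may differ). The tests only
# require that it POSTs the given details to the options endpoint using the
# test case's authenticated client.
def _create_option_sketch(test_case, option_details):
    response = test_case.client.post(ENDPOINT_URL, option_details, format='json')
    assert response.status_code == 201
    return response.data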
|
normal
|
{
"blob_id": "1152f144e17c11416f9ed56b4408f18615b16dc2",
"index": 5187,
"step-1": "<mask token>\n\n\nclass OptionsEndPointTest(AuthenticatedAPITestCase):\n <mask token>\n <mask token>\n\n\nclass ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_received_options(self):\n received_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Was product received?', label='productReceived')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': received_question.id}\n option_two_details = {'text': 'No', 'question': received_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass QualityOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_quality_options_sorted_by_text(self):\n quality_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'What is the quality of the product?', label='qualityOfProduct')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question':\n quality_question.id}\n option_two_details = {'text': 'A Option', 'question':\n quality_question.id}\n option_three_details = {'text': 'C Option', 'question':\n other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_satisfied_options(self):\n satisfied_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Are you satisfied with the product?', label='satisfiedWithProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': satisfied_question.id}\n option_two_details = {'text': 'No', 'question': satisfied_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n",
"step-2": "<mask token>\n\n\nclass OptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_create_item(self):\n question = MultipleChoiceQuestionFactory()\n option_details = {'text': 'Bad', 'question': question.id}\n response = self.client.post(ENDPOINT_URL, option_details, format='json'\n )\n self.assertEqual(response.status_code, 201)\n self.assertDictContainsSubset(option_details, response.data)\n <mask token>\n\n\nclass ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_received_options(self):\n received_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Was product received?', label='productReceived')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': received_question.id}\n option_two_details = {'text': 'No', 'question': received_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass QualityOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_quality_options_sorted_by_text(self):\n quality_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'What is the quality of the product?', label='qualityOfProduct')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question':\n quality_question.id}\n option_two_details = {'text': 'A Option', 'question':\n quality_question.id}\n option_three_details = {'text': 'C Option', 'question':\n other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_satisfied_options(self):\n satisfied_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Are you satisfied with the product?', label='satisfiedWithProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': satisfied_question.id}\n option_two_details = {'text': 'No', 'question': satisfied_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, 
get_response.data)\n",
"step-3": "<mask token>\n\n\nclass OptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_create_item(self):\n question = MultipleChoiceQuestionFactory()\n option_details = {'text': 'Bad', 'question': question.id}\n response = self.client.post(ENDPOINT_URL, option_details, format='json'\n )\n self.assertEqual(response.status_code, 201)\n self.assertDictContainsSubset(option_details, response.data)\n\n def test_should_get_options_sorted_by_text(self):\n question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question': question.id}\n option_two_details = {'text': 'A Option', 'question': question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n get_response = self.client.get(ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n\n\nclass ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_received_options(self):\n received_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Was product received?', label='productReceived')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': received_question.id}\n option_two_details = {'text': 'No', 'question': received_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass QualityOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_quality_options_sorted_by_text(self):\n quality_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'What is the quality of the product?', label='qualityOfProduct')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question':\n quality_question.id}\n option_two_details = {'text': 'A Option', 'question':\n quality_question.id}\n option_three_details = {'text': 'C Option', 'question':\n other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_satisfied_options(self):\n satisfied_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Are you satisfied with the product?', label='satisfiedWithProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': satisfied_question.id}\n option_two_details = {'text': 'No', 
'question': satisfied_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n",
"step-4": "<mask token>\nENDPOINT_URL = BACKEND_URL + 'option/'\nRECEIVED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'received-options/'\nQUALITY_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'quality-options/'\nSATISFIED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'satisfied-options/'\n\n\nclass OptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_create_item(self):\n question = MultipleChoiceQuestionFactory()\n option_details = {'text': 'Bad', 'question': question.id}\n response = self.client.post(ENDPOINT_URL, option_details, format='json'\n )\n self.assertEqual(response.status_code, 201)\n self.assertDictContainsSubset(option_details, response.data)\n\n def test_should_get_options_sorted_by_text(self):\n question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question': question.id}\n option_two_details = {'text': 'A Option', 'question': question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n get_response = self.client.get(ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n\n\nclass ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_received_options(self):\n received_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Was product received?', label='productReceived')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': received_question.id}\n option_two_details = {'text': 'No', 'question': received_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass QualityOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_quality_options_sorted_by_text(self):\n quality_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'What is the quality of the product?', label='qualityOfProduct')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question':\n quality_question.id}\n option_two_details = {'text': 'A Option', 'question':\n quality_question.id}\n option_three_details = {'text': 'C Option', 'question':\n other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_satisfied_options(self):\n satisfied_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'], text=\n 
'Are you satisfied with the product?', label='satisfiedWithProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': satisfied_question.id}\n option_two_details = {'text': 'No', 'question': satisfied_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n",
"step-5": "from eums.test.api.api_test_helpers import create_option\nfrom eums.test.factories.question_factory import MultipleChoiceQuestionFactory\nfrom eums.test.api.authenticated_api_test_case import AuthenticatedAPITestCase\nfrom eums.test.config import BACKEND_URL\nfrom eums.models.question import MultipleChoiceQuestion\n\n\nENDPOINT_URL = BACKEND_URL + 'option/'\nRECEIVED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'received-options/'\nQUALITY_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'quality-options/'\nSATISFIED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'satisfied-options/'\n\n\nclass OptionsEndPointTest(AuthenticatedAPITestCase):\n def test_should_create_item(self):\n question = MultipleChoiceQuestionFactory()\n option_details = {'text': \"Bad\", 'question': question.id}\n\n response = self.client.post(ENDPOINT_URL, option_details, format='json')\n\n self.assertEqual(response.status_code, 201)\n self.assertDictContainsSubset(option_details, response.data)\n\n def test_should_get_options_sorted_by_text(self):\n question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': \"B Option\", 'question': question.id}\n option_two_details = {'text': \"A Option\", 'question': question.id}\n\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n\n get_response = self.client.get(ENDPOINT_URL)\n\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n\nclass ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):\n def test_should_only_get_received_options(self):\n received_question,_ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'],\n text='Was product received?', label='productReceived'\n )\n other_question = MultipleChoiceQuestionFactory()\n\n option_one_details = {'text': \"Yes\", 'question': received_question.id}\n option_two_details = {'text': \"No\", 'question': received_question.id}\n option_three_details = {'text': \"Other\", 'question': other_question.id}\n\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n\n get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)\n\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n\nclass QualityOptionsEndPointTest(AuthenticatedAPITestCase):\n def test_should_only_get_quality_options_sorted_by_text(self):\n quality_question,_ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'],\n text='What is the quality of the product?', label='qualityOfProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n\n option_one_details = {'text': \"B Option\", 'question': quality_question.id}\n option_two_details = {'text': \"A Option\", 'question': quality_question.id}\n option_three_details = {'text': \"C Option\", 'question': other_question.id}\n\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n\n get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)\n\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, 
get_response.data[1])\n self.assertNotIn(option_three_details, get_response.data)\n\nclass SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):\n def test_should_only_get_satisfied_options(self):\n satisfied_question,_ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'],\n text='Are you satisfied with the product?', label='satisfiedWithProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n\n option_one_details = {'text': \"Yes\", 'question': satisfied_question.id}\n option_two_details = {'text': \"No\", 'question': satisfied_question.id}\n option_three_details = {'text': \"Other\", 'question': other_question.id}\n\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n\n get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)\n\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
from django import forms
class SignupAliasForm(forms.Form):
alias = forms.CharField(max_length=20, required=True)
email_secret = forms.CharField(max_length=100, required=True)
|
normal
|
{
"blob_id": "953186a330ae9dff15c037b556746590d748c7ad",
"index": 4974,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SignupAliasForm(forms.Form):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SignupAliasForm(forms.Form):\n alias = forms.CharField(max_length=20, required=True)\n email_secret = forms.CharField(max_length=100, required=True)\n",
"step-4": "from django import forms\n\n\nclass SignupAliasForm(forms.Form):\n alias = forms.CharField(max_length=20, required=True)\n email_secret = forms.CharField(max_length=100, required=True)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
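# Chunked pass-through with a "quality" knob: stdin is read in fixed-size chunks;
# roughly a qlty fraction of the chunks are emitted fresh, the rest re-emit the
# last fresh chunk (a simple lossy / glitch-style degradation).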
byte = int(sys.argv[1])
qlty = float(sys.argv[2])
n = 0
oo = ''  # last "fresh" chunk; re-emitted whenever the current chunk is not kept
while True:
o = sys.stdin.read(byte)
if qlty>(qlty*n)%1:
oo = o
sys.stdout.write(o)
else:
sys.stdout.write(oo)
if not o:
break
n=n+1
|
normal
|
{
"blob_id": "70845ab4aab80d988a5c01d0b4fb76e63b800527",
"index": 6484,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n o = sys.stdin.read(byte)\n if qlty > qlty * n % 1:\n oo = o\n sys.stdout.write(o)\n else:\n sys.stdout.write(oo)\n if not o:\n break\n n = n + 1\n",
"step-3": "<mask token>\nbyte = int(sys.argv[1])\nqlty = float(sys.argv[2])\nn = 0\nwhile True:\n o = sys.stdin.read(byte)\n if qlty > qlty * n % 1:\n oo = o\n sys.stdout.write(o)\n else:\n sys.stdout.write(oo)\n if not o:\n break\n n = n + 1\n",
"step-4": "import sys\nbyte = int(sys.argv[1])\nqlty = float(sys.argv[2])\nn = 0\nwhile True:\n o = sys.stdin.read(byte)\n if qlty > qlty * n % 1:\n oo = o\n sys.stdout.write(o)\n else:\n sys.stdout.write(oo)\n if not o:\n break\n n = n + 1\n",
"step-5": "import sys\r\nbyte = int(sys.argv[1])\r\nqlty = float(sys.argv[2])\r\nn = 0\r\nwhile True:\r\n o = sys.stdin.read(byte)\r\n if qlty>(qlty*n)%1:\r\n oo = o\r\n sys.stdout.write(o)\r\n else:\r\n sys.stdout.write(oo)\r\n if not o:\r\n break\r\n n=n+1",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
__author__ = "Maxime Beauchamp"
__version__ = "0.1"
__date__ = "2020-12-10"
__email__ = "[email protected]"
from graphics_OSSE import *
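# (os, sys, numpy, pickle and join_paths are presumably provided by this star import)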
# function to create recursive paths
def mk_dir_recursive(dir_path):
if os.path.isdir(dir_path):
return
h, t = os.path.split(dir_path) # head/tail
if not os.path.isdir(h):
mk_dir_recursive(h)
new_path = join_paths(h, t)
if not os.path.isdir(new_path):
os.mkdir(new_path)
type_obs = sys.argv[1]
domain = sys.argv[2]
workpath = "/users/local/m19beauc/4DVARNN-DinAE_xp/"+domain+"/OSSE/scores_allmethods_nadlag_"+type_obs
scratchpath = "/users/local/m19beauc/4DVARNN-DinAE_xp/"+domain+"/OSSE"
if not os.path.exists(workpath):
mk_dir_recursive(workpath)
#else:
# shutil.rmtree(workpath)
# mk_dir_recursive(workpath)
## parameters
if domain=="OSMOSIS":
extent = [-19.5,-11.5,45.,55.]
indLat = 200
indLon = 160
elif domain=='GULFSTREAM':
extent = [-65.,-55.,33.,43.]
indLat = 200
indLon = 200
else:
extent=[-65.,-55.,30.,40.]
indLat = 200
indLon = 200
#lon = lon[:indLon]
#lat = lat[:indLat]
## store all data in a list
AnDA_nadir_lag_0_file = scratchpath+'/resAnDA_nadir_nadlag_0_'+type_obs+'/saved_path.pickle'
FP_GENN_nadir_lag_0_file = scratchpath+'/resIA_nadir_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
AnDA_nadir_lag_5_file = scratchpath+'/resAnDA_nadir_nadlag_5_'+type_obs+'/saved_path.pickle'
FP_GENN_nadir_lag_5_file = scratchpath+'/resIA_nadir_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
AnDA_nadirswot_lag_0_file = scratchpath+'/resAnDA_nadirswot_nadlag_0'+type_obs+'/saved_path.pickle'
FP_GENN_nadirswot_lag_0_file = scratchpath+'/resIA_nadirswot_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
AnDA_nadirswot_lag_5_file = scratchpath+'/resAnDA_nadirswot_nadlag_5'+type_obs+'/saved_path.pickle'
FP_GENN_nadirswot_lag_5_file = scratchpath+'/resIA_nadirswot_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
# Reload saved AnDA result
with open(AnDA_nadir_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_0 = AnDA_ssh_1
itrp_dineof_nadir_0 = itrp_dineof
with open(AnDA_nadirswot_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1
itrp_dineof_nadirswot_0 = itrp_dineof
with open(AnDA_nadir_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_5 = AnDA_ssh_1
itrp_dineof_nadir_5 = itrp_dineof
with open(AnDA_nadirswot_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1
itrp_dineof_nadirswot_5 = itrp_dineof
# Reload saved ConvAE and GE-NN results
with open(FP_GENN_nadir_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9]
with open(FP_GENN_nadir_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9]
## list of dates
lday1 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=60+i),"%Y-%m-%d") for i in range(20) ]
lday2 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=140+i),"%Y-%m-%d") for i in range(20) ]
lday3 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=220+i),"%Y-%m-%d") for i in range(20) ]
lday4 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=300+i),"%Y-%m-%d") for i in range(20) ]
lday = np.concatenate([lday1,lday2,lday3,lday4])
lday2 = [ datetime.strptime(lday[i],'%Y-%m-%d') for i in range(len(lday)) ]
GT = AnDA_ssh_1_nadir_0.GT[:,:indLat,:indLon]   # ground truth field from the AnDA output
# list_data (AnDA nadir)
list_data = []
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:,:indLat,:indLon])
list_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:,:indLat,:indLon])
# arguments for plots (nadir)
labels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_AnDA_nadir_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
# list_data (AnDA nadirswot)
list_data = []
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:,:indLat,:indLon])
list_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:,:indLat,:indLon])
# arguments for plots (nadirswot)
labels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_AnDA_nadirswot_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
# list_data (GENN nadir)
list_data = []
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadir_0[:,:indLat,:indLon])
list_data.append(itrp_FP_GENN_nadir_5[:,:indLat,:indLon])
# arguments for plots (nadir)
labels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_GENN_nadir_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
# list_data (GENN nadirswot)
list_data = []
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadirswot_0[:,:indLat,:indLon])
list_data.append(itrp_FP_GENN_nadirswot_5[:,:indLat,:indLon])
# arguments for plots (nadirswot)
labels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_GENN_nadirswot_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
|
normal
|
{
"blob_id": "9f4cd9ed8aea03f5908aef4a154d964f0810619b",
"index": 9820,
"step-1": "<mask token>\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\n<mask token>\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\nif domain == 'OSMOSIS':\n extent = [-19.5, -11.5, 45.0, 55.0]\n indLat = 200\n indLon = 160\nelif domain == 'GULFSTREAM':\n extent = [-65.0, -55.0, 33.0, 43.0]\n indLat = 200\n indLon = 200\nelse:\n extent = [-65.0, -55.0, 30.0, 40.0]\n indLat = 200\n indLon = 200\n<mask token>\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1\n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1\n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9\n ]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9\n ]\n<mask token>\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n<mask token>\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n<mask token>\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = 
int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n<mask token>\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n",
"step-3": "__author__ = 'Maxime Beauchamp'\n__version__ = '0.1'\n__date__ = '2020-12-10'\n__email__ = '[email protected]'\n<mask token>\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\ntype_obs = sys.argv[1]\ndomain = sys.argv[2]\nworkpath = ('/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain +\n '/OSSE/scores_allmethods_nadlag_' + type_obs)\nscratchpath = '/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain + '/OSSE'\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\nif domain == 'OSMOSIS':\n extent = [-19.5, -11.5, 45.0, 55.0]\n indLat = 200\n indLon = 160\nelif domain == 'GULFSTREAM':\n extent = [-65.0, -55.0, 33.0, 43.0]\n indLat = 200\n indLon = 200\nelse:\n extent = [-65.0, -55.0, 30.0, 40.0]\n indLat = 200\n indLon = 200\nAnDA_nadir_lag_0_file = (scratchpath + '/resAnDA_nadir_nadlag_0_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_0_file = (scratchpath + '/resIA_nadir_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadir_lag_5_file = (scratchpath + '/resAnDA_nadir_nadlag_5_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_5_file = (scratchpath + '/resIA_nadir_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_0_file = (scratchpath + '/resAnDA_nadirswot_nadlag_0' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_0_file = (scratchpath + '/resIA_nadirswot_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_5_file = (scratchpath + '/resAnDA_nadirswot_nadlag_5' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_5_file = (scratchpath + '/resIA_nadirswot_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1\n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1\n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9\n ]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9\n ]\nlday1 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=60 + i), '%Y-%m-%d') for i in range(20)]\nlday2 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=140 + i), '%Y-%m-%d') for i in range(20)]\nlday3 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n 
timedelta(days=220 + i), '%Y-%m-%d') for i in range(20)]\nlday4 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=300 + i), '%Y-%m-%d') for i in range(20)]\nlday = np.concatenate([lday1, lday2, lday3, lday4])\nlday2 = [datetime.strptime(lday[i], '%Y-%m-%d') for i in range(len(lday))]\nGT = AnDA_ssh_1_nadir.GT[:, :indLat, :indLon]\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n 
print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n",
"step-4": "__author__ = 'Maxime Beauchamp'\n__version__ = '0.1'\n__date__ = '2020-12-10'\n__email__ = '[email protected]'\nfrom graphics_OSSE import *\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\ntype_obs = sys.argv[1]\ndomain = sys.argv[2]\nworkpath = ('/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain +\n '/OSSE/scores_allmethods_nadlag_' + type_obs)\nscratchpath = '/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain + '/OSSE'\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\nif domain == 'OSMOSIS':\n extent = [-19.5, -11.5, 45.0, 55.0]\n indLat = 200\n indLon = 160\nelif domain == 'GULFSTREAM':\n extent = [-65.0, -55.0, 33.0, 43.0]\n indLat = 200\n indLon = 200\nelse:\n extent = [-65.0, -55.0, 30.0, 40.0]\n indLat = 200\n indLon = 200\nAnDA_nadir_lag_0_file = (scratchpath + '/resAnDA_nadir_nadlag_0_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_0_file = (scratchpath + '/resIA_nadir_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadir_lag_5_file = (scratchpath + '/resAnDA_nadir_nadlag_5_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_5_file = (scratchpath + '/resIA_nadir_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_0_file = (scratchpath + '/resAnDA_nadirswot_nadlag_0' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_0_file = (scratchpath + '/resIA_nadirswot_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_5_file = (scratchpath + '/resAnDA_nadirswot_nadlag_5' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_5_file = (scratchpath + '/resIA_nadirswot_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1\n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1\n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9\n ]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9\n ]\nlday1 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=60 + i), '%Y-%m-%d') for i in range(20)]\nlday2 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=140 + i), '%Y-%m-%d') for i in range(20)]\nlday3 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n 
timedelta(days=220 + i), '%Y-%m-%d') for i in range(20)]\nlday4 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=300 + i), '%Y-%m-%d') for i in range(20)]\nlday = np.concatenate([lday1, lday2, lday3, lday4])\nlday2 = [datetime.strptime(lday[i], '%Y-%m-%d') for i in range(len(lday))]\nGT = AnDA_ssh_1_nadir.GT[:, :indLat, :indLon]\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n 
print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n",
"step-5": "#!/usr/bin/env python\n\n__author__ = \"Maxime Beauchamp\"\n__version__ = \"0.1\"\n__date__ = \"2020-12-10\"\n__email__ = \"[email protected]\"\n\nfrom graphics_OSSE import *\n\n# function to create recursive paths\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path) # head/tail\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\ntype_obs = sys.argv[1]\ndomain = sys.argv[2] \n\nworkpath = \"/users/local/m19beauc/4DVARNN-DinAE_xp/\"+domain+\"/OSSE/scores_allmethods_nadlag_\"+type_obs\nscratchpath = \"/users/local/m19beauc/4DVARNN-DinAE_xp/\"+domain+\"/OSSE\"\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\n#else:\n# shutil.rmtree(workpath)\n# mk_dir_recursive(workpath) \n\n## parameters\nif domain==\"OSMOSIS\":\n extent = [-19.5,-11.5,45.,55.]\n indLat = 200\n indLon = 160\nelif domain=='GULFSTREAM':\n extent = [-65.,-55.,33.,43.]\n indLat = 200\n indLon = 200\nelse:\n extent=[-65.,-55.,30.,40.]\n indLat = 200\n indLon = 200\n#lon = lon[:indLon]\n#lat = lat[:indLat]\n\n## store all data in a list\nAnDA_nadir_lag_0_file = scratchpath+'/resAnDA_nadir_nadlag_0_'+type_obs+'/saved_path.pickle'\nFP_GENN_nadir_lag_0_file = scratchpath+'/resIA_nadir_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\nAnDA_nadir_lag_5_file = scratchpath+'/resAnDA_nadir_nadlag_5_'+type_obs+'/saved_path.pickle'\nFP_GENN_nadir_lag_5_file = scratchpath+'/resIA_nadir_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\nAnDA_nadirswot_lag_0_file = scratchpath+'/resAnDA_nadirswot_nadlag_0'+type_obs+'/saved_path.pickle'\nFP_GENN_nadirswot_lag_0_file = scratchpath+'/resIA_nadirswot_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\nAnDA_nadirswot_lag_5_file = scratchpath+'/resAnDA_nadirswot_nadlag_5'+type_obs+'/saved_path.pickle'\nFP_GENN_nadirswot_lag_5_file = scratchpath+'/resIA_nadirswot_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\n\n# Reload saved AnDA result\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1 \n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1 \n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\n# Reload saved ConvAE and GE-NN results\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9]\n\n\n## list of dates\nlday1 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=60+i),\"%Y-%m-%d\") for i 
in range(20) ]\nlday2 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=140+i),\"%Y-%m-%d\") for i in range(20) ]\nlday3 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=220+i),\"%Y-%m-%d\") for i in range(20) ]\nlday4 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=300+i),\"%Y-%m-%d\") for i in range(20) ]\nlday = np.concatenate([lday1,lday2,lday3,lday4])\nlday2 = [ datetime.strptime(lday[i],'%Y-%m-%d') for i in range(len(lday)) ]\n\nGT = AnDA_ssh_1_nadir.GT[:,:indLat,:indLon]\n# list_data (AnDA nadir)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:,:indLat,:indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:,:indLat,:indLon])\n# arguments for plots (nadir)\nlabels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_AnDA_nadir_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n# list_data (AnDA nadirswot)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:,:indLat,:indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:,:indLat,:indLon])\n# arguments for plots (nadirswot)\nlabels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_AnDA_nadirswot_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n# list_data (GENN nadir)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:,:indLat,:indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:,:indLat,:indLon])\n# arguments for plots (nadir)\nlabels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = 
np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_GENN_nadir_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n# list_data (GENN nadirswot)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:,:indLat,:indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:,:indLat,:indLon])\n# arguments for plots (nadirswot)\nlabels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_GENN_nadirswot_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# author : rovo98
# date: 2018.3.19
# this is a demo for test calling functions.
n1 = 255
n2 = 1000
print(hex(n1))
print(hex(n2))
print(abs(-119999))
|
normal
|
{
"blob_id": "31064145ae2702f93a475d0957395c62a6b320ee",
"index": 1741,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(hex(n1))\nprint(hex(n2))\nprint(abs(-119999))\n",
"step-3": "n1 = 255\nn2 = 1000\nprint(hex(n1))\nprint(hex(n2))\nprint(abs(-119999))\n",
"step-4": "# -*- coding: utf-8 -*-\r\n# author : rovo98\r\n# date: 2018.3.19\r\n\r\n\r\n# this is a demo for test calling functions.\r\nn1 = 255\r\nn2 = 1000\r\n\r\nprint(hex(n1))\r\nprint(hex(n2))\r\n\r\nprint(abs(-119999))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Library for Stalker project
#Libraries
import pandas as pd
import seaborn as sns
from IPython.display import Image, display
import matplotlib.pyplot as plt
# Google search
from googlesearch import search
# Tldextract to get domain of url
import tldextract as tld
# BeautifulSoup
from bs4 import BeautifulSoup as bs
from bs4.element import Comment
import urllib.request
# NLTK to analyze webs
import nltk
from nltk.corpus import stopwords
from nltk import FreqDist
from nltk.tokenize import word_tokenize
# Find close matches
from difflib import get_close_matches
# Sentiment analysis
from textblob import TextBlob
# Twitter sentiment analysis
import tweepy
# News API
from newsapi import NewsApiClient
# Credentials
import credentials as cd
# Finding info in APIs
newsapi = NewsApiClient(api_key=cd.news_credentials['api_key'])
news_sources = 'the-verge,buzzfeed,engadget,hacker-news,mashable,reddit-r-all,wired,techcrunch'
# Twitter API
consumer_key = cd.twitter_credentials['consumer_key']
consumer_key_secret = cd.twitter_credentials['consumer_key_secret']
access_token = cd.twitter_credentials['access_token']
access_token_secret = cd.twitter_credentials['access_token_secret']
auth = tweepy.OAuthHandler(consumer_key, consumer_key_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Finding query on Google
# Finding related urls
def find_webs(query):
urls = []
rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube','pinterest','angel']
sites = []
red_social = False
for s in search(query, tld="com", num=30, stop=30, pause=3, lang='en'):
if len(urls)<10:
for rs in rrss:
if rs in s or tld.extract(s).domain in sites:
red_social = True
if not red_social and s not in urls:
urls.append(s)
sites.append(tld.extract(s).domain)
red_social = False
return urls
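# Helpers to pull only the human-visible text out of a page (scripts, styles,
# comments and metadata are dropped); cleaning_urls_text then tokenizes it and
# strips English stopwords.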
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = bs(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def cleaning_urls_text(url):
try:
html = text_from_html(urllib.request.urlopen(url).read())
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(html)
return [w for w in word_tokens if not w in stop_words]
except:
return []
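# Keyword screen: count how many crime/fraud-related terms appear on each page
# and surface the URLs with the most hits.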
def filter_warning_words(sentence):
warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime','arson', 'assault', 'bigamy', 'blackmail',
'bribery', 'burglary', 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud', 'genocide',
'hijacking','homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder', 'perjury', 'rape', 'riot',
'robbery', 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing']
return list(filter(lambda word: word in warning_word, sentence))
def warnings_count(url):
clean_sentence = cleaning_urls_text(url)
length = len(filter_warning_words(clean_sentence))
return (url, length) if length != 0 else None
def most_warnings(urls, look_for):
list_len_tup = list(map(warnings_count, urls))
list_len_tup_clean = list(filter(lambda item: item != None, list_len_tup))
list_len_tup_clean.sort(key = lambda item: item[1], reverse=True)
top_urls = [url for url, length in list_len_tup_clean[:2]]
if len(top_urls) > 1:
print(f"""
We found something sketchy. You might want to check these links:
- {top_urls[0]}
- {top_urls[1]}
""")
elif len(top_urls) == 1:
print(f"""
We found something sketchy. You might want to check this link:
{top_urls[0]}
""")
else:
print(f"We couldn't find anything worrying about {look_for} on Google. Nice!")
# Input correction
def retrieve_name(my_name, companies):
companies_list = []
for i in companies.dropna(subset=['name']).name:
companies_list.append(i)
if my_name in companies_list:
return my_name
elif len(get_close_matches(my_name, companies_list)) > 0:
action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_name, companies_list)[0])
if (action == "y"):
return get_close_matches(my_name, companies_list)[0]
elif (action == "n"):
return my_name
else:
return("we don't understand you. Apologies.")
def retrieve_sector(my_sector, investments):
investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list'])
sector_list0 = []
sector_list = []
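    # company_category_list holds comma-separated categories; flatten them into one list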
for item in investments['company_category_list']:
if ',' in item:
sector_list0.append(item.split(sep=', '))
else:
sector_list0.append(item)
for i in sector_list0:
if type(i) == list:
for sec in i:
sector_list.append(sec)
else:
sector_list.append(i)
if my_sector in sector_list:
return my_sector
elif len(get_close_matches(my_sector, sector_list)) > 0:
action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_sector, sector_list) [0])
if (action == "y"):
return get_close_matches(my_sector, sector_list)[0]
else:
return my_sector
# Sentiment analysis tweeter
def tw_sent_sector(public_tweets, sector):
sentiment_list = []
for tweet in public_tweets:
analysis = TextBlob(tweet.text)
sentiment_list.append(analysis.sentiment[0])
if sum(sentiment_list)>0:
sent = 'Positive'
elif sum(sentiment_list)<0:
sent = 'Negative'
else:
sent = 'Neutral'
print(f"The sentiment about {sector} industry in Twitter is {sent}")
# Sentiment analysis news
def news_sentiment_sector(public_news, sector):
news_list = []
for piece in range(len(public_news['articles'])):
news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0])
news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0])
if sum(news_list)>0:
news_sent = 'Positive'
elif sum(news_list)<0:
news_sent = 'Negative'
else:
news_sent = 'Neutral'
print(f"There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}")
# Look for data about sector
def category(sector, investments):
# Gather tweets
public_tweets = api.search(sector)
# Gather news
public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en')
# Prepare the data for the sector
investments = investments.dropna(subset=['company_category_list'])
sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1)
sector_investments.reset_index(drop=True)
sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at'])
sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year )
sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month )
sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day )
# Sentiment analysis Twitter
tw_sent_sector(public_tweets, sector)
# Sentiment analysis News
news_sentiment_sector(public_news, sector)
# create plot
sector_year = sector_investments.groupby(['Year']).sum()[-10:]
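    # percentage change in total raised amount between the first and last year of the window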
movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] -sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100)
    # did total funding grow or fall between the first and last year plotted?
    if sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= sector_year.raised_amount_usd.iloc[0]:
        in_dec = 'increased'
        grow = 'growing'
    else:
        in_dec = 'decreased'
        grow = 'falling'
sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}')
investments_per_year = sector_investments.groupby(['Year']).count()
peak_year = sector_year.index[sector_year['raised_amount_usd']== max(sector_year.raised_amount_usd)].to_list()
peak_amount = max(sector_year.raised_amount_usd)
#peak_year_invest = investments_per_year.index[investments_per_year['raised_amount_usd']== max(investments_per_year.raised_amount_usd)].to_list()
low_amount = min(sector_year.raised_amount_usd)
most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False)
low_year = sector_year.index[sector_year['raised_amount_usd']== min(sector_year.raised_amount_usd)].to_list()
format_doll = ',.2f'
print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years.
It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested.
""")
plt.ylabel('Raised amount in USD')
plt.show()
sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}')
plt.ylabel('Number of investments')
#print("""Plot explanaition average investment
""")
plt.show()
#print(f"""
# The Top 3 companies with biggest investments are:
#- {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised,
#- {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and
#- {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised
#""")
# Sentiment analysis founder
def tw_analysis_founder(public_tweets, founder):
sentiment_list = []
for tweet in public_tweets:
analysis = TextBlob(tweet.text)
sentiment_list.append(analysis.sentiment[0])
if sum(sentiment_list)>0:
sent = 'Positive'
elif sum(sentiment_list)<0:
sent = 'Negative'
else:
sent = 'Neutral'
print(f"The sentiment about {founder} in Twitter is {sent}")
# Look for data about the founder
def founders(founder, people):
full_name = founder.split()
public_tweets = api.search(founder)
# What to search on Google
look_for = founder
for i in range(len(people)):
if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[i]==full_name[1]:
display(Image(url=people.profile_image_url[i]))
print(f'We found this information about {founder}:')
print(f"Founder's name: {people.first_name[i]} {people.last_name[i]} ")
print(f"Title: {people.title[i]}")
print(f"Organization: {people.organization[i]}")
print(f"Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}")
            if pd.notna(people.twitter_url[i]):
                print(f"Twitter URL: {people.twitter_url[i]}")
            if pd.notna(people.linkedin_url[i]):
                print(f"Linkedin URL: {people.linkedin_url[i]}")
            if pd.notna(people.facebook_url[i]):
                print(f"Facebook URL: {people.facebook_url[i]}")
# Twitter analysis
tw_analysis_founder(public_tweets, founder)
# Google search
most_warnings(find_webs(founder), look_for)
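
# Example call (hedged sketch; 'Jane Doe' is a placeholder name):
#   founders('Jane Doe', people)
# where `people` is the Crunchbase people DataFrame loaded elsewhere in the project.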
# Look for data about company
def find_companies_by_size(size, companies, name, sector, company):
company_nan = companies.dropna()
company_sector = company_nan[company_nan['category_list'].str.contains(sector)].drop('index',axis=1).dropna()
company_sector['total_funding_size']=pd.qcut(company_sector.funding_total_usd, q=[0, .25, .75, 1], labels=['small', 'medium', 'big'])
    if name in company_nan['name'].values:
        return company_sector[(company_sector['total_funding_size']==size) & (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed') & (company_sector['country_code']==company.country_code.item())].sample()
else:
return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')].sample()
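
# Note (hedged): `startup()` below calls this as
#   find_companies_by_size('small', companies, name, sector, company)
# to sample one open, funded (> $100,000) competitor from the same funding-size bucket.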
def competitor_info(company):
print(f"Company name: {company.name.item()}")
print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}")
print(f"Total rounds: {company.funding_rounds.item()}")
print(f"Webpage: {company.homepage_url.item()}")
print(f"Country: {company.country_code.item()}")
print(f"Status: {company.status.item()}")
print(f"Founded in: {company.founded_at.item()}")
# Sentiment analysis company
def tw_analysis_company(public_tweets, company):
sentiment_list = []
for tweet in public_tweets:
analysis = TextBlob(tweet.text)
sentiment_list.append(analysis.sentiment[0])
if sum(sentiment_list)>0:
sent = 'Positive'
elif sum(sentiment_list)<0:
sent = 'Negative'
else:
sent = 'Neutral'
print(f"The sentiment about {company} in Twitter is {sent}")
def startup(name, companies, sector):
company = companies[companies['name'] == name]
# What to search on Google
look_for = name
# Gather tweets
public_tweets = api.search(name)
try:
print(f"Company name: {company.name.item()}")
print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}")
print(f"Total rounds: {company.funding_rounds.item()}")
print(f"Webpage: {company.homepage_url.item()}")
print(f"Country: {company.country_code.item()}")
print(f"Status: {company.status.item()}")
# Find competitors
print('\n')
print(f"Competitors similar to {company.name.item()}:")
print('\n')
competitor_info(find_companies_by_size('small', companies, name, sector, company))
print('\n')
competitor_info(find_companies_by_size('medium', companies, name, sector, company))
print('\n')
competitor_info(find_companies_by_size('big', companies, name, sector, company))
    except Exception:
print(f"We couldn't find information about {name} in Crunchbase")
#Twitter sentiment analysis for company
tw_analysis_company(public_tweets, name)
# Google search
most_warnings(find_webs(name), look_for)
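
# Example call (hedged sketch; 'Acme Analytics' is a placeholder company name):
#   startup('Acme Analytics', companies, 'Analytics')
# where `companies` is the Crunchbase companies DataFrame loaded elsewhere in the project.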
# -*- coding: utf-8 -*-
class Bot(dict):
def __init__(self):
self["getRayon"] = 0
self["getPosition"] = (-1000, -1000)
self.traj = []
def getTrajectoires(self):
return self.traj
def getRayon(self):
return self["getRayon"]
def getPosition(self):
return self["getPosition"]
if __name__ == "__main__":
import sys
import os
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(FILE_DIR, "../../ia"))
sys.path.append(os.path.join(FILE_DIR, "../../libs"))
import time
from graphview import GraphView
from event.goals import navigation
from event import collision
filename = os.path.join(FILE_DIR, "../../ia/event/goals/navigation/map.xml")
try:
offset = sys.argv[1]
except:
offset = 0
start = time.time()
other_bot = Bot()
other_bot.name = 'other'
other_bot["getRayon"] = 200
used_bot = Bot()
used_bot.name = 'used'
used_bot["getRayon"] = 120
ennemy1 = Bot()
ennemy1.name = 'en1'
ennemy2 = Bot()
ennemy2.name = 'en2'
ennemy1["getPosition"] = (1800, 1500)
ennemy1["getRayon"] = 200
ennemy2["getPosition"] = (2200, 500)
ennemy1["getRayon"] = 120
ng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2], filename)
col = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])
print("init time : %s" % (time.time() - start))
v = GraphView(ng, col, other_bot, used_bot)
v.mainloop()
|
normal
|
{
"blob_id": "d178818faf5fb18f5da48c1e2cf7991600731d06",
"index": 4457,
"step-1": "class Bot(dict):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Bot(dict):\n\n def __init__(self):\n self['getRayon'] = 0\n self['getPosition'] = -1000, -1000\n self.traj = []\n\n def getTrajectoires(self):\n return self.traj\n\n def getRayon(self):\n return self['getRayon']\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Bot(dict):\n\n def __init__(self):\n self['getRayon'] = 0\n self['getPosition'] = -1000, -1000\n self.traj = []\n\n def getTrajectoires(self):\n return self.traj\n\n def getRayon(self):\n return self['getRayon']\n\n def getPosition(self):\n return self['getPosition']\n\n\n<mask token>\n",
"step-4": "class Bot(dict):\n\n def __init__(self):\n self['getRayon'] = 0\n self['getPosition'] = -1000, -1000\n self.traj = []\n\n def getTrajectoires(self):\n return self.traj\n\n def getRayon(self):\n return self['getRayon']\n\n def getPosition(self):\n return self['getPosition']\n\n\nif __name__ == '__main__':\n import sys\n import os\n FILE_DIR = os.path.dirname(os.path.abspath(__file__))\n sys.path.append(os.path.join(FILE_DIR, '../../ia'))\n sys.path.append(os.path.join(FILE_DIR, '../../libs'))\n import time\n from graphview import GraphView\n from event.goals import navigation\n from event import collision\n filename = os.path.join(FILE_DIR, '../../ia/event/goals/navigation/map.xml'\n )\n try:\n offset = sys.argv[1]\n except:\n offset = 0\n start = time.time()\n other_bot = Bot()\n other_bot.name = 'other'\n other_bot['getRayon'] = 200\n used_bot = Bot()\n used_bot.name = 'used'\n used_bot['getRayon'] = 120\n ennemy1 = Bot()\n ennemy1.name = 'en1'\n ennemy2 = Bot()\n ennemy2.name = 'en2'\n ennemy1['getPosition'] = 1800, 1500\n ennemy1['getRayon'] = 200\n ennemy2['getPosition'] = 2200, 500\n ennemy1['getRayon'] = 120\n ng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2],\n filename)\n col = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])\n print('init time : %s' % (time.time() - start))\n v = GraphView(ng, col, other_bot, used_bot)\n v.mainloop()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\nclass Bot(dict):\n\tdef __init__(self):\n\t\tself[\"getRayon\"] = 0\n\t\tself[\"getPosition\"] = (-1000, -1000)\n\t\tself.traj = []\n\tdef getTrajectoires(self):\n\t\treturn self.traj\n\tdef getRayon(self):\n\t\treturn self[\"getRayon\"]\n\tdef getPosition(self):\n\t\treturn self[\"getPosition\"]\n\nif __name__ == \"__main__\":\n\timport sys\n\timport os\n\tFILE_DIR = os.path.dirname(os.path.abspath(__file__))\n\tsys.path.append(os.path.join(FILE_DIR, \"../../ia\"))\n\tsys.path.append(os.path.join(FILE_DIR, \"../../libs\"))\n\t\n\timport time\n\t\n\tfrom graphview import GraphView\n\tfrom event.goals import navigation\n\tfrom event import collision\n\t\n\tfilename = os.path.join(FILE_DIR, \"../../ia/event/goals/navigation/map.xml\")\n\ttry:\n\t\toffset = sys.argv[1]\n\texcept:\n\t\toffset = 0\n\tstart = time.time()\n\tother_bot = Bot()\n\tother_bot.name = 'other'\n\tother_bot[\"getRayon\"] = 200\n\tused_bot = Bot()\n\tused_bot.name = 'used'\n\tused_bot[\"getRayon\"] = 120\n\tennemy1 = Bot()\n\tennemy1.name = 'en1'\n\tennemy2 = Bot()\n\tennemy2.name = 'en2'\n\tennemy1[\"getPosition\"] = (1800, 1500)\n\tennemy1[\"getRayon\"] = 200\n\tennemy2[\"getPosition\"] = (2200, 500)\n\tennemy1[\"getRayon\"] = 120\n\tng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2], filename)\n\tcol = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])\n\tprint(\"init time : %s\" % (time.time() - start))\n\t\n\tv = GraphView(ng, col, other_bot, used_bot)\n\tv.mainloop()\n\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
def best_rank_selection(generation):
max_selected = len(generation) // 10
    sorted_by_fitness = sorted(generation, key=lambda x: x.fitness, reverse=True)
return sorted_by_fitness[:max_selected]
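
# Minimal usage sketch: Individual is a hypothetical stand-in type; the selection
# above only relies on objects exposing a .fitness attribute.
from collections import namedtuple

Individual = namedtuple("Individual", ["name", "fitness"])
population = [Individual(f"ind{i}", i * 0.1) for i in range(30)]
elite = best_rank_selection(population)  # top 10% -> the 3 fittest individuals
print([ind.name for ind in elite])  # ['ind29', 'ind28', 'ind27']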
|
normal
|
{
"blob_id": "05a80a904548e90bea635469b94264f219062560",
"index": 7968,
"step-1": "<mask token>\n",
"step-2": "def best_rank_selection(generation):\n max_selected = len(generation) // 10\n sorted_by_fitness = sorted(generation, key=lambda x: x.fitness, reverse\n =True)\n return sorted_by_fitness[:max_selected]\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from fractions import Fraction as f
# Product of the four non-trivial digit-cancelling fractions; it reduces to 1/100.
print(f(49, 98) * f(19, 95) * f(16, 64) * f(26, 65))
|
normal
|
{
"blob_id": "51b32972c97df50a45eb2b9ca58cdec0394e63ee",
"index": 3193,
"step-1": "from fractions import Fraction as f\n\nprint f(49,98) * f(19, 95) * f(16, 64) * f(26, 65)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# encoding: utf-8
# -*- coding: utf-8 -*-
# @contact: [email protected]
# @software: PyCharm
# @time: 2019/3/6 9:59
# @author: Paulson●Wier
# @file: 5_词向量.py
# @desc:
# (1)Word2Vec
from gensim.models import Word2Vec
import jieba
# Define the stop words / punctuation marks to strip
punctuation = ['、',')','(',',',",", "。", ":", ";", ".", "'", '"', "’", "?", "/", "-", "+", "&", "(", ")"]
sentences = [
"长江是中国第一大河,干流全长6397公里(以沱沱河为源),一般称6300公里。流域总面积一百八十余万平方公里,年平均入海水量约九千六百余亿立方米。以干流长度和入海水量论,长江均居世界第三位。",
"黄河,中国古代也称河,发源于中华人民共和国青海省巴颜喀拉山脉,流经青海、四川、甘肃、宁夏、内蒙古、陕西、山西、河南、山东9个省区,最后于山东省东营垦利县注入渤海。干流河道全长5464千米,仅次于长江,为中国第二长河。黄河还是世界第五长河。",
"黄河,是中华民族的母亲河。作为中华文明的发祥地,维系炎黄子孙的血脉.是中华民族民族精神与民族情感的象征。",
"黄河被称为中华文明的母亲河。公元前2000多年华夏族在黄河领域的中原地区形成、繁衍。",
"在兰州的“黄河第一桥”内蒙古托克托县河口镇以上的黄河河段为黄河上游。",
"黄河上游根据河道特性的不同,又可分为河源段、峡谷段和冲积平原三部分。 ",
"黄河,是中华民族的母亲河。"
]
sentences = [jieba.lcut(sen) for sen in sentences]
print('sentences:\n',sentences)
# Remove punctuation
tokenized = []
for sentence in sentences:
words = []
for word in sentence:
if word not in punctuation:
words.append(word)
tokenized.append(words)
print('tokenized:\n',tokenized)
# Train the model
model = Word2Vec(tokenized,sg=1,size=100,window=5,min_count=2,negative=1,sample=0.001,hs=1,workers=4)
'''
Parameter notes:

sg=1 selects the skip-gram algorithm, which is more sensitive to low-frequency words; the default sg=0 uses CBOW.
size is the dimensionality of the output word vectors; too small a value causes collisions in the word mapping and hurts results, too large a value wastes memory and slows the computation. Typical values are between 100 and 200.
window is the maximum distance between the current word and the target word within a sentence; 3 means looking at 3-b words before the target and b words after it (b is chosen at random between 0 and 3).

min_count filters the vocabulary: words whose frequency is below min_count are ignored; the default is 5.
negative and sample can be fine-tuned based on the training results; sample randomly down-samples higher-frequency words to the configured threshold, default 1e-3.
hs=1 means hierarchical softmax will be used; with the default hs=0 and a non-zero negative, negative sampling is used instead.
'''
model.save('model')  # save the model
model = Word2Vec.load('model')  # load the model
# Similarity queries
print(model.wv.similarity('黄河','长江'))
print(model.wv.most_similar(positive=['黄河','母亲河'],negative=['长江']))
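
# A small follow-up sketch: individual word vectors can be read directly from
# model.wv (the same gensim API used above); each vector has length size=100,
# matching the training call.
vector = model.wv['黄河']
print(vector.shape)  # (100,)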
# (2)Doc2Vec
# from gensim.models.doc2vec import Doc2Vec,LabeledSentence
# doc_labels = ["长江", "黄河", "黄河", "黄河", "黄河", "黄河", "黄河"]
# class LabeledLineSentence(object):
# def __init__(self,doc_list,labels_list):
# self.labels_list = labels_list
# self.doc_list = doc_list
#
# def __iter__(self):
# for idx ,doc in enumerate(self.doc_list):
# yield LabeledSentence(words=doc,tags=[self.labels_list[idx]])
#
# # model = Doc2Vec(documents, dm=1, size=100, window=8, min_count=5, workers=4)
# model = Doc2Vec(documents, dm=1, size=100, window=8, min_count=5, workers=4)
# model.save('model1')
# model = Doc2Vec.load('model1')
#
# iter_data = LabeledLineSentence(tokenized, doc_labels)
# model = Doc2Vec(dm=1, size=100, window=8, min_count=5, workers=4)
# model.build_vocab(iter_data)
|
normal
|
{
"blob_id": "5c61ec549a3e78da4ea8a18bb4f8382f2b5c2cfa",
"index": 4438,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('sentences:\\n', sentences)\n<mask token>\nfor sentence in sentences:\n words = []\n for word in sentence:\n if word not in punctuation:\n words.append(word)\n tokenized.append(words)\nprint('tokenized:\\n', tokenized)\n<mask token>\nmodel.save('model')\n<mask token>\nprint(model.wv.similarity('黄河', '长江'))\nprint(model.wv.most_similar(positive=['黄河', '母亲河'], negative=['长江']))\n",
"step-3": "<mask token>\npunctuation = ['、', ')', '(', ',', ',', '。', ':', ';', '.', \"'\", '\"', '’',\n '?', '/', '-', '+', '&', '(', ')']\nsentences = [\n '长江是中国第一大河,干流全长6397公里(以沱沱河为源),一般称6300公里。流域总面积一百八十余万平方公里,年平均入海水量约九千六百余亿立方米。以干流长度和入海水量论,长江均居世界第三位。'\n ,\n '黄河,中国古代也称河,发源于中华人民共和国青海省巴颜喀拉山脉,流经青海、四川、甘肃、宁夏、内蒙古、陕西、山西、河南、山东9个省区,最后于山东省东营垦利县注入渤海。干流河道全长5464千米,仅次于长江,为中国第二长河。黄河还是世界第五长河。'\n , '黄河,是中华民族的母亲河。作为中华文明的发祥地,维系炎黄子孙的血脉.是中华民族民族精神与民族情感的象征。',\n '黄河被称为中华文明的母亲河。公元前2000多年华夏族在黄河领域的中原地区形成、繁衍。',\n '在兰州的“黄河第一桥”内蒙古托克托县河口镇以上的黄河河段为黄河上游。',\n '黄河上游根据河道特性的不同,又可分为河源段、峡谷段和冲积平原三部分。 ', '黄河,是中华民族的母亲河。']\nsentences = [jieba.lcut(sen) for sen in sentences]\nprint('sentences:\\n', sentences)\ntokenized = []\nfor sentence in sentences:\n words = []\n for word in sentence:\n if word not in punctuation:\n words.append(word)\n tokenized.append(words)\nprint('tokenized:\\n', tokenized)\nmodel = Word2Vec(tokenized, sg=1, size=100, window=5, min_count=2, negative\n =1, sample=0.001, hs=1, workers=4)\n<mask token>\nmodel.save('model')\nmodel = Word2Vec.load('model')\nprint(model.wv.similarity('黄河', '长江'))\nprint(model.wv.most_similar(positive=['黄河', '母亲河'], negative=['长江']))\n",
"step-4": "from gensim.models import Word2Vec\nimport jieba\npunctuation = ['、', ')', '(', ',', ',', '。', ':', ';', '.', \"'\", '\"', '’',\n '?', '/', '-', '+', '&', '(', ')']\nsentences = [\n '长江是中国第一大河,干流全长6397公里(以沱沱河为源),一般称6300公里。流域总面积一百八十余万平方公里,年平均入海水量约九千六百余亿立方米。以干流长度和入海水量论,长江均居世界第三位。'\n ,\n '黄河,中国古代也称河,发源于中华人民共和国青海省巴颜喀拉山脉,流经青海、四川、甘肃、宁夏、内蒙古、陕西、山西、河南、山东9个省区,最后于山东省东营垦利县注入渤海。干流河道全长5464千米,仅次于长江,为中国第二长河。黄河还是世界第五长河。'\n , '黄河,是中华民族的母亲河。作为中华文明的发祥地,维系炎黄子孙的血脉.是中华民族民族精神与民族情感的象征。',\n '黄河被称为中华文明的母亲河。公元前2000多年华夏族在黄河领域的中原地区形成、繁衍。',\n '在兰州的“黄河第一桥”内蒙古托克托县河口镇以上的黄河河段为黄河上游。',\n '黄河上游根据河道特性的不同,又可分为河源段、峡谷段和冲积平原三部分。 ', '黄河,是中华民族的母亲河。']\nsentences = [jieba.lcut(sen) for sen in sentences]\nprint('sentences:\\n', sentences)\ntokenized = []\nfor sentence in sentences:\n words = []\n for word in sentence:\n if word not in punctuation:\n words.append(word)\n tokenized.append(words)\nprint('tokenized:\\n', tokenized)\nmodel = Word2Vec(tokenized, sg=1, size=100, window=5, min_count=2, negative\n =1, sample=0.001, hs=1, workers=4)\n<mask token>\nmodel.save('model')\nmodel = Word2Vec.load('model')\nprint(model.wv.similarity('黄河', '长江'))\nprint(model.wv.most_similar(positive=['黄河', '母亲河'], negative=['长江']))\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\n\n# -*- coding: utf-8 -*-\n# @contact: [email protected]\n# @software: PyCharm\n# @time: 2019/3/6 9:59\n# @author: Paulson●Wier\n# @file: 5_词向量.py\n# @desc:\n\n# (1)Word2Vec\n\nfrom gensim.models import Word2Vec\nimport jieba\n\n# 定义停用词、标点符号\npunctuation = ['、',')','(',',',\",\", \"。\", \":\", \";\", \".\", \"'\", '\"', \"’\", \"?\", \"/\", \"-\", \"+\", \"&\", \"(\", \")\"]\nsentences = [\n \"长江是中国第一大河,干流全长6397公里(以沱沱河为源),一般称6300公里。流域总面积一百八十余万平方公里,年平均入海水量约九千六百余亿立方米。以干流长度和入海水量论,长江均居世界第三位。\",\n \"黄河,中国古代也称河,发源于中华人民共和国青海省巴颜喀拉山脉,流经青海、四川、甘肃、宁夏、内蒙古、陕西、山西、河南、山东9个省区,最后于山东省东营垦利县注入渤海。干流河道全长5464千米,仅次于长江,为中国第二长河。黄河还是世界第五长河。\",\n \"黄河,是中华民族的母亲河。作为中华文明的发祥地,维系炎黄子孙的血脉.是中华民族民族精神与民族情感的象征。\",\n \"黄河被称为中华文明的母亲河。公元前2000多年华夏族在黄河领域的中原地区形成、繁衍。\",\n \"在兰州的“黄河第一桥”内蒙古托克托县河口镇以上的黄河河段为黄河上游。\",\n \"黄河上游根据河道特性的不同,又可分为河源段、峡谷段和冲积平原三部分。 \",\n \"黄河,是中华民族的母亲河。\"\n]\n\nsentences = [jieba.lcut(sen) for sen in sentences]\nprint('sentences:\\n',sentences)\n\n# 去标点\ntokenized = []\nfor sentence in sentences:\n words = []\n for word in sentence:\n if word not in punctuation:\n words.append(word)\n tokenized.append(words)\nprint('tokenized:\\n',tokenized)\n\n# 进行模型训练\nmodel = Word2Vec(tokenized,sg=1,size=100,window=5,min_count=2,negative=1,sample=0.001,hs=1,workers=4)\n'''\n参数解释如下:\n\nsg=1 是 skip-gram 算法,对低频词敏感;默认 sg=0 为 CBOW 算法。\nsize 是输出词向量的维数,值太小会导致词映射因为冲突而影响结果,值太大则会耗内存并使算法计算变慢,一般值取为100到200之间。\nwindow 是句子中当前词与目标词之间的最大距离,3表示在目标词前看3-b 个词,后面看 b 个词(b 在0-3之间随机)。\n\nmin_count 是对词进行过滤,频率小于 min-count 的单词则会被忽视,默认值为5。\nnegative 和 sample 可根据训练结果进行微调,sample 表示更高频率的词被随机下采样到所设置的阈值,默认值为 1e-3。\nhs=1 表示层级 softmax 将会被使用,默认 hs=0 且 negative 不为0,则负采样将会被选择使用。\n\n'''\nmodel.save('model') #保存模型\nmodel = Word2Vec.load('model') #加载模型\n\n#相似度\nprint(model.wv.similarity('黄河','长江'))\nprint(model.wv.most_similar(positive=['黄河','母亲河'],negative=['长江']))\n\n\n# (2)Doc2Vec\n# from gensim.models.doc2vec import Doc2Vec,LabeledSentence\n# doc_labels = [\"长江\", \"黄河\", \"黄河\", \"黄河\", \"黄河\", \"黄河\", \"黄河\"]\n# class LabeledLineSentence(object):\n# def __init__(self,doc_list,labels_list):\n# self.labels_list = labels_list\n# self.doc_list = doc_list\n#\n# def __iter__(self):\n# for idx ,doc in enumerate(self.doc_list):\n# yield LabeledSentence(words=doc,tags=[self.labels_list[idx]])\n#\n# # model = Doc2Vec(documents, dm=1, size=100, window=8, min_count=5, workers=4)\n# model = Doc2Vec(documents, dm=1, size=100, window=8, min_count=5, workers=4)\n# model.save('model1')\n# model = Doc2Vec.load('model1')\n#\n# iter_data = LabeledLineSentence(tokenized, doc_labels)\n# model = Doc2Vec(dm=1, size=100, window=8, min_count=5, workers=4)\n# model.build_vocab(iter_data)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#Write by Jess.S 25/1/2019
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['font.sans-serif'] = ['FangSong']  # set the default font (so the CJK labels below render)
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign '-' from rendering as a box in saved figures
def draw_point(x,y):
plt.scatter(x, y)
    plt.title('点分布图')  # chart title
    plt.xlabel('x轴')  # x-axis label
    plt.ylabel('y轴')  # y-axis label
    plt.grid(True)  # show grid lines
plt.show()
def draw_route(route_list,x,y):
plt.scatter(x, y)
for route in route_list:
route= np.array(route)
# print(route.shape)
plt.plot(route[:,0],route[:,1])
    plt.title('路径图')  # chart title
    plt.xlabel('x轴')  # x-axis label
    plt.ylabel('y轴')  # y-axis label
    plt.grid(True)  # show grid lines
plt.show()
def read_data(path,node):
    csv_data = pd.read_csv(path)  # read the input coordinate data
# print(csv_data)
x = csv_data['Easting']
y = csv_data['Southing']
# print(x)
# print(y)
for i in range(len(x)):
xy = []
xy.append(x[i])
xy.append(y[i])
node.append(xy)
# print(node)
node_sort =sorted(node, key=lambda x: (x[0], x[1]))
# print(node_sort)
    # Alternative: sort with numpy instead
# node = np.array(node)
# node = node[np.lexsort(node[:,::-1].T)]
# print(node)
return node_sort,x,y
# Check whether a point on the leading front has been updated
# def dominant(prev,current):
# if prev[0]<current[0] & prev[1]<current[1]:
# return True
# return False
#
# # Check whether two routes share an overlapping segment
# def judge_line(origin,n1,n2):
# if((n1[1]-origin[1])/(n1[0]-origin[0])==(n2[1]-origin[1])/(n2[0]-origin[0])):
# return True
# return False
def init_routing(route_number,route_list,leading_edge,node_sort):
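    # Seed the routes: every route starts at node_sort[0] (the smallest-x node) and
    # its first leg goes to one of the next nodes in sorted order; that node also
    # becomes the route's initial leading edge. Stops once route_number routes exist.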
for n in node_sort:
if(n == node_sort[0]):
continue
route = []
route.append(node_sort[0])
route.append(n)
route_list.append(route)
leading_edge.append(n)
if(len(route_list)>=route_number):
return route_list
return
def expand(route_list,leading_edge,node_sort,route_number):
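    # Nodes are pre-sorted by (x, y); the first route_number+1 of them were already
    # consumed by init_routing(). Each remaining node is appended to the route whose
    # current leading-edge point has the smallest non-negative y-distance above the
    # node; if no leading edge lies at or above the node's y, the node goes to the
    # route whose leading edge has the largest y. That route's leading edge is then
    # advanced to the node.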
for i in range(len(node_sort)):
if(i<=route_number):
continue
y_min = 0
max_index = 0
for a in range(len(leading_edge)):
if(leading_edge[a][1]>y_min):
y_min = leading_edge[a][1]
max_index = a
index = -1
for n in range(len(leading_edge)):
delta_y = leading_edge[n][1] - node_sort[i][1]
if((delta_y>=0) & (delta_y<y_min)):
y_min = delta_y
index = n
if(index < 0):
index = max_index
route_list[index].append(node_sort[i])
leading_edge[index] = node_sort[i]
return route_list
if __name__=='__main__':
path = 'coordinates v1.csv'
    node = []  # coordinates of every point; sorted below
    route_list = []  # the routes built so far
    leading_edge = []  # the current leading-edge (furthest) point of each route
route_number = 6
node_sort,x,y = read_data(path, node)
route_list = init_routing(route_number, route_list, leading_edge,node_sort)
route_list = expand(route_list, leading_edge, node_sort, route_number)
route_list = np.array(route_list)
draw_route(route_list,x,y)
print(route_list)
|
normal
|
{
"blob_id": "1c60620814a4aea2573caf99cee87590a8d57c18",
"index": 5483,
"step-1": "<mask token>\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\nif __name__ == '__main__':\n path = 'coordinates v1.csv'\n node = []\n route_list = []\n leading_edge = []\n route_number = 6\n node_sort, x, y = read_data(path, node)\n route_list = init_routing(route_number, route_list, leading_edge, node_sort\n )\n route_list = expand(route_list, leading_edge, node_sort, route_number)\n route_list = np.array(route_list)\n draw_route(route_list, x, y)\n print(route_list)\n",
"step-3": "<mask token>\nplt.rcParams['font.sans-serif'] = ['FangSong']\nplt.rcParams['axes.unicode_minus'] = False\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\nif __name__ == '__main__':\n path = 'coordinates v1.csv'\n node = []\n route_list = []\n leading_edge = []\n route_number = 6\n node_sort, x, y = read_data(path, node)\n route_list = init_routing(route_number, route_list, leading_edge, node_sort\n )\n route_list = expand(route_list, leading_edge, node_sort, route_number)\n route_list = np.array(route_list)\n draw_route(route_list, x, y)\n print(route_list)\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.rcParams['font.sans-serif'] = ['FangSong']\nplt.rcParams['axes.unicode_minus'] = False\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\nif __name__ == '__main__':\n path = 'coordinates v1.csv'\n node = []\n route_list = []\n leading_edge = []\n route_number = 6\n node_sort, x, y = read_data(path, node)\n route_list = init_routing(route_number, route_list, leading_edge, node_sort\n )\n route_list = expand(route_list, leading_edge, node_sort, route_number)\n route_list = np.array(route_list)\n draw_route(route_list, x, y)\n print(route_list)\n",
"step-5": "#Write by Jess.S 25/1/2019\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nplt.rcParams['font.sans-serif'] = ['FangSong'] # 指定默认字体\r\nplt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题\r\n\r\ndef draw_point(x,y):\r\n plt.scatter(x, y)\r\n plt.title('点分布图')#显示图表标题\r\n plt.xlabel('x轴')#x轴名称\r\n plt.ylabel('y轴')#y轴名称\r\n plt.grid(True)#显示网格线\r\n plt.show()\r\n\r\ndef draw_route(route_list,x,y):\r\n plt.scatter(x, y)\r\n for route in route_list:\r\n route= np.array(route)\r\n# print(route.shape)\r\n plt.plot(route[:,0],route[:,1])\r\n plt.title('路径图')#显示图表标题\r\n plt.xlabel('x轴')#x轴名称\r\n plt.ylabel('y轴')#y轴名称\r\n plt.grid(True)#显示网格线\r\n plt.show()\r\n \r\ndef read_data(path,node):\r\n csv_data = pd.read_csv(path) # 读取训练数据\r\n # print(csv_data)\r\n x = csv_data['Easting']\r\n y = csv_data['Southing']\r\n\r\n # print(x)\r\n # print(y)\r\n for i in range(len(x)):\r\n xy = []\r\n xy.append(x[i])\r\n xy.append(y[i])\r\n node.append(xy)\r\n # print(node)\r\n node_sort =sorted(node, key=lambda x: (x[0], x[1]))\r\n # print(node_sort)\r\n #另一种利用numpy的排序方法\r\n \r\n # node = np.array(node)\r\n # node = node[np.lexsort(node[:,::-1].T)]\r\n # print(node)\r\n return node_sort,x,y\r\n#判断前沿面的点是否被更新\r\n# def dominant(prev,current):\r\n# if prev[0]<current[0] & prev[1]<current[1]:\r\n# return True\r\n# return False\r\n# \r\n# #判断两条路径是否有重叠部分\r\n# def judge_line(origin,n1,n2):\r\n# if((n1[1]-origin[1])/(n1[0]-origin[0])==(n2[1]-origin[1])/(n2[0]-origin[0])):\r\n# return True\r\n# return False\r\n\r\ndef init_routing(route_number,route_list,leading_edge,node_sort): \r\n for n in node_sort:\r\n if(n == node_sort[0]):\r\n continue\r\n route = []\r\n route.append(node_sort[0])\r\n route.append(n)\r\n route_list.append(route)\r\n leading_edge.append(n)\r\n if(len(route_list)>=route_number):\r\n return route_list\r\n return\r\n \r\ndef expand(route_list,leading_edge,node_sort,route_number):\r\n for i in range(len(node_sort)):\r\n if(i<=route_number):\r\n continue\r\n y_min = 0\r\n max_index = 0\r\n for a in range(len(leading_edge)):\r\n if(leading_edge[a][1]>y_min):\r\n y_min = leading_edge[a][1]\r\n max_index = a\r\n index = -1\r\n for n in range(len(leading_edge)):\r\n delta_y = leading_edge[n][1] - node_sort[i][1]\r\n if((delta_y>=0) & (delta_y<y_min)):\r\n y_min = delta_y\r\n index = n\r\n if(index < 0):\r\n index = max_index \r\n route_list[index].append(node_sort[i])\r\n leading_edge[index] = node_sort[i]\r\n return route_list \r\n\r\nif __name__=='__main__':\r\n path = 'coordinates v1.csv'\r\n node = []#所有点的坐标信息,下面进行排序\r\n route_list = []#存储现有的路径信息\r\n leading_edge = []#存储路径最前沿延续的路径index\r\n route_number = 6\r\n node_sort,x,y = read_data(path, node)\r\n route_list = init_routing(route_number, route_list, leading_edge,node_sort)\r\n route_list = expand(route_list, leading_edge, node_sort, route_number)\r\n route_list = np.array(route_list)\r\n draw_route(route_list,x,y)\r\n print(route_list)\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-21 00:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analysis', '0018_relatorioquedadeconsumo_justificado'),
]
operations = [
migrations.RemoveField(
model_name='relatoriocorrentezerada',
name='expira',
),
migrations.RemoveField(
model_name='relatoriotensaozerada',
name='expira',
),
migrations.AddField(
model_name='relatoriotensaozerada',
name='data_expira',
field=models.DateTimeField(blank=True, null=True, verbose_name='data_expira'),
),
]
|
normal
|
{
"blob_id": "a58949d25a719dc9ce0626948ab0397814e9ea0e",
"index": 1574,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('analysis', '0018_relatorioquedadeconsumo_justificado')]\n operations = [migrations.RemoveField(model_name=\n 'relatoriocorrentezerada', name='expira'), migrations.RemoveField(\n model_name='relatoriotensaozerada', name='expira'), migrations.\n AddField(model_name='relatoriotensaozerada', name='data_expira',\n field=models.DateTimeField(blank=True, null=True, verbose_name=\n 'data_expira'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('analysis', '0018_relatorioquedadeconsumo_justificado')]\n operations = [migrations.RemoveField(model_name=\n 'relatoriocorrentezerada', name='expira'), migrations.RemoveField(\n model_name='relatoriotensaozerada', name='expira'), migrations.\n AddField(model_name='relatoriotensaozerada', name='data_expira',\n field=models.DateTimeField(blank=True, null=True, verbose_name=\n 'data_expira'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.1 on 2016-11-21 00:43\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('analysis', '0018_relatorioquedadeconsumo_justificado'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='relatoriocorrentezerada',\n name='expira',\n ),\n migrations.RemoveField(\n model_name='relatoriotensaozerada',\n name='expira',\n ),\n migrations.AddField(\n model_name='relatoriotensaozerada',\n name='data_expira',\n field=models.DateTimeField(blank=True, null=True, verbose_name='data_expira'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.urls import path
from . import views
urlpatterns = [
# @app.route("/")
path('', views.home),
path("teams", views.showTeams),
path("teams/new", views.new),
path("teams/<teamname>", views.showSpecificTeam),
# path("allfood", views.showAllFoodItems),
# path("team/<teamname>", views.showSpecificTeam)
]
|
normal
|
{
"blob_id": "e267108177841110493061a4f84ae3d29850d028",
"index": 1853,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.home), path('teams', views.showTeams), path(\n 'teams/new', views.new), path('teams/<teamname>', views.showSpecificTeam)]\n",
"step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.home), path('teams', views.showTeams), path(\n 'teams/new', views.new), path('teams/<teamname>', views.showSpecificTeam)]\n",
"step-4": "from django.urls import path \nfrom . import views\n\n\nurlpatterns = [\n # @app.route(\"/\")\n path('', views.home),\n path(\"teams\", views.showTeams),\n path(\"teams/new\", views.new),\n path(\"teams/<teamname>\", views.showSpecificTeam),\n # path(\"allfood\", views.showAllFoodItems),\n # path(\"team/<teamname>\", views.showSpecificTeam) \n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
import os
from Alfred3 import Items, Tools
def to_absolute_path(filepath):
filepath = os.path.expanduser(filepath)
return os.path.abspath(filepath)
def is_valid_path(path):
abs_path = to_absolute_path(path)
if os.path.exists(abs_path) and os.path.isdir(abs_path):
return True
else:
return False
env_source = Tools.getEnv("source")
env_target = Tools.getEnv("target")
query = Tools.getArgv(1)
path_to_ask = "source" if env_source == "" else "target"
new_path = to_absolute_path(query)
wf = Items()
if query != "" and is_valid_path(new_path):
wf.setItem(
title=f"Path exists, add as {path_to_ask} path?",
subtitle=new_path,
arg=f"{new_path}|add"
)
elif query.startswith("/") or query.startswith("~"):
wf.setItem(
title="Path does not exists, create?",
subtitle=new_path,
arg=f"{new_path}|create"
)
else:
wf.setItem(
title=f"Enter {path_to_ask} path",
subtitle="Type a directory path starting with / or ~",
valid=False
)
wf.addItem()
wf.write()
|
normal
|
{
"blob_id": "1cf573863fca660cc1fec71ab64743e7a2dd74d8",
"index": 1730,
"step-1": "<mask token>\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\n<mask token>\nif query != '' and is_valid_path(new_path):\n wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle=\n new_path, arg=f'{new_path}|add')\nelif query.startswith('/') or query.startswith('~'):\n wf.setItem(title='Path does not exists, create?', subtitle=new_path,\n arg=f'{new_path}|create')\nelse:\n wf.setItem(title=f'Enter {path_to_ask} path', subtitle=\n 'Type a directory path starting with / or ~', valid=False)\nwf.addItem()\nwf.write()\n",
"step-3": "<mask token>\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\nenv_source = Tools.getEnv('source')\nenv_target = Tools.getEnv('target')\nquery = Tools.getArgv(1)\npath_to_ask = 'source' if env_source == '' else 'target'\nnew_path = to_absolute_path(query)\nwf = Items()\nif query != '' and is_valid_path(new_path):\n wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle=\n new_path, arg=f'{new_path}|add')\nelif query.startswith('/') or query.startswith('~'):\n wf.setItem(title='Path does not exists, create?', subtitle=new_path,\n arg=f'{new_path}|create')\nelse:\n wf.setItem(title=f'Enter {path_to_ask} path', subtitle=\n 'Type a directory path starting with / or ~', valid=False)\nwf.addItem()\nwf.write()\n",
"step-4": "import os\nfrom Alfred3 import Items, Tools\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\nenv_source = Tools.getEnv('source')\nenv_target = Tools.getEnv('target')\nquery = Tools.getArgv(1)\npath_to_ask = 'source' if env_source == '' else 'target'\nnew_path = to_absolute_path(query)\nwf = Items()\nif query != '' and is_valid_path(new_path):\n wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle=\n new_path, arg=f'{new_path}|add')\nelif query.startswith('/') or query.startswith('~'):\n wf.setItem(title='Path does not exists, create?', subtitle=new_path,\n arg=f'{new_path}|create')\nelse:\n wf.setItem(title=f'Enter {path_to_ask} path', subtitle=\n 'Type a directory path starting with / or ~', valid=False)\nwf.addItem()\nwf.write()\n",
"step-5": "#!/usr/bin/env python3\n\nimport os\n\nfrom Alfred3 import Items, Tools\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\nenv_source = Tools.getEnv(\"source\")\nenv_target = Tools.getEnv(\"target\")\nquery = Tools.getArgv(1)\n\npath_to_ask = \"source\" if env_source == \"\" else \"target\"\n\nnew_path = to_absolute_path(query)\n\n\nwf = Items()\n\nif query != \"\" and is_valid_path(new_path):\n wf.setItem(\n title=f\"Path exists, add as {path_to_ask} path?\",\n subtitle=new_path,\n arg=f\"{new_path}|add\"\n )\nelif query.startswith(\"/\") or query.startswith(\"~\"):\n wf.setItem(\n title=\"Path does not exists, create?\",\n subtitle=new_path,\n arg=f\"{new_path}|create\"\n )\nelse:\n wf.setItem(\n title=f\"Enter {path_to_ask} path\",\n subtitle=\"Type a directory path starting with / or ~\",\n valid=False\n )\nwf.addItem()\nwf.write()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from entities.GpsFix import GpsFix
class Visit(object):
"""
A Visit, which represents an arrival-departure to a stay point
Attributes:
id_visit: the id of the visit itself
id_stay_point: the id of the stay point
pivot_arrival_fix: the GpsFix that corresponds to real world arrival
pivot_departure_fix: the GpsFix that corresponds to real world departure
detection_arrival_fix: the GpsFix that triggered the arrival by the platform
detection_departure_fix: the GpsFix that triggered the departure by the platform
stay_time: stay time of the visit in seconds
"""
def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix, pivot_departure_fix: GpsFix,
detection_arrival_fix: GpsFix,
detection_departure_fix: GpsFix):
"""
Builds a Visit object
:param id_visit: the id of the visit
:param id_stay_point: the id of the stay point
:param pivot_arrival_fix: the GpsFix that corresponds to real world arrival
:param pivot_departure_fix: the GpsFix that corresponds to real world departure
:param detection_arrival_fix: the GpsFix that triggered the arrival by the platform
:param detection_departure_fix: the GpsFix that triggered the departure by the platform
"""
self.id_visit = id_visit
self.id_stay_point = id_stay_point
self.pivot_arrival_fix = pivot_arrival_fix
self.pivot_departure_fix = pivot_departure_fix
self.detection_arrival_fix = detection_arrival_fix
self.detection_departure_fix = detection_departure_fix
self.stay_time = None
self.update_stay_time()
def update_stay_time(self):
"""
Updates the stay time of visit
:return: None
"""
        self.stay_time = self.get_length()
def get_length(self) -> int:
"""
Gets the length of visit in seconds
:return: The length of visit in seconds
"""
return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()
def __str__(self):
date_format = '%Y-%m-%d %H:%M:%S'
return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,
self.pivot_arrival_fix.timestamp.strftime(date_format),
self.pivot_departure_fix.timestamp.strftime(date_format), self.get_length())
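
# Minimal usage sketch. The real GpsFix constructor is not shown in this file, so a
# hypothetical stand-in exposing only the .timestamp attribute used above is assumed.
if __name__ == '__main__':
    from datetime import datetime

    class _FakeFix:
        def __init__(self, timestamp):
            self.timestamp = timestamp

    arrival = _FakeFix(datetime(2020, 1, 1, 10, 0, 0))
    departure = _FakeFix(datetime(2020, 1, 1, 10, 30, 0))
    visit = Visit(1, 42, arrival, departure, arrival, departure)
    print(visit)  # 1,42,2020-01-01 10:00:00,2020-01-01 10:30:00,1800.0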
|
normal
|
{
"blob_id": "703ed320e7c06856a0798d9c0de9aafe24458767",
"index": 7937,
"step-1": "<mask token>\n\n\nclass Visit(object):\n <mask token>\n\n def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix,\n pivot_departure_fix: GpsFix, detection_arrival_fix: GpsFix,\n detection_departure_fix: GpsFix):\n \"\"\"\n Builds a Visit object\n :param id_visit: the id of the visit\n :param id_stay_point: the id of the stay point\n :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n :param pivot_departure_fix: the GpsFix that corresponds to real world departure\n :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n :param detection_departure_fix: the GpsFix that triggered the departure by the platform\n \"\"\"\n self.id_visit = id_visit\n self.id_stay_point = id_stay_point\n self.pivot_arrival_fix = pivot_arrival_fix\n self.pivot_departure_fix = pivot_departure_fix\n self.detection_arrival_fix = detection_arrival_fix\n self.detection_departure_fix = detection_departure_fix\n self.stay_time = None\n self.update_stay_time()\n\n def update_stay_time(self):\n \"\"\"\n Updates the stay time of visit\n :return: None\n \"\"\"\n self.stay_time = self.get_length()\n <mask token>\n\n def __str__(self):\n date_format = '%Y-%m-%d %H:%M:%S'\n return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,\n self.pivot_arrival_fix.timestamp.strftime(date_format), self.\n pivot_departure_fix.timestamp.strftime(date_format), self.\n get_length())\n",
"step-2": "<mask token>\n\n\nclass Visit(object):\n <mask token>\n\n def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix,\n pivot_departure_fix: GpsFix, detection_arrival_fix: GpsFix,\n detection_departure_fix: GpsFix):\n \"\"\"\n Builds a Visit object\n :param id_visit: the id of the visit\n :param id_stay_point: the id of the stay point\n :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n :param pivot_departure_fix: the GpsFix that corresponds to real world departure\n :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n :param detection_departure_fix: the GpsFix that triggered the departure by the platform\n \"\"\"\n self.id_visit = id_visit\n self.id_stay_point = id_stay_point\n self.pivot_arrival_fix = pivot_arrival_fix\n self.pivot_departure_fix = pivot_departure_fix\n self.detection_arrival_fix = detection_arrival_fix\n self.detection_departure_fix = detection_departure_fix\n self.stay_time = None\n self.update_stay_time()\n\n def update_stay_time(self):\n \"\"\"\n Updates the stay time of visit\n :return: None\n \"\"\"\n self.stay_time = self.get_length()\n\n def get_length(self) ->int:\n \"\"\"\n Gets the length of visit in seconds\n :return: The length of visit in seconds\n \"\"\"\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix\n .timestamp).total_seconds()\n\n def __str__(self):\n date_format = '%Y-%m-%d %H:%M:%S'\n return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,\n self.pivot_arrival_fix.timestamp.strftime(date_format), self.\n pivot_departure_fix.timestamp.strftime(date_format), self.\n get_length())\n",
"step-3": "<mask token>\n\n\nclass Visit(object):\n \"\"\"\n A Visit, which represents an arrival-departure to a stay point\n\n Attributes:\n id_visit: the id of the visit itself\n id_stay_point: the id of the stay point\n pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n pivot_departure_fix: the GpsFix that corresponds to real world departure\n detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n detection_departure_fix: the GpsFix that triggered the departure by the platform\n stay_time: stay time of the visit in seconds\n \"\"\"\n\n def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix,\n pivot_departure_fix: GpsFix, detection_arrival_fix: GpsFix,\n detection_departure_fix: GpsFix):\n \"\"\"\n Builds a Visit object\n :param id_visit: the id of the visit\n :param id_stay_point: the id of the stay point\n :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n :param pivot_departure_fix: the GpsFix that corresponds to real world departure\n :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n :param detection_departure_fix: the GpsFix that triggered the departure by the platform\n \"\"\"\n self.id_visit = id_visit\n self.id_stay_point = id_stay_point\n self.pivot_arrival_fix = pivot_arrival_fix\n self.pivot_departure_fix = pivot_departure_fix\n self.detection_arrival_fix = detection_arrival_fix\n self.detection_departure_fix = detection_departure_fix\n self.stay_time = None\n self.update_stay_time()\n\n def update_stay_time(self):\n \"\"\"\n Updates the stay time of visit\n :return: None\n \"\"\"\n self.stay_time = self.get_length()\n\n def get_length(self) ->int:\n \"\"\"\n Gets the length of visit in seconds\n :return: The length of visit in seconds\n \"\"\"\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix\n .timestamp).total_seconds()\n\n def __str__(self):\n date_format = '%Y-%m-%d %H:%M:%S'\n return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,\n self.pivot_arrival_fix.timestamp.strftime(date_format), self.\n pivot_departure_fix.timestamp.strftime(date_format), self.\n get_length())\n",
"step-4": "from entities.GpsFix import GpsFix\n\n\nclass Visit(object):\n \"\"\"\n A Visit, which represents an arrival-departure to a stay point\n\n Attributes:\n id_visit: the id of the visit itself\n id_stay_point: the id of the stay point\n pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n pivot_departure_fix: the GpsFix that corresponds to real world departure\n detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n detection_departure_fix: the GpsFix that triggered the departure by the platform\n stay_time: stay time of the visit in seconds\n \"\"\"\n\n def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix,\n pivot_departure_fix: GpsFix, detection_arrival_fix: GpsFix,\n detection_departure_fix: GpsFix):\n \"\"\"\n Builds a Visit object\n :param id_visit: the id of the visit\n :param id_stay_point: the id of the stay point\n :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n :param pivot_departure_fix: the GpsFix that corresponds to real world departure\n :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n :param detection_departure_fix: the GpsFix that triggered the departure by the platform\n \"\"\"\n self.id_visit = id_visit\n self.id_stay_point = id_stay_point\n self.pivot_arrival_fix = pivot_arrival_fix\n self.pivot_departure_fix = pivot_departure_fix\n self.detection_arrival_fix = detection_arrival_fix\n self.detection_departure_fix = detection_departure_fix\n self.stay_time = None\n self.update_stay_time()\n\n def update_stay_time(self):\n \"\"\"\n Updates the stay time of visit\n :return: None\n \"\"\"\n self.stay_time = self.get_length()\n\n def get_length(self) ->int:\n \"\"\"\n Gets the length of visit in seconds\n :return: The length of visit in seconds\n \"\"\"\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix\n .timestamp).total_seconds()\n\n def __str__(self):\n date_format = '%Y-%m-%d %H:%M:%S'\n return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,\n self.pivot_arrival_fix.timestamp.strftime(date_format), self.\n pivot_departure_fix.timestamp.strftime(date_format), self.\n get_length())\n",
"step-5": "from entities.GpsFix import GpsFix\n\n\nclass Visit(object):\n \"\"\"\n A Visit, which represents an arrival-departure to a stay point\n\n Attributes:\n id_visit: the id of the visit itself\n id_stay_point: the id of the stay point\n pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n pivot_departure_fix: the GpsFix that corresponds to real world departure\n detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n detection_departure_fix: the GpsFix that triggered the departure by the platform\n stay_time: stay time of the visit in seconds\n \"\"\"\n\n def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix, pivot_departure_fix: GpsFix,\n detection_arrival_fix: GpsFix,\n detection_departure_fix: GpsFix):\n \"\"\"\n Builds a Visit object\n :param id_visit: the id of the visit\n :param id_stay_point: the id of the stay point\n :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n :param pivot_departure_fix: the GpsFix that corresponds to real world departure\n :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n :param detection_departure_fix: the GpsFix that triggered the departure by the platform\n \"\"\"\n self.id_visit = id_visit\n self.id_stay_point = id_stay_point\n self.pivot_arrival_fix = pivot_arrival_fix\n self.pivot_departure_fix = pivot_departure_fix\n self.detection_arrival_fix = detection_arrival_fix\n self.detection_departure_fix = detection_departure_fix\n self.stay_time = None\n self.update_stay_time()\n\n def update_stay_time(self):\n \"\"\"\n Updates the stay time of visit\n :return: None\n \"\"\"\n # It would not be better to simply self.stay_time = self.get_length() ??\n self.stay_time = self.get_length()\n\n def get_length(self) -> int:\n \"\"\"\n Gets the length of visit in seconds\n :return: The length of visit in seconds\n \"\"\"\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()\n\n def __str__(self):\n date_format = '%Y-%m-%d %H:%M:%S'\n return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,\n self.pivot_arrival_fix.timestamp.strftime(date_format),\n self.pivot_departure_fix.timestamp.strftime(date_format), self.get_length())\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import tkinter as tk
from tkinter import Tk, ttk
from tkinter import filedialog
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
import crystalpeaktab as cp
import smallangletab as sa
matplotlib.use("TkAgg")
class mainwin:
def __init__(self, master):
self.master = master
master.title
master.title("University of Utah XRD Analysis Multi-tool")
#Sets up tabs
self.tab_parent = ttk.Notebook(master)
self.tab1 = ttk.Frame(self.tab_parent)
self.tab2 = ttk.Frame(self.tab_parent)
self.tab3 = ttk.Frame(self.tab_parent)
self.tab_parent.add(self.tab1, text="Crystallization Peak Fit")
self.tab_parent.add(self.tab2, text="Small Angle Simulation")
self.tab_parent.grid(row=1, column=0)
# Spacers
tk.Label(self.master, text="").grid(row=2, column=3)
# Sets the first tab to be the crystal peak analysis
cp.tab(self.tab1)
# Sets the second tab to be the Small Angle Analytic Simulation
sa.tab(self.tab2)
# ======================================================================================================================
# ======================================================================================================================
# MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN
# ======================================================================================================================
root = tk.Tk()
my_gui = mainwin(root)
root.mainloop()
# ======================================================================================================================
# ======================================================================================================================
|
normal
|
{
"blob_id": "137ed9c36265781dbebabbd1ee0ea84c9850201a",
"index": 1642,
"step-1": "<mask token>\n\n\nclass mainwin:\n\n def __init__(self, master):\n self.master = master\n master.title\n master.title('University of Utah XRD Analysis Multi-tool')\n self.tab_parent = ttk.Notebook(master)\n self.tab1 = ttk.Frame(self.tab_parent)\n self.tab2 = ttk.Frame(self.tab_parent)\n self.tab3 = ttk.Frame(self.tab_parent)\n self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')\n self.tab_parent.add(self.tab2, text='Small Angle Simulation')\n self.tab_parent.grid(row=1, column=0)\n tk.Label(self.master, text='').grid(row=2, column=3)\n cp.tab(self.tab1)\n sa.tab(self.tab2)\n\n\n<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('TkAgg')\n\n\nclass mainwin:\n\n def __init__(self, master):\n self.master = master\n master.title\n master.title('University of Utah XRD Analysis Multi-tool')\n self.tab_parent = ttk.Notebook(master)\n self.tab1 = ttk.Frame(self.tab_parent)\n self.tab2 = ttk.Frame(self.tab_parent)\n self.tab3 = ttk.Frame(self.tab_parent)\n self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')\n self.tab_parent.add(self.tab2, text='Small Angle Simulation')\n self.tab_parent.grid(row=1, column=0)\n tk.Label(self.master, text='').grid(row=2, column=3)\n cp.tab(self.tab1)\n sa.tab(self.tab2)\n\n\n<mask token>\nroot.mainloop()\n",
"step-3": "<mask token>\nmatplotlib.use('TkAgg')\n\n\nclass mainwin:\n\n def __init__(self, master):\n self.master = master\n master.title\n master.title('University of Utah XRD Analysis Multi-tool')\n self.tab_parent = ttk.Notebook(master)\n self.tab1 = ttk.Frame(self.tab_parent)\n self.tab2 = ttk.Frame(self.tab_parent)\n self.tab3 = ttk.Frame(self.tab_parent)\n self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')\n self.tab_parent.add(self.tab2, text='Small Angle Simulation')\n self.tab_parent.grid(row=1, column=0)\n tk.Label(self.master, text='').grid(row=2, column=3)\n cp.tab(self.tab1)\n sa.tab(self.tab2)\n\n\nroot = tk.Tk()\nmy_gui = mainwin(root)\nroot.mainloop()\n",
"step-4": "import tkinter as tk\nfrom tkinter import Tk, ttk\nfrom tkinter import filedialog\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\nimport matplotlib.animation as animation\nfrom matplotlib import style\nimport crystalpeaktab as cp\nimport smallangletab as sa\nmatplotlib.use('TkAgg')\n\n\nclass mainwin:\n\n def __init__(self, master):\n self.master = master\n master.title\n master.title('University of Utah XRD Analysis Multi-tool')\n self.tab_parent = ttk.Notebook(master)\n self.tab1 = ttk.Frame(self.tab_parent)\n self.tab2 = ttk.Frame(self.tab_parent)\n self.tab3 = ttk.Frame(self.tab_parent)\n self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')\n self.tab_parent.add(self.tab2, text='Small Angle Simulation')\n self.tab_parent.grid(row=1, column=0)\n tk.Label(self.master, text='').grid(row=2, column=3)\n cp.tab(self.tab1)\n sa.tab(self.tab2)\n\n\nroot = tk.Tk()\nmy_gui = mainwin(root)\nroot.mainloop()\n",
"step-5": "import tkinter as tk\nfrom tkinter import Tk, ttk\nfrom tkinter import filedialog\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\nfrom matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg, NavigationToolbar2Tk)\nfrom matplotlib.figure import Figure\nimport matplotlib.animation as animation\nfrom matplotlib import style\nimport crystalpeaktab as cp\nimport smallangletab as sa\nmatplotlib.use(\"TkAgg\")\n\nclass mainwin:\n def __init__(self, master):\n self.master = master\n master.title\n master.title(\"University of Utah XRD Analysis Multi-tool\")\n #Sets up tabs\n self.tab_parent = ttk.Notebook(master)\n self.tab1 = ttk.Frame(self.tab_parent)\n self.tab2 = ttk.Frame(self.tab_parent)\n self.tab3 = ttk.Frame(self.tab_parent)\n self.tab_parent.add(self.tab1, text=\"Crystallization Peak Fit\")\n self.tab_parent.add(self.tab2, text=\"Small Angle Simulation\")\n self.tab_parent.grid(row=1, column=0)\n # Spacers\n tk.Label(self.master, text=\"\").grid(row=2, column=3)\n # Sets the first tab to be the crystal peak analysis\n cp.tab(self.tab1)\n # Sets the second tab to be the Small Angle Analytic Simulation\n sa.tab(self.tab2)\n# ======================================================================================================================\n# ======================================================================================================================\n# MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN\n# ======================================================================================================================\nroot = tk.Tk()\nmy_gui = mainwin(root)\nroot.mainloop()\n# ======================================================================================================================\n# ======================================================================================================================\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# This is just a quick smoke test of the ilp_polytope module.
import polytope
polytope.ilp_polytope.test2()
|
normal
|
{
"blob_id": "d2fce15636e43ca618c39c5c963bbf0c3a6a3886",
"index": 4444,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npolytope.ilp_polytope.test2()\n",
"step-3": "import polytope\npolytope.ilp_polytope.test2()\n",
"step-4": "# this is just to test with ilp_polytope\nimport polytope\n\npolytope.ilp_polytope.test2()\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import common
import student_code
class bcolors:
RED = "\x1b[31m"
GREEN = "\x1b[32m"
NORMAL = "\x1b[0m"
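# Compare a computed map against its expected (gold) map, printing each cell
# green when it matches and red when it differs; returns True only if all cells match.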
def check_result(title, map1, map2):
    result = True
    print(title)
    for y in range(0, common.constants.MAP_HEIGHT):
        v = ""
        for x in range(0, common.constants.MAP_WIDTH):
            if map1[y][x] == map2[y][x]:
                v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL
            else:
                result = False
                v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL
        print(v)
    if result:
        print("Test Result: " + bcolors.GREEN + "Passed" + bcolors.NORMAL)
    else:
        print("Test Result: " + bcolors.RED + "Failed" + bcolors.NORMAL)
    return result
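# Each map is given as a flattened row-major string of cell codes; the gold_* strings
# hold the expected grid contents after the student's A* search has run.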
data1 = (
"100000011"
"110111011"
"111111011"
"110000003"
"111111011"
"111020000")
gold_df1 = ("100000011"
"110111011"
"111111011"
"110000555"
"111111511"
"111055540")
data2 = (
"200000011"
"011111011"
"000001011"
"111011003"
"111111011"
"111000011"
"111111011")
gold_df2 = ("555555511"
"411111511"
"444441511"
"111411555"
"111111011"
"111000011"
"111111011")
data3 = (
"100000011"
"111011011"
"000011011"
"111011003"
"110011011"
"111200011")
gold_df3 = (
"100000011"
"111011011"
"000011011"
"111411555"
"110411511"
"111555511")
all_passed = True
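# Test 1: run A* on the first map and compare against its gold map.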
gold_dfmap1 = common.init_map()
common.set_map(gold_dfmap1, gold_df1)
dfmap1 = common.init_map()
common.set_map(dfmap1, data1)
df1 = student_code.astar_search(dfmap1)
tdf1 = "Reachable goal:"
cdf1 = check_result(tdf1, dfmap1, gold_dfmap1)
all_passed = all_passed and cdf1 and df1
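# Test 2: run A* on the second map and compare against its gold map.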
gold_dfmap2 = common.init_map()
common.set_map(gold_dfmap2, gold_df2)
dfmap2 = common.init_map()
common.set_map(dfmap2, data2)
df2 = student_code.astar_search(dfmap2)
tdf2 = "Reachable goal:"
cdf2 = check_result(tdf2, dfmap2, gold_dfmap2)
all_passed = all_passed and cdf2 and df2
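# Test 3: run A* on the third map and compare against its gold map.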
gold_dfmap3 = common.init_map()
common.set_map(gold_dfmap3, gold_df3)
dfmap3 = common.init_map()
common.set_map(dfmap3, data3)
df3 = student_code.astar_search(dfmap3)
tdf3 = "Reachable goal:"
cdf3 = check_result(tdf3, dfmap3, gold_dfmap3)
all_passed = all_passed and cdf3 and df3
if all_passed:
exit(0)
else:
exit(1)
|
normal
|
{
"blob_id": "602d2c545c6e3eabe5c6285d2ab0c7f4216a00f5",
"index": 1563,
"step-1": "<mask token>\n\n\nclass bcolors:\n RED = '\\x1b[31m'\n GREEN = '\\x1b[32m'\n NORMAL = '\\x1b[0m'\n\n\ndef check_result(title, map1, map2):\n result = True\n print(title)\n for y in range(0, common.constants.MAP_HEIGHT):\n v = ''\n for x in range(0, common.constants.MAP_WIDTH):\n if map1[y][x] == map2[y][x]:\n v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL\n else:\n result = False\n v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL\n print(v)\n if result:\n print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)\n else:\n print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass bcolors:\n RED = '\\x1b[31m'\n GREEN = '\\x1b[32m'\n NORMAL = '\\x1b[0m'\n\n\ndef check_result(title, map1, map2):\n result = True\n print(title)\n for y in range(0, common.constants.MAP_HEIGHT):\n v = ''\n for x in range(0, common.constants.MAP_WIDTH):\n if map1[y][x] == map2[y][x]:\n v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL\n else:\n result = False\n v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL\n print(v)\n if result:\n print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)\n else:\n print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)\n return result\n\n\n<mask token>\ncommon.set_map(gold_dfmap1, gold_df1)\n<mask token>\ncommon.set_map(dfmap1, data1)\n<mask token>\ncommon.set_map(gold_dfmap2, gold_df2)\n<mask token>\ncommon.set_map(dfmap2, data2)\n<mask token>\ncommon.set_map(gold_dfmap3, gold_df3)\n<mask token>\ncommon.set_map(dfmap3, data3)\n<mask token>\nif all_passed:\n exit(0)\nelse:\n exit(1)\n",
"step-3": "<mask token>\n\n\nclass bcolors:\n RED = '\\x1b[31m'\n GREEN = '\\x1b[32m'\n NORMAL = '\\x1b[0m'\n\n\ndef check_result(title, map1, map2):\n result = True\n print(title)\n for y in range(0, common.constants.MAP_HEIGHT):\n v = ''\n for x in range(0, common.constants.MAP_WIDTH):\n if map1[y][x] == map2[y][x]:\n v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL\n else:\n result = False\n v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL\n print(v)\n if result:\n print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)\n else:\n print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)\n return result\n\n\ndata1 = '100000011110111011111111011110000003111111011111020000'\ngold_df1 = '100000011110111011111111011110000555111111511111055540'\ndata2 = '200000011011111011000001011111011003111111011111000011111111011'\ngold_df2 = '555555511411111511444441511111411555111111011111000011111111011'\ndata3 = '100000011111011011000011011111011003110011011111200011'\ngold_df3 = '100000011111011011000011011111411555110411511111555511'\nall_passed = True\ngold_dfmap1 = common.init_map()\ncommon.set_map(gold_dfmap1, gold_df1)\ndfmap1 = common.init_map()\ncommon.set_map(dfmap1, data1)\ndf1 = student_code.astar_search(dfmap1)\ntdf1 = 'Reachable goal:'\ncdf1 = check_result(tdf1, dfmap1, gold_dfmap1)\nall_passed = all_passed and cdf1 and df1\ngold_dfmap2 = common.init_map()\ncommon.set_map(gold_dfmap2, gold_df2)\ndfmap2 = common.init_map()\ncommon.set_map(dfmap2, data2)\ndf2 = student_code.astar_search(dfmap2)\ntdf2 = 'Reachable goal:'\ncdf2 = check_result(tdf2, dfmap2, gold_dfmap2)\nall_passed = all_passed and cdf2 and df2\ngold_dfmap3 = common.init_map()\ncommon.set_map(gold_dfmap3, gold_df3)\ndfmap3 = common.init_map()\ncommon.set_map(dfmap3, data3)\ndf3 = student_code.astar_search(dfmap3)\ntdf3 = 'Reachable goal:'\ncdf3 = check_result(tdf3, dfmap3, gold_dfmap3)\nall_passed = all_passed and cdf3 and df3\nall_passed = all_passed and cdf5 and df5\nif all_passed:\n exit(0)\nelse:\n exit(1)\n",
"step-4": "import common\nimport student_code\n\n\nclass bcolors:\n RED = '\\x1b[31m'\n GREEN = '\\x1b[32m'\n NORMAL = '\\x1b[0m'\n\n\ndef check_result(title, map1, map2):\n result = True\n print(title)\n for y in range(0, common.constants.MAP_HEIGHT):\n v = ''\n for x in range(0, common.constants.MAP_WIDTH):\n if map1[y][x] == map2[y][x]:\n v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL\n else:\n result = False\n v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL\n print(v)\n if result:\n print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)\n else:\n print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)\n return result\n\n\ndata1 = '100000011110111011111111011110000003111111011111020000'\ngold_df1 = '100000011110111011111111011110000555111111511111055540'\ndata2 = '200000011011111011000001011111011003111111011111000011111111011'\ngold_df2 = '555555511411111511444441511111411555111111011111000011111111011'\ndata3 = '100000011111011011000011011111011003110011011111200011'\ngold_df3 = '100000011111011011000011011111411555110411511111555511'\nall_passed = True\ngold_dfmap1 = common.init_map()\ncommon.set_map(gold_dfmap1, gold_df1)\ndfmap1 = common.init_map()\ncommon.set_map(dfmap1, data1)\ndf1 = student_code.astar_search(dfmap1)\ntdf1 = 'Reachable goal:'\ncdf1 = check_result(tdf1, dfmap1, gold_dfmap1)\nall_passed = all_passed and cdf1 and df1\ngold_dfmap2 = common.init_map()\ncommon.set_map(gold_dfmap2, gold_df2)\ndfmap2 = common.init_map()\ncommon.set_map(dfmap2, data2)\ndf2 = student_code.astar_search(dfmap2)\ntdf2 = 'Reachable goal:'\ncdf2 = check_result(tdf2, dfmap2, gold_dfmap2)\nall_passed = all_passed and cdf2 and df2\ngold_dfmap3 = common.init_map()\ncommon.set_map(gold_dfmap3, gold_df3)\ndfmap3 = common.init_map()\ncommon.set_map(dfmap3, data3)\ndf3 = student_code.astar_search(dfmap3)\ntdf3 = 'Reachable goal:'\ncdf3 = check_result(tdf3, dfmap3, gold_dfmap3)\nall_passed = all_passed and cdf3 and df3\nall_passed = all_passed and cdf5 and df5\nif all_passed:\n exit(0)\nelse:\n exit(1)\n",
"step-5": "import common\r\nimport student_code\r\n\r\nclass bcolors:\r\n\tRED = \"\\x1b[31m\"\r\n\tGREEN = \"\\x1b[32m\"\r\n\tNORMAL = \"\\x1b[0m\"\r\n\r\ndef check_result(title, map1, map2):\r\n\tresult=True\r\n\tprint(title)\r\n\tfor y in range(0,common.constants.MAP_HEIGHT):\r\n\t\tv=\"\"\r\n\t\tfor x in range(0,common.constants.MAP_WIDTH):\r\n\t\t\tif (map1[y][x]==map2[y][x]):\r\n\t\t\t\tv+=bcolors.GREEN+str(map1[y][x])+bcolors.NORMAL\r\n\t\t\telse:\r\n\t\t\t\tresult = False\r\n\t\t\t\tv+=bcolors.RED+str(map1[y][x])+bcolors.NORMAL\r\n\t\tprint(v)\r\n\tif (result):\r\n\t\tprint(\"Test Result: \" + bcolors.GREEN+\"Passed\"+bcolors.NORMAL)\r\n\telse:\r\n\t\tprint(\"Test Result: \" + bcolors.RED+\"Failed\"+bcolors.NORMAL)\r\n\treturn result\r\n\r\ndata1 = (\r\n\"100000011\"\r\n\"110111011\"\r\n\"111111011\"\r\n\"110000003\"\r\n\"111111011\"\r\n\"111020000\")\r\n\r\ngold_df1 = (\"100000011\"\r\n\"110111011\"\r\n\"111111011\"\r\n\"110000555\"\r\n\"111111511\"\r\n\"111055540\")\r\n\r\ndata2 = (\r\n\"200000011\"\r\n\"011111011\"\r\n\"000001011\"\r\n\"111011003\"\r\n\"111111011\"\r\n\"111000011\"\r\n\"111111011\")\r\n\r\ngold_df2 = (\"555555511\"\r\n\"411111511\"\r\n\"444441511\"\r\n\"111411555\"\r\n\"111111011\"\r\n\"111000011\"\r\n\"111111011\")\r\n\r\n\r\ndata3 = (\r\n\"100000011\"\r\n\"111011011\"\r\n\"000011011\"\r\n\"111011003\"\r\n\"110011011\"\r\n\"111200011\")\r\n\r\ngold_df3 = (\r\n\"100000011\"\r\n\"111011011\"\r\n\"000011011\"\r\n\"111411555\"\r\n\"110411511\"\r\n\"111555511\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\nall_passed = True\r\n\r\ngold_dfmap1 = common.init_map();\r\ncommon.set_map(gold_dfmap1, gold_df1)\r\n\r\ndfmap1 = common.init_map()\r\ncommon.set_map(dfmap1, data1)\r\ndf1 = student_code.astar_search(dfmap1)\r\ntdf1 =\"Reachable goal:\"\r\ncdf1 = check_result(tdf1,dfmap1,gold_dfmap1)\r\n\r\nall_passed = all_passed and cdf1 and df1 \r\n\r\ngold_dfmap2 = common.init_map();\r\ncommon.set_map(gold_dfmap2, gold_df2)\r\n\r\ndfmap2 = common.init_map()\r\ncommon.set_map(dfmap2, data2)\r\ndf2 = student_code.astar_search(dfmap2)\r\ntdf2 =\"Reachable goal:\"\r\ncdf2 = check_result(tdf2,dfmap2,gold_dfmap2)\r\n\r\nall_passed = all_passed and cdf2 and df2 \r\n\r\ngold_dfmap3 = common.init_map();\r\ncommon.set_map(gold_dfmap3, gold_df3)\r\n\r\ndfmap3 = common.init_map()\r\ncommon.set_map(dfmap3, data3)\r\ndf3 = student_code.astar_search(dfmap3)\r\ntdf3 =\"Reachable goal:\"\r\ncdf3 = check_result(tdf3,dfmap3,gold_dfmap3)\r\n\r\n\r\nall_passed = all_passed and cdf3 and df3 \r\n\r\n\r\n\r\n\r\nall_passed = all_passed and cdf5 and df5\r\n\r\nif all_passed:\r\n\texit(0)\r\nelse:\r\n\texit(1)\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from sqlalchemy import Integer, String, Column
from sqlalchemy.orm import Query
from server import db
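# ORM model for the "formation" table; `db` is assumed to be the declarative base
# class exported by the project's server module.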
class Formation(db):
__tablename__ = "formation"
query: Query
id_form = Column(Integer, primary_key=True)
filiere = Column(String, nullable=False)
lieu = Column(String, nullable=False)
niveau = Column(String, nullable=False)
@staticmethod
def create(filiere: str, lieu: str, niveau: str):
return Formation(filiere=filiere, lieu=lieu, niveau=niveau)
def to_json(self):
return {
'id': self.id_form,
'branch': self.filiere,
'location': self.lieu,
'level': self.niveau,
}
|
normal
|
{
"blob_id": "fff70312fa7c3259cf4c3d9e7ebd8ca5b9a56887",
"index": 2714,
"step-1": "<mask token>\n\n\nclass Formation(db):\n <mask token>\n query: Query\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def create(filiere: str, lieu: str, niveau: str):\n return Formation(filiere=filiere, lieu=lieu, niveau=niveau)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Formation(db):\n <mask token>\n query: Query\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def create(filiere: str, lieu: str, niveau: str):\n return Formation(filiere=filiere, lieu=lieu, niveau=niveau)\n\n def to_json(self):\n return {'id': self.id_form, 'branch': self.filiere, 'location':\n self.lieu, 'level': self.niveau}\n",
"step-3": "<mask token>\n\n\nclass Formation(db):\n __tablename__ = 'formation'\n query: Query\n id_form = Column(Integer, primary_key=True)\n filiere = Column(String, nullable=False)\n lieu = Column(String, nullable=False)\n niveau = Column(String, nullable=False)\n\n @staticmethod\n def create(filiere: str, lieu: str, niveau: str):\n return Formation(filiere=filiere, lieu=lieu, niveau=niveau)\n\n def to_json(self):\n return {'id': self.id_form, 'branch': self.filiere, 'location':\n self.lieu, 'level': self.niveau}\n",
"step-4": "from sqlalchemy import Integer, String, Column\nfrom sqlalchemy.orm import Query\nfrom server import db\n\n\nclass Formation(db):\n __tablename__ = 'formation'\n query: Query\n id_form = Column(Integer, primary_key=True)\n filiere = Column(String, nullable=False)\n lieu = Column(String, nullable=False)\n niveau = Column(String, nullable=False)\n\n @staticmethod\n def create(filiere: str, lieu: str, niveau: str):\n return Formation(filiere=filiere, lieu=lieu, niveau=niveau)\n\n def to_json(self):\n return {'id': self.id_form, 'branch': self.filiere, 'location':\n self.lieu, 'level': self.niveau}\n",
"step-5": "from sqlalchemy import Integer, String, Column\nfrom sqlalchemy.orm import Query\nfrom server import db\n\nclass Formation(db):\n __tablename__ = \"formation\"\n query: Query\n\n id_form = Column(Integer, primary_key=True)\n filiere = Column(String, nullable=False)\n lieu = Column(String, nullable=False)\n niveau = Column(String, nullable=False)\n\n @staticmethod\n def create(filiere: str, lieu: str, niveau: str):\n return Formation(filiere=filiere, lieu=lieu, niveau=niveau)\n\n def to_json(self):\n return {\n 'id': self.id_form,\n 'branch': self.filiere,\n 'location': self.lieu,\n 'level': self.niveau,\n }\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |