Unnamed: 0 (int64, 0–10k) | function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261)
---|---|---|---|
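Each row below pairs a Python function whose exception-handling site has been masked with the token __HOLE__ (the function column), the exception class that belongs at that site (the label column), and the corpus path of the sample (the info column). A minimal sketch of loading and inspecting a row, assuming the dump has been exported as a CSV with these four columns (the filename exceptions.csv is hypothetical):

    import pandas as pd

    # Load the dump; column names follow the header above.
    df = pd.read_csv('exceptions.csv')

    row = df.iloc[0]
    print(row['label'])  # e.g. 'KeyboardInterrupt'
    # Substituting the label back into the mask restores the original source:
    print(row['function'].replace('__HOLE__', row['label']))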
4,200 |
def interact(self):
    self.output.write('\n')
    while True:
        try:
            request = self.getline('help> ')
            if not request: break
        except (__HOLE__, EOFError):
            break
        request = strip(replace(request, '"', '', "'", ''))
        if lower(request) in ('q', 'quit'): break
        self.help(request)
|
KeyboardInterrupt
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/pydoc.py/Helper.interact
|
4,201 |
def showtopic(self, topic):
    try:
        import pydoc_topics
    except __HOLE__:
        self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_topics" could not be found.
''')
        return
    target = self.topics.get(topic, self.keywords.get(topic))
    if not target:
        self.output.write('no documentation found for %s\n' % repr(topic))
        return
    if type(target) is type(''):
        return self.showtopic(target)

    label, xrefs = target
    try:
        doc = pydoc_topics.topics[label]
    except KeyError:
        self.output.write('no documentation found for %s\n' % repr(topic))
        return
    pager(strip(doc) + '\n')
    if xrefs:
        import StringIO, formatter
        buffer = StringIO.StringIO()
        formatter.DumbWriter(buffer).send_flowing_data(
            'Related help topics: ' + join(split(xrefs), ', ') + '\n')
        self.output.write('\n%s\n' % buffer.getvalue())
|
ImportError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/pydoc.py/Helper.showtopic
|
4,202 |
def apropos(key):
    """Print all the one-line module summaries that contain a substring."""
    def callback(path, modname, desc):
        if modname[-9:] == '.__init__':
            modname = modname[:-9] + ' (package)'
        print modname, desc and '- ' + desc
    try: import warnings
    except __HOLE__: pass
    else: warnings.filterwarnings('ignore')  # ignore problems during import
    ModuleScanner().run(callback, key)

# --------------------------------------------------- web browser interface
|
ImportError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/pydoc.py/apropos
|
4,203 |
def serve(port, callback=None, completer=None):
    import BaseHTTPServer, mimetools, select

    # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
    class Message(mimetools.Message):
        def __init__(self, fp, seekable=1):
            Message = self.__class__
            Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
            self.encodingheader = self.getheader('content-transfer-encoding')
            self.typeheader = self.getheader('content-type')
            self.parsetype()
            self.parseplist()

    class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def send_document(self, title, contents):
            try:
                self.send_response(200)
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                self.wfile.write(html.page(title, contents))
            except __HOLE__: pass

        def do_GET(self):
            path = self.path
            if path[-5:] == '.html': path = path[:-5]
            if path[:1] == '/': path = path[1:]
            if path and path != '.':
                try:
                    obj = locate(path, forceload=1)
                except ErrorDuringImport, value:
                    self.send_document(path, html.escape(str(value)))
                    return
                if obj:
                    self.send_document(describe(obj), html.document(obj, path))
                else:
                    self.send_document(path,
                        'no Python documentation found for %s' % repr(path))
            else:
                heading = html.heading(
                    '<big><big><strong>Python: Index of Modules</strong></big></big>',
                    '#ffffff', '#7799ee')
                def bltinlink(name):
                    return '<a href="%s.html">%s</a>' % (name, name)
                names = filter(lambda x: x != '__main__',
                               sys.builtin_module_names)
                contents = html.multicolumn(names, bltinlink)
                indices = ['<p>' + html.bigsection(
                    'Built-in Modules', '#ffffff', '#ee77aa', contents)]

                seen = {}
                for dir in sys.path:
                    indices.append(html.index(dir, seen))
                contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <[email protected]></font>'''
                self.send_document('Index of Modules', contents)

        def log_message(self, *args): pass

    class DocServer(BaseHTTPServer.HTTPServer):
        def __init__(self, port, callback):
            host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
            self.address = ('', port)
            self.url = 'http://%s:%d/' % (host, port)
            self.callback = callback
            self.base.__init__(self, self.address, self.handler)

        def serve_until_quit(self):
            import select
            self.quit = False
            while not self.quit:
                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
                if rd: self.handle_request()

        def server_activate(self):
            self.base.server_activate(self)
            if self.callback: self.callback(self)

    DocServer.base = BaseHTTPServer.HTTPServer
    DocServer.handler = DocHandler
    DocHandler.MessageClass = Message
    try:
        try:
            DocServer(port, callback).serve_until_quit()
        except (KeyboardInterrupt, select.error):
            pass
    finally:
        if completer: completer()

# ----------------------------------------------------- graphical interface
|
IOError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/pydoc.py/serve
|
4,204 |
def gui():
    """Graphical interface (starts web server and pops up a control window)."""
    class GUI:
        def __init__(self, window, port=7464):
            self.window = window
            self.server = None
            self.scanner = None

            import Tkinter
            self.server_frm = Tkinter.Frame(window)
            self.title_lbl = Tkinter.Label(self.server_frm,
                text='Starting server...\n ')
            self.open_btn = Tkinter.Button(self.server_frm,
                text='open browser', command=self.open, state='disabled')
            self.quit_btn = Tkinter.Button(self.server_frm,
                text='quit serving', command=self.quit, state='disabled')

            self.search_frm = Tkinter.Frame(window)
            self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
            self.search_ent = Tkinter.Entry(self.search_frm)
            self.search_ent.bind('<Return>', self.search)
            self.stop_btn = Tkinter.Button(self.search_frm,
                text='stop', pady=0, command=self.stop, state='disabled')
            if sys.platform == 'win32':
                # Trying to hide and show this button crashes under Windows.
                self.stop_btn.pack(side='right')

            self.window.title('pydoc')
            self.window.protocol('WM_DELETE_WINDOW', self.quit)
            self.title_lbl.pack(side='top', fill='x')
            self.open_btn.pack(side='left', fill='x', expand=1)
            self.quit_btn.pack(side='right', fill='x', expand=1)
            self.server_frm.pack(side='top', fill='x')

            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            self.search_frm.pack(side='top', fill='x')
            self.search_ent.focus_set()

            font = ('helvetica', sys.platform == 'win32' and 8 or 10)
            self.result_lst = Tkinter.Listbox(window, font=font, height=6)
            self.result_lst.bind('<Button-1>', self.select)
            self.result_lst.bind('<Double-Button-1>', self.goto)
            self.result_scr = Tkinter.Scrollbar(window,
                orient='vertical', command=self.result_lst.yview)
            self.result_lst.config(yscrollcommand=self.result_scr.set)

            self.result_frm = Tkinter.Frame(window)
            self.goto_btn = Tkinter.Button(self.result_frm,
                text='go to selected', command=self.goto)
            self.hide_btn = Tkinter.Button(self.result_frm,
                text='hide results', command=self.hide)
            self.goto_btn.pack(side='left', fill='x', expand=1)
            self.hide_btn.pack(side='right', fill='x', expand=1)

            self.window.update()
            self.minwidth = self.window.winfo_width()
            self.minheight = self.window.winfo_height()
            self.bigminheight = (self.server_frm.winfo_reqheight() +
                                 self.search_frm.winfo_reqheight() +
                                 self.result_lst.winfo_reqheight() +
                                 self.result_frm.winfo_reqheight())
            self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
            self.expanded = 0
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.window.tk.willdispatch()

            import threading
            threading.Thread(
                target=serve, args=(port, self.ready, self.quit)).start()

        def ready(self, server):
            self.server = server
            self.title_lbl.config(
                text='Python documentation server at\n' + server.url)
            self.open_btn.config(state='normal')
            self.quit_btn.config(state='normal')

        def open(self, event=None, url=None):
            url = url or self.server.url
            try:
                import webbrowser
                webbrowser.open(url)
            except __HOLE__:  # pre-webbrowser.py compatibility
                if sys.platform == 'win32':
                    os.system('start "%s"' % url)
                elif sys.platform == 'mac':
                    try: import ic
                    except ImportError: pass
                    else: ic.launchurl(url)
                else:
                    rc = os.system('netscape -remote "openURL(%s)" &' % url)
                    if rc: os.system('netscape "%s" &' % url)

        def quit(self, event=None):
            if self.server:
                self.server.quit = 1
            self.window.quit()

        def search(self, event=None):
            key = self.search_ent.get()
            self.stop_btn.pack(side='right')
            self.stop_btn.config(state='normal')
            self.search_lbl.config(text='Searching for "%s"...' % key)
            self.search_ent.forget()
            self.search_lbl.pack(side='left')
            self.result_lst.delete(0, 'end')
            self.goto_btn.config(state='disabled')
            self.expand()

            import threading
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = ModuleScanner()
            threading.Thread(target=self.scanner.run,
                             args=(self.update, key, self.done)).start()

        def update(self, path, modname, desc):
            if modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            self.result_lst.insert('end',
                modname + ' - ' + (desc or '(no description)'))

        def stop(self, event=None):
            if self.scanner:
                self.scanner.quit = 1
                self.scanner = None

        def done(self):
            self.scanner = None
            self.search_lbl.config(text='Search for')
            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            if sys.platform != 'win32': self.stop_btn.forget()
            self.stop_btn.config(state='disabled')

        def select(self, event=None):
            self.goto_btn.config(state='normal')

        def goto(self, event=None):
            selection = self.result_lst.curselection()
            if selection:
                modname = split(self.result_lst.get(selection[0]))[0]
                self.open(url=self.server.url + modname + '.html')

        def collapse(self):
            if not self.expanded: return
            self.result_frm.forget()
            self.result_scr.forget()
            self.result_lst.forget()
            self.bigwidth = self.window.winfo_width()
            self.bigheight = self.window.winfo_height()
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.expanded = 0

        def expand(self):
            if self.expanded: return
            self.result_frm.pack(side='bottom', fill='x')
            self.result_scr.pack(side='right', fill='y')
            self.result_lst.pack(side='top', fill='both', expand=1)
            self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
            self.window.wm_minsize(self.minwidth, self.bigminheight)
            self.expanded = 1

        def hide(self, event=None):
            self.stop()
            self.collapse()

    import Tkinter
    try:
        root = Tkinter.Tk()
        # Tk will crash if pythonw.exe has an XP .manifest
        # file and the root is not destroyed explicitly.
        # If the problem is ever fixed in Tk, the explicit
        # destroy can go.
        try:
            gui = GUI(root)
            root.mainloop()
        finally:
            root.destroy()
    except KeyboardInterrupt:
        pass

# -------------------------------------------------- command-line interface
|
ImportError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/pydoc.py/gui
|
4,205 |
def cli():
    """Command-line interface (looks at sys.argv to decide what to do)."""
    import getopt
    class BadUsage: pass

    # Scripts don't get the current directory in their path by default.
    scriptdir = os.path.dirname(sys.argv[0])
    if scriptdir in sys.path:
        sys.path.remove(scriptdir)
    sys.path.insert(0, '.')

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
        writing = 0

        for opt, val in opts:
            if opt == '-g':
                gui()
                return
            if opt == '-k':
                apropos(val)
                return
            if opt == '-p':
                try:
                    port = int(val)
                except __HOLE__:
                    raise BadUsage
                def ready(server):
                    print 'pydoc server ready at %s' % server.url
                def stopped():
                    print 'pydoc server stopped'
                serve(port, ready, stopped)
                return
            if opt == '-w':
                writing = 1

        if not args: raise BadUsage
        for arg in args:
            if ispath(arg) and not os.path.exists(arg):
                print 'file %r does not exist' % arg
                break
            try:
                if ispath(arg) and os.path.isfile(arg):
                    arg = importfile(arg)
                if writing:
                    if ispath(arg) and os.path.isdir(arg):
                        writedocs(arg)
                    else:
                        writedoc(arg)
                else:
                    help.help(arg)
            except ErrorDuringImport, value:
                print value

    except (getopt.error, BadUsage):
        cmd = os.path.basename(sys.argv[0])
        print """pydoc - the Python documentation tool

%s <name> ...
    Show text documentation on something. <name> may be the name of a
    Python keyword, topic, function, module, or package, or a dotted
    reference to a class or function within a module or module in a
    package. If <name> contains a '%s', it is used as the path to a
    Python source file to document. If name is 'keywords', 'topics',
    or 'modules', a listing of these things is displayed.

%s -k <keyword>
    Search for a keyword in the synopsis lines of all available modules.

%s -p <port>
    Start an HTTP server on the given port on the local machine.

%s -g
    Pop up a graphical interface for finding and serving documentation.

%s -w <name> ...
    Write out the HTML documentation for a module to a file in the current
    directory. If <name> contains a '%s', it is treated as a filename; if
    it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
|
ValueError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/pydoc.py/cli
|
4,206 |
def testQueryShouldBehaveLikeDict(self):
    try:
        self.query['zap']
        self.fail()
    except __HOLE__:
        pass
    self.query['zap'] = 'x'
    self.assert_(self.query['zap'] == 'x')
|
KeyError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-build/python-libs/gdata/tests/gdata_tests/service_test.py/QueryTest.testQueryShouldBehaveLikeDict
|
4,207 |
def __eq__(self, other):
    """ Compare the JID to another instance or to string for equality. """
    try: other = JID(other)
    except __HOLE__: return 0
    return self.resource == other.resource and self.__str__(0) == other.__str__(0)
|
ValueError
|
dataset/ETHPy150Open CouchPotato/CouchPotatoServer/libs/xmpp/protocol.py/JID.__eq__
|
4,208 |
def setRequired(self, req=1):
    """ Change the state of the 'required' flag. """
    if req: self.setTag('required')
    else:
        try: self.delChild('required')
        except __HOLE__: return
|
ValueError
|
dataset/ETHPy150Open CouchPotato/CouchPotatoServer/libs/xmpp/protocol.py/DataField.setRequired
|
4,209 |
def get_by_name(self, region, container_name, list_objects=False):
    """Get a container by its name.

    As two containers with the same name can exist in two
    different regions we need to limit the search to one region.

    :param container_name: Name of the container to retrieve
    :raises ResourceNotFoundError: Container does not exist
    """
    try:
        region_name = region.name
    except __HOLE__:
        region_name = region
    res = self._swift_call(region_name,
                           'head_container',
                           container_name)
    return self._en_dict_to_obj(container_name, region_name, res)
|
AttributeError
|
dataset/ETHPy150Open runabove/python-runabove/runabove/storage.py/ContainerManager.get_by_name
|
4,210 |
def _swift_call(self, region, action, *args, **kwargs):
    """Wrap calls to swiftclient to allow retry."""
    try:
        region_name = region.name
    except __HOLE__:
        region_name = region
    retries = 0
    while retries < 3:
        if region_name not in self.swifts:
            self.swifts[region_name] = self._get_swift_client(region_name)
        swift = self.swifts[region_name]['client']
        call = getattr(swift, action.lower())
        try:
            return call(*args, **kwargs)
        except swiftclient.exceptions.ClientException as e:
            if e.http_status == 401:
                # Token is invalid, regenerate swift clients
                del self.swifts[region_name]
            if e.http_status == 404:
                raise ResourceNotFoundError(msg=e.msg)
            else:
                raise e
    raise APIError(msg='Impossible to get a valid token')
|
AttributeError
|
dataset/ETHPy150Open runabove/python-runabove/runabove/storage.py/ContainerManager._swift_call
|
4,211 |
def delete(self, region, container):
    """Delete a container.

    :param region: Region where the container will be deleted
    :param container: Container to delete
    """
    try:
        container_name = container.name
    except __HOLE__:
        container_name = container
    self._swift_call(region, 'delete_container', container_name)
|
AttributeError
|
dataset/ETHPy150Open runabove/python-runabove/runabove/storage.py/ContainerManager.delete
|
4,212 |
def set_public(self, region, container, public=True):
    """Set a container publicly available.

    :param region: Region where the container is
    :param container: Container to make public
    :param public: Set container private if False
    """
    try:
        container_name = container.name
    except __HOLE__:
        container_name = container
    if public:
        headers = {'X-Container-Read': '.r:*,.rlistings'}
    else:
        headers = {'X-Container-Read': ''}
    self._swift_call(region, 'post_container',
                     container_name, headers=headers)
|
AttributeError
|
dataset/ETHPy150Open runabove/python-runabove/runabove/storage.py/ContainerManager.set_public
|
4,213 |
def get_region_url(self, region):
    """Get the URL endpoint for storage in a region.

    :param region: Region to get the endpoint for
    """
    try:
        region_name = region.name
    except __HOLE__:
        region_name = region
    try:
        return self.swifts[region_name]['endpoint']
    except KeyError:
        raise ResourceNotFoundError(msg='Region does not exist')
|
AttributeError
|
dataset/ETHPy150Open runabove/python-runabove/runabove/storage.py/ContainerManager.get_region_url
|
4,214 |
def copy_object(self, region, from_container, stored_object,
                to_container=None, new_object_name=None):
    """Server copy an object from a container to another one.

    Containers must be in the same region. Both containers may be
    the same. Meta-data is read and copied from the original object.

    :param region: Region where the containers are
    :param from_container: Container where the original object is
    :param stored_object: Object to copy
    :param to_container: Container where the object will be copied
        to. If None copy into the same container.
    :param new_object_name: Name of the new object. If None new name
        is taken from the original name.
    """
    try:
        region_name = region.name
    except __HOLE__:
        region_name = region
    try:
        from_container_name = from_container.name
    except AttributeError:
        from_container_name = from_container
    try:
        stored_object_name = stored_object.name
        headers = stored_object.meta
    except AttributeError:
        stored_object_name = stored_object
        headers = {}
    if to_container:
        try:
            to_container_name = to_container.name
        except AttributeError:
            to_container_name = to_container
    else:
        to_container_name = from_container_name
    if not new_object_name:
        new_object_name = stored_object_name
    original_location = '/%s/%s' % (from_container_name, stored_object_name)
    headers['X-Copy-From'] = original_location
    headers['content-length'] = 0
    self._swift_call(region_name,
                     'put_object',
                     to_container_name,
                     new_object_name,
                     None,
                     headers=headers)
|
AttributeError
|
dataset/ETHPy150Open runabove/python-runabove/runabove/storage.py/ContainerManager.copy_object
|
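Rows 4,209 through 4,214 repeat the same duck-typing idiom: each method accepts either a wrapper object exposing a .name attribute or a bare string, and normalizes via EAFP. A condensed standalone sketch of the idiom (the helper name _name_of is hypothetical, not part of the runabove source):

    def _name_of(obj_or_name):
        """Return obj_or_name.name if present, else assume it is already a name."""
        try:
            return obj_or_name.name  # wrapper object, e.g. a Region or Container
        except AttributeError:
            return obj_or_name       # a plain string was passed in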
4,215 |
def get_object_by_name(self, object_name, download=False):
    """Get an object stored by its name.

    Does not download the content of the object by default.

    :param object_name: Name of the object to create
    :param download: If True download also the object content
    """
    if download:
        call = 'get_object'
    else:
        call = 'head_object'
    res = self._manager._swift_call(self.region.name,
                                    call,
                                    self.name,
                                    object_name)
    try:
        return self._en_dict_to_obj(object_name, res[0], data=res[1])
    except __HOLE__:
        return self._en_dict_to_obj(object_name, res)
|
KeyError
|
dataset/ETHPy150Open runabove/python-runabove/runabove/storage.py/Container.get_object_by_name
|
4,216 |
def delete_object(self, object_stored):
    """Delete an object from a container.

    :param object_stored: the object to delete
    """
    try:
        object_name = object_stored.name
    except __HOLE__:
        object_name = object_stored
    self._manager._swift_call(self.region,
                              'delete_object',
                              self.name,
                              object_name)
|
AttributeError
|
dataset/ETHPy150Open runabove/python-runabove/runabove/storage.py/Container.delete_object
|
4,217 |
def execute(self, args=None):
    self.logger.debug('Running ' + self.file)
    try:
        start = time.time()
        proc = subprocess.Popen([self.plugin_path + '/' + self.file],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        successful = False
        stdout, stderr = proc.communicate()
        if proc.returncode is 0:
            for line in stdout.split('\n'):
                match = re.match(r'^([^ ]+)\.value (.+)$', line)
                if line and match:
                    (key, value) = match.groups()
                    self.datastore.push(key, value)
                    successful = True
            if not successful:
                self.logger.warn('No valid output from ' + self.file)
        else:
            code = proc.returncode
            self.logger.error('Plugin returned with exit code %s' % code)
        elapsed = time.time() - start
        # Warn if execution takes more time than the interval
        if elapsed > self.interval:
            params = (self.interval, round(elapsed), self.file)
            message = 'Execution exceeds interval (%ss > %ss): %s' % params
            self.logger.warn(message)
    except __HOLE__:
        self.logger.error('Failed to execute plugin: ' + self.file)
|
OSError
|
dataset/ETHPy150Open tiwilliam/syscollect/syscollect/plugin.py/Plugin.execute
|
4,218 |
def ip_address(x):
    try:
        x = x.decode('ascii')
    except __HOLE__:
        pass
    return _ip_address(x)
|
AttributeError
|
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/application/healthcheck.py/ip_address
|
4,219 |
def drop_privileges(user, group):
    """Drop privileges to specified user and group"""
    if group is not None:
        import grp
        gid = grp.getgrnam(group).gr_gid
        logger.debug("Dropping privileges to group {0}/{1}".format(group, gid))
        try:
            os.setresgid(gid, gid, gid)
        except AttributeError:
            os.setregid(gid, gid)
    if user is not None:
        import pwd
        uid = pwd.getpwnam(user).pw_uid
        logger.debug("Dropping privileges to user {0}/{1}".format(user, uid))
        try:
            os.setresuid(uid, uid, uid)
        except __HOLE__:
            os.setreuid(uid, uid)
|
AttributeError
|
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/application/healthcheck.py/drop_privileges
|
4,220 |
def categories(collection=None, collections=None, reverse=None, order_by=None,
               _pod=None, use_cache=False):
    if isinstance(collection, collection_lib.Collection):
        collection = collection
    elif isinstance(collection, basestring):
        collection = _pod.get_collection(collection)
    else:
        text = '{} must be a Collection instance or a collection path, found: {}.'
        raise ValueError(text.format(collection, type(collection)))
    category_list = collection.list_categories()
    def order_func(doc):
        try:
            return category_list.index(doc.category)
        except __HOLE__:
            return 0
    docs = [doc for doc in collection.list_docs(reverse=reverse)]
    docs = sorted(docs, key=order_func)
    items = itertools.groupby(docs, key=order_func)
    return ((category_list[index], pages) for index, pages in items)
|
ValueError
|
dataset/ETHPy150Open grow/grow/grow/pods/tags.py/categories
|
4,221 |
def handle(self, *args, **options):
    self.stdout.write("Processing products...\n")
    products = set()
    total = StockState.objects.count()
    failed = []
    for i, ss in enumerate(StockState.objects.all()):
        if i % 500 == 0:
            self.stdout.write('done {}/{}'.format(i, total))
        if ss.product_id not in products:
            try:
                product = Product.get(ss.product_id)
                assert product.doc_type == 'Product'
                products.add(ss.product_id)
            except (ResourceNotFound, __HOLE__):
                try:
                    case = CommCareCase.get(ss.case_id)
                except ResourceNotFound:
                    case = CommCareCase()
                failed.append((ss, case))
    if failed:
        for ss, case in failed:
            self.stdout.write('No product with ID "{}" found! case ID: {}, domain {}'.format(
                ss.product_id, ss.case_id, case.domain
            ))
        self.stderr.write('{}/{} stock states FAILED check'.format(len(failed), total))
|
AssertionError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/commtrack/management/commands/check_product_migration.py/Command.handle
|
4,222 |
def add_action(self, action, sub_menu='Advanced'):
    """
    Adds an action to the editor's context menu.

    :param action: QAction to add to the context menu.
    :param sub_menu: The name of a sub menu where to put the action.
        'Advanced' by default. If None or empty, the action will be added
        at the root of the submenu.
    """
    if sub_menu:
        try:
            mnu = self._sub_menus[sub_menu]
        except __HOLE__:
            mnu = QtWidgets.QMenu(sub_menu)
            self.add_menu(mnu)
            self._sub_menus[sub_menu] = mnu
        finally:
            mnu.addAction(action)
    else:
        self._actions.append(action)
    action.setShortcutContext(QtCore.Qt.WidgetShortcut)
    self.addAction(action)
|
KeyError
|
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/api/code_edit.py/CodeEdit.add_action
|
4,223 |
def add_separator(self, sub_menu='Advanced'):
    """
    Adds a separator to the editor's context menu.

    :return: The separator that has been added.
    :rtype: QtWidgets.QAction
    """
    action = QtWidgets.QAction(self)
    action.setSeparator(True)
    if sub_menu:
        try:
            mnu = self._sub_menus[sub_menu]
        except __HOLE__:
            pass
        else:
            mnu.addAction(action)
    else:
        self._actions.append(action)
    return action
|
KeyError
|
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/api/code_edit.py/CodeEdit.add_separator
|
4,224 |
def remove_action(self, action, sub_menu='Advanced'):
    """
    Removes an action/separator from the editor's context menu.

    :param action: Action/separator to remove.
    :param advanced: True to remove the action from the advanced submenu.
    """
    if sub_menu:
        try:
            mnu = self._sub_menus[sub_menu]
        except __HOLE__:
            pass
        else:
            mnu.removeAction(action)
    else:
        try:
            self._actions.remove(action)
        except ValueError:
            pass
    self.removeAction(action)
|
KeyError
|
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/api/code_edit.py/CodeEdit.remove_action
|
4,225 |
def setReadOnly(self, read_only):
    if read_only != self.isReadOnly():
        super().setReadOnly(read_only)
        from pyqode.core.panels import ReadOnlyPanel
        try:
            panel = self.panels.get(ReadOnlyPanel)
        except __HOLE__:
            self.panels.append(
                ReadOnlyPanel(), ReadOnlyPanel.Position.TOP)
        else:
            panel.setVisible(read_only)
|
KeyError
|
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/api/code_edit.py/CodeEdit.setReadOnly
|
4,226 |
def _reset_stylesheet(self):
    """ Resets stylesheet"""
    self.setFont(QtGui.QFont(self._font_family,
                             self._font_size + self._zoom_level))
    flg_stylesheet = hasattr(self, '_flg_stylesheet')
    if QtWidgets.QApplication.instance().styleSheet() or flg_stylesheet:
        self._flg_stylesheet = True
        # On Windows, if the application once had a stylesheet, we must
        # keep on using a stylesheet otherwise strange colors appear
        # see https://github.com/OpenCobolIDE/OpenCobolIDE/issues/65
        # Also happens on plasma 5
        try:
            plasma = os.environ['DESKTOP_SESSION'] == 'plasma'
        except __HOLE__:
            plasma = False
        if sys.platform == 'win32' or plasma:
            self.setStyleSheet('''QPlainTextEdit
            {
                background-color: %s;
                color: %s;
            }
            ''' % (self.background.name(), self.foreground.name()))
        else:
            # on linux/osx we just have to set an empty stylesheet to
            # cancel any previous stylesheet and still keep a correct
            # style for scrollbars
            self.setStyleSheet('')
    else:
        p = self.palette()
        p.setColor(QtGui.QPalette.Base, self.background)
        p.setColor(QtGui.QPalette.Text, self.foreground)
        p.setColor(QtGui.QPalette.Highlight,
                   self.selection_background)
        p.setColor(QtGui.QPalette.HighlightedText,
                   self.selection_foreground)
        self.setPalette(p)
    self.repaint()
|
KeyError
|
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/api/code_edit.py/CodeEdit._reset_stylesheet
|
4,227 |
def compile_messages():
    # check if gettext is installed
    try:
        pipe = subprocess.Popen(['msgfmt', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except __HOLE__ as e:
        raise CommandError('Unable to run msgfmt (gettext) command. You probably don\'t have gettext installed. {}'.format(e))

    basedirs = [os.path.join('conf', 'locale'), 'locale']
    if os.environ.get('DJANGO_SETTINGS_MODULE'):
        from django.conf import settings
        basedirs.extend(settings.LOCALE_PATHS)

    # Gather existing directories.
    basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs)))

    if not basedirs:
        raise CommandError("This script should be run from the Django SVN tree or your project or app tree, or with the settings module specified.")

    for basedir in basedirs:
        for dirpath, dirnames, filenames in os.walk(basedir):
            for f in filenames:
                if f.endswith('.po'):
                    fn = os.path.join(dirpath, f)
                    if has_bom(fn):
                        raise CommandError("The %s file has a BOM (Byte Order Mark). Django only supports .po files encoded in UTF-8 and without any BOM." % fn)
                    pf = os.path.splitext(fn)[0]
                    # Store the names of the .mo and .po files in an environment
                    # variable, rather than doing a string replacement into the
                    # command, so that we can take advantage of shell quoting, to
                    # quote any malicious characters/escaping.
                    # See http://cyberelk.net/tim/articles/cmdline/ar01s02.html
                    if sys.platform == 'win32':  # Different shell-variable syntax
                        bits = ['msgfmt', '--check-format', '-o', pf + '.mo', pf + '.po']
                    else:
                        bits = ['msgfmt', '--check-format', '-o', pf + '.mo', pf + '.po']
                    pipe = subprocess.Popen(bits, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    stderr = pipe.communicate()[-1]
                    if pipe.returncode != 0:
                        return False, stderr
    return True, ''
|
OSError
|
dataset/ETHPy150Open divio/django-cms/cms/tests/test_po.py/compile_messages
|
4,228 |
def copyfile(src, dest, symlink=True):
    if not os.path.exists(src):
        # Some bad symlink in the src
        logger.warn('Cannot find file %s (bad symlink)', src)
        return
    if os.path.exists(dest):
        logger.debug('File %s already exists', dest)
        return
    if not os.path.exists(os.path.dirname(dest)):
        logger.info('Creating parent directories for %s' % os.path.dirname(dest))
        os.makedirs(os.path.dirname(dest))
    if not os.path.islink(src):
        srcpath = os.path.abspath(src)
    else:
        srcpath = os.readlink(src)
    if symlink and hasattr(os, 'symlink') and not is_win:
        logger.info('Symlinking %s', dest)
        try:
            os.symlink(srcpath, dest)
        except (__HOLE__, NotImplementedError):
            logger.info('Symlinking failed, copying to %s', dest)
            copyfileordir(src, dest)
    else:
        logger.info('Copying to %s', dest)
        copyfileordir(src, dest)
|
OSError
|
dataset/ETHPy150Open femmerling/EmeraldBox/virtualenv.py/copyfile
|
4,229 |
def file_search_dirs():
    here = os.path.dirname(os.path.abspath(__file__))
    dirs = ['.', here,
            join(here, 'virtualenv_support')]
    if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
        # Probably some boot script; just in case virtualenv is installed...
        try:
            import virtualenv
        except __HOLE__:
            pass
        else:
            dirs.append(os.path.join(os.path.dirname(virtualenv.__file__), 'virtualenv_support'))
    return [d for d in dirs if os.path.isdir(d)]
|
ImportError
|
dataset/ETHPy150Open femmerling/EmeraldBox/virtualenv.py/file_search_dirs
|
4,230 |
def call_subprocess(cmd, show_stdout=True,
                    filter_stdout=None, cwd=None,
                    raise_on_returncode=True, extra_env=None,
                    remove_from_env=None):
    cmd_parts = []
    for part in cmd:
        if len(part) > 45:
            part = part[:20] + "..." + part[-20:]
        if ' ' in part or '\n' in part or '"' in part or "'" in part:
            part = '"%s"' % part.replace('"', '\\"')
        if hasattr(part, 'decode'):
            try:
                part = part.decode(sys.getdefaultencoding())
            except UnicodeDecodeError:
                part = part.decode(sys.getfilesystemencoding())
        cmd_parts.append(part)
    cmd_desc = ' '.join(cmd_parts)
    if show_stdout:
        stdout = None
    else:
        stdout = subprocess.PIPE
    logger.debug("Running command %s" % cmd_desc)
    if extra_env or remove_from_env:
        env = os.environ.copy()
        if extra_env:
            env.update(extra_env)
        if remove_from_env:
            for varname in remove_from_env:
                env.pop(varname, None)
    else:
        env = None
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env)
    except Exception:
        e = sys.exc_info()[1]
        logger.fatal(
            "Error %s while executing command %s" % (e, cmd_desc))
        raise
    all_output = []
    if stdout is not None:
        stdout = proc.stdout
        encoding = sys.getdefaultencoding()
        fs_encoding = sys.getfilesystemencoding()
        while 1:
            line = stdout.readline()
            try:
                line = line.decode(encoding)
            except __HOLE__:
                line = line.decode(fs_encoding)
            if not line:
                break
            line = line.rstrip()
            all_output.append(line)
            if filter_stdout:
                level = filter_stdout(line)
                if isinstance(level, tuple):
                    level, line = level
                logger.log(level, line)
                if not logger.stdout_level_matches(level):
                    logger.show_progress()
            else:
                logger.info(line)
    else:
        proc.communicate()
    proc.wait()
    if proc.returncode:
        if raise_on_returncode:
            if all_output:
                logger.notify('Complete output from command %s:' % cmd_desc)
                logger.notify('\n'.join(all_output) + '\n----------------------------------------')
            raise OSError(
                "Command %s failed with error code %s"
                % (cmd_desc, proc.returncode))
        else:
            logger.warn(
                "Command %s had error code %s"
                % (cmd_desc, proc.returncode))
|
UnicodeDecodeError
|
dataset/ETHPy150Open femmerling/EmeraldBox/virtualenv.py/call_subprocess
|
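Rows 4,230 and 4,235 both hinge on the same fallback idiom: attempt to decode bytes with the default codec, and retry with the filesystem encoding on UnicodeDecodeError. A standalone sketch of the idiom (the helper name decode_with_fallback is hypothetical, not part of virtualenv.py):

    import sys

    def decode_with_fallback(raw):
        # Try the interpreter's default encoding first; fall back to the
        # filesystem encoding, mirroring call_subprocess/fixup_scripts above.
        try:
            return raw.decode(sys.getdefaultencoding())
        except UnicodeDecodeError:
            return raw.decode(sys.getfilesystemencoding())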
4,231 |
def path_locations(home_dir):
    """Return the path locations for the environment (where libraries are,
    where scripts go, etc)"""
    # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
    # prefix arg is broken: http://bugs.python.org/issue3386
    if is_win:
        # Windows has lots of problems with executables with spaces in
        # the name; this function will remove them (using the ~1
        # format):
        mkdir(home_dir)
        if ' ' in home_dir:
            try:
                import win32api
            except __HOLE__:
                print('Error: the path "%s" has a space in it' % home_dir)
                print('To handle these kinds of paths, the win32api module must be installed:')
                print('  http://sourceforge.net/projects/pywin32/')
                sys.exit(3)
            home_dir = win32api.GetShortPathName(home_dir)
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'Scripts')
    if is_jython:
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'bin')
    elif is_pypy:
        lib_dir = home_dir
        inc_dir = join(home_dir, 'include')
        bin_dir = join(home_dir, 'bin')
    elif not is_win:
        lib_dir = join(home_dir, 'lib', py_version)
        multiarch_exec = '/usr/bin/multiarch-platform'
        if is_executable_file(multiarch_exec):
            # In Mageia (2) and Mandriva distros the include dir must be like:
            # virtualenv/include/multiarch-x86_64-linux/python2.7
            # instead of being virtualenv/include/python2.7
            p = subprocess.Popen(multiarch_exec, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            # stdout.strip is needed to remove newline character
            inc_dir = join(home_dir, 'include', stdout.strip(), py_version + abiflags)
        else:
            inc_dir = join(home_dir, 'include', py_version + abiflags)
        bin_dir = join(home_dir, 'bin')
    return home_dir, lib_dir, inc_dir, bin_dir
|
ImportError
|
dataset/ETHPy150Open femmerling/EmeraldBox/virtualenv.py/path_locations
|
4,232 |
def copy_required_modules(dst_prefix):
    import imp
    # If we are running under -p, we need to remove the current
    # directory from sys.path temporarily here, so that we
    # definitely get the modules from the site directory of
    # the interpreter we are running under, not the one
    # virtualenv.py is installed under (which might lead to py2/py3
    # incompatibility issues)
    _prev_sys_path = sys.path
    if os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
        sys.path = sys.path[1:]
    try:
        for modname in REQUIRED_MODULES:
            if modname in sys.builtin_module_names:
                logger.info("Ignoring built-in bootstrap module: %s" % modname)
                continue
            try:
                f, filename, _ = imp.find_module(modname)
            except __HOLE__:
                logger.info("Cannot import bootstrap module: %s" % modname)
            else:
                if f is not None:
                    f.close()
                dst_filename = change_prefix(filename, dst_prefix)
                copyfile(filename, dst_filename)
                if filename.endswith('.pyc'):
                    pyfile = filename[:-1]
                    if os.path.exists(pyfile):
                        copyfile(pyfile, dst_filename[:-1])
    finally:
        sys.path = _prev_sys_path
|
ImportError
|
dataset/ETHPy150Open femmerling/EmeraldBox/virtualenv.py/copy_required_modules
|
4,233 |
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear):
    """Install just the base environment, no distutils patches etc"""
    if sys.executable.startswith(bin_dir):
        print('Please use the *system* python to run this script')
        return

    if clear:
        rmtree(lib_dir)
        ## FIXME: why not delete it?
        ## Maybe it should delete everything with #!/path/to/venv/python in it
        logger.notify('Not deleting %s', bin_dir)

    if hasattr(sys, 'real_prefix'):
        logger.notify('Using real prefix %r' % sys.real_prefix)
        prefix = sys.real_prefix
    else:
        prefix = sys.prefix
    mkdir(lib_dir)
    fix_lib64(lib_dir)
    stdlib_dirs = [os.path.dirname(os.__file__)]
    if is_win:
        stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
    elif is_darwin:
        stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
    if hasattr(os, 'symlink'):
        logger.info('Symlinking Python bootstrap modules')
    else:
        logger.info('Copying Python bootstrap modules')
    logger.indent += 2
    try:
        # copy required files...
        for stdlib_dir in stdlib_dirs:
            if not os.path.isdir(stdlib_dir):
                continue
            for fn in os.listdir(stdlib_dir):
                bn = os.path.splitext(fn)[0]
                if fn != 'site-packages' and bn in REQUIRED_FILES:
                    copyfile(join(stdlib_dir, fn), join(lib_dir, fn))
        # ...and modules
        copy_required_modules(home_dir)
    finally:
        logger.indent -= 2
    mkdir(join(lib_dir, 'site-packages'))
    import site
    site_filename = site.__file__
    if site_filename.endswith('.pyc'):
        site_filename = site_filename[:-1]
    elif site_filename.endswith('$py.class'):
        site_filename = site_filename.replace('$py.class', '.py')
    site_filename_dst = change_prefix(site_filename, home_dir)
    site_dir = os.path.dirname(site_filename_dst)
    writefile(site_filename_dst, SITE_PY)
    writefile(join(site_dir, 'orig-prefix.txt'), prefix)
    site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
    if not site_packages:
        writefile(site_packages_filename, '')

    if is_pypy or is_win:
        stdinc_dir = join(prefix, 'include')
    else:
        stdinc_dir = join(prefix, 'include', py_version + abiflags)
    if os.path.exists(stdinc_dir):
        copyfile(stdinc_dir, inc_dir)
    else:
        logger.debug('No include dir %s' % stdinc_dir)

    # pypy never uses exec_prefix, just ignore it
    if sys.exec_prefix != prefix and not is_pypy:
        if is_win:
            exec_dir = join(sys.exec_prefix, 'lib')
        elif is_jython:
            exec_dir = join(sys.exec_prefix, 'Lib')
        else:
            exec_dir = join(sys.exec_prefix, 'lib', py_version)
        for fn in os.listdir(exec_dir):
            copyfile(join(exec_dir, fn), join(lib_dir, fn))

    if is_jython:
        # Jython has either jython-dev.jar and javalib/ dir, or just
        # jython.jar
        for name in 'jython-dev.jar', 'javalib', 'jython.jar':
            src = join(prefix, name)
            if os.path.exists(src):
                copyfile(src, join(home_dir, name))
        # XXX: registry should always exist after Jython 2.5rc1
        src = join(prefix, 'registry')
        if os.path.exists(src):
            copyfile(src, join(home_dir, 'registry'), symlink=False)
        copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
                 symlink=False)

    mkdir(bin_dir)
    py_executable = join(bin_dir, os.path.basename(sys.executable))
    if 'Python.framework' in prefix:
        # OS X framework builds cause validation to break
        # https://github.com/pypa/virtualenv/issues/322
        if os.environ.get('__PYVENV_LAUNCHER__'):
            os.unsetenv('__PYVENV_LAUNCHER__')
        if re.search(r'/Python(?:-32|-64)*$', py_executable):
            # The name of the python executable is not quite what
            # we want, rename it.
            py_executable = os.path.join(
                os.path.dirname(py_executable), 'python')

    logger.notify('New %s executable in %s', expected_exe, py_executable)
    pcbuild_dir = os.path.dirname(sys.executable)
    pyd_pth = os.path.join(lib_dir, 'site-packages', 'virtualenv_builddir_pyd.pth')
    if is_win and os.path.exists(os.path.join(pcbuild_dir, 'build.bat')):
        logger.notify('Detected python running from build directory %s', pcbuild_dir)
        logger.notify('Writing .pth file linking to build directory for *.pyd files')
        writefile(pyd_pth, pcbuild_dir)
    else:
        pcbuild_dir = None
        if os.path.exists(pyd_pth):
            logger.info('Deleting %s (not Windows env or not build directory python)' % pyd_pth)
            os.unlink(pyd_pth)

    if sys.executable != py_executable:
        ## FIXME: could I just hard link?
        executable = sys.executable
        if is_cygwin and os.path.exists(executable + '.exe'):
            # Cygwin misreports sys.executable sometimes
            executable += '.exe'
            py_executable += '.exe'
            logger.info('Executable actually exists in %s' % executable)
        shutil.copyfile(executable, py_executable)
        make_exe(py_executable)
        if is_win or is_cygwin:
            pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
            if os.path.exists(pythonw):
                logger.info('Also created pythonw.exe')
                shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
            python_d = os.path.join(os.path.dirname(sys.executable), 'python_d.exe')
            python_d_dest = os.path.join(os.path.dirname(py_executable), 'python_d.exe')
            if os.path.exists(python_d):
                logger.info('Also created python_d.exe')
                shutil.copyfile(python_d, python_d_dest)
            elif os.path.exists(python_d_dest):
                logger.info('Removed python_d.exe as it is no longer at the source')
                os.unlink(python_d_dest)
            # we need to copy the DLL to enforce that windows will load the correct one.
            # may not exist if we are cygwin.
            py_executable_dll = 'python%s%s.dll' % (
                sys.version_info[0], sys.version_info[1])
            py_executable_dll_d = 'python%s%s_d.dll' % (
                sys.version_info[0], sys.version_info[1])
            pythondll = os.path.join(os.path.dirname(sys.executable), py_executable_dll)
            pythondll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d)
            pythondll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d)
            if os.path.exists(pythondll):
                logger.info('Also created %s' % py_executable_dll)
                shutil.copyfile(pythondll, os.path.join(os.path.dirname(py_executable), py_executable_dll))
            if os.path.exists(pythondll_d):
                logger.info('Also created %s' % py_executable_dll_d)
                shutil.copyfile(pythondll_d, pythondll_d_dest)
            elif os.path.exists(pythondll_d_dest):
                logger.info('Removed %s as the source does not exist' % pythondll_d_dest)
                os.unlink(pythondll_d_dest)
        if is_pypy:
            # make a symlink python --> pypy-c
            python_executable = os.path.join(os.path.dirname(py_executable), 'python')
            if sys.platform in ('win32', 'cygwin'):
                python_executable += '.exe'
            logger.info('Also created executable %s' % python_executable)
            copyfile(py_executable, python_executable)
            if is_win:
                for name in 'libexpat.dll', 'libpypy.dll', 'libpypy-c.dll', 'libeay32.dll', 'ssleay32.dll', 'sqlite.dll':
                    src = join(prefix, name)
                    if os.path.exists(src):
                        copyfile(src, join(bin_dir, name))

    if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
        secondary_exe = os.path.join(os.path.dirname(py_executable),
                                     expected_exe)
        py_executable_ext = os.path.splitext(py_executable)[1]
        if py_executable_ext == '.exe':
            # python2.4 gives an extension of '.4' :P
            secondary_exe += py_executable_ext
        if os.path.exists(secondary_exe):
            logger.warn('Not overwriting existing %s script %s (you must use %s)'
                        % (expected_exe, secondary_exe, py_executable))
        else:
            logger.notify('Also creating executable in %s' % secondary_exe)
            shutil.copyfile(sys.executable, secondary_exe)
            make_exe(secondary_exe)

    if '.framework' in prefix:
        if 'Python.framework' in prefix:
            logger.debug('MacOSX Python framework detected')
            # Make sure we use the embedded interpreter inside
            # the framework, even if sys.executable points to
            # the stub executable in ${sys.prefix}/bin
            # See http://groups.google.com/group/python-virtualenv/
            # browse_thread/thread/17cab2f85da75951
            original_python = os.path.join(
                prefix, 'Resources/Python.app/Contents/MacOS/Python')
        if 'EPD' in prefix:
            logger.debug('EPD framework detected')
            original_python = os.path.join(prefix, 'bin/python')
        shutil.copy(original_python, py_executable)

        # Copy the framework's dylib into the virtual
        # environment
        virtual_lib = os.path.join(home_dir, '.Python')

        if os.path.exists(virtual_lib):
            os.unlink(virtual_lib)
        copyfile(
            os.path.join(prefix, 'Python'),
            virtual_lib)

        # And then change the install_name of the copied python executable
        try:
            mach_o_change(py_executable,
                          os.path.join(prefix, 'Python'),
                          '@executable_path/../.Python')
        except:
            e = sys.exc_info()[1]
            logger.warn("Could not call mach_o_change: %s. "
                        "Trying to call install_name_tool instead." % e)
            try:
                call_subprocess(
                    ["install_name_tool", "-change",
                     os.path.join(prefix, 'Python'),
                     '@executable_path/../.Python',
                     py_executable])
            except:
                logger.fatal("Could not call install_name_tool -- you must "
                             "have Apple's development tools installed")
                raise

    # Some tools depend on pythonX.Y being present
    py_executable_version = '%s.%s' % (
        sys.version_info[0], sys.version_info[1])
    if not py_executable.endswith(py_executable_version):
        # symlinking pythonX.Y > python
        pth = py_executable + '%s.%s' % (
            sys.version_info[0], sys.version_info[1])
        if os.path.exists(pth):
            os.unlink(pth)
        os.symlink('python', pth)
    else:
        # reverse symlinking python -> pythonX.Y (with --python)
        pth = join(bin_dir, 'python')
        if os.path.exists(pth):
            os.unlink(pth)
        os.symlink(os.path.basename(py_executable), pth)

    if is_win and ' ' in py_executable:
        # There's a bug with subprocess on Windows when using a first
        # argument that has a space in it. Instead we have to quote
        # the value:
        py_executable = '"%s"' % py_executable
    # NOTE: keep this check as one line, cmd.exe doesn't cope with line breaks
    cmd = [py_executable, '-c', 'import sys;out=sys.stdout;'
           'getattr(out, "buffer", out).write(sys.prefix.encode("utf-8"))']
    logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
    try:
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE)
        proc_stdout, proc_stderr = proc.communicate()
    except __HOLE__:
        e = sys.exc_info()[1]
        if e.errno == errno.EACCES:
            logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
            sys.exit(100)
        else:
            raise e

    proc_stdout = proc_stdout.strip().decode("utf-8")
    proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
    norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
    if hasattr(norm_home_dir, 'decode'):
        norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
    if proc_stdout != norm_home_dir:
        logger.fatal(
            'ERROR: The executable %s is not functioning' % py_executable)
        logger.fatal(
            'ERROR: It thinks sys.prefix is %r (should be %r)'
            % (proc_stdout, norm_home_dir))
        logger.fatal(
            'ERROR: virtualenv is not compatible with this system or executable')
        if is_win:
            logger.fatal(
                'Note: some Windows users have reported this error when they '
                'installed Python for "Only this user" or have multiple '
                'versions of Python installed. Copying the appropriate '
                'PythonXX.dll to the virtualenv Scripts/ directory may fix '
                'this problem.')
        sys.exit(100)
    else:
        logger.info('Got sys.prefix result: %r' % proc_stdout)

    pydistutils = os.path.expanduser('~/.pydistutils.cfg')
    if os.path.exists(pydistutils):
        logger.notify('Please make sure you remove any previous custom paths from '
                      'your %s file.' % pydistutils)
    ## FIXME: really this should be calculated earlier

    fix_local_scheme(home_dir)

    if site_packages:
        if os.path.exists(site_packages_filename):
            logger.info('Deleting %s' % site_packages_filename)
            os.unlink(site_packages_filename)

    return py_executable
|
OSError
|
dataset/ETHPy150Open femmerling/EmeraldBox/virtualenv.py/install_python
|
4,234 |
def fix_local_scheme(home_dir):
    """
    Platforms that use the "posix_local" install scheme (like Ubuntu with
    Python 2.7) need to be given an additional "local" location, sigh.
    """
    try:
        import sysconfig
    except __HOLE__:
        pass
    else:
        if sysconfig._get_default_scheme() == 'posix_local':
            local_path = os.path.join(home_dir, 'local')
            if not os.path.exists(local_path):
                os.mkdir(local_path)
                for subdir_name in os.listdir(home_dir):
                    if subdir_name == 'local':
                        continue
                    os.symlink(os.path.abspath(os.path.join(home_dir, subdir_name)), \
                               os.path.join(local_path, subdir_name))
|
ImportError
|
dataset/ETHPy150Open femmerling/EmeraldBox/virtualenv.py/fix_local_scheme
|
4,235 |
def fixup_scripts(home_dir):
    # This is what we expect at the top of scripts:
    shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir))
    # This is what we'll put:
    new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
    if is_win:
        bin_suffix = 'Scripts'
    else:
        bin_suffix = 'bin'
    bin_dir = os.path.join(home_dir, bin_suffix)
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
    for filename in os.listdir(bin_dir):
        filename = os.path.join(bin_dir, filename)
        if not os.path.isfile(filename):
            # ignore subdirs, e.g. .svn ones.
            continue
        f = open(filename, 'rb')
        try:
            try:
                lines = f.read().decode('utf-8').splitlines()
            except __HOLE__:
                # This is probably a binary program instead
                # of a script, so just ignore it.
                continue
        finally:
            f.close()
        if not lines:
            logger.warn('Script %s is an empty file' % filename)
            continue
        if not lines[0].strip().startswith(shebang):
            if os.path.basename(filename) in OK_ABS_SCRIPTS:
                logger.debug('Cannot make script %s relative' % filename)
            elif lines[0].strip() == new_shebang:
                logger.info('Script %s has already been made relative' % filename)
            else:
                logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
                            % (filename, shebang))
            continue
        logger.notify('Making script %s relative' % filename)
        script = relative_script([new_shebang] + lines[1:])
        f = open(filename, 'wb')
        f.write('\n'.join(script).encode('utf-8'))
        f.close()
|
UnicodeDecodeError
|
dataset/ETHPy150Open femmerling/EmeraldBox/virtualenv.py/fixup_scripts
|
4,236 |
def __get__(self, obj, cls=None):
    try:
        e = SaunterWebDriver.find_element_by_locator(self.locator)
        return int(e.text)
    except __HOLE__ as e:
        if str(e) == "'SeleniumWrapper' object has no attribute 'connection'":
            pass
        else:
            raise e
    except ElementNotFound as e:
        msg = "Element %s was not found. It is used in the %s page object in the %s module." % (self.locator, obj.__class__.__name__, self.__module__)
        raise ElementNotFound(msg)
|
AttributeError
|
dataset/ETHPy150Open Element-34/py.saunter/saunter/po/webdriver/number.py/Number.__get__
|
4,237 |
def process(self, client_secret, raw_response, x_hub_signature):
    if not self._verify_signature(client_secret, raw_response, x_hub_signature):
        raise SubscriptionVerifyError("X-Hub-Signature and hmac digest did not match")
    try:
        response = simplejson.loads(raw_response)
    except __HOLE__:
        raise SubscriptionError('Unable to parse response, not valid JSON.')
    for update in response:
        self._process_update(update)
|
ValueError
|
dataset/ETHPy150Open facebookarchive/python-instagram/instagram/subscriptions.py/SubscriptionsReactor.process
|
4,238 |
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            del moves.__dict__[name]
        except __HOLE__:
            raise AttributeError("no such move, %r" % (name,))
|
KeyError
|
dataset/ETHPy150Open alecthomas/importmagic/importmagic/six.py/remove_move
|
4,239 |
def culaCheckStatus(status):
    """
    Raise an exception corresponding to the specified CULA status
    code.

    Parameters
    ----------
    status : int
        CULA status code.
    """
    if status != 0:
        error = culaGetErrorInfo()
        try:
            raise culaExceptions[status](error)
        except __HOLE__:
            raise culaError(error)
|
KeyError
|
dataset/ETHPy150Open lebedov/scikit-cuda/skcuda/cula.py/culaCheckStatus
|
4,240 |
def test_mutatingiteration(self):
    d = {}
    d[1] = 1
    try:
        for i in d:
            d[i+1] = 1
    except __HOLE__:
        pass
    else:
        self.fail("changing dict size during iteration doesn't raise Error")
|
RuntimeError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_dict.py/DictTest.test_mutatingiteration
|
4,241 |
def test_missing(self):
    # Make sure dict doesn't have a __missing__ method
    self.assertEqual(hasattr(dict, "__missing__"), False)
    self.assertEqual(hasattr({}, "__missing__"), False)
    # Test several cases:
    # (D) subclass defines __missing__ method returning a value
    # (E) subclass defines __missing__ method raising RuntimeError
    # (F) subclass sets __missing__ instance variable (no effect)
    # (G) subclass doesn't define __missing__ at all
    class D(dict):
        def __missing__(self, key):
            return 42
    d = D({1: 2, 3: 4})
    self.assertEqual(d[1], 2)
    self.assertEqual(d[3], 4)
    self.assert_(2 not in d)
    self.assert_(2 not in d.keys())
    self.assertEqual(d[2], 42)
    class E(dict):
        def __missing__(self, key):
            raise RuntimeError(key)
    e = E()
    try:
        e[42]
    except __HOLE__, err:
        self.assertEqual(err.args, (42,))
    else:
        self.fail("e[42] didn't raise RuntimeError")
    class F(dict):
        def __init__(self):
            # An instance variable __missing__ should have no effect
            self.__missing__ = lambda key: None
    f = F()
    try:
        f[42]
    except KeyError, err:
        self.assertEqual(err.args, (42,))
    else:
        self.fail("f[42] didn't raise KeyError")
    class G(dict):
        pass
    g = G()
    try:
        g[42]
    except KeyError, err:
        self.assertEqual(err.args, (42,))
    else:
        self.fail("g[42] didn't raise KeyError")
|
RuntimeError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_dict.py/DictTest.test_missing
|
4,242 |
def test_tuple_keyerror(self):
    # SF #1576657
    d = {}
    try:
        d[(1,)]
    except __HOLE__, e:
        self.assertEqual(e.args, ((1,),))
    else:
        self.fail("missing KeyError")
|
KeyError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_dict.py/DictTest.test_tuple_keyerror
|
4,243 |
def main():
    global REQUIRED, IGNORED
    if len(sys.argv) < 2:
        print USAGE
    # make target folder
    target = sys.argv[1]
    os.mkdir(target)
    # change to os specific sep
    REQUIRED = REQUIRED.replace('/', os.sep)
    IGNORED = IGNORED.replace('/', os.sep)
    # make a list of all files to include
    files = [x.strip() for x in REQUIRED.split('\n')
             if x and not x[0] == '#']
    ignore = [x.strip() for x in IGNORED.split('\n')
              if x and not x[0] == '#']
    def accept(filename):
        for p in ignore:
            if filename.startswith(p):
                return False
        return True
    pattern = os.path.join('gluon', '*.py')
    while True:
        newfiles = [x for x in glob.glob(pattern) if accept(x)]
        if not newfiles:
            break
        files += newfiles
        pattern = os.path.join(pattern[:-3], '*.py')
    # copy all files, make missing folder, build default.py
    files.sort()
    defaultpy = os.path.join(
        'applications', 'welcome', 'controllers', 'default.py')
    for f in files:
        dirs = f.split(os.path.sep)
        for i in range(1, len(dirs)):
            try:
                os.mkdir(target + os.sep + os.path.join(*dirs[:i]))
            except __HOLE__:
                pass
        if f == defaultpy:
            open(os.path.join(
                target, f), 'w').write('def index(): return "hello"\n')
        else:
            shutil.copyfile(f, os.path.join(target, f))
|
OSError
|
dataset/ETHPy150Open uwdata/termite-data-server/web2py/scripts/make_min_web2py.py/main
|
4,244 |
def test_get_debug_values_exc():
    """tests that get_debug_value raises an exception when
    debugger is set to raise and a value is missing """

    prev_value = config.compute_test_value
    try:
        config.compute_test_value = 'raise'

        x = T.vector()

        try:
            for x_val in op.get_debug_values(x):
                # this assert catches the case where we
                # erroneously get a value returned
                assert False
            raised = False
        except __HOLE__:
            raised = True

        # this assert catches the case where we got []
        # returned, and possibly issued a warning,
        # rather than raising an exception
        assert raised
    finally:
        config.compute_test_value = prev_value
|
AttributeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/tests/test_op.py/test_get_debug_values_exc
|
4,245 |
def test_debug_error_message():
    """tests that debug_error_message raises an
    exception when it should."""

    prev_value = config.compute_test_value

    for mode in ['ignore', 'raise']:
        try:
            config.compute_test_value = mode

            try:
                op.debug_error_message('msg')
                raised = False
            except __HOLE__:
                raised = True
            assert raised
        finally:
            config.compute_test_value = prev_value
|
ValueError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/tests/test_op.py/test_debug_error_message
|
4,246 |
def run(self):
    try:
        while 1:
            self.listen()
    except __HOLE__:
        return
|
KeyboardInterrupt
|
dataset/ETHPy150Open kdart/pycopia/net/pycopia/simpleserver.py/GenericServer.run
|
4,247 |
def run(self):
    try:
        try:
            while 1:
                line = raw_input("> ")
                self._sock.send(line)
                data = self._sock.recv(1024)
                if not data:
                    break
                else:
                    print data
        except __HOLE__:
            pass
    finally:
        self._sock.close()
|
KeyboardInterrupt
|
dataset/ETHPy150Open kdart/pycopia/net/pycopia/simpleserver.py/GenericClient.run
|
4,248 |
def get_homedir():
    """return home directory, or best approximation

    On Windows, this returns the Roaming Profile APPDATA
    (use CSIDL_LOCAL_APPDATA for Local Profile)
    """
    homedir = '.'
    if os.name == 'nt':
        # For Windows, ask for parent of Roaming 'Application Data' directory
        try:
            from win32com.shell import shellcon, shell
            homedir = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0)
        except __HOLE__:  # if win32com is not found
            homedir = os.get_environ('HOME', '.')
    else:
        try:
            os.path.expanduser("~")
        except:
            pass
    return homedir
|
ImportError
|
dataset/ETHPy150Open xraypy/xraylarch/plugins/io/fileutils.py/get_homedir
|
4,249 |
def getAttributeNames(object, includeMagic=1, includeSingle=1,
                      includeDouble=1):
    """Return list of unique attributes, including inherited, for object."""
    attributes = []
    dict = {}
    if not introspect.hasattrAlwaysReturnsTrue(object):
        # Add some attributes that don't always get picked up.
        special_attrs = ['__bases__', '__class__', '__dict__', '__name__',
                         'func_closure', 'func_code', 'func_defaults',
                         'func_dict', 'func_doc', 'func_globals', 'func_name']
        attributes += [attr for attr in special_attrs \
                       if hasattr(object, attr)]
        # For objects that have traits, get all the trait names since
        # these do not show up in dir(object).
        if hasattr(object, 'trait_names'):
            try:
                attributes += object.trait_names()
            except __HOLE__:
                pass
    if includeMagic:
        try: attributes += object._getAttributeNames()
        except: pass
    # Get all attribute names.
    attrdict = getAllAttributeNames(object)
    # Store the object's dir.
    object_dir = dir(object)
    for (obj_type_name, technique, count), attrlist in attrdict.items():
        # This complexity is necessary to avoid accessing all the
        # attributes of the object. This is very handy for objects
        # whose attributes are lazily evaluated.
        if type(object).__name__ == obj_type_name and technique == 'dir':
            attributes += attrlist
        else:
            attributes += [attr for attr in attrlist \
                           if attr not in object_dir and \
                           hasattr(object, attr)]
    # Remove duplicates from the attribute list.
    for item in attributes:
        dict[item] = None
    attributes = dict.keys()
    # new-style swig wrappings can result in non-string attributes
    # e.g. ITK http://www.itk.org/
    attributes = [attribute for attribute in attributes \
                  if isinstance(attribute, basestring)]
    attributes.sort(lambda x, y: cmp(x.upper(), y.upper()))
    if not includeSingle:
        attributes = filter(lambda item: item[0]!='_' \
                            or item[1]=='_', attributes)
    if not includeDouble:
        attributes = filter(lambda item: item[:2]!='__', attributes)
    return attributes

# Replace introspect's version with ours.
|
TypeError
|
dataset/ETHPy150Open enthought/pyface/pyface/util/fix_introspect_bug.py/getAttributeNames
|
4,250 |
def parse_uri(self, uri):
"""
Parse URLs. This method fixes an issue where credentials specified
in the URL are interpreted differently in Python 2.6.1+ than prior
versions of Python.
        Note that an Amazon AWS secret key can contain the forward slash,
        which breaks urlparse miserably. This function works around that
        issue.
"""
# Make sure that URIs that contain multiple schemes, such as:
# s3://accesskey:secretkey@https://s3.amazonaws.com/bucket/key-id
# are immediately rejected.
if uri.count('://') != 1:
reason = _("URI cannot contain more than one occurrence "
"of a scheme. If you have specified a URI like "
"s3://accesskey:secretkey@"
"https://s3.amazonaws.com/bucket/key-id"
", you need to change it to use the "
"s3+https:// scheme, like so: "
"s3+https://accesskey:secretkey@"
"s3.amazonaws.com/bucket/key-id")
LOG.info(_LI("Invalid store uri: %s") % reason)
raise exceptions.BadStoreUri(message=reason)
pieces = urllib.parse.urlparse(uri)
assert pieces.scheme in ('s3', 's3+http', 's3+https')
self.scheme = pieces.scheme
path = pieces.path.strip('/')
netloc = pieces.netloc.strip('/')
entire_path = (netloc + '/' + path).strip('/')
if '@' in uri:
creds, path = entire_path.split('@')
cred_parts = creds.split(':')
try:
access_key = cred_parts[0]
secret_key = cred_parts[1]
if six.PY2:
# NOTE(jaypipes): Need to encode to UTF-8 here because of a
# bug in the HMAC library that boto uses.
# See: http://bugs.python.org/issue5285
# See: http://trac.edgewall.org/ticket/8083
access_key = access_key.encode('utf-8')
secret_key = secret_key.encode('utf-8')
self.accesskey = access_key
self.secretkey = secret_key
except __HOLE__:
reason = _("Badly formed S3 credentials")
LOG.info(reason)
raise exceptions.BadStoreUri(message=reason)
else:
self.accesskey = None
path = entire_path
try:
path_parts = path.split('/')
self.key = path_parts.pop()
self.bucket = path_parts.pop()
if path_parts:
self.s3serviceurl = '/'.join(path_parts).strip('/')
else:
reason = _("Badly formed S3 URI. Missing s3 service URL.")
raise exceptions.BadStoreUri(message=reason)
except IndexError:
reason = _("Badly formed S3 URI")
LOG.info(reason)
raise exceptions.BadStoreUri(message=reason)
|
IndexError
|
dataset/ETHPy150Open openstack/glance_store/glance_store/_drivers/s3.py/StoreLocation.parse_uri
|
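The credential parsing above leans on IndexError: cred_parts[1] fails when the URI carries an access key but no secret, and the handler translates that low-level error into a store-specific BadStoreUri. A reduced sketch of the same translate-and-reraise pattern (ParseError is a stand-in for glance_store's exception):

    class ParseError(Exception):
        pass

    def split_credentials(entire_path):
        """'access:secret@host/bucket/key' -> (access, secret, path)."""
        if "@" not in entire_path:
            return None, None, entire_path
        creds, path = entire_path.split("@", 1)
        parts = creds.split(":")
        try:
            return parts[0], parts[1], path   # IndexError if the secret is missing
        except IndexError:
            raise ParseError("Badly formed credentials")

    print(split_credentials("ak:sk@s3.example.com/bucket/key"))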
4,251 |
def add_multipart(self, image_file, image_size, bucket_obj, obj_name, loc,
verifier):
"""
Stores an image file with a multi part upload to S3 backend
:param image_file: The image data to write, as a file-like object
:param bucket_obj: S3 bucket object
:param obj_name: The object name to be stored(image identifier)
:param verifier: An object used to verify signatures for images
:loc: The Store Location Info
"""
checksum = hashlib.md5()
pool_size = self.s3_store_thread_pools
pool = eventlet.greenpool.GreenPool(size=pool_size)
mpu = bucket_obj.initiate_multipart_upload(obj_name)
LOG.debug("Multipart initiate key=%(obj_name)s, "
"UploadId=%(UploadId)s" %
{'obj_name': obj_name,
'UploadId': mpu.id})
cstart = 0
plist = []
chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
write_chunk_size = max(self.s3_store_large_object_chunk_size,
chunk_size)
it = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
buffered_chunk = b''
while True:
try:
buffered_clen = len(buffered_chunk)
if buffered_clen < write_chunk_size:
# keep reading data
read_chunk = next(it)
buffered_chunk += read_chunk
continue
else:
write_chunk = buffered_chunk[:write_chunk_size]
remained_data = buffered_chunk[write_chunk_size:]
checksum.update(write_chunk)
if verifier:
verifier.update(write_chunk)
fp = six.BytesIO(write_chunk)
fp.seek(0)
part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
pool.spawn_n(run_upload, part)
plist.append(part)
cstart += 1
buffered_chunk = remained_data
except __HOLE__:
if len(buffered_chunk) > 0:
# Write the last chunk data
write_chunk = buffered_chunk
checksum.update(write_chunk)
if verifier:
verifier.update(write_chunk)
fp = six.BytesIO(write_chunk)
fp.seek(0)
part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
pool.spawn_n(run_upload, part)
plist.append(part)
break
pedict = {}
total_size = 0
pool.waitall()
for part in plist:
pedict.update(part.etag)
total_size += part.size
success = True
for part in plist:
if not part.success:
success = False
if success:
# Complete
xml = get_mpu_xml(pedict)
bucket_obj.complete_multipart_upload(obj_name,
mpu.id,
xml)
checksum_hex = checksum.hexdigest()
LOG.info(_LI("Multipart complete key=%(obj_name)s "
"UploadId=%(UploadId)s "
"Wrote %(total_size)d bytes to S3 key"
"named %(obj_name)s "
"with checksum %(checksum_hex)s") %
{'obj_name': obj_name,
'UploadId': mpu.id,
'total_size': total_size,
'checksum_hex': checksum_hex})
return (loc.get_uri(), total_size, checksum_hex, {})
else:
# Abort
bucket_obj.cancel_multipart_upload(obj_name, mpu.id)
LOG.error(_LE("Some parts failed to upload to S3. "
"Aborted the object key=%(obj_name)s") %
{'obj_name': obj_name})
msg = (_("Failed to add image object to S3. "
"key=%(obj_name)s") % {'obj_name': obj_name})
raise glance_store.BackendException(msg)
|
StopIteration
|
dataset/ETHPy150Open openstack/glance_store/glance_store/_drivers/s3.py/Store.add_multipart
|
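The upload loop above is driven entirely by StopIteration: next(it) raises it once the image stream is exhausted, and the handler flushes whatever is still buffered as the final part. The buffering skeleton, stripped of the S3 calls (flush is a hypothetical stand-in for spawning an UploadPart):

    def chunked(stream_iter, chunk_size, flush):
        """Re-chunk an iterator of byte strings into fixed-size writes."""
        buffered = b""
        while True:
            try:
                if len(buffered) < chunk_size:
                    buffered += next(stream_iter)   # StopIteration ends the stream
                    continue
                flush(buffered[:chunk_size])
                buffered = buffered[chunk_size:]
            except StopIteration:
                if buffered:
                    flush(buffered)                 # final, possibly short, chunk
                break

    parts = []
    chunked(iter([b"abcde", b"fg", b"hij"]), 4, parts.append)
    print(parts)   # [b'abcd', b'efgh', b'ij']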
4,252 |
def append_files(in_file1, character, in_file2, out_file):
"""
    Created on 24 Oct 2014 for use with Wonderware Archestra.
    Joins in_file1 and in_file2 line for line, writing each pair to
    out_file separated by the given character.
USAGE:
%prog in_file1.csv %character in_file2.txt
@author: Roan Fourie
@mail: [email protected]
"""
return_data = 0
write_data = ''
i = 0
try:
with open(in_file1, 'rt') as fi1:
lines1 = fi1.readlines() # Read all the lines in fi1 as a tuple
with open(in_file2, 'rt') as fi2:
lines2 = fi2.readlines() # Read all the lines in fi2 as a tuple
with open(out_file, 'at') as fo:
fo.seek(0,2)
while i < len(lines1):
lines1[i] = lines1[i].rstrip('\n')
#lines1[i] = lines1[i].rstrip('\r')
fo.write(lines1[i] + character + lines2[i])
i = i + 1
print(write_data)
except __HOLE__:
print("Error in reading/writing file.")
return_data = 2
else:
print('Operation completed successfully.')
return_data = 1
    finally:
        # The 'with' blocks above already closed the files; the original
        # explicit close() calls here raised NameError whenever an open() failed.
        print("done")
return return_data
|
IOError
|
dataset/ETHPy150Open RoanFourie/ArchestrA-Tools/aaTools/aaAppend.py/append_files
|
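For comparison, the same line-for-line join collapses considerably once zip pairs the lines and the with-block handles all of the closing (a sketch with the same behaviour for equal-length inputs; like the readlines-based indexing, it keeps the second file's newlines):

    def append_files(in_file1, character, in_file2, out_file):
        with open(in_file1) as f1, open(in_file2) as f2, open(out_file, "a") as out:
            for left, right in zip(f1, f2):
                out.write(left.rstrip("\n") + character + right)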
4,253 |
def create_job(self, name, user, template_name, **kwargs):
"""
Creates a new job from a JobTemplate.
Args:
name (str): The name of the job.
user (User): A User object for the user who creates the job.
template_name (str): The name of the JobTemplate from which to create the job.
**kwargs
Returns:
A new job object of the type specified by the JobTemplate
"""
try:
template = self.job_templates[template_name]
except __HOLE__, e:
raise KeyError('A job template with name %s was not defined' % (template_name,))
JobClass = template.type
user_workspace = self.app.get_user_workspace(user)
kwrgs = dict(name=name, user=user, label=self.label, workspace=user_workspace.path)
parameters = self._replace_workspaces(template.parameters, user_workspace)
kwrgs.update(parameters)
kwrgs.update(kwargs)
job = JobClass(**kwrgs)
return job
|
KeyError
|
dataset/ETHPy150Open tethysplatform/tethys/tethys_compute/job_manager.py/JobManager.create_job
|
4,254 |
def deserialize(self, cassette_data):
try:
deserialized_data = json.loads(cassette_data)
except __HOLE__:
deserialized_data = {}
return deserialized_data
|
ValueError
|
dataset/ETHPy150Open sigmavirus24/betamax/betamax/serializers/json_serializer.py/JSONSerializer.deserialize
|
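json.loads raises ValueError on malformed input (json.JSONDecodeError, its subclass, on Python 3), so the serializer degrades to an empty dict instead of propagating the error. Usage sketch:

    import json

    def deserialize(cassette_data):
        try:
            return json.loads(cassette_data)
        except ValueError:               # json.JSONDecodeError subclasses ValueError
            return {}

    print(deserialize('{"a": 1}'))   # {'a': 1}
    print(deserialize("not json"))   # {}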
4,255 |
def timedelta_total_seconds(delta):
try:
delta.total_seconds
except __HOLE__:
# On Python 2.6, timedelta instances do not have
# a .total_seconds() method.
total_seconds = delta.days * 24 * 60 * 60 + delta.seconds
else:
total_seconds = delta.total_seconds()
return total_seconds
|
AttributeError
|
dataset/ETHPy150Open jpadilla/pyjwt/jwt/compat.py/timedelta_total_seconds
|
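This is the EAFP feature-detection idiom: probe for the attribute, let AttributeError pick the compatibility branch, and keep the normal path in the else clause. Note that the Python 2.6 fallback above silently drops microseconds; a sketch that keeps the same try/except/else shape while preserving them:

    from datetime import timedelta

    def total_seconds(delta):
        try:
            method = delta.total_seconds   # probe only; AttributeError on Python < 2.7
        except AttributeError:
            return delta.days * 86400 + delta.seconds + delta.microseconds / 1e6
        else:
            return method()

    print(total_seconds(timedelta(days=1, seconds=30, microseconds=500000)))  # 86430.5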
4,256 |
def _user_exists(self, username):
try:
pwd.getpwnam(username)[0] # user exists
except __HOLE__:
return False
home_dir = os.path.join('/home', '{0}'.format(username))
auth_key_file = os.path.join(home_dir, '.ssh', 'authorized_keys')
if not os.path.exists(home_dir): # user home not exists
return False
if not os.path.exists(auth_key_file): # authorized_keys not exists
return False
return True
|
KeyError
|
dataset/ETHPy150Open koshinuke/koshinuke.py/tests/auth_test.py/AuthTestCase._user_exists
|
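pwd.getpwnam raises KeyError for unknown accounts, which makes a clean existence probe. The first check in isolation (POSIX only; the pwd module does not exist on Windows):

    import pwd

    def user_exists(username):
        try:
            pwd.getpwnam(username)
        except KeyError:
            return False
        return True

    print(user_exists("root"))            # True on most Unix systems
    print(user_exists("no-such-user"))    # False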
4,257 |
def _load_from_packages(self, packages):
try:
for package in packages:
for module in walk_modules(package):
self._load_module(module)
except __HOLE__ as e:
message = 'Problem loading extensions from package {}: {}'
raise LoaderError(message.format(package, e.message))
|
ImportError
|
dataset/ETHPy150Open ARM-software/workload-automation/wlauto/core/extension_loader.py/ExtensionLoader._load_from_packages
|
4,258 |
def _load_from_paths(self, paths, ignore_paths):
self.logger.debug('Loading from paths.')
for path in paths:
self.logger.debug('Checking path %s', path)
for root, _, files in os.walk(path, followlinks=True):
should_skip = False
for igpath in ignore_paths:
if root.startswith(igpath):
should_skip = True
break
if should_skip:
continue
for fname in files:
if os.path.splitext(fname)[1].lower() != '.py':
continue
filepath = os.path.join(root, fname)
try:
modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
module = imp.load_source(modname, filepath)
self._load_module(module)
except (__HOLE__, ImportError), e:
if self.keep_going:
self.logger.warn('Failed to load {}'.format(filepath))
self.logger.warn('Got: {}'.format(e))
else:
raise LoaderError('Failed to load {}'.format(filepath), sys.exc_info())
except Exception as e:
message = 'Problem loading extensions from {}: {}'
raise LoaderError(message.format(filepath, e))
|
SystemExit
|
dataset/ETHPy150Open ARM-software/workload-automation/wlauto/core/extension_loader.py/ExtensionLoader._load_from_paths
|
4,259 |
def get_axes_properties(ax):
props = {'axesbg': color_to_hex(ax.patch.get_facecolor()),
'axesbgalpha': ax.patch.get_alpha(),
'bounds': ax.get_position().bounds,
'dynamic': ax.get_navigate(),
'axison': ax.axison,
'frame_on': ax.get_frame_on(),
'axes': [get_axis_properties(ax.xaxis),
get_axis_properties(ax.yaxis)]}
for axname in ['x', 'y']:
axis = getattr(ax, axname + 'axis')
domain = getattr(ax, 'get_{0}lim'.format(axname))()
lim = domain
if isinstance(axis.converter, matplotlib.dates.DateConverter):
scale = 'date'
try:
import pandas as pd
from pandas.tseries.converter import PeriodConverter
except __HOLE__:
pd = None
if (pd is not None and isinstance(axis.converter,
PeriodConverter)):
_dates = [pd.Period(ordinal=int(d), freq=axis.freq)
for d in domain]
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second, 0)
for d in _dates]
else:
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second,
d.microsecond * 1E-3)
for d in matplotlib.dates.num2date(domain)]
else:
scale = axis.get_scale()
if scale not in ['date', 'linear', 'log']:
raise ValueError("Unknown axis scale: "
"{0}".format(axis[axname].get_scale()))
props[axname + 'scale'] = scale
props[axname + 'lim'] = lim
props[axname + 'domain'] = domain
return props
|
ImportError
|
dataset/ETHPy150Open plotly/plotly.py/plotly/matplotlylib/mplexporter/utils.py/get_axes_properties
|
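The pandas import above is the optional-dependency idiom: import inside the function, bind the name to None on ImportError, and gate later code on that sentinel so the feature degrades instead of failing. The minimal shape (describe_frame is an illustrative function, not part of mplexporter):

    def describe_frame(obj):
        try:
            import pandas as pd          # optional; may not be installed
        except ImportError:
            pd = None
        if pd is not None and isinstance(obj, pd.DataFrame):
            return "DataFrame %sx%s" % obj.shape
        return type(obj).__name__        # graceful degradation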
4,260 |
def trigsimp(expr, **opts):
"""
reduces expression by using known trig identities
Notes
=====
method:
- Determine the method to use. Valid choices are 'matching' (default),
'groebner', 'combined', and 'fu'. If 'matching', simplify the
expression recursively by targeting common patterns. If 'groebner', apply
an experimental groebner basis algorithm. In this case further options
are forwarded to ``trigsimp_groebner``, please refer to its docstring.
If 'combined', first run the groebner basis algorithm with small
default parameters, then run the 'matching' algorithm. 'fu' runs the
collection of trigonometric transformations described by Fu, et al.
(see the `fu` docstring).
Examples
========
>>> from sympy import trigsimp, sin, cos, log
>>> from sympy.abc import x, y
>>> e = 2*sin(x)**2 + 2*cos(x)**2
>>> trigsimp(e)
2
Simplification occurs wherever trigonometric functions are located.
>>> trigsimp(log(e))
log(2)
Using `method="groebner"` (or `"combined"`) might lead to greater
simplification.
The old trigsimp routine can be accessed as with method 'old'.
>>> from sympy import coth, tanh
>>> t = 3*tanh(x)**7 - 2/coth(x)**7
>>> trigsimp(t, method='old') == t
True
>>> trigsimp(t)
tanh(x)**7
"""
from sympy.simplify.fu import fu
expr = sympify(expr)
try:
return expr._eval_trigsimp(**opts)
except __HOLE__:
pass
old = opts.pop('old', False)
if not old:
opts.pop('deep', None)
recursive = opts.pop('recursive', None)
method = opts.pop('method', 'matching')
else:
method = 'old'
def groebnersimp(ex, **opts):
def traverse(e):
if e.is_Atom:
return e
args = [traverse(x) for x in e.args]
if e.is_Function or e.is_Pow:
args = [trigsimp_groebner(x, **opts) for x in args]
return e.func(*args)
new = traverse(ex)
if not isinstance(new, Expr):
return new
return trigsimp_groebner(new, **opts)
trigsimpfunc = {
'fu': (lambda x: fu(x, **opts)),
'matching': (lambda x: futrig(x)),
'groebner': (lambda x: groebnersimp(x, **opts)),
'combined': (lambda x: futrig(groebnersimp(x,
polynomial=True, hints=[2, tan]))),
'old': lambda x: trigsimp_old(x, **opts),
}[method]
return trigsimpfunc(expr)
|
AttributeError
|
dataset/ETHPy150Open sympy/sympy/sympy/simplify/trigsimp.py/trigsimp
|
4,261 |
@cacheit
def __trigsimp(expr, deep=False):
"""recursive helper for trigsimp"""
from sympy.simplify.fu import TR10i
if _trigpat is None:
_trigpats()
a, b, c, d, matchers_division, matchers_add, \
matchers_identity, artifacts = _trigpat
if expr.is_Mul:
# do some simplifications like sin/cos -> tan:
if not expr.is_commutative:
com, nc = expr.args_cnc()
expr = _trigsimp(Mul._from_args(com), deep)*Mul._from_args(nc)
else:
for i, (pattern, simp, ok1, ok2) in enumerate(matchers_division):
if not _dotrig(expr, pattern):
continue
newexpr = _match_div_rewrite(expr, i)
if newexpr is not None:
if newexpr != expr:
expr = newexpr
break
else:
continue
# use SymPy matching instead
res = expr.match(pattern)
if res and res.get(c, 0):
if not res[c].is_integer:
ok = ok1.subs(res)
if not ok.is_positive:
continue
ok = ok2.subs(res)
if not ok.is_positive:
continue
# if "a" contains any of trig or hyperbolic funcs with
# argument "b" then skip the simplification
if any(w.args[0] == res[b] for w in res[a].atoms(
TrigonometricFunction, HyperbolicFunction)):
continue
# simplify and finish:
expr = simp.subs(res)
break # process below
if expr.is_Add:
args = []
for term in expr.args:
if not term.is_commutative:
com, nc = term.args_cnc()
nc = Mul._from_args(nc)
term = Mul._from_args(com)
else:
nc = S.One
term = _trigsimp(term, deep)
for pattern, result in matchers_identity:
res = term.match(pattern)
if res is not None:
term = result.subs(res)
break
args.append(term*nc)
if args != expr.args:
expr = Add(*args)
expr = min(expr, expand(expr), key=count_ops)
if expr.is_Add:
for pattern, result in matchers_add:
if not _dotrig(expr, pattern):
continue
expr = TR10i(expr)
if expr.has(HyperbolicFunction):
res = expr.match(pattern)
# if "d" contains any trig or hyperbolic funcs with
# argument "a" or "b" then skip the simplification;
# this isn't perfect -- see tests
if res is None or not (a in res and b in res) or any(
w.args[0] in (res[a], res[b]) for w in res[d].atoms(
TrigonometricFunction, HyperbolicFunction)):
continue
expr = result.subs(res)
break
# Reduce any lingering artifacts, such as sin(x)**2 changing
# to 1 - cos(x)**2 when sin(x)**2 was "simpler"
for pattern, result, ex in artifacts:
if not _dotrig(expr, pattern):
continue
# Substitute a new wild that excludes some function(s)
# to help influence a better match. This is because
# sometimes, for example, 'a' would match sec(x)**2
a_t = Wild('a', exclude=[ex])
pattern = pattern.subs(a, a_t)
result = result.subs(a, a_t)
m = expr.match(pattern)
was = None
while m and was != expr:
was = expr
if m[a_t] == 0 or \
-m[a_t] in m[c].args or m[a_t] + m[c] == 0:
break
if d in m and m[a_t]*m[d] + m[c] == 0:
break
expr = result.subs(m)
m = expr.match(pattern)
m.setdefault(c, S.Zero)
elif expr.is_Mul or expr.is_Pow or deep and expr.args:
expr = expr.func(*[_trigsimp(a, deep) for a in expr.args])
try:
if not expr.has(*_trigs):
raise TypeError
e = expr.atoms(exp)
new = expr.rewrite(exp, deep=deep)
if new == e:
raise TypeError
fnew = factor(new)
if fnew != new:
new = sorted([new, factor(new)], key=count_ops)[0]
# if all exp that were introduced disappeared then accept it
if not (new.atoms(exp) - e):
expr = new
except __HOLE__:
pass
return expr
#------------------- end of old trigsimp routines --------------------
|
TypeError
|
dataset/ETHPy150Open sympy/sympy/sympy/simplify/trigsimp.py/__trigsimp
|
4,262 |
@classmethod
def _parse_split(cls, repo, splitmap):
name = splitmap.pop('name')
patterns = splitmap.pop('paths')
try:
return Split(repo, name, patterns)
except __HOLE__:
raise ConfigError("Problem creating split: %s\n%s\n\n%s", name, splitmap,
traceback.format_exc())
|
KeyError
|
dataset/ETHPy150Open jsirois/sapling/saplib/config.py/Config._parse_split
|
4,263 |
def get_category_lists(init_kwargs=None, additional_parents_aliases=None, obj=None):
"""Returns a list of CategoryList objects, optionally associated with
a given model instance.
:param dict|None init_kwargs:
:param list|None additional_parents_aliases:
:param Model|None obj: Model instance to get categories for
:rtype: list
:return:
"""
init_kwargs = init_kwargs or {}
additional_parents_aliases = additional_parents_aliases or []
parent_aliases = additional_parents_aliases
if obj is not None:
ctype = ContentType.objects.get_for_model(obj)
cat_ids = [
item[0] for item in
get_tie_model().objects.filter(content_type=ctype, object_id=obj.id).values_list('category_id').all()
]
parent_aliases = list(get_cache().get_parents_for(cat_ids).union(additional_parents_aliases))
lists = []
aliases = get_cache().sort_aliases(parent_aliases)
categories_cache = get_cache().get_categories(aliases, obj)
for parent_alias in aliases:
catlist = CategoryList(parent_alias, **init_kwargs) # TODO Burned in class name. Make more customizable.
if obj is not None:
catlist.set_obj(obj)
# Optimization. To get DB hits down.
cache = []
try:
cache = categories_cache[parent_alias]
except __HOLE__:
pass
catlist.set_get_categories_cache(cache)
lists.append(catlist)
return lists
|
KeyError
|
dataset/ETHPy150Open idlesign/django-sitecats/sitecats/toolbox.py/get_category_lists
|
4,264 |
def backend_meta_data(self, backend_obj):
"""Query's the database for the object's current values for:
- created_on
- created_by
- modified_on
- modified_by
- deleted_on
- deleted_by
Returns a dictionary of these keys and their values.
Used to verify that the CRUD methods are updating these
values.
"""
actual = {}
try:
actual["created_by"] = backend_obj.created_by.username
except __HOLE__:
actual["created_by"] = None
try:
actual["modified_by"] = backend_obj.modified_by.username
except AttributeError:
actual["modified_by"] = None
try:
actual["deleted_by"] = backend_obj.deleted_by.username
except AttributeError:
actual["deleted_by"] = None
actual["created_on"] = backend_obj.created_on
actual["modified_on"] = backend_obj.modified_on
actual["deleted_on"] = backend_obj.deleted_on
return actual
|
AttributeError
|
dataset/ETHPy150Open mozilla/moztrap/tests/case/api/crud.py/ApiCrudCases.backend_meta_data
|
4,265 |
def do_img(self, attrs):
align = ''
alt = '(image)'
ismap = ''
src = ''
width = 0
height = 0
for attrname, value in attrs:
if attrname == 'align':
align = value
if attrname == 'alt':
alt = value
if attrname == 'ismap':
ismap = value
if attrname == 'src':
src = value
if attrname == 'width':
try: width = int(value)
except ValueError: pass
if attrname == 'height':
try: height = int(value)
except __HOLE__: pass
self.handle_image(src, alt, ismap, align, width, height)
# --- Really Old Unofficial Deprecated Stuff
|
ValueError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/htmllib.py/HTMLParser.do_img
|
4,266 |
def test(args = None):
import sys, formatter
if not args:
args = sys.argv[1:]
silent = args and args[0] == '-s'
if silent:
del args[0]
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except __HOLE__, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
if silent:
f = formatter.NullFormatter()
else:
f = formatter.AbstractFormatter(formatter.DumbWriter())
p = HTMLParser(f)
p.feed(data)
p.close()
|
IOError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/htmllib.py/test
|
4,267 |
def briefstr(x):
try:
return getattr(x, 'brief')
except __HOLE__:
if isinstance(x, tuple):
return '(%s)'%(','.join([briefstr(xi) for xi in x]))
return str(x)
|
AttributeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Spec.py/briefstr
|
4,268 |
def func_argnames(self, f, args):
try:
code = f.func_code
return self.getargnames(code) == args
except __HOLE__:
return False
|
AttributeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Spec.py/ArgNamesFamily.func_argnames
|
4,269 |
def meth_argnames(self, m, args):
try:
f = m.im_func
code = f.func_code
return self.getargnames(code)[1:] == args
except __HOLE__:
return False
|
AttributeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Spec.py/ArgNamesFamily.meth_argnames
|
4,270 |
def c_test_contains(self, a, b, env):
try:
func = a.func
except __HOLE__:
expr = a.arg
func = a.func = env.eval('lambda self:%s'%expr)
s = func(self.specmod.Nothing)
try:
tf = env.test_contains(s, b, 'recur with Nothing, ok to fail')
if not tf:
raise TestError
except : # TestError: eg for match, we got a TypeError..
s = func(a)
rl = a.recursion_level
try:
if rl >= a.recursion_limit:
return env.failed('recurself: recursion_level = %s'%a.recursion_limit)
else:
a.recursion_level = rl + 1
tf = env.test_contains(s, b, 'recur')
finally:
a.recursion_level = rl
return tf
|
AttributeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Spec.py/RecurSelfFamily.c_test_contains
|
4,271 |
def __init__(self, mod, Spec):
self.mod = mod
self.messages = []
self.examples = {}
if Spec is not None:
self.spec = spec = Spec()
try:
lex = spec.LocalEnvExpr
except __HOLE__:
lex = ''
LE = LocalEnv(mod, lex)
LE._OBJ_ = mod
self.LE = LE
self.topspec = self.eval(spec.GlueTypeExpr)
|
AttributeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Spec.py/TestEnv.__init__
|
4,272 |
def eval(self, expr):
mod = self.mod
types = mod._root.types
if isinstance(expr, types.StringTypes):
func = self.mod.eval('lambda LE:(\n%s\n)'%expr)
return func(self.LE)
ls = []
selfset = None
#print 1
names = expr.__dict__.keys()
names.sort()
for name in names:
f = getattr(expr, name)
try:
co = f.func_code
except __HOLE__:
continue
if co.co_varnames[:co.co_argcount] == ('IN',):
d = mod._load_names(mod._root.guppy.etc.Code.co_findloadednames(co))
#d = mod._load_names()
nf = mod._root.new.function(
f.func_code,
d,
f.func_name,
f.func_defaults,
f.func_closure)
s = nf(())
if name == '_SELF_':
selfset = s
else:
ls.append(mod.attr(name, s))
else:
raise SpecError, 'TestEnv.eval: invalid argument mode'
# Constructing an AND in one sweep = faster
# We assume they are not disjoint - which
# would be determined by testing that we are going to do
# (We know they are all attr's of different names here)
# Except that selfset may perhaps be disjoint; but why care here
#
if selfset is not None:
ls.append(selfset)
# Alternatively: r = r & selfset afterwards,
# but could be unnecessarily slow
#print 2
r = mod.UniSet.fam_And._cons(ls)
#print 3
return r
|
AttributeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Spec.py/TestEnv.eval
|
4,273 |
def get_examples(self, collection):
try:
it = iter(collection)
except __HOLE__:
try:
ex = self.examples[collection]
except KeyError:
if isinstance(collection, self.mod.UniSet.UniSet):
ex = collection.get_examples(self)
else:
ex = list(collection)
it = iter(ex)
return it
|
TypeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Spec.py/TestEnv.get_examples
|
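iter(x) raises TypeError for non-iterables, so the try/except doubles as an "is this a collection?" test before falling back to the example registry. The probe on its own:

    def as_iterator(value, fallback=()):
        """Iterate value directly if possible, else iterate a fallback."""
        try:
            return iter(value)           # TypeError if value is not iterable
        except TypeError:
            return iter(fallback)

    print(list(as_iterator([1, 2, 3])))          # [1, 2, 3]
    print(list(as_iterator(42, fallback=[42])))  # [42]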
4,274 |
@staticmethod
def _is_json(data):
try:
json.loads(data)
except (__HOLE__, ValueError):
return False
return True
|
TypeError
|
dataset/ETHPy150Open bulkan/robotframework-requests/src/RequestsLibrary/RequestsKeywords.py/RequestsKeywords._is_json
|
4,275 |
def invoke(self, cli, args=None, input=None, env=None,
catch_exceptions=True, color=False, **extra):
"""Invokes a command in an isolated environment. The arguments are
forwarded directly to the command line script, the `extra` keyword
arguments are passed to the :meth:`~clickpkg.Command.main` function of
the command.
This returns a :class:`Result` object.
.. versionadded:: 3.0
The ``catch_exceptions`` parameter was added.
.. versionchanged:: 3.0
The result object now has an `exc_info` attribute with the
traceback if available.
.. versionadded:: 4.0
The ``color`` parameter was added.
:param cli: the command to invoke
:param args: the arguments to invoke
:param input: the input data for `sys.stdin`.
:param env: the environment overrides.
:param catch_exceptions: Whether to catch any other exceptions than
``SystemExit``.
:param extra: the keyword arguments to pass to :meth:`main`.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
"""
exc_info = None
with self.isolation(input=input, env=env, color=color) as out:
exception = None
exit_code = 0
try:
cli.main(args=args or (),
prog_name=self.get_default_prog_name(cli), **extra)
except __HOLE__ as e:
if e.code != 0:
exception = e
exc_info = sys.exc_info()
exit_code = e.code
if not isinstance(exit_code, int):
sys.stdout.write(str(exit_code))
sys.stdout.write('\n')
exit_code = 1
except Exception as e:
if not catch_exceptions:
raise
exception = e
exit_code = -1
exc_info = sys.exc_info()
finally:
sys.stdout.flush()
output = out.getvalue()
return Result(runner=self,
output_bytes=output,
exit_code=exit_code,
exception=exception,
exc_info=exc_info)
|
SystemExit
|
dataset/ETHPy150Open pallets/click/click/testing.py/CliRunner.invoke
|
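click's runner must catch SystemExit because a CLI's main() typically ends by calling sys.exit(); the runner records the exit code so the test process itself survives. The core of that trick, independent of click (demo_main is illustrative):

    import sys

    def capture_exit_code(main, argv):
        """Run a sys.exit()-style main() and return its exit code."""
        try:
            main(argv)
        except SystemExit as e:
            code = e.code
            if code is None:                 # bare sys.exit()
                return 0
            if not isinstance(code, int):    # sys.exit("message")
                print(code, file=sys.stderr)
                return 1
            return code
        return 0

    def demo_main(argv):
        sys.exit(2 if "--fail" in argv else 0)

    print(capture_exit_code(demo_main, ["--fail"]))   # 2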
4,276 |
@contextlib.contextmanager
def isolated_filesystem(self):
"""A context manager that creates a temporary folder and changes
the current working directory to it for isolated filesystem tests.
"""
cwd = os.getcwd()
t = tempfile.mkdtemp()
os.chdir(t)
try:
yield t
finally:
os.chdir(cwd)
try:
shutil.rmtree(t)
except (__HOLE__, IOError):
pass
|
OSError
|
dataset/ETHPy150Open pallets/click/click/testing.py/CliRunner.isolated_filesystem
|
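Ignoring (OSError, IOError) on the rmtree is deliberate: best-effort cleanup of a temp tree must never mask the result of the test that ran inside it. shutil itself can do the ignore-errors cleanup in one line; a sketch of the whole context manager with that simplification:

    import contextlib
    import os
    import shutil
    import tempfile

    @contextlib.contextmanager
    def isolated_filesystem():
        cwd = os.getcwd()
        t = tempfile.mkdtemp()
        os.chdir(t)
        try:
            yield t
        finally:
            os.chdir(cwd)                        # restore first, then best-effort delete
            shutil.rmtree(t, ignore_errors=True)

    with isolated_filesystem() as tmp:
        open("scratch.txt", "w").close()         # created inside tmp
    print(os.path.exists(tmp))                   # False: the tree was removed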
4,277 |
def disable(self):
self._enabled = False
try:
self._client.unschedule(self)
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open dpkp/kafka-python/kafka/coordinator/consumer.py/AutoCommitTask.disable
|
4,278 |
def readAndGroupTable(infile, options):
"""read table from infile and group.
"""
fields, table = CSV.readTable(
infile, with_header=options.has_headers, as_rows=True)
options.columns = getColumns(fields, options.columns)
assert options.group_column not in options.columns
converter = float
new_fields = [fields[options.group_column]] + [fields[x]
for x in options.columns]
if options.group_function == "min":
f = min
elif options.group_function == "max":
f = max
elif options.group_function == "sum":
f = lambda z: reduce(lambda x, y: x + y, z)
elif options.group_function == "mean":
f = scipy.mean
elif options.group_function == "cat":
f = lambda x: ";".join([y for y in x if y != ""])
converter = str
elif options.group_function == "uniq":
f = lambda x: ";".join([y for y in set(x) if y != ""])
converter = str
elif options.group_function == "stats":
f = lambda x: str(Stats.DistributionalParameters(x))
# update headers
new_fields = [fields[options.group_column]]
for c in options.columns:
new_fields += list(map(lambda x: "%s_%s" %
(fields[c], x), Stats.DistributionalParameters().getHeaders()))
# convert values to floats (except for group_column)
# Delete rows with unconvertable values and not in options.columns
new_table = []
for row in table:
skip = False
new_row = [row[options.group_column]]
for c in options.columns:
if row[c] == options.missing_value:
new_row.append(row[c])
else:
try:
new_row.append(converter(row[c]))
except __HOLE__:
skip = True
break
if not skip:
new_table.append(new_row)
table = new_table
new_rows = CSV.groupTable(table,
group_column=0,
group_function=f)
options.stdout.write("\t".join(new_fields) + "\n")
for row in new_rows:
options.stdout.write("\t".join(map(str, row)) + "\n")
|
ValueError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/table2table.py/readAndGroupTable
|
4,279 |
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option(
"-m", "--method", dest="methods", type="choice", action="append",
choices=("transpose", "normalize-by-max", "normalize-by-value",
"multiply-by-value",
"percentile", "remove-header", "normalize-by-table",
"upper-bound", "lower-bound", "kullback-leibler",
"expand", "compress", "fdr", "grep"),
help="""actions to perform on table.""")
parser.add_option("-s", "--scale", dest="scale", type="float",
help="factor to scale matrix by.")
parser.add_option("-f", "--format", dest="format", type="string",
help="output number format [default]")
parser.add_option("-p", "--parameters", dest="parameters", type="string",
help="Parameters for various functions.")
parser.add_option(
"-t", "--header-names", dest="has_headers", action="store_true",
help="matrix has row/column headers.")
parser.add_option("--transpose", dest="transpose", action="store_true",
help="transpose table.")
parser.add_option(
"--set-transpose-field", dest="set_transpose_field", type="string",
help="set first field (row 1 and col 1) to this value [%default].")
parser.add_option(
"--transpose-format", dest="transpose_format", type="choice",
choices=("default", "separated", ),
help="input format of un-transposed table")
parser.add_option(
"--expand", dest="expand_table", action="store_true",
help="expand table - multi-value cells with be expanded over "
"several rows.")
parser.add_option("--no-headers", dest="has_headers", action="store_false",
help="matrix has no row/column headers.")
parser.add_option("--columns", dest="columns", type="string",
help="columns to use.")
parser.add_option("--file", dest="file", type="string",
help="columns to test from table.",
metavar="FILE")
parser.add_option("-d", "--delimiter", dest="delimiter", type="string",
help="delimiter of columns.",
metavar="DELIM")
parser.add_option(
"-V", "--invert-match", dest="invert_match",
action="store_true",
help="invert match.")
parser.add_option("--sort-by-rows", dest="sort_rows", type="string",
help="output order for rows.")
parser.add_option("-a", "--value", dest="value", type="float",
help="value to use for various algorithms.")
parser.add_option(
"--group", dest="group_column", type="int",
help="group values by column. Supply an integer column "
"[default=%default]")
parser.add_option("--group-function", dest="group_function", type="choice",
choices=(
"min", "max", "sum", "mean", "stats", "cat", "uniq"),
help="function to group values by.")
parser.add_option("--join-table", dest="join_column", type="int",
help="join rows in a table by columns.")
parser.add_option(
"--collapse-table", dest="collapse_table", type="string",
help="collapse a table. Value determines the missing variable "
"[%default].")
parser.add_option(
"--join-column-name", dest="join_column_name", type="int",
help="use this column as a prefix.")
parser.add_option(
"--flatten-table", dest="flatten_table", action="store_true",
help="flatten a table [%default].")
parser.add_option("--as-column", dest="as_column", action="store_true",
help="output table as a single column.")
parser.add_option(
"--split-fields", dest="split_fields", action="store_true",
help="split fields.")
parser.add_option(
"--separator", dest="separator", type="string",
help="separator for multi-valued fields [default=%default].")
parser.add_option(
"--fdr-method", dest="fdr_method", type="choice",
choices=(
"BH", "bonferroni", "holm", "hommel", "hochberg", "BY"),
help="method to perform multiple testing correction by controlling "
"the fdr [default=%default].")
parser.add_option(
"--fdr-add-column", dest="fdr_add_column", type="string",
help="add new column instead of replacing existing columns. "
"The value of the option will be used as prefix if there are "
"multiple columns [%default]")
# IMS: add option to use a column as the row id in flatten
parser.add_option(
"--id-column", dest="id_column", type="string",
help="list of column(s) to use as the row id when flattening "
"the table. If None, then row number is used. [default=%default].")
parser.add_option(
"--variable-name", dest="variable_name", type="string",
help="the column header for the 'variable' column when flattening "
"[default=%default].")
parser.add_option(
"--value-name", dest="value_name", type="string",
help="the column header for the 'value' column when flattening "
"[default=%default].")
parser.set_defaults(
methods=[],
scale=1.0,
has_headers=True,
format=None,
value=0.0,
parameters="",
columns="all",
transpose=False,
set_transpose_field=None,
transpose_format="default",
group=False,
group_column=0,
group_function="mean",
missing_value="na",
sort_rows=None,
flatten_table=False,
collapse_table=None,
separator=";",
expand=False,
join_column=None,
join_column_name=None,
compute_fdr=None,
as_column=False,
fdr_method="BH",
fdr_add_column=None,
id_column=None,
variable_name="column",
value_name="value",
file=None,
delimiter="\t",
invert_match=False,
)
(options, args) = E.Start(parser, add_pipe_options=True)
options.parameters = options.parameters.split(",")
if options.group_column:
options.group = True
options.group_column -= 1
######################################################################
######################################################################
######################################################################
# if only to remove header, do this quickly
if options.methods == ["remove-header"]:
first = True
for line in options.stdin:
if line[0] == "#":
continue
if first:
first = False
continue
options.stdout.write(line)
elif options.transpose or "transpose" in options.methods:
readAndTransposeTable(options.stdin, options)
elif options.flatten_table:
# IMS: bug fixed to make work. Also added options for keying
# on a particular and adding custom column headings
fields, table = CSV.readTable(
options.stdin, with_header=options.has_headers, as_rows=True)
options.columns = getColumns(fields, options.columns)
if options.id_column:
id_columns = map(
lambda x: int(x) - 1, options.id_column.split(","))
id_header = "\t".join([fields[id_column]
for id_column in id_columns])
options.columns = [
x for x in options.columns if x not in id_columns]
else:
id_header = "row"
options.stdout.write(
"%s\t%s\t%s\n" % (id_header, options.variable_name,
options.value_name))
for x, row in enumerate(table):
if options.id_column:
row_id = "\t".join([row[int(x) - 1]
for x in options.id_column.split(",")])
else:
row_id = str(x)
for y in options.columns:
options.stdout.write(
"%s\t%s\t%s\n" % (row_id, fields[y], row[y]))
elif options.as_column:
fields, table = CSV.readTable(
options.stdin, with_header=options.has_headers, as_rows=True)
options.columns = getColumns(fields, options.columns)
table = zip(*table)
options.stdout.write("value\n")
for column in options.columns:
options.stdout.write("\n".join(table[column]) + "\n")
elif options.split_fields:
# split comma separated fields
fields, table = CSV.readTable(options.stdin,
with_header=options.has_headers,
as_rows=True)
options.stdout.write("%s\n" % ("\t".join(fields)))
for row in table:
row = [x.split(options.separator) for x in row]
for d in itertools.product(*row):
options.stdout.write("%s\n" % "\t".join(d))
elif options.group:
readAndGroupTable(options.stdin, options)
elif options.join_column:
readAndJoinTable(options.stdin, options)
elif options.expand_table:
readAndExpandTable(options.stdin, options)
elif options.collapse_table is not None:
readAndCollapseTable(options.stdin, options, options.collapse_table)
elif "grep" in options.methods:
options.columns = map(lambda x: int(x) - 1, options.columns.split(","))
patterns = []
if options.file:
infile = open(options.file, "r")
for line in infile:
if line[0] == "#":
continue
patterns.append(line[:-1].split(options.delimiter)[0])
else:
patterns = args
for line in options.stdin:
data = line[:-1].split(options.delimiter)
found = False
for c in options.columns:
if data[c] in patterns:
found = True
break
if (not found and options.invert_match) or (found and not options.invert_match):
print line[:-1]
else:
######################################################################
######################################################################
######################################################################
# Apply remainder of transformations
fields, table = CSV.readTable(
options.stdin, with_header=options.has_headers, as_rows=False)
# convert columns to list
table = [list(x) for x in table]
ncols = len(fields)
if len(table) == 0:
raise ValueError("table is empty")
nrows = len(table[0])
E.info("processing table with %i rows and %i columns" % (nrows, ncols))
options.columns = getColumns(fields, options.columns)
# convert all values to float
for c in options.columns:
for r in range(nrows):
try:
table[c][r] = float(table[c][r])
except __HOLE__:
continue
for method in options.methods:
if method == "normalize-by-value":
value = float(options.parameters[0])
del options.parameters[0]
for c in options.columns:
table[c] = map(lambda x: x / value, table[c])
elif method == "multiply-by-value":
value = float(options.parameters[0])
del options.parameters[0]
for c in options.columns:
table[c] = map(lambda x: x * value, table[c])
elif method == "normalize-by-max":
for c in options.columns:
m = max(table[c])
table[c] = map(lambda x: x / m, table[c])
elif method == "kullback-leibler":
options.stdout.write("category1\tcategory2\tkl1\tkl2\tmean\n")
format = options.format
if format is None:
format = "%f"
for x in range(0, len(options.columns) - 1):
for y in range(x + 1, len(options.columns)):
c1 = options.columns[x]
c2 = options.columns[y]
e1 = 0
e2 = 0
for z in range(nrows):
p = table[c1][z]
q = table[c2][z]
e1 += p * math.log(p / q)
e2 += q * math.log(q / p)
options.stdout.write("%s\t%s\t%s\t%s\t%s\n" % (
fields[c1], fields[c2],
format % e1,
format % e2,
format % ((e1 + e2) / 2)))
E.Stop()
sys.exit(0)
elif method == "rank":
for c in options.columns:
tt = table[c]
t = zip(tt, range(nrows))
t.sort()
for i, n in zip(map(lambda x: x[1], t), range(nrows)):
tt[i] = n
elif method in ("lower-bound", "upper-bound"):
boundary = float(options.parameters[0])
del options.parameters[0]
new_value = float(options.parameters[0])
del options.parameters[0]
if method == "upper-bound":
for c in options.columns:
for r in range(nrows):
if isinstance(table[c][r], float) and \
table[c][r] > boundary:
table[c][r] = new_value
else:
for c in options.columns:
for r in range(nrows):
if isinstance(table[c][r], float) and \
table[c][r] < boundary:
table[c][r] = new_value
elif method == "fdr":
pvalues = []
for c in options.columns:
pvalues.extend(table[c])
assert max(pvalues) <= 1.0, "pvalues > 1 in table: max=%s" % \
str(max(pvalues))
assert min(pvalues) >= 0, "pvalue < 0 in table: min=%s" % \
str(min(pvalues))
# convert to str to avoid test for float downstream
qvalues = map(
str, Stats.adjustPValues(pvalues,
method=options.fdr_method))
if options.fdr_add_column is None:
x = 0
for c in options.columns:
table[c] = qvalues[x:x + nrows]
x += nrows
else:
# add new column headers
if len(options.columns) == 1:
fields.append(options.fdr_add_column)
else:
for co in options.columns:
                            fields.append(options.fdr_add_column + fields[co])
x = 0
for c in options.columns:
# add a new column
table.append(qvalues[x:x + nrows])
x += nrows
ncols += len(options.columns)
elif method == "normalize-by-table":
other_table_name = options.parameters[0]
del options.parameters[0]
other_fields, other_table = CSV.readTable(
open(other_table_name, "r"),
with_header=options.has_headers,
as_rows=False)
# convert all values to float
for c in options.columns:
for r in range(nrows):
try:
other_table[c][r] = float(other_table[c][r])
except ValueError:
continue
# set 0s to 1 in the other matrix
for c in options.columns:
for r in range(nrows):
if isinstance(table[c][r], float) and \
isinstance(other_table[c][r], float) and \
other_table[c][r] != 0:
table[c][r] /= other_table[c][r]
else:
table[c][r] = options.missing_value
# convert back
if options.format is not None:
for c in options.columns:
for r in range(nrows):
if isinstance(table[c][r], float):
table[c][r] = format % table[c][r]
options.stdout.write("\t".join(fields) + "\n")
if options.sort_rows:
old2new = {}
for r in range(nrows):
old2new[table[0][r]] = r
for x in options.sort_rows.split(","):
if x not in old2new:
continue
r = old2new[x]
options.stdout.write(
"\t".join(map(str,
[table[c][r] for c in range(ncols)])) + "\n")
else:
for r in range(nrows):
options.stdout.write(
"\t".join(map(str,
[table[c][r] for c in range(ncols)])) + "\n")
E.Stop()
|
ValueError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/table2table.py/main
|
4,280 |
def request_instance(vm_):
'''
Request a single GCE instance from a data dict.
'''
if not GCE_VM_NAME_REGEX.match(vm_['name']):
raise SaltCloudSystemExit(
'VM names must start with a letter, only contain letters, numbers, or dashes '
'and cannot end in a dash.'
)
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'gce',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
conn = get_conn()
kwargs = {
'name': vm_['name'],
'size': __get_size(conn, vm_),
'image': __get_image(conn, vm_),
'location': __get_location(conn, vm_),
'ex_network': __get_network(conn, vm_),
'ex_tags': __get_tags(vm_),
'ex_metadata': __get_metadata(vm_),
}
external_ip = config.get_cloud_config_value(
'external_ip', vm_, __opts__, default='ephemeral'
)
if external_ip.lower() == 'ephemeral':
external_ip = 'ephemeral'
elif external_ip == 'None':
external_ip = None
else:
region = '-'.join(kwargs['location'].name.split('-')[:2])
external_ip = __create_orget_address(conn, external_ip, region)
kwargs['external_ip'] = external_ip
vm_['external_ip'] = external_ip
if LIBCLOUD_VERSION_INFO > (0, 15, 1):
kwargs.update({
'ex_disk_type': config.get_cloud_config_value(
'ex_disk_type', vm_, __opts__, default='pd-standard'),
'ex_disk_auto_delete': config.get_cloud_config_value(
'ex_disk_auto_delete', vm_, __opts__, default=True),
'ex_disks_gce_struct': config.get_cloud_config_value(
'ex_disks_gce_struct', vm_, __opts__, default=None),
'ex_service_accounts': config.get_cloud_config_value(
'ex_service_accounts', vm_, __opts__, default=None),
'ex_can_ip_forward': config.get_cloud_config_value(
'ip_forwarding', vm_, __opts__, default=False
)
})
if kwargs.get('ex_disk_type') not in ('pd-standard', 'pd-ssd'):
raise SaltCloudSystemExit(
'The value of \'ex_disk_type\' needs to be one of: '
'\'pd-standard\', \'pd-ssd\''
)
log.info('Creating GCE instance {0} in {1}'.format(vm_['name'],
kwargs['location'].name)
)
log.debug('Create instance kwargs {0}'.format(str(kwargs)))
salt.utils.cloud.fire_event(
'event',
'create instance',
'salt/cloud/{0}/creating'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['driver'],
},
transport=__opts__['transport']
)
try:
node_data = conn.create_node(**kwargs)
except Exception as exc: # pylint: disable=W0703
log.error(
'Error creating {0} on GCE\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], exc
),
exc_info_on_loglevel=logging.DEBUG
)
return False
try:
node_dict = show_instance(node_data['name'], 'action')
except __HOLE__:
# node_data is a libcloud Node which is unsubscriptable
node_dict = show_instance(node_data.name, 'action')
return node_dict, node_data
|
TypeError
|
dataset/ETHPy150Open saltstack/salt/salt/cloud/clouds/gce.py/request_instance
|
4,281 |
def get_file_timestamp(fn):
"""
Returns timestamp of the given file
"""
from frappe.utils import cint
try:
return str(cint(os.stat(fn).st_mtime))
except __HOLE__, e:
if e.args[0]!=2:
raise
else:
return None
# to be deprecated
|
OSError
|
dataset/ETHPy150Open frappe/frappe/frappe/utils/__init__.py/get_file_timestamp
|
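The e.args[0] != 2 test is an errno check: 2 is ENOENT, so a missing file yields None while any other OS failure still propagates. Spelling the magic number via the errno module makes the intent explicit (sketch):

    import errno
    import os

    def get_file_timestamp(fn):
        try:
            return str(int(os.stat(fn).st_mtime))
        except OSError as e:
            if e.errno != errno.ENOENT:   # only swallow "no such file or directory"
                raise
            return None

    print(get_file_timestamp("/no/such/file"))   # None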
4,282 |
def watch(path, handler=None, debug=True):
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Handler(FileSystemEventHandler):
def on_any_event(self, event):
if debug:
print "File {0}: {1}".format(event.event_type, event.src_path)
if not handler:
print "No handler specified"
return
handler(event.src_path, event.event_type)
event_handler = Handler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except __HOLE__:
observer.stop()
observer.join()
|
KeyboardInterrupt
|
dataset/ETHPy150Open frappe/frappe/frappe/utils/__init__.py/watch
|
4,283 |
def is_json(text):
try:
json.loads(text)
except __HOLE__:
return False
else:
return True
|
ValueError
|
dataset/ETHPy150Open frappe/frappe/frappe/utils/__init__.py/is_json
|
4,284 |
def env():
""" Returns a simple to use dictionary of common operating system details"""
p = {}
p['platform'] = platform.platform() or None
p['python_version'] = platform.python_version() or None
try:
p['python_major_version'] = p['python_version'][0]
except TypeError:
p['python_major_version'] = None
p['python_version_feature_branch'] = '.'.join(platform.python_version().split('.')[0:2]) or None
p['python_installed_packages'] = ["%s==%s" % (pkg.key, pkg.version) for pkg in pip.get_installed_distributions()]
p['homedir'] = os.path.join(os.path.expanduser('~'))
p['current_user_desktop'] = os.path.join(p['homedir'], 'Desktop') or None
p['tmp_dir'] = tempfile.gettempdir()
# Might fail on Windows. Open a PR or issue on github if this is important to you.
# This should probably get cleaned up by separating the getgrpid from getpwuid, but AFAIK, it's pass/fail
try:
pwuid = pwd.getpwuid(os.getuid())
p['current_uid'] = pwuid[2]
p['current_gid'] = pwuid[3]
p['current_user'] = pwuid[0]
p['current_user_group'] = grp.getgrgid(pwd.getpwnam(p['current_user']).pw_gid).gr_name
except __HOLE__:
try:
p['current_user'] = getpass.getuser()
except AttributeError:
# User is on some unknown OS
p['current_user'] = None
finally:
p['current_uid'] = p['current_gid'] = p['current_user_group'] = None
# Start OS-specific calls.
if platform.system() == 'Darwin':
try:
p['type'] = 'Darwin'
p['os'] = platform.system_alias(platform.system(), platform.release(), platform.mac_ver())[0] or None
p['release'] = platform.mac_ver()[0] or None
except Exception as e:
raise Exception('Fatal error retrieving OS details on OSX: {}'.format(e))
elif platform.system() == 'Linux':
try:
dist_info = platform.linux_distribution()
p['type'] = 'Linux'
p['os'] = dist_info[0] or None
p['release'] = dist_info[1] or None
except Exception as e:
raise Exception('Fatal error retrieving OS details on Linux: {}'.format(e))
elif platform.system() == 'Windows':
try:
p['type'] = 'Windows'
p['os'] = str(platform.system() + platform.release()) or None
p['release'] = platform.win32_ver()[0] or None
except Exception as e:
raise Exception('Fatal error retrieving OS details on Windows: {}'.format(e))
else:
# unknown OS. likely odd/new variant of linux/unix or windows
# linx/unix is more important, so we default to that:
try:
dist_info = platform.linux_distribution()
p['os'] = dist_info[0] or None
p['type'] = 'unknown'
p['release'] = dist_info[1] or None
except Exception as e:
raise NotImplementedError('Could not get platform information for unknown OS: {}'.format(e))
return p
|
NameError
|
dataset/ETHPy150Open tristanfisher/easyos/easyos/easyos.py/env
|
4,285 |
def extract_content(self, selector, attr, default):
"""
Method for performing the content extraction for the given XPath expression.
The XPath selector expression can be used to extract content \
from the element tree corresponding to the fetched web page.
If the selector is "url", the URL of the current web page is returned.
Otherwise, the selector expression is used to extract content. The particular \
attribute to be extracted ("text", "href", etc.) is specified in the method \
arguments, and this is used to extract the required content. If the content \
extracted is a link (from an attr value of "href" or "src"), the URL is parsed \
to convert the relative path into an absolute path.
If the selector does not fetch any content, the default value is returned. \
If no default value is specified, an exception is raised.
:param selector: The XPath expression
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:return: The extracted content
"""
try:
if selector == "url":
return self.url
if attr == "text":
tag = self.tree.xpath(selector)[0]
content = " ".join([make_ascii(x).strip() for x in tag.itertext()])
content = content.replace("\n", " ").strip()
else:
content = self.tree.xpath(selector)[0].get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
return content
except __HOLE__:
if default is not "":
return default
raise Exception("There is no content for the selector " + selector)
except XPathError:
raise Exception("Invalid XPath selector " + selector)
|
IndexError
|
dataset/ETHPy150Open AlexMathew/scrapple/scrapple/selectors/xpath.py/XpathSelector.extract_content
|
4,286 |
def extract_tabular(self, result={}, table_type="rows", header=[], prefix="", suffix="", selector="", attr="text", default="", verbosity=0):
"""
Method for performing the extraction of tabular data.
:param result:
:param table_type:
:param header:
:param prefix:
:param suffix:
:param selector:
:param attr:
:param default:
:param verbosity:
:return: A 2-tuple containing the list of all the column headers extracted and the list of \
dictionaries which contain (header, content) pairs
"""
result_list = []
if type(header) in [str, unicode]:
try:
header_list = self.tree.xpath(header)
table_headers = [prefix + h.text + suffix for h in header_list]
except XPathError:
raise Exception("Invalid XPath selector " + header)
else:
table_headers = [prefix + h + suffix for h in header]
if table_type not in ["rows", "columns"]:
raise Exception("Specify 'rows' or 'columns' in table_type")
if table_type == "rows":
try:
values = self.tree.xpath(selector)
if len(table_headers) >= len(values):
from itertools import izip_longest
pairs = izip_longest(table_headers, values, fillvalue=default)
else:
from itertools import izip
pairs = izip(table_headers, values)
for head, val in pairs:
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
if attr == "text":
try:
content = " ".join([make_ascii(x).strip() for x in val.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = val.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
result[head] = content
result_list.append(result)
except XPathError:
raise Exception("Invalid XPath selector " + selector)
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
else:
try:
if type(selector) in [str, unicode]:
selectors = [selector]
elif type(selector) == list:
selectors = selector
else:
raise Exception("Use a list of selector expressions for the various columns")
from itertools import izip, count
pairs = izip(table_headers, selectors)
columns = {}
for head, selector in pairs:
columns[head] = self.tree.xpath(selector)
try:
for i in count(start=0):
r = result.copy()
for head in columns.keys():
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
col = columns[head][i]
if attr == "text":
try:
content = " ".join([make_ascii(x).strip() for x in col.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = col.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
r[head] = content
result_list.append(r)
except __HOLE__:
pass
except XPathError:
raise Exception("Invalid XPath selector " + selector)
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return table_headers, result_list
|
IndexError
|
dataset/ETHPy150Open AlexMathew/scrapple/scrapple/selectors/xpath.py/XpathSelector.extract_tabular
|
4,287 |
@cached_property
def preview_size(self):
# This error checking might be too aggressive...
preview_width, preview_height = PREVIEW_WIDTH, PREVIEW_HEIGHT
preview_size = self.request.GET.get('preview_size', '').split('x')
if len(preview_size) != 2:
preview_size = (PREVIEW_WIDTH, PREVIEW_HEIGHT)
try:
preview_width = int(preview_size[0])
except (ValueError, __HOLE__):
pass
else:
try:
preview_height = int(preview_size[1])
except (ValueError, TypeError):
pass
return (preview_width, preview_height)
|
TypeError
|
dataset/ETHPy150Open theatlantic/django-cropduster/cropduster/views/__init__.py/CropDusterIndex.preview_size
|
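Catching (ValueError, TypeError) together covers both malformed strings and missing values, since int(None) raises TypeError. The two nested try blocks above reduce to one helper (a sketch; the 400x300 default stands in for PREVIEW_WIDTH and PREVIEW_HEIGHT):

    def to_int(value, default):
        try:
            return int(value)
        except (ValueError, TypeError):   # 'abc' -> ValueError, None -> TypeError
            return default

    def parse_preview_size(raw, default=(400, 300)):
        parts = (raw or "").split("x")
        if len(parts) != 2:
            return default
        return (to_int(parts[0], default[0]), to_int(parts[1], default[1]))

    print(parse_preview_size("640x480"))   # (640, 480)
    print(parse_preview_size("640x"))      # (640, 300)
    print(parse_preview_size(None))        # (400, 300)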
4,288 |
@cached_property
def db_image(self):
try:
db_image = Image.objects.get(pk=self.request.GET.get('id'))
except (__HOLE__, Image.DoesNotExist):
return None
image_filename = getattr(self.image_file, 'name', None)
if image_filename and image_filename != db_image.image.name:
# New images should get new rows (and thus new pks)
db_image.pk = None
return db_image
|
ValueError
|
dataset/ETHPy150Open theatlantic/django-cropduster/cropduster/views/__init__.py/CropDusterIndex.db_image
|
4,289 |
@cached_property
def thumbs(self):
thumb_ids = filter(None, self.request.GET.get('thumbs', '').split(','))
try:
thumb_ids = map(int, thumb_ids)
except __HOLE__:
thumbs = Thumb.objects.none()
else:
thumbs = Thumb.objects.filter(pk__in=thumb_ids)
thumb_dict = dict([(t.name, t) for t in thumbs])
ordered_thumbs = [thumb_dict.get(s.name, Thumb(name=s.name)) for s in self.sizes]
return FakeQuerySet(ordered_thumbs, thumbs)
|
TypeError
|
dataset/ETHPy150Open theatlantic/django-cropduster/cropduster/views/__init__.py/CropDusterIndex.thumbs
|
4,290 |
@csrf_exempt
def crop(request):
if request.method == "GET":
return json_error(request, 'crop', action="cropping image",
errors=["Form submission invalid"])
crop_form = CropForm(request.POST, request.FILES, prefix='crop')
if not crop_form.is_valid():
return json_error(request, 'crop', action='submitting form', forms=[crop_form],
log=True, exc_info=full_exc_info())
crop_data = copy.deepcopy(crop_form.cleaned_data)
db_image = Image(image=crop_data['orig_image'])
try:
pil_image = PIL.Image.open(db_image.image.path)
except __HOLE__:
pil_image = None
FormSet = modelformset_factory(Thumb, form=ThumbForm, formset=ThumbFormSet)
thumb_formset = FormSet(request.POST, request.FILES, prefix='thumbs')
if not thumb_formset.is_valid():
return json_error(request, 'crop', action='submitting form', formsets=[thumb_formset],
log=True, exc_info=full_exc_info())
cropped_thumbs = thumb_formset.save(commit=False)
non_model_fields = set(ThumbForm.declared_fields) - set([f.name for f in Thumb._meta.fields])
# The fields we will pull from when populating the ThumbForm initial data
json_thumb_fields = ['id', 'name', 'width', 'height']
thumbs_with_crops = [t for t in cropped_thumbs if t.crop_w and t.crop_h]
thumbs_data = [f.cleaned_data for f in thumb_formset]
standalone_mode = crop_data['standalone']
for i, (thumb, thumb_form) in enumerate(zip(cropped_thumbs, thumb_formset)):
changed_fields = set(thumb_form.changed_data) - non_model_fields
thumb_form._changed_data = list(changed_fields)
thumb_data = thumbs_data[i]
size = thumb_data['size']
if changed_fields & set(['crop_x', 'crop_y', 'crop_w', 'crop_h']):
# Clear existing primary key to force new thumb creation
thumb.pk = None
thumb.width = min(filter(None, [thumb.width, thumb.crop_w]))
thumb.height = min(filter(None, [thumb.height, thumb.crop_h]))
try:
new_thumbs = db_image.save_size(size, thumb, tmp=True, standalone=standalone_mode)
except CropDusterResizeException as e:
return json_error(request, 'crop',
action="saving size", errors=[force_unicode(e)])
if not new_thumbs:
continue
if standalone_mode:
thumb = new_thumbs
new_thumbs = {thumb.name: thumb}
cropped_thumbs[i] = thumb = new_thumbs.get(thumb.name, thumb)
update_props = ['crop_x', 'crop_y', 'crop_w', 'crop_h', 'width', 'height', 'id', 'name']
for prop in update_props:
thumbs_data[i][prop] = getattr(thumb, prop)
thumbs_data[i].update({
'changed': True,
'url': db_image.get_image_url(thumb.name),
})
for name, new_thumb in six.iteritems(new_thumbs):
thumb_data = dict([(k, getattr(new_thumb, k)) for k in json_thumb_fields])
crop_data['thumbs'].update({name: thumb_data})
if new_thumb.reference_thumb_id:
continue
thumbs_data[i]['thumbs'].update({name: thumb_data})
elif thumb.pk and thumb.name and thumb.crop_w and thumb.crop_h:
thumb_path = db_image.get_image_path(thumb.name, tmp=False)
tmp_thumb_path = db_image.get_image_path(thumb.name, tmp=True)
if os.path.exists(thumb_path):
if not thumb_form.cleaned_data.get('changed') or not os.path.exists(tmp_thumb_path):
shutil.copy(thumb_path, tmp_thumb_path)
if not thumb.pk and not thumb.crop_w and not thumb.crop_h:
if not len(thumbs_with_crops):
continue
best_fit = thumb_form.cleaned_data['size'].fit_to_crop(
thumbs_with_crops[0], original_image=pil_image)
if best_fit:
thumbs_data[i].update({
'crop_x': best_fit.box.x1,
'crop_y': best_fit.box.y1,
'crop_w': best_fit.box.w,
'crop_h': best_fit.box.h,
'changed': True,
'id': None,
})
for thumb_data in thumbs_data:
if isinstance(thumb_data['id'], Thumb):
thumb_data['id'] = thumb_data['id'].pk
return HttpResponse(json.dumps({
'crop': crop_data,
'thumbs': thumbs_data,
'initial': True,
}), content_type='application/json')
|
IOError
|
dataset/ETHPy150Open theatlantic/django-cropduster/cropduster/views/__init__.py/crop
|
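In the crop view above, `PIL.Image.open` raises IOError (an alias of OSError on Python 3) both for a missing path and for a file that is not a decodable image; the view downgrades either case to `pil_image = None` and carries on. A minimal sketch of that fallback, assuming Pillow is installed:

import PIL.Image

def open_image_or_none(path):
    # Missing files and undecodable images both surface as IOError;
    # callers treat None as "no source image available".
    try:
        return PIL.Image.open(path)
    except IOError:
        return None

assert open_image_or_none('/no/such/file.png') is None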
4,291 |
def shortcut(request, content_type_id, object_id):
"Redirect to an object's page based on a content-type ID and an object ID."
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
if not content_type.model_class():
raise http.Http404("Content type %s object has no associated model" % content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
except (ObjectDoesNotExist, ValueError):
raise http.Http404("Content type %s object %s doesn't exist" % (content_type_id, object_id))
try:
absurl = obj.get_absolute_url()
except AttributeError:
raise http.Http404("%s objects don't have get_absolute_url() methods" % content_type.name)
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith('http://') or absurl.startswith('https://'):
return http.HttpResponseRedirect(absurl)
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
object_domain = None
if Site._meta.installed:
opts = obj._meta
        # First, look for a many-to-many relationship to Site.
for field in opts.many_to_many:
if field.rel.to is Site:
try:
# Caveat: In the case of multiple related Sites, this just
# selects the *first* one, which is arbitrary.
object_domain = getattr(obj, field.name).all()[0].domain
except __HOLE__:
pass
if object_domain is not None:
break
# Next, look for a many-to-one relationship to Site.
if object_domain is None:
for field in obj._meta.fields:
if field.rel and field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).domain
except Site.DoesNotExist:
pass
if object_domain is not None:
break
# Fall back to the current site (if possible).
if object_domain is None:
try:
object_domain = get_current_site(request).domain
except Site.DoesNotExist:
pass
# If all that malarkey found an object domain, use it. Otherwise, fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = request.is_secure() and 'https' or 'http'
return http.HttpResponseRedirect('%s://%s%s' % (protocol, object_domain, absurl))
else:
return http.HttpResponseRedirect(absurl)
|
IndexError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/contrib/contenttypes/views.py/shortcut
|
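The shortcut view above detects an empty related-Sites queryset by letting `.all()[0]` raise IndexError instead of checking `.exists()` first. The same try/index idiom on a plain sequence:

def first_or_none(items):
    # Indexing an empty sequence raises IndexError; treat it as "no value".
    try:
        return items[0]
    except IndexError:
        return None

assert first_or_none(['example.com']) == 'example.com'
assert first_or_none([]) is None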
4,292 |
def writing_file(FILENAME, att='a', MESSAGE=""):
try:
with open(FILENAME, att) as f:
f.write(MESSAGE)
f.write('\n')
except __HOLE__ as e:
print "I/O error({0}): {1} for file {2}".format(e.errno, e.strerror, FILENAME)
|
IOError
|
dataset/ETHPy150Open bt3gl/Neat-Problems-in-Python-and-Flask/Version-Control/src/system_operations.py/writing_file
|
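Rows 4,292 through 4,299 all follow one pattern, wrapping a filesystem call in try/except IOError (or OSError) and printing a formatted diagnostic; the bare `print` statements mark them as Python 2 source. A hedged Python 3 equivalent of the write helper, relying on IOError being an alias of OSError there:

def writing_file(filename, mode='a', message=''):
    # On Python 3 a single OSError clause covers the old IOError cases too.
    try:
        with open(filename, mode) as f:
            f.write(message + '\n')
    except OSError as e:
        print("I/O error({0}): {1} for file {2}".format(e.errno, e.strerror, filename))

writing_file('/tmp/demo.txt', message='hello')  # hypothetical path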
4,293 |
def copying_file(SOURCE, DESTINATION):
try:
shutil.copy(SOURCE, DESTINATION)
# could also use
# os.system ("copy %s %s" % (filename1, filename2))
# also, for recursively copy a tree
# shutil.copytree(src, dest)
# source and destination are the same file
except shutil.Error as e:
print("Error: %s" % e)
# source does not exist
except __HOLE__ as e:
print "I/O error({0}): {1} for file {2}".format(e.errno, e.strerror, SOURCE)
|
IOError
|
dataset/ETHPy150Open bt3gl/Neat-Problems-in-Python-and-Flask/Version-Control/src/system_operations.py/copying_file
|
4,294 |
def moving_dir(SOURCE, DESTINATION):
try:
shutil.move(SOURCE, DESTINATION)
# source and destination are the same file
except shutil.Error as e:
print("Error: %s" % e)
# source does not exist
except __HOLE__ as e:
print "I/O error({0}): {1} for file {2}".format(e.errno, e.strerror, SOURCE)
|
IOError
|
dataset/ETHPy150Open bt3gl/Neat-Problems-in-Python-and-Flask/Version-Control/src/system_operations.py/moving_dir
|
4,295 |
def renaming_file(SOURCE, DESTINATION):
try:
shutil.move(SOURCE, DESTINATION)
# source and destination are the same file
except shutil.Error as e:
print("Error: %s" % e)
# source does not exist
except __HOLE__ as e:
print "I/O error({0}): {1} for file {2}".format(e.errno, e.strerror, SOURCE)
|
IOError
|
dataset/ETHPy150Open bt3gl/Neat-Problems-in-Python-and-Flask/Version-Control/src/system_operations.py/renaming_file
|
4,296 |
def creating_file(FILENAME, att='a'):
try:
f = open(FILENAME, att)
f.close()
except __HOLE__ as e:
print "I/O error({0}): {1} for file {2}".format(e.errno, e.strerror, FILENAME)
|
IOError
|
dataset/ETHPy150Open bt3gl/Neat-Problems-in-Python-and-Flask/Version-Control/src/system_operations.py/creating_file
|
4,297 |
def removing_dir_tree(DIR_TO_DELETE):
try:
# we could also use
shutil.rmtree(DIR_TO_DELETE)
#os.rmdir(DIR_TO_DELETE)
except __HOLE__, e:
print ("Error: %s - %s." % (e.filename,e.strerror))
|
OSError
|
dataset/ETHPy150Open bt3gl/Neat-Problems-in-Python-and-Flask/Version-Control/src/system_operations.py/removing_dir_tree
|
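Row 4,297 switches the label from IOError to OSError because `shutil.rmtree` reports its failures (missing directory, permission denied) as OSError, whose `filename` and `strerror` fields the handler prints. A quick demonstration:

import shutil

try:
    shutil.rmtree('/no/such/directory')
except OSError as e:
    # e.filename and e.strerror are the fields formatted in the row above.
    print("Error: %s - %s." % (e.filename, e.strerror))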
4,298 |
def print_lines(FILENAME):
try:
with open(FILENAME, 'r') as f:
lines = f.readlines()
print("---------- %s ----------" %FILENAME)
for line in lines:
print(line.strip('\n'))
print(" ")
except __HOLE__ as e:
print "I/O error({0}): {1} for file {2}".format(e.errno, e.strerror, FILENAME)
return lines
|
IOError
|
dataset/ETHPy150Open bt3gl/Neat-Problems-in-Python-and-Flask/Version-Control/src/system_operations.py/print_lines
|
4,299 |
def compressing_file(FILE_TO_COMPRESS):
try:
f_in = open(FILE_TO_COMPRESS)
f_out = gzip.open(FILE_TO_COMPRESS + '.gz', 'wb')
f_out.writelines(f_in)
f_in.close()
f_out.close()
except __HOLE__ as e:
print "I/O error({0}): {1} for file {2}".format(e.errno, e.strerror, FILE_TO_COMPRESS)
|
IOError
|
dataset/ETHPy150Open bt3gl/Neat-Problems-in-Python-and-Flask/Version-Control/src/system_operations.py/compressing_file
|
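The compression helper in row 4,299 opens both handles by hand and would leak `f_in` if `gzip.open` raised; context managers close both on any exit, and `shutil.copyfileobj` streams in chunks rather than loading the whole file. A hedged Python 3 rewrite along those lines:

import gzip
import shutil

def compressing_file(path):
    # Binary mode avoids any text-encoding assumptions about the input.
    try:
        with open(path, 'rb') as f_in, gzip.open(path + '.gz', 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
    except OSError as e:
        print("I/O error({0}): {1} for file {2}".format(e.errno, e.strerror, path))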