text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Add an action to make when an object is selected.
<END_TASK>
<USER_TASK:>
Description:
def on_select(self, item, action):
    """Register *action* to run when *item* becomes selected.

    Only one action per item can be stored this way; registering a new
    action replaces the previous one for that item.
    """
    # Items may be given either by index or by value.
    index = item if isinstance(item, int) else self.items.index(item)
    self._on_select[index] = action
<SYSTEM_TASK:>
Add an action to make when an object is unfocused.
<END_TASK>
<USER_TASK:>
Description:
def on_unselect(self, item, action):
    """Register *action* to run when *item* loses focus."""
    # Items may be given either by index or by value.
    index = item if isinstance(item, int) else self.items.index(item)
    self._on_unselect[index] = action
<SYSTEM_TASK:>
Add a widget to the window.
<END_TASK>
<USER_TASK:>
Description:
def add(self, widget, condition=lambda: 42):
    """
    Add a widget to the window.

    The widget will auto render. You can use the function like this if
    you want to keep the widget accessible:
        self.my_widget = self.add(my_widget)

    :param widget: a BaseWidget instance to attach.
    :param condition: callable evaluated each frame; the widget is drawn
        only while it returns a truthy value (defaults to always-on).
    :return: the widget itself, so the call can be used in an assignment.
    """
    assert callable(condition)
    assert isinstance(widget, BaseWidget)
    # Widgets are stored with their display condition and drawn in
    # insertion order by render().
    self._widgets.append((widget, condition))
    return widget
<SYSTEM_TASK:>
Remove a widget from the window.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, widget):
    """Detach *widget* from the window.

    :return: True when the widget was found and removed.
    :raises ValueError: if the widget is not attached.
    """
    for index, entry in enumerate(self._widgets):
        # Compare by identity: the same-looking widget object is not enough.
        if entry[0] is widget:
            del self._widgets[index]
            return True
    raise ValueError('Widget not in list')
<SYSTEM_TASK:>
Render the screen. Here you must draw everything.
<END_TASK>
<USER_TASK:>
Description:
def render(self):
    """Render the screen. Here you must draw everything."""
    # Wipe the previous frame.
    self.screen.fill(self.BACKGROUND_COLOR)
    # Draw every widget whose display condition currently holds.
    for wid, cond in self._widgets:
        if cond():
            wid.render(self.screen)
    # Optional 1-pixel border around the whole window.
    if self.BORDER_COLOR is not None:
        pygame.draw.rect(self.screen, self.BORDER_COLOR, ((0, 0), self.SCREEN_SIZE), 1)
    # Optional FPS counter overlay.
    if self.SHOW_FPS:
        self.fps.render(self.screen)
<SYSTEM_TASK:>
Refresh the screen. You don't need to override this except to update only small portions of the screen.
<END_TASK>
<USER_TASK:>
Description:
def update_screen(self):
    """Refresh the screen.

    You don't need to override this except to update only small
    portions of the screen.
    """
    # Cap the framerate first, then flip the whole display.
    self.clock.tick(self.FPS)
    pygame.display.update()
<SYSTEM_TASK:>
Makes a new screen with a size of SCREEN_SIZE, and VIDEO_OPTION as flags. Sets the windows name to NAME.
<END_TASK>
<USER_TASK:>
Description:
def new_screen(self):
    """Make a new screen with a size of SCREEN_SIZE and VIDEO_OPTIONS as
    flags. Sets the window's caption to NAME.

    :return: the pygame display Surface.
    """
    # Ask SDL to center the window on the desktop.
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    pygame.display.set_caption(self.NAME)
    screen_s = self.SCREEN_SIZE
    video_options = self.VIDEO_OPTIONS
    if FULLSCREEN & self.VIDEO_OPTIONS:
        # Emulate fullscreen with a borderless window: drop FULLSCREEN,
        # add NOFRAME, and let size (0, 0) mean "current desktop size".
        video_options ^= FULLSCREEN
        video_options |= NOFRAME
        screen_s = (0, 0)
    screen = pygame.display.set_mode(screen_s, video_options)
    if FULLSCREEN & self.VIDEO_OPTIONS:
        # Record the actual size the display picked.
        self.SCREEN_SIZE = screen.get_size()
    # Always allow QUIT events so the window stays closable.
    if not QUIT in self.EVENT_ALLOWED:
        self.EVENT_ALLOWED = list(self.EVENT_ALLOWED)
        self.EVENT_ALLOWED.append(QUIT)
    pygame.event.set_allowed(self.EVENT_ALLOWED)
    return screen
<SYSTEM_TASK:>
Return the smallest rect containing two rects
<END_TASK>
<USER_TASK:>
Description:
def merge_rects(rect1, rect2):
    """Return the smallest rect containing both given rects."""
    a = pygame.Rect(rect1)
    b = pygame.Rect(rect2)
    left = min(a.x, b.x)
    top = min(a.y, b.y)
    width = max(a.right, b.right) - left
    height = max(a.bottom, b.bottom) - top
    return pygame.Rect(left, top, width, height)
<SYSTEM_TASK:>
Return a vector normal to this one with a norm of one
<END_TASK>
<USER_TASK:>
Description:
def normnorm(self):
    """
    Return a unit vector perpendicular to this one.

    :return: V2
    """
    length = self.norm()
    # Rotate 90° counter-clockwise and scale down to unit length.
    return V2(-self.y / length, self.x / length)
<SYSTEM_TASK:>
Draws an antialiased line on the surface.
<END_TASK>
<USER_TASK:>
Description:
def line(surf, start, end, color=BLACK, width=1, style=FLAT):
    """Draws an antialiased line on the surface.

    :return: the bounding rect of the drawn area.
    """
    width = round(width, 1)
    if width == 1:
        # Thin line: delegate straight to gfxdraw.
        # return pygame.draw.aaline(surf, color, start, end)
        return gfxdraw.line(surf, *start, *end, color)
    # Thick line: draw a filled quad whose long sides are offset from the
    # segment by half the width, perpendicular to its direction.
    start = V2(*start)
    end = V2(*end)
    line_vector = end - start
    half_side = line_vector.normnorm() * width / 2
    point1 = start + half_side
    point2 = start - half_side
    point3 = end - half_side
    point4 = end + half_side
    # noinspection PyUnresolvedReferences
    liste = [
        (point1.x, point1.y),
        (point2.x, point2.y),
        (point3.x, point3.y),
        (point4.x, point4.y)
    ]
    rect = polygon(surf, liste, color)
    if style == ROUNDED:
        # Cap both ends with half-width circles and grow the dirty rect.
        _ = circle(surf, start, width / 2, color)
        rect = merge_rects(rect, _)
        _ = circle(surf, end, width / 2, color)
        rect = merge_rects(rect, _)
    return rect
<SYSTEM_TASK:>
Draw an antialiased filled circle on the given surface
<END_TASK>
<USER_TASK:>
Description:
def circle(surf, xy, r, color=BLACK):
    """Draw an antialiased filled circle on the given surface.

    :return: the bounding pygame.Rect of the drawn area.
    """
    x, y = xy
    # gfxdraw requires integer coordinates.
    x = round(x)
    y = round(y)
    r = round(r)
    gfxdraw.filled_circle(surf, x, y, r, color)
    gfxdraw.aacircle(surf, x, y, r, color)
    # Pad by one pixel to cover the antialiased edge.
    r += 1
    return pygame.Rect(x - r, y - r, 2 * r, 2 * r)
<SYSTEM_TASK:>
Draw an antialiased round rectangle on the surface.
<END_TASK>
<USER_TASK:>
Description:
def roundrect(surface, rect, color, rounding=5, unit=PIXEL):
    """
    Draw an antialiased round rectangle on the surface.

    surface : destination
    rect : rectangle
    color : rgb or rgba
    rounding : corner radius, in pixels, or in percent of the smaller
        side when *unit* is PERCENT
    :source: http://pygame.org/project-AAfilledRoundedRect-2349-.html
    """
    if unit == PERCENT:
        # Convert the percentage of the smaller rect side into pixels.
        rounding = int(min(rect.size) / 2 * rounding / 100)
    rect = pygame.Rect(rect)
    color = pygame.Color(*color)
    alpha = color.a
    color.a = 0
    pos = rect.topleft
    rect.topleft = 0, 0
    rectangle = pygame.Surface(rect.size, SRCALPHA)
    # Render an oversized opaque disc, then smooth-scale it down to get an
    # antialiased corner disc of diameter `rounding`.
    circle = pygame.Surface([min(rect.size) * 3] * 2, SRCALPHA)
    pygame.draw.ellipse(circle, (0, 0, 0), circle.get_rect(), 0)
    circle = pygame.transform.smoothscale(circle, (rounding, rounding))
    # Blit the disc into each corner; `rounding` is rebound to the blit
    # rect and repositioned corner by corner.
    rounding = rectangle.blit(circle, (0, 0))
    rounding.bottomright = rect.bottomright
    rectangle.blit(circle, rounding)
    rounding.topright = rect.topright
    rectangle.blit(circle, rounding)
    rounding.bottomleft = rect.bottomleft
    rectangle.blit(circle, rounding)
    # Fill the cross-shaped middle area between the corner discs.
    rectangle.fill((0, 0, 0), rect.inflate(-rounding.w, 0))
    rectangle.fill((0, 0, 0), rect.inflate(0, -rounding.h))
    # Tint the opaque mask with the requested color, then apply the alpha.
    rectangle.fill(color, special_flags=BLEND_RGBA_MAX)
    rectangle.fill((255, 255, 255, alpha), special_flags=BLEND_RGBA_MIN)
    return surface.blit(rectangle, pos)
<SYSTEM_TASK:>
Draw an antialiased filled polygon on a surface
<END_TASK>
<USER_TASK:>
Description:
def polygon(surf, points, color):
    """Draw an antialiased filled polygon on a surface.

    :return: the bounding pygame.Rect of the polygon.
    """
    gfxdraw.aapolygon(surf, points, color)
    gfxdraw.filled_polygon(surf, points, color)
    # Bounding box of the vertices.
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    left, top = min(xs), min(ys)
    return pygame.Rect(left, top, max(xs) - left, max(ys) - top)
<SYSTEM_TASK:>
Return the color of the button, depending on its state
<END_TASK>
<USER_TASK:>
Description:
def _get_color(self):
    """Return the color of the button, depending on its state."""
    if self.clicked and self.hovered:  # pressed with the mouse over it
        # Darken strongly while pressed.
        color = mix(self.color, BLACK, 0.8)
    elif self.hovered and not self.flags & self.NO_HOVER:
        # Slight darkening for the hover highlight.
        color = mix(self.color, BLACK, 0.93)
    else:
        color = self.color
    # Keep the label background in sync with the button face.
    self.text.bg_color = color
    return color
<SYSTEM_TASK:>
Return the offset of the colored part.
<END_TASK>
<USER_TASK:>
Description:
def _front_delta(self):
    """Return the offset of the colored (front) part of the button."""
    if self.flags & self.NO_MOVE:
        return Separator(0, 0)
    if self.clicked and self.hovered:  # the mouse is pressing the button
        delta = 2
    elif self.hovered and not self.flags & self.NO_HOVER:
        # NOTE(review): this branch yields the same value as the default
        # branch below, so the hover state currently has no offset. It
        # looks like a non-zero hover delta may have been intended —
        # confirm before changing.
        delta = 0
    else:
        delta = 0
    return Separator(delta, delta)
<SYSTEM_TASK:>
Update the button with the events.
<END_TASK>
<USER_TASK:>
Description:
def update(self, event_or_list):
    """Update the button state from the given event(s)."""
    for event in super().update(event_or_list):
        if event.type == MOUSEBUTTONDOWN:
            if event.pos in self:
                self.click()
            else:
                # Click started outside: release without firing the action.
                self.release(force_no_call=True)
        elif event.type == MOUSEBUTTONUP:
            # Fire the action only when the release lands on the button.
            self.release(force_no_call=event.pos not in self)
        elif event.type == MOUSEMOTION:
            self.hovered = event.pos in self
<SYSTEM_TASK:>
Render the button on a surface.
<END_TASK>
<USER_TASK:>
Description:
def render(self, surf):
    """Render the button (shadow, face, label) on a surface."""
    pos, size = self.topleft, self.size
    if not self.flags & self.NO_SHADOW:
        # Shadow layer, slightly offset behind the face.
        if self.flags & self.NO_ROUNDING:
            pygame.draw.rect(surf, LIGHT_GREY, (pos + self._bg_delta, size))
        else:
            roundrect(surf, (pos + self._bg_delta, size), LIGHT_GREY + (100,), 5)
    # Colored face, offset while pressed (see _front_delta).
    if self.flags & self.NO_ROUNDING:
        pygame.draw.rect(surf, self._get_color(), (pos + self._front_delta, size))
    else:
        roundrect(surf, (pos + self._front_delta, size), self._get_color(), 5)
    # Keep the label centered on the face.
    self.text.center = self.center + self._front_delta
    self.text.render(surf)
<SYSTEM_TASK:>
Draw the button on the surface.
<END_TASK>
<USER_TASK:>
Description:
def render(self, surf):
    """Draw the round button (shadow, face, label) on the surface."""
    radius = self.width / 2
    if not self.flags & self.NO_SHADOW:
        # Shadow first, slightly offset behind the face.
        circle(surf, self.center + self._bg_delta, radius, LIGHT_GREY)
    circle(surf, self.center + self._front_delta, radius, self._get_color())
    # Keep the label centered on the (possibly offset) face.
    self.text.center = self.center + self._front_delta
    self.text.render(surf)
<SYSTEM_TASK:>
Render the button
<END_TASK>
<USER_TASK:>
Description:
def render(self, surf):
    """Render the button with the icon matching its pressed state."""
    icon = self.icon_pressed if self.clicked else self.icon
    surf.blit(icon, self)
<SYSTEM_TASK:>
Set the value of the bar. If the value is out of bound, sets it to an extremum
<END_TASK>
<USER_TASK:>
Description:
def set(self, value):
    """Set the value of the bar, clamping it into [min, max].

    The callback runs in a new thread with the (possibly clamped) value.
    """
    self._value = min(self.max, max(self.min, value))
    start_new_thread(self.func, (self.get(),))
<SYSTEM_TASK:>
Starts checking if the SB is shifted
<END_TASK>
<USER_TASK:>
Description:
def _start(self):
    """Poll the mouse while focused and drag the slider cursor.

    Runs until focus is lost; invokes the callback at most once per
    `interval` milliseconds.
    """
    # TODO : make an update method instead
    last_call = 42  # epoch sentinel: far in the past, allows an immediate first call
    while self._focus:
        sleep(1 / 100)  # ~100 Hz polling
        mouse = pygame.mouse.get_pos()
        last_value = self.get()
        self.value_px = mouse[0]
        # we do not need to do anything when it is the same value
        if self.get() == last_value:
            continue
        # Rate-limit callbacks to one per `interval` milliseconds.
        if last_call + self.interval / 1000 < time():
            last_call = time()
            self.func(self.get())
<SYSTEM_TASK:>
The position in pixels of the cursor
<END_TASK>
<USER_TASK:>
Description:
def value_px(self):
    """The position, in pixels, of the cursor along the bar."""
    px_per_unit = self.w / (self.max - self.min)
    return self.x + px_per_unit * (self.get() - self.min)
<SYSTEM_TASK:>
Renders the bar on the display
<END_TASK>
<USER_TASK:>
Description:
def render(self, display):
    """Render the slider: the bar, its cursor and, optionally, the value."""
    # the bar: a horizontal strip one third of the widget height
    bar_rect = pygame.Rect(0, 0, self.width, self.height // 3)
    bar_rect.center = self.center
    display.fill(self.bg_color, bar_rect)
    # the cursor: a circle centered on the current value
    circle(display, (self.value_px, self.centery), self.height // 2, self.color)
    # the value label, when enabled
    if self.show_val:
        self.text_val.render(display)
<SYSTEM_TASK:>
This is called each time an attribute is accessed, to be sure every param is updated, because of callbacks.
<END_TASK>
<USER_TASK:>
Description:
def __update(self):
    """
    Refresh width/height and the anchored position.

    This is called each time an attribute is accessed, to be sure every
    param is updated, because of callbacks.
    """
    # I can not set the size attr because it is my property, so I set the
    # width and height separately. super().__setattr__ bypasses this
    # class's own __setattr__ hooks.
    width, height = self.size
    super(BaseWidget, self).__setattr__("width", width)
    super(BaseWidget, self).__setattr__("height", height)
    # Re-apply the position through the anchor attribute (e.g. "topleft").
    super(BaseWidget, self).__setattr__(self.anchor, self.pos)
<SYSTEM_TASK:>
Make a compatible version of pip importable. Raise a RuntimeError if we
<END_TASK>
<USER_TASK:>
Description:
def activate(specifier):
    """Make a compatible version of pip importable.

    :arg specifier: a setuptools requirement specifier, e.g. "pip>=1.5"
    :raises RuntimeError: if no installed pip satisfies the specifier.
    """
    try:
        # pkg_resources.require() resolves and returns the matching
        # distributions; activating them puts them on sys.path.
        for distro in require(specifier):
            distro.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
<SYSTEM_TASK:>
Return the path and line number of the file from which an
<END_TASK>
<USER_TASK:>
Description:
def path_and_line(req):
    """Return the path and line number of the file from which an
    InstallRequirement came.

    Parses pip's "comes_from" string of the form "-r PATH (line N)".
    """
    match = re.match(r'-r (.*) \(line (\d+)\)$', req.comes_from)
    path, line = match.groups()
    return path, int(line)
<SYSTEM_TASK:>
Yield hashes from contiguous comment lines before line ``line_number``.
<END_TASK>
<USER_TASK:>
Description:
def hashes_above(path, line_number):
    """Yield hashes from contiguous comment lines before line
    ``line_number``.

    :arg path: the requirements file to scan
    :arg line_number: 1-based line number of the requirement
    """
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.
        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).
        """
        hashes = []
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    yield []
    # Pick out the hash list belonging to the requirement on line_number.
    return next(islice(hash_lists(path), line_number - 1, None))
<SYSTEM_TASK:>
Return an iterable of filtered arguments.
<END_TASK>
<USER_TASK:>
Description:
def requirement_args(argv, want_paths=False, want_other=False):
"""Return an iterable of filtered arguments.
:arg argv: Arguments, starting after the subcommand
:arg want_paths: If True, the returned iterable includes the paths to any
requirements files following a ``-r`` or ``--requirement`` option.
:arg want_other: If True, the returned iterable includes the args that are
not a requirement-file path or a ``-r`` or ``--requirement`` flag.
""" |
was_r = False
for arg in argv:
# Allow for requirements files named "-r", don't freak out if there's a
# trailing "-r", etc.
if was_r:
if want_paths:
yield arg
was_r = False
elif arg in ['-r', '--requirement']:
was_r = True
else:
if want_other:
yield arg |
<SYSTEM_TASK:>
Return the peep hash of one or more files, returning a shell status code
<END_TASK>
<USER_TASK:>
Description:
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status
    code or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if paths:
        # Emit one "# sha256: <hash>" comment line per file.
        for path in paths:
            print('# sha256:', hash_of_file(path))
        return ITS_FINE_ITS_FINE
    else:
        # No files given: show usage and signal a commandline error.
        parser.print_usage()
        return COMMAND_LINE_ERROR
<SYSTEM_TASK:>
Memoize a method that should return the same result every time on a
<END_TASK>
<USER_TASK:>
Description:
def memoize(func):
    """Memoize a method that should return the same result every time on a
    given instance.

    The value is cached in ``self._cache`` under the method's name, so
    distinct instances keep distinct caches.
    """
    @wraps(func)
    def memoizer(self):
        try:
            cache = self._cache
        except AttributeError:
            cache = self._cache = {}
        if func.__name__ not in cache:
            cache[func.__name__] = func(self)
        return cache[func.__name__]
    return memoizer
<SYSTEM_TASK:>
Return a PackageFinder respecting command-line options.
<END_TASK>
<USER_TASK:>
Description:
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.

    :arg argv: Everything after the subcommand
    :return: a pip PackageFinder configured from the parsed options
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top=level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)
    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names:
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        # Entries are either a plain name or a (kwarg, option-attr) pair.
        kw, attr = option if isinstance(option, tuple) else (option, option)
        value = getattr(options, attr, MARKER)
        if value is not MARKER:
            kwargs[kw] = value
    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    # Older pips expose mirrors; absent attribute means no extras.
    index_urls += getattr(options, 'mirrors', [])
    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)
    return PackageFinder(index_urls=index_urls, **kwargs)
<SYSTEM_TASK:>
Return a map of key -> list of things.
<END_TASK>
<USER_TASK:>
Description:
def bucket(things, key):
    """Return a map of key -> list of things sharing that key.

    Insertion order within each bucket follows the order of *things*.
    """
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
<SYSTEM_TASK:>
Execute something before the first item of iter, something else for each
<END_TASK>
<USER_TASK:>
Description:
def first_every_last(iterable, first, every, last):
    """Execute callbacks around an iteration.

    Call *first* with the first item before processing it, *every* with
    each item, and *last* with the final item after the loop. If the
    iterable yields nothing, no callback runs.
    """
    iterator = iter(iterable)
    try:
        item = next(iterator)
    except StopIteration:
        return  # empty iterable: nothing to do
    first(item)
    every(item)
    for item in iterator:
        every(item)
    last(item)
<SYSTEM_TASK:>
Return a list of DownloadedReqs representing the requirements parsed
<END_TASK>
<USER_TASK:>
Description:
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.

    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    finder = package_finder(argv)
    parsed = _parse_requirements(path, finder)
    return [DownloadedReq(req, argv, finder) for req in parsed]
<SYSTEM_TASK:>
Perform the ``peep install`` subcommand, returning a shell status code
<END_TASK>
<USER_TASK:>
Description:
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    # Output is buffered and printed in one go at the end (see finally),
    # so pip's own output doesn't interleave with ours.
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR
        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        # Group requirements by their outcome class (satisfied, missing
        # hash, mismatched hash, installable, ...).
        buckets = bucket(reqs, lambda r: r.__class__)
        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)
        if any(buckets[b] for b in ERROR_CLASSES):
            # Any error means we install nothing at all.
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            for req in buckets[InstallableReq]:
                req.install()
            first_every_last(buckets[SatisfiedReq], *printers)
        return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Clean up temp dirs and flush our buffered report.
        for req in reqs:
            req.dispose()
        print(''.join(output))
<SYSTEM_TASK:>
Convert a peep requirements file to one compatible with pip-8 hashing.
<END_TASK>
<USER_TASK:>
Description:
def peep_port(paths):
    """Convert a peep requirements file to one compatible with pip-8 hashing.

    Loses comments and tromps on URLs, so the result will need a little
    manual massaging, but the hard part--the hash conversion--is done for
    you.

    :arg paths: the requirements files to port
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None
    # NOTE(review): `argv` is not defined in this function's scope; unless
    # it is a module-level global, the next line raises NameError — verify.
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        req_path, req_line = path_and_line(req)
        # Re-encode peep's urlsafe-base64 hashes as the hex digests pip 8
        # expects ('=' padding restored before decoding).
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            # Emit a header each time we move to a new source file.
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path
        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print('    --hash=sha256:%s' % hash, end='')
            print()
<SYSTEM_TASK:>
Deduce the version number of the downloaded package from its filename.
<END_TASK>
<USER_TASK:>
Description:
def _version(self):
    """Deduce the version number of the downloaded package from its
    filename.

    Dispatches on the download type: wheels follow PEP 427 naming, while
    archives are "name-version.ext".

    :raises RuntimeError: when the filename doesn't start with the package
        name, so no version can be extracted.
    """
    # TODO: Can we delete this method and just print the line from the
    # reqs file verbatim instead?
    def version_of_archive(filename, package_name):
        # Since we know the project_name, we can strip that off the left,
        # strip any archive extensions off the right, and take the rest
        # as the version.
        for ext in ARCHIVE_EXTENSIONS:
            if filename.endswith(ext):
                filename = filename[:-len(ext)]
                break
        # Handle github sha tarball downloads.
        if is_git_sha(filename):
            filename = package_name + '-' + filename
        if not filename.lower().replace('_', '-').startswith(package_name.lower()):
            # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
            give_up(filename, package_name)
        return filename[len(package_name) + 1:]  # Strip off '-' before version.

    def version_of_wheel(filename, package_name):
        # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
        # name-convention) we know the format bits are '-' separated.
        whl_package_name, version, _rest = filename.split('-', 2)
        # Do the alteration to package_name from PEP 427:
        # BUG FIX: re.UNICODE was previously passed as the 4th positional
        # argument of re.sub(), which is `count`, not `flags` — silently
        # capping substitutions at 32 instead of setting the flag. Pass it
        # explicitly as `flags`.
        our_package_name = re.sub(r'[^\w\d.]+', '_', package_name,
                                  flags=re.UNICODE)
        if whl_package_name != our_package_name:
            give_up(filename, whl_package_name)
        return version

    def give_up(filename, package_name):
        # Shared failure path for both deducers.
        raise RuntimeError("The archive '%s' didn't start with the package name "
                           "'%s', so I couldn't figure out the version number. "
                           "My bad; improve me." %
                           (filename, package_name))

    get_version = (version_of_wheel
                   if self._downloaded_filename().endswith('.whl')
                   else version_of_archive)
    return get_version(self._downloaded_filename(), self._project_name())
<SYSTEM_TASK:>
Returns whether this requirement is always unsatisfied
<END_TASK>
<USER_TASK:>
Description:
def _is_always_unsatisfied(self):
    """Return whether this requirement can never be considered satisfied.

    This happens in cases where we can't determine the version from the
    filename: github sha tarballs have a commit sha in the url rather
    than a version number, so an installed copy can never be matched.
    """
    url = self._url()
    if not url:
        return False
    filename = filename_from_url(url)
    if not filename.endswith(ARCHIVE_EXTENSIONS):
        return False
    stem, _ext = splitext(filename)
    return bool(is_git_sha(stem))
<SYSTEM_TASK:>
Download a file, and return its name within my temp dir.
<END_TASK>
<USER_TASK:>
Description:
def _download(self, link):
    """Download a file, and return its name within my temp dir.

    This does no verification of HTTPS certs, but our checking hashes
    makes that largely unimportant. It would be nice to be able to use the
    requests lib, which can verify certs, but it is guaranteed to be
    available only in pip >= 1.5.
    This also drops support for proxies and basic auth, though those could
    be added back in.
    """
    # Based on pip 1.4.1's URLOpener but with cert verification removed
    def opener(is_https):
        if is_https:
            opener = build_opener(HTTPSHandler())
            # Strip out HTTPHandler to prevent MITM spoof:
            for handler in opener.handlers:
                if isinstance(handler, HTTPHandler):
                    opener.handlers.remove(handler)
        else:
            opener = build_opener()
        return opener

    # Descended from unpack_http_url() in pip 1.4.1
    def best_filename(link, response):
        """Return the most informative possible filename for a download,
        ideally with a proper extension.
        """
        content_type = response.info().get('content-type', '')
        filename = link.filename  # fallback
        # Have a look at the Content-Disposition header for a better guess:
        content_disposition = response.info().get('content-disposition')
        if content_disposition:
            type, params = cgi.parse_header(content_disposition)
            # We use ``or`` here because we don't want to use an "empty" value
            # from the filename param:
            filename = params.get('filename') or filename
        ext = splitext(filename)[1]
        if not ext:
            # No extension: derive one from the MIME type...
            ext = mimetypes.guess_extension(content_type)
            if ext:
                filename += ext
        if not ext and link.url != response.geturl():
            # ...or, after a redirect, from the final URL.
            ext = splitext(response.geturl())[1]
            if ext:
                filename += ext
        return filename

    # Descended from _download_url() in pip 1.4.1
    def pipe_to_file(response, path, size=0):
        """Pull the data off an HTTP response, shove it in a new file, and
        show progress.

        :arg response: A file-like object to read from
        :arg path: The path of the new file
        :arg size: The expected size, in bytes, of the download. 0 for
            unknown or to suppress progress indication (as for cached
            downloads)
        """
        def response_chunks(chunk_size):
            # Stream the body in fixed-size chunks until EOF.
            while True:
                chunk = response.read(chunk_size)
                if not chunk:
                    break
                yield chunk
        print('Downloading %s%s...' % (
            self._req.req,
            (' (%sK)' % (size / 1000)) if size > 1000 else ''))
        # Bar when the size is known, spinner otherwise.
        progress_indicator = (DownloadProgressBar(max=size).iter if size
                              else DownloadProgressSpinner().iter)
        with open(path, 'wb') as file:
            for chunk in progress_indicator(response_chunks(4096), 4096):
                file.write(chunk)

    # Strip any fragment (e.g. "#md5=...") before requesting.
    url = link.url.split('#', 1)[0]
    try:
        response = opener(urlparse(url).scheme != 'http').open(url)
    except (HTTPError, IOError) as exc:
        raise DownloadError(link, exc)
    filename = best_filename(link, response)
    try:
        size = int(response.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        size = 0  # unknown size: suppress the progress bar
    pipe_to_file(response, join(self._temp_path, filename), size=size)
    return filename
<SYSTEM_TASK:>
Download the package's archive if necessary, and return its
<END_TASK>
<USER_TASK:>
Description:
def _downloaded_filename(self):
    """Download the package's archive if necessary, and return its
    filename.

    --no-deps is implied, as we have reimplemented the bits that would
    ordinarily do dependency resolution.

    :raises UnsupportedRequirementError: for requirements that don't come
        down as a single hashable file.
    """
    # Peep doesn't support requirements that don't come down as a single
    # file, because it can't hash them. Thus, it doesn't support editable
    # requirements, because pip itself doesn't support editable
    # requirements except for "local projects or a VCS url". Nor does it
    # support VCS requirements yet, because we haven't yet come up with a
    # portable, deterministic way to hash them. In summary, all we support
    # is == requirements and tarballs/zips/etc.

    # TODO: Stop on reqs that are editable or aren't ==.

    # If the requirement isn't already specified as a URL, get a URL
    # from an index:
    link = self._link() or self._finder.find_requirement(self._req, upgrade=False)

    if link:
        lower_scheme = link.scheme.lower()  # pip lower()s it for some reason.
        if lower_scheme == 'http' or lower_scheme == 'https':
            # Remote archive: fetch it into the temp dir.
            file_path = self._download(link)
            return basename(file_path)
        elif lower_scheme == 'file':
            # The following is inspired by pip's unpack_file_url():
            link_path = url_to_path(link.url_without_fragment)
            if isdir(link_path):
                raise UnsupportedRequirementError(
                    "%s: %s is a directory. So that it can compute "
                    "a hash, peep supports only filesystem paths which "
                    "point to files" %
                    (self._req, link.url_without_fragment))
            else:
                # Copy the local file into the temp dir so hashing and
                # installation work the same as for downloads.
                copy(link_path, self._temp_path)
                return basename(link_path)
        else:
            raise UnsupportedRequirementError(
                "%s: The download link, %s, would not result in a file "
                "that can be hashed. Peep supports only == requirements, "
                "file:// URLs pointing to files (not folders), and "
                "http:// and https:// URLs pointing to tarballs, zips, "
                "etc." % (self._req, link.url))
    else:
        raise UnsupportedRequirementError(
            "%s: couldn't determine where to download this requirement from."
            % (self._req,))
<SYSTEM_TASK:>
Install the package I represent, without dependencies.
<END_TASK>
<USER_TASK:>
Description:
def install(self):
    """Install the package I represent, without dependencies.

    Obey typical pip-install options passed in on the command line.
    """
    extra_args = list(requirement_args(self._argv, want_other=True))
    archive = join(self._temp_path, self._downloaded_filename())
    # -U so it installs whether pip deems the requirement "satisfied" or
    # not. This is necessary for GitHub-sourced zips, which change without
    # their version numbers changing.
    run_pip(['install'] + extra_args + ['--no-deps', '-U', archive])
<SYSTEM_TASK:>
Return the inner Requirement's "unsafe name".
<END_TASK>
<USER_TASK:>
Description:
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
""" |
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.') |
<SYSTEM_TASK:>
Return the class I should be, spanning a continuum of goodness.
<END_TASK>
<USER_TASK:>
Description:
def _class(self):
    """Return the class I should be, spanning a continuum of goodness."""
    try:
        self._project_name()
    except ValueError:
        # No usable name at all: the requirement line is malformed.
        return MalformedReq
    if self._is_satisfied():
        return SatisfiedReq
    expected = self._expected_hashes()
    if not expected:
        return MissingReq
    if self._actual_hash() not in expected:
        return MismatchedReq
    return InstallableReq
<SYSTEM_TASK:>
Creates a response object with the given params and option
<END_TASK>
<USER_TASK:>
Description:
def __get_response(self, uri, params=None, method="get", stream=False):
    """Creates a response object with the given params and option

    Parameters
    ----------
    uri : string
        The full URL to request.
    params: dict
        A list of parameters to send with the request. This
        will be sent as data for methods that accept a request
        body and will otherwise be sent as query parameters.
    method : str
        The HTTP method to use.
    stream : bool
        Whether to stream the response.

    Returns a requests.Response object.
    """
    # Lazily create and reuse a single Session for connection pooling.
    if not hasattr(self, "session") or not self.session:
        self.session = requests.Session()
        if self.access_token:
            # Authenticate every request made through this session.
            self.session.headers.update(
                {'Authorization': 'Bearer {}'.format(self.access_token)}
            )
    # Remove empty params
    if params:
        params = {k: v for k, v in params.items() if v is not None}
    kwargs = {
        "url": uri,
        "verify": True,
        "stream": stream
    }
    # GET sends query parameters; other methods send a request body.
    kwargs["params" if method == "get" else "data"] = params
    return getattr(self.session, method)(**kwargs)
<SYSTEM_TASK:>
Only returns the response, not the status_code
<END_TASK>
<USER_TASK:>
Description:
def __call(self, uri, params=None, method="get"):
    """Perform a request and return only the parsed JSON body (not the
    status_code).

    :raises BadRequest: on a non-OK response or any request/JSON-parsing
        failure.
    """
    try:
        resp = self.__get_response(uri, params, method, False)
        rjson = resp.json(**self.json_options)
        # A non-2xx status drops us into the AssertionError handler.
        assert resp.ok
    except AssertionError:
        # Surface the server-provided error message with its status code.
        msg = "OCode-{}: {}".format(resp.status_code, rjson["message"])
        raise BadRequest(msg)
    except Exception as e:
        # Connection errors, invalid JSON, etc.
        msg = "Bad response: {}".format(e)
        log.error(msg, exc_info=True)
        raise BadRequest(msg)
    else:
        return rjson
<SYSTEM_TASK:>
Returns a stream response
<END_TASK>
<USER_TASK:>
Description:
def __call_stream(self, uri, params=None, method="get"):
    """Perform a request and hand back the streaming response object.

    Returns ``None`` when the request itself fails unexpectedly (the
    error is only logged); raises BadRequest on a non-OK status.
    """
    try:
        response = self.__get_response(uri, params, method, True)
        if not response.ok:
            raise BadRequest(response.status_code)
        return response
    except BadRequest:
        raise
    except Exception as e:
        log.error("Bad response: {}".format(e), exc_info=True)
<SYSTEM_TASK:>
Get a list of open trades
<END_TASK>
<USER_TASK:>
Description:
def get_trades(self, max_id=None, count=None, instrument=None, ids=None):
    """Fetch the account's list of open trades.

    Parameters
    ----------
    max_id : int
        Return trades with id <= max_id, in descending order (pagination).
    count : int
        Maximum number of open trades to return. Default: 50, max: 500.
    instrument : str
        Restrict to a single instrument. Default: all.
    ids : list
        Explicit trade ids to retrieve (max 50); no other parameter may
        be combined with this one.
    See more:
    http://developer.oanda.com/rest-live/trades/#getListOpenTrades
    """
    endpoint = "{0}/{1}/accounts/{2}/trades".format(
        self.domain, self.API_VERSION, self.account_id)
    # Positive integers are forwarded; anything else is dropped server-side.
    query = {
        "maxId": int(max_id) if max_id and max_id > 0 else None,
        "count": int(count) if count and count > 0 else None,
        "instrument": instrument,
        "ids": ','.join(ids) if ids else None,
    }
    try:
        return self._Client__call(uri=endpoint, params=query, method="get")
    except (RequestException, AssertionError):
        return False
<SYSTEM_TASK:>
Modify an existing trade.
<END_TASK>
<USER_TASK:>
Description:
def update_trade(
        self,
        trade_id,
        stop_loss=None,
        take_profit=None,
        trailing_stop=None
):
    """ Modify an existing trade.

    Note: Only the specified parameters will be modified. All
    other parameters will remain unchanged. To remove an
    optional parameter, set its value to 0.

    Parameters
    ----------
    trade_id : int
        The id of the trade to modify.
    stop_loss : number
        Stop Loss value.
    take_profit : number
        Take Profit value.
    trailing_stop : number
        Trailing Stop distance in pips, up to one decimal place.

    Returns the decoded JSON response, or False on failure.

    See more:
    http://developer.oanda.com/rest-live/trades/#modifyExistingTrade
    """
    url = "{0}/{1}/accounts/{2}/trades/{3}".format(
        self.domain,
        self.API_VERSION,
        self.account_id,
        trade_id
    )
    params = {
        "stopLoss": stop_loss,
        "takeProfit": take_profit,
        "trailingStop": trailing_stop
    }
    # BUG FIX: the original ended with an unreachable
    # ``raise NotImplementedError()`` after a try/except in which every
    # path returns; the dead statement has been removed.
    try:
        return self._Client__call(uri=url, params=params, method="patch")
    except (RequestException, AssertionError):
        return False
<SYSTEM_TASK:>
Request full account history.
<END_TASK>
<USER_TASK:>
Description:
def request_transaction_history(self):
    """Submit a request for the full transaction history.

    A successfully accepted submission yields a URL (from the Location
    header) pointing to a file that will become available once the
    request has been served; the URL answers HTTP 404 until then.
    Returns the URL, or False on failure.

    See more:
    http://developer.oanda.com/rest-live/transaction-history/#getFullAccountHistory
    http://developer.oanda.com/rest-live/transaction-history/#transactionTypes
    """
    endpoint = "{0}/{1}/accounts/{2}/alltransactions".format(
        self.domain, self.API_VERSION, self.account_id)
    try:
        response = self.__get_response(endpoint)
        location = response.headers['location']
    except (RequestException, AssertionError):
        return False
    return location
<SYSTEM_TASK:>
Download full account history.
<END_TASK>
<USER_TASK:>
Description:
def get_transaction_history(self, max_wait=5.0):
    """ Download full account history.
    Uses request_transaction_history to get the transaction
    history URL, then polls the given URL until it's ready (or
    the max_wait time is reached) and provides the decoded
    response.
    Parameters
    ----------
    max_wait : float
        The total maximum time to spend waiting for the file to
        be ready; if this is exceeded a failed response will be
        returned. This is not guaranteed to be strictly
        followed, as one last attempt will be made to check the
        file before giving up.
    See more:
    http://developer.oanda.com/rest-live/transaction-history/#getFullAccountHistory
    http://developer.oanda.com/rest-live/transaction-history/#transactionTypes
    """
    url = self.request_transaction_history()
    if not url:
        return False
    ready = False
    start = time()
    delay = 0.1
    # Poll with exponential back-off; the loop also exits when delay
    # reaches 0, i.e. when the time budget is exhausted.
    while not ready and delay:
        response = requests.head(url)
        ready = response.ok
        if not ready:
            sleep(delay)
            time_remaining = max_wait - time() + start
            # Keep ~0.1s of head-room for one last HEAD attempt.
            max_delay = max(0., time_remaining - .1)
            delay = min(delay * 2, max_delay)
    if not ready:
        return False
    response = requests.get(url)
    try:
        # The server delivers the history as a ZIP archive containing a
        # single JSON file.
        with ZipFile(BytesIO(response.content)) as container:
            files = container.namelist()
            if not files:
                log.error('Transaction ZIP has no files.')
                return False
            history = container.open(files[0])
            raw = history.read().decode('ascii')
    except BadZipfile:
        log.error('Response is not a valid ZIP file', exc_info=True)
        return False
    return json.loads(raw, **self.json_options)
<SYSTEM_TASK:>
Get a list of accounts owned by the user.
<END_TASK>
<USER_TASK:>
Description:
def get_accounts(self, username=None):
    """Fetch the list of accounts owned by the user.

    Parameters
    ----------
    username : string
        The name of the user. Only required on the sandbox; on
        production systems the access token identifies the user.
    See more:
    http://developer.oanda.com/rest-sandbox/accounts/#-a-name-getaccountsforuser-a-get-accounts-for-a-user
    """
    endpoint = "{0}/{1}/accounts".format(self.domain, self.API_VERSION)
    try:
        return self._Client__call(
            uri=endpoint, params={"username": username}, method="get")
    except (RequestException, AssertionError):
        return False
<SYSTEM_TASK:>
Marks the item as the one the user is in.
<END_TASK>
<USER_TASK:>
Description:
def choose(self):
    """Mark this item as the one the user is in (no-op if already chosen)."""
    if self.choosed:
        return
    self.choosed = True
    # Shift the item 5 px to the right to visualise the selection.
    self.pos = self.pos + Sep(5, 0)
<SYSTEM_TASK:>
Marks the item as the one the user is not in.
<END_TASK>
<USER_TASK:>
Description:
def stop_choose(self):
    """Mark this item as no longer chosen (no-op if not chosen)."""
    if not self.choosed:
        return
    self.choosed = False
    # Undo the 5 px selection offset applied by choose().
    self.pos = self.pos + Sep(-5, 0)
<SYSTEM_TASK:>
The color of the clicked version of the MenuElement. Darker than the normal one.
<END_TASK>
<USER_TASK:>
Description:
def get_darker_color(self):
    """Color for the clicked state of the element: a darker shade of the base color."""
    # Nudge the color toward white on dark backgrounds, toward black otherwise.
    target = WHITE if bw_contrasted(self._true_color, 30) == WHITE else BLACK
    return mix(self._true_color, target, 0.9)
<SYSTEM_TASK:>
Convert a size in pixels to a size in points.
<END_TASK>
<USER_TASK:>
Description:
def px_to_pt(self, px):
    """Convert a size in pixels to a size in points.

    Small sizes come from a precomputed lookup table; larger ones use a
    linear approximation.
    """
    if px < 200:
        return self.PX_TO_PT[px]
    return int(floor((px - 1.21) / 1.332))
<SYSTEM_TASK:>
Set the size of the font, in px or pt.
<END_TASK>
<USER_TASK:>
Description:
def set_size(self, pt=None, px=None):
    """Set the size of the font, in points (pt) or pixels (px).

    The pixel variant is slightly inaccurate (may undershoot by a few
    pixels, up to 4 for large values) but never over-estimates.
    """
    assert (pt, px) != (None, None)
    # Re-initialising rebuilds the underlying font object at the new size.
    size_pt = pt if pt is not None else self.px_to_pt(px)
    self.__init__(size_pt, self.font_name)
<SYSTEM_TASK:>
Set the font size to the desired size, in pt or px.
<END_TASK>
<USER_TASK:>
Description:
def set_font_size(self, pt=None, px=None):
    """Change the font size (in pt or px) and re-render the text."""
    self.font.set_size(pt=pt, px=px)
    self._render()
<SYSTEM_TASK:>
The position of the cursor in the text.
<END_TASK>
<USER_TASK:>
Description:
def cursor(self):
    """The position of the cursor in the text.

    Reading the property first clamps the stored value into the valid
    range [0, len(self)] by assigning through the ``cursor`` setter,
    then returns the clamped value.
    """
    # NOTE(review): the assignments below go through a property *setter*
    # that must exist elsewhere in the class -- confirm before renaming.
    if self._cursor < 0:
        self.cursor = 0
    if self._cursor > len(self):
        self.cursor = len(self)
    return self._cursor
<SYSTEM_TASK:>
Delete one letter to the right or the left of the cursor.
<END_TASK>
<USER_TASK:>
Description:
def delete_one_letter(self, letter=RIGHT):
    """Delete a single character on the given side of the cursor."""
    assert letter in (self.RIGHT, self.LEFT)
    if letter == self.RIGHT:
        self.text = self.text[:self.cursor] + self.text[self.cursor + 1:]
    else:
        # Remember the position first: mutating self.text may influence
        # the cursor property's clamping on subsequent reads.
        keep = self.cursor
        self.text = self.text[:keep - 1] + self.text[keep:]
        self.cursor = keep - 1
<SYSTEM_TASK:>
Delete one word to the right or the left of the cursor.
<END_TASK>
<USER_TASK:>
Description:
def delete_one_word(self, word=RIGHT):
    """Delete a whole word on the given side of the cursor."""
    assert word in (self.RIGHT, self.LEFT)
    pos = self.cursor
    if word == self.RIGHT:
        # Cut up to (and including) the next space, or to the end of text.
        cut = self.text.find(' ', pos) + 1
        if not cut:
            cut = len(self.text)
        self.text = self.text[:pos] + self.text[cut:]
    else:
        # Cut back to the previous space, or to the very beginning.
        cut = self.text.rfind(' ', 0, pos)
        if cut == -1:
            cut = 0
        self.text = self.text[:cut] + self.text[pos:]
        self.cursor = cut
<SYSTEM_TASK:>
Add a letter at the cursor pos.
<END_TASK>
<USER_TASK:>
Description:
def add_letter(self, letter):
    """Insert a single character at the cursor position."""
    assert isinstance(letter, str)
    assert len(letter) == 1
    pos = self.cursor
    self.text = self.text[:pos] + letter + self.text[pos:]
    self.cursor = pos + 1
<SYSTEM_TASK:>
Update the text and position of cursor according to the event passed.
<END_TASK>
<USER_TASK:>
Description:
def update(self, event_or_list):
    """Update the text and cursor position according to the event(s) passed.

    Handles arrow keys (ctrl for word-wise movement), backspace/delete
    (ctrl for word-wise deletion) and printable characters (inserted at
    the cursor). Delegates to the parent class first.
    """
    event_or_list = super().update(event_or_list)
    for e in event_or_list:
        if e.type == KEYDOWN:
            if e.key == K_RIGHT:
                # BUG FIX: was ``e.mod * KMOD_CTRL`` (multiplication),
                # which is truthy whenever *any* modifier key is held;
                # the bitwise test matches the backspace/delete branches.
                if e.mod & KMOD_CTRL:
                    self.move_cursor_one_word(self.RIGHT)
                else:
                    self.move_cursor_one_letter(self.RIGHT)
            elif e.key == K_LEFT:
                # BUG FIX: same ``*`` -> ``&`` correction as above.
                if e.mod & KMOD_CTRL:
                    self.move_cursor_one_word(self.LEFT)
                else:
                    self.move_cursor_one_letter(self.LEFT)
            elif e.key == K_BACKSPACE:
                if self.cursor == 0:
                    continue
                if e.mod & KMOD_CTRL:
                    self.delete_one_word(self.LEFT)
                else:
                    self.delete_one_letter(self.LEFT)
            elif e.key == K_DELETE:
                if e.mod & KMOD_CTRL:
                    self.delete_one_word(self.RIGHT)
                else:
                    self.delete_one_letter(self.RIGHT)
            elif e.unicode != '' and e.unicode.isprintable():
                self.add_letter(e.unicode)
<SYSTEM_TASK:>
The text displayed instead of the real one.
<END_TASK>
<USER_TASK:>
Description:
def shawn_text(self):
    """The masked text displayed instead of the real one.

    In DOTS style, a bullet per character; otherwise a random string of
    printable glyphs of the same length, cached so it stays stable while
    the text length does not change.
    """
    # Reuse the cached mask as long as its length still matches the text.
    if len(self._shawn_text) == len(self):
        return self._shawn_text
    if self.style == self.DOTS:
        return chr(0x2022) * len(self)  # U+2022 bullet
    # Accepted code-point ranges (roughly: greek/cyrillic, latin
    # extended, printable ASCII) -- rejection-sample until one char per
    # text character has been produced.
    ranges = [
        (902, 1366),
        (192, 683),
        (33, 122)
    ]
    s = ''
    while len(s) < len(self.text):
        apolo = randint(33, 1366)
        for a, b in ranges:
            if a <= apolo <= b:
                s += chr(apolo)
                break
    self._shawn_text = s
    return s
<SYSTEM_TASK:>
Convert the name of a color into its RGB value
<END_TASK>
<USER_TASK:>
Description:
def name2rgb(name):
    """Translate a color name into an (r, g, b) tuple of 0-255 integers."""
    try:
        import colour
    except ImportError:
        raise ImportError('You need colour to be installed: pip install colour')
    parsed = colour.Color(name)
    channels = (parsed.red, parsed.green, parsed.blue)
    return tuple(int(c * 255) for c in channels)
<SYSTEM_TASK:>
Parse a command line string and return username, password, remote hostname and remote path.
<END_TASK>
<USER_TASK:>
Description:
def parse_username_password_hostname(remote_url):
    """Split a remote URL of the form ``[user[:password]@]host:path``.

    :param remote_url: A command line string.
    :return: A tuple (username, password, hostname, remote_path);
        username and password are ``None`` when absent.
    """
    assert remote_url
    assert ':' in remote_url
    username = password = None
    if '@' in remote_url:
        username, rest = remote_url.rsplit('@', 1)
    else:
        rest = remote_url
    hostname, remote_path = rest.split(':', 1)
    if username and ':' in username:
        username, password = username.split(':', 1)
    assert hostname
    assert remote_path
    return username, password, hostname, remote_path
<SYSTEM_TASK:>
Ask the SSH agent for a list of keys, and return it.
<END_TASK>
<USER_TASK:>
Description:
def get_ssh_agent_keys(logger):
    """
    Ask the SSH agent for a list of keys, and return it.

    :param logger: logger used to report agent problems.
    :return: A tuple (agent, keys); either element may be ``None`` when
        the agent is unavailable or offers no keys.
    """
    agent, agent_keys = None, None
    try:
        agent = paramiko.agent.Agent()
        _agent_keys = agent.get_keys()
        if not _agent_keys:
            agent.close()
            logger.error(
                "SSH agent didn't provide any valid key. Trying to continue..."
            )
        else:
            agent_keys = tuple(k for k in _agent_keys)
    except paramiko.SSHException:
        if agent:
            agent.close()
        agent = None
        logger.error("SSH agent speaks a non-compatible protocol. Ignoring it.")
    # BUG FIX: the original returned from a ``finally`` block, which
    # silently swallowed *every* in-flight exception (including
    # KeyboardInterrupt/SystemExit), not just paramiko.SSHException.
    return agent, agent_keys
<SYSTEM_TASK:>
Return True if the remote correspondent of local_path has to be deleted.
<END_TASK>
<USER_TASK:>
Description:
def _must_be_deleted(local_path, r_st):
"""Return True if the remote correspondent of local_path has to be deleted.
i.e. if it doesn't exists locally or if it has a different type from the remote one.""" |
# if the file doesn't exists
if not os.path.lexists(local_path):
return True
# or if the file type is different
l_st = os.lstat(local_path)
if S_IFMT(r_st.st_mode) != S_IFMT(l_st.st_mode):
return True
return False |
<SYSTEM_TASK:>
Upload local_path to remote_path and set permission and mtime.
<END_TASK>
<USER_TASK:>
Description:
def file_upload(self, local_path, remote_path, l_st):
    """Upload local_path to remote_path and set permission and mtime.

    :param l_st: stat result of the local file, forwarded to
        ``_match_modes`` to replicate mode/timestamps remotely.
    """
    self.sftp.put(local_path, remote_path)
    self._match_modes(remote_path, l_st)
<SYSTEM_TASK:>
Remove the remote directory node.
<END_TASK>
<USER_TASK:>
Description:
def remote_delete(self, remote_path, r_st):
    """Recursively remove the remote node at ``remote_path``."""
    if S_ISDIR(r_st.st_mode):
        # A directory must be emptied before rmdir can succeed.
        for entry in self.sftp.listdir_attr(remote_path):
            self.remote_delete(path_join(remote_path, entry.filename), entry)
        self.sftp.rmdir(remote_path)
        return
    # Plain files (and symlinks) are removed directly; a vanished file
    # is only logged, not fatal.
    try:
        self.sftp.remove(remote_path)
    except FileNotFoundError as e:
        self.logger.error(
            "error while removing {}. trace: {}".format(remote_path, e)
        )
<SYSTEM_TASK:>
Traverse the entire remote_path tree.
<END_TASK>
<USER_TASK:>
Description:
def check_for_deletion(self, relative_path=None):
    """Traverse the entire remote_path tree.
    Find files/directories that need to be deleted,
    not being present in the local folder.

    :param relative_path: path relative to the shared root; ``None``
        (the default) starts the traversal at the root itself.
    """
    if not relative_path:
        relative_path = str() # root of shared directory tree
    remote_path = path_join(self.remote_path, relative_path)
    local_path = path_join(self.local_path, relative_path)
    for remote_st in self.sftp.listdir_attr(remote_path):
        # lstat (unlike listdir_attr's stat) does not follow symlinks,
        # so a link is compared as a link rather than as its target.
        r_lstat = self.sftp.lstat(path_join(remote_path, remote_st.filename))
        inner_remote_path = path_join(remote_path, remote_st.filename)
        inner_local_path = path_join(local_path, remote_st.filename)
        # check if remote_st is a symlink
        # otherwise could delete file outside shared directory
        if S_ISLNK(r_lstat.st_mode):
            if self._must_be_deleted(inner_local_path, r_lstat):
                self.remote_delete(inner_remote_path, r_lstat)
            continue
        if self._must_be_deleted(inner_local_path, remote_st):
            self.remote_delete(inner_remote_path, remote_st)
        elif S_ISDIR(remote_st.st_mode):
            # Directory survives: recurse into it.
            self.check_for_deletion(
                path_join(relative_path, remote_st.filename)
            )
<SYSTEM_TASK:>
Run the sync.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """Run the sync.
    Confront the local and the remote directories and perform the needed changes.

    Exits the process (status 1) when the remote folder is missing and
    must not be created, or when it cannot be opened.
    """
    # Check if remote path is present
    try:
        self.sftp.stat(self.remote_path)
    except FileNotFoundError as e:
        if self.create_remote_directory:
            self.sftp.mkdir(self.remote_path)
            self.logger.info(
                "Created missing remote dir: '" + self.remote_path + "'")
        else:
            self.logger.error(
                "Remote folder does not exists. "
                "Add '-r' to create it if missing.")
            sys.exit(1)
    try:
        if self.delete:
            # First check for items to be removed
            self.check_for_deletion()
        # Now scan local for items to upload/create
        self.check_for_upload_create()
    except FileNotFoundError:
        # If this happens, probably the remote folder doesn't exist.
        self.logger.error(
            "Error while opening remote folder. Are you sure it does exist?")
        sys.exit(1)
<SYSTEM_TASK:>
tree unix command replacement.
<END_TASK>
<USER_TASK:>
Description:
def list_files(start_path):
    """Render a directory tree as text (replacement for the unix ``tree`` command)."""
    lines = [u'\n']
    for root, dirs, files in os.walk(start_path):
        depth = root.replace(start_path, '').count(os.sep)
        pad = ' ' * 4 * depth
        lines.append(u'{}{}/\n'.format(pad, os.path.basename(root)))
        child_pad = ' ' * 4 * (depth + 1)
        lines.extend(u'{}{}\n'.format(child_pad, name) for name in files)
    return u''.join(lines)
<SYSTEM_TASK:>
Create a nested dictionary that represents the folder structure of `start_path`.
<END_TASK>
<USER_TASK:>
Description:
def file_tree(start_path):
    """
    Build a nested dict mirroring the folder structure under ``start_path``.

    Files map to ``None``; directories map to dicts of their contents.
    Liberally adapted from
    http://code.activestate.com/recipes/577879-create-a-nested-dictionary-from-oswalk/
    """
    tree = {}
    root_dir = start_path.rstrip(os.sep)
    # Index of the first character after the parent of root_dir.
    prefix_len = root_dir.rfind(os.sep) + 1
    for path, dirs, files in os.walk(root_dir):
        parts = path[prefix_len:].split(os.sep)
        # Walk down the dict to the parent of the current folder.
        parent = reduce(dict.get, parts[:-1], tree)
        parent[parts[-1]] = dict.fromkeys(files)
    return tree
<SYSTEM_TASK:>
Override user environmental variables with custom one.
<END_TASK>
<USER_TASK:>
Description:
def override_env_variables():
    """Temporarily override user-related environment variables with "test".

    Generator intended to be driven as a context manager (e.g. wrapped
    with ``contextlib.contextmanager``): the first ``next()`` applies
    the override, exhausting the generator restores the previous state.
    """
    env_vars = ("LOGNAME", "USER", "LNAME", "USERNAME")
    old = [os.environ.get(v) for v in env_vars]
    for v in env_vars:
        os.environ[v] = "test"
    yield
    for var, value in zip(env_vars, old):
        if value is not None:
            os.environ[var] = value
        else:
            # BUG FIX: variables that did not exist before the override
            # were previously left set to "test"; remove them so the
            # environment is fully restored.
            os.environ.pop(var, None)
<SYSTEM_TASK:>
Get the configurations from .tldrrc and return it as a dict.
<END_TASK>
<USER_TASK:>
Description:
def get_config():
    """Get the configurations from .tldrrc and return it as a dict.

    Looks in ``$TLDR_CONFIG_DIR`` (or the home directory) for the file;
    exits the process with a message on any validation failure.
    """
    config_path = path.join(
        (os.environ.get('TLDR_CONFIG_DIR') or path.expanduser('~')),
        '.tldrrc')
    if not path.exists(config_path):
        sys.exit("Can't find config file at: {0}. You may use `tldr init` "
                 "to init the config file.".format(config_path))
    with io.open(config_path, encoding='utf-8') as f:
        try:
            config = yaml.safe_load(f)
        except yaml.scanner.ScannerError:
            sys.exit("The config file is not a valid YAML file.")
    # Every configured color must come from this fixed palette.
    supported_colors = ['black', 'red', 'green', 'yellow', 'blue',
                        'magenta', 'cyan', 'white']
    if not set(config['colors'].values()).issubset(set(supported_colors)):
        sys.exit("Unsupported colors in config file: {0}.".format(
            ', '.join(set(config['colors'].values()) - set(supported_colors))))
    if not path.exists(config['repo_directory']):
        sys.exit("Can't find the tldr repo, check the `repo_directory` "
                 "setting in config file.")
    return config
<SYSTEM_TASK:>
Parse the man page and return the parsed lines.
<END_TASK>
<USER_TASK:>
Description:
def parse_man_page(command, platform):
    """Locate the tldr page for ``command`` and return its parsed lines."""
    location = find_page_location(command, platform)
    return parse_page(location)
<SYSTEM_TASK:>
Find the command man page in the pages directory.
<END_TASK>
<USER_TASK:>
Description:
def find_page_location(command, specified_platform):
    """Find the command man page in the pages directory.

    Resolves the platform (explicit argument wins over the configured
    default, with 'common' as fallback) and exits the process when the
    command or platform is unsupported.
    """
    repo_directory = get_config()['repo_directory']
    default_platform = get_config()['platform']
    command_platform = (
        specified_platform if specified_platform else default_platform)
    # The repo ships an index of all known commands and their platforms.
    with io.open(path.join(repo_directory, 'pages/index.json'),
                 encoding='utf-8') as f:
        index = json.load(f)
    command_list = [item['name'] for item in index['commands']]
    if command not in command_list:
        sys.exit(
            ("Sorry, we don't support command: {0} right now.\n"
             "You can file an issue or send a PR on github:\n"
             "    https://github.com/tldr-pages/tldr").format(command))
    supported_platforms = index['commands'][
        command_list.index(command)]['platform']
    if command_platform in supported_platforms:
        platform = command_platform
    elif 'common' in supported_platforms:
        platform = 'common'
    else:
        platform = ''
    if not platform:
        sys.exit(
            ("Sorry, command {0} is not supported on your platform.\n"
             "You can file an issue or send a PR on github:\n"
             "    https://github.com/tldr-pages/tldr").format(command))
    page_path = path.join(path.join(repo_directory, 'pages'),
                          path.join(platform, command + '.md'))
    return page_path
<SYSTEM_TASK:>
Locate the command's man page.
<END_TASK>
<USER_TASK:>
Description:
def locate(command, on):
    """Print the location of the command's man page."""
    click.echo(find_page_location(command, on))
<SYSTEM_TASK:>
Configure a mapping to the given attrname.
<END_TASK>
<USER_TASK:>
Description:
def map_to(self, attrname, tablename=None, selectable=None,
           schema=None, base=None, mapper_args=util.immutabledict()):
    """Configure a mapping to the given attrname.

    This is the "master" method that can be used to create any
    configuration.

    :param attrname: String attribute name which will be
      established as an attribute on this :class:.`.SQLSoup`
      instance.
    :param base: a Python class which will be used as the
      base for the mapped class. If ``None``, the "base"
      argument specified by this :class:`.SQLSoup`
      instance's constructor will be used, which defaults to
      ``object``.
    :param mapper_args: Dictionary of arguments which will
      be passed directly to :func:`.orm.mapper`.
    :param tablename: String name of a :class:`.Table` to be
      reflected. If a :class:`.Table` is already available,
      use the ``selectable`` argument. This argument is
      mutually exclusive versus the ``selectable`` argument.
    :param selectable: a :class:`.Table`, :class:`.Join`, or
      :class:`.Select` object which will be mapped. This
      argument is mutually exclusive versus the ``tablename``
      argument.
    :param schema: String schema name to use if the
      ``tablename`` argument is present.

    :raises SQLSoupError: when the attribute is already mapped or the
      selectable has no primary key.
    :raises ArgumentError: on mutually-exclusive or missing arguments.
    """
    # Each attrname may be mapped only once per SQLSoup instance.
    if attrname in self._cache:
        raise SQLSoupError(
            "Attribute '%s' is already mapped to '%s'" % (
                attrname,
                class_mapper(self._cache[attrname]).mapped_table
            ))
    # Argument validation: exactly one of tablename/selectable, with
    # schema only meaningful alongside tablename.
    if tablename is not None:
        if not isinstance(tablename, basestring):
            raise ArgumentError("'tablename' argument must be a string."
                                )
        if selectable is not None:
            raise ArgumentError("'tablename' and 'selectable' "
                                "arguments are mutually exclusive")
        # Reflect the table from the database on demand.
        selectable = Table(tablename,
                           self._metadata,
                           autoload=True,
                           autoload_with=self.bind,
                           schema=schema or self.schema)
    elif schema:
        raise ArgumentError("'tablename' argument is required when "
                            "using 'schema'.")
    elif selectable is not None:
        if not isinstance(selectable, expression.FromClause):
            raise ArgumentError("'selectable' argument must be a "
                                "table, select, join, or other "
                                "selectable construct.")
    else:
        raise ArgumentError("'tablename' or 'selectable' argument is "
                            "required.")
    # ORM mapping requires a primary key, either on the selectable or
    # supplied explicitly through mapper_args.
    if not selectable.primary_key.columns and not \
            'primary_key' in mapper_args:
        if tablename:
            raise SQLSoupError(
                "table '%s' does not have a primary "
                "key defined" % tablename)
        else:
            raise SQLSoupError(
                "selectable '%s' does not have a primary "
                "key defined" % selectable)
    mapped_cls = _class_for_table(
        self.session,
        self.engine,
        selectable,
        base or self.base,
        mapper_args
    )
    self._cache[attrname] = mapped_cls
    return mapped_cls
<SYSTEM_TASK:>
Map a selectable directly.
<END_TASK>
<USER_TASK:>
Description:
def map(self, selectable, base=None, **mapper_args):
    """Map a selectable directly.

    The class and its mapping are not cached and will
    be discarded once dereferenced (as of 0.6.6).

    :param selectable: an :func:`.expression.select` construct.
    :param base: a Python class used as the base for the mapped class;
      ``None`` falls back to this instance's configured base
      (``object`` by default).
    :param mapper_args: Dictionary of arguments passed directly to
      :func:`.orm.mapper`.
    """
    mapped_cls = _class_for_table(
        self.session, self.engine, selectable,
        base or self.base, mapper_args)
    return mapped_cls
<SYSTEM_TASK:>
Map a selectable directly, wrapping the
<END_TASK>
<USER_TASK:>
Description:
def with_labels(self, selectable, base=None, **mapper_args):
    """Map a selectable directly, wrapping the
    selectable in a subquery with labels.

    The class and its mapping are not cached and will
    be discarded once dereferenced (as of 0.6.6).

    :param selectable: an :func:`.expression.select` construct.
    :param base: a Python class which will be used as the
      base for the mapped class. If ``None``, the "base"
      argument specified by this :class:`.SQLSoup`
      instance's constructor will be used, which defaults to
      ``object``.
    :param mapper_args: Dictionary of arguments which will
      be passed directly to :func:`.orm.mapper`.
    """
    # TODO give meaningful aliases
    # NOTE(review): relies on the private SQLAlchemy helper
    # ``expression._clause_element_as_expr`` -- may break across
    # SQLAlchemy versions; confirm against the pinned version.
    return self.map(
        expression._clause_element_as_expr(selectable).
        select(use_labels=True).
        alias('foo'), base=base, **mapper_args)
<SYSTEM_TASK:>
return the nearest n features strictly to the left of a Feature f.
<END_TASK>
<USER_TASK:>
Description:
def left(self, f, n=1):
    """return the nearest n features strictly to the left of a Feature f.
    Overlapping features are not considered as to the left.
    f: a Feature object
    n: the number of features to return
    """
    intervals = self.intervals[f.chrom]
    if intervals == []: return []
    # Window of candidate intervals: starts are sorted, so anything
    # starting at/after f.start is to the right of the window.
    iright = binsearch_left_start(intervals, f.start, 0 , len(intervals)) + 1
    # NOTE(review): the (0, 0) lo/hi bounds here look suspicious for a
    # binary search -- confirm binsearch_left_start's signature.
    ileft = binsearch_left_start(intervals, f.start - self.max_len[f.chrom] - 1, 0, 0)
    # Keep only intervals that end strictly before f starts (no overlap).
    results = sorted((distance(other, f), other) for other in intervals[ileft:iright] if other.end < f.start and distance(f, other) != 0)
    if len(results) == n:
        return [r[1] for r in results]
    # have to do some extra work here since intervals are sorted
    # by starts, and we dont know which end may be around...
    # in this case, we got some extras, just return as many as
    # needed once we see a gap in distances.
    for i in range(n, len(results)):
        if results[i - 1][0] != results[i][0]:
            return [r[1] for r in results[:i]]
    if ileft == 0:
        return [r[1] for r in results]
    # here, didn't get enough, so move left and try again.
    # NOTE(review): deliberate crash marker -- the "widen the window and
    # retry" branch is unimplemented; any caller reaching this path gets
    # a ZeroDivisionError. Needs a real implementation.
    1/0
<SYSTEM_TASK:>
return the nearest n features strictly to the right of a Feature f.
<END_TASK>
<USER_TASK:>
Description:
def right(self, f, n=1):
    """return the nearest n features strictly to the right of a Feature f.
    Overlapping features are not considered as to the right.
    f: a Feature object
    n: the number of features to return
    """
    intervals = self.intervals[f.chrom]
    ilen = len(intervals)
    # First interval whose start lies beyond f.end.
    iright = binsearch_right_end(intervals, f.end, 0, ilen)
    results = []
    while iright < ilen:
        i = len(results)
        # Collected more than n: stop once the distance changes, so that
        # equally-distant (tied) features are all included.
        if i > n:
            if distance(f, results[i - 1]) != distance(f, results[i - 2]):
                return results[:i - 1]
        other = intervals[iright]
        iright += 1
        # distance 0 means overlap -- not "to the right" by definition.
        if distance(other, f) == 0: continue
        results.append(other)
    return results
<SYSTEM_TASK:>
find n upstream features where upstream is determined by
<END_TASK>
<USER_TASK:>
Description:
def upstream(self, f, n=1):
    """find n upstream features where upstream is determined by
    the strand of the query Feature f
    Overlapping features are not considered.
    f: a Feature object
    n: the number of features to return
    """
    # "Upstream" flips with strand: minus-strand features read right-to-left.
    return self.right(f, n) if f.strand == -1 else self.left(f, n)
<SYSTEM_TASK:>
find n downstream features where downstream is determined by
<END_TASK>
<USER_TASK:>
Description:
def downstream(self, f, n=1):
    """find n downstream features where downstream is determined by
    the strand of the query Feature f
    Overlapping features are not considered.
    f: a Feature object
    n: the number of features to return
    """
    # "Downstream" flips with strand: minus-strand features read right-to-left.
    return self.left(f, n) if f.strand == -1 else self.right(f, n)
<SYSTEM_TASK:>
return the sequence for a region using the UCSC DAS
<END_TASK>
<USER_TASK:>
Description:
def sequence(db, chrom, start, end):
    """
    return the sequence for a region using the UCSC DAS
    server. note the start is 1-based
    each feature will have it's own .sequence method which sends
    the correct start and end to this function.

    >>> sequence('hg18', 'chr2', 2223, 2230)
    'caacttag'
    """
    url = ("http://genome.ucsc.edu/cgi-bin/das/%s"
           "/dna?segment=%s:%i,%i") % (db, chrom, start, end)
    xml = U.urlopen(url).read()
    return _seq_from_xml(xml)
<SYSTEM_TASK:>
alter the table to work between different
<END_TASK>
<USER_TASK:>
Description:
def set_table(genome, table, table_name, connection_string, metadata):
    """
    alter the table to work between different
    dialects

    Reflects the table from the source database, then rewrites
    MySQL-specific column types and index names so the definition is
    valid for the destination dialect named in ``connection_string``.
    """
    table = Table(table_name, genome._metadata, autoload=True,
                  autoload_with=genome.bind, extend_existing=True)
    #print "\t".join([c.name for c in table.columns])
    # need to prefix the indexes with the table name to avoid collisions
    for i, idx in enumerate(table.indexes):
        idx.name = table_name + "." + idx.name + "_ix" + str(i)
    cols = []
    for i, col in enumerate(table.columns):
        # convert mysql-specific types to varchar
        #print col.name, col.type, isinstance(col.type, ENUM)
        if isinstance(col.type, (LONGBLOB, ENUM)):
            if 'sqlite' in connection_string:
                col.type = VARCHAR()
            elif 'postgres' in connection_string:
                if isinstance(col.type, ENUM):
                    #print dir(col)
                    # Postgres has native enums; recreate one per column.
                    col.type = PG_ENUM(*col.type.enums, name=col.name,
                                       create_type=True)
                else:
                    col.type = VARCHAR()
        elif str(col.type) == "VARCHAR" \
                and ("mysql" in connection_string \
                or "postgres" in connection_string):
            # These dialects require an explicit VARCHAR length.
            if col.type.length is None:
                col.type.length = 48 if col.name != "description" else None
        if not "mysql" in connection_string:
            # MySQL SET(...) columns have no portable equivalent.
            if str(col.type).lower().startswith("set("):
                col.type = VARCHAR(15)
        cols.append(col)
    table = Table(table_name, genome._metadata, *cols,
                  autoload_replace=True, extend_existing=True)
    return table
<SYSTEM_TASK:>
mirror a set of `tables` from `dest_url`
<END_TASK>
<USER_TASK:>
Description:
def mirror(self, tables, dest_url):
    """
    miror a set of `tables` from `dest_url`

    Returns a new Genome object

    Parameters
    ----------
    tables : list
        an iterable of tables
    dest_url: str
        a dburl string, e.g. 'sqlite:///local.db'
    """
    # Imported lazily to avoid a circular import at module load time.
    from mirror import mirror as do_mirror
    return do_mirror(self, tables, dest_url)
<SYSTEM_TASK:>
create a pandas dataframe from a table or query
<END_TASK>
<USER_TASK:>
Description:
def dataframe(self, table):
    """
    create a pandas dataframe from a table or query

    Parameters
    ----------
    table : str, table or query
        the name of a table in this database, the table itself, or a
        query over it; column names are taken from the first record's
        underlying table.
    """
    from pandas import DataFrame
    # A string names a table attribute on this Genome instance.
    if isinstance(table, six.string_types):
        table = getattr(self, table)
    # Grab one record to discover the column layout.
    try:
        rec = table.first()
    except AttributeError:
        rec = table[0]
    # Queries expose .all(); plain iterables are materialised directly.
    if hasattr(table, "all"):
        records = table.all()
    else:
        records = [tuple(t) for t in table]
    cols = [c.name for c in rec._table.columns]
    return DataFrame.from_records(records, columns=cols)
<SYSTEM_TASK:>
open a web-browser to the DAVID online enrichment tool
<END_TASK>
<USER_TASK:>
Description:
def david_go(refseq_list, annot=('SP_PIR_KEYWORDS', 'GOTERM_BP_FAT',
            'GOTERM_CC_FAT', 'GOTERM_MF_FAT')):
    """
    open a web-browser to the DAVID online enrichment tool

    Parameters
    ----------
    refseq_list : list
        list of refseq names to check for enrichment

    annot : list
        iterable of DAVID annotations to check for enrichment
    """
    URL = "http://david.abcc.ncifcrf.gov/api.jsp?type=REFSEQ_MRNA&ids=%s&tool=term2term&annot="
    import webbrowser
    # De-duplicate the ids before building the query string.
    ids = ",".join(set(refseq_list))
    webbrowser.open(URL % ids + ",".join(annot))
<SYSTEM_TASK:>
perform an efficient spatial query using the bin column if available.
<END_TASK>
<USER_TASK:>
Description:
def bin_query(self, table, chrom, start, end):
    """
    perform an efficient spatial query using the bin column if available.
    The possible bins are calculated from the `start` and `end` sent to
    this function.

    Parameters
    ----------
    table : str or table
       table to query

    chrom : str
       chromosome for the query

    start : int
       0-based start postion

    end : int
       0-based end position
    """
    if isinstance(table, six.string_types):
        table = getattr(self, table)
    # Recover the underlying Table from either a mapped table or a query.
    try:
        tbl = table._table
    except AttributeError:
        tbl = table.column_descriptions[0]['type']._table
    q = table.filter(tbl.c.chrom == chrom)
    # Use the UCSC binning scheme when the table carries a bin column;
    # skip it when the region spans too many bins to be worthwhile.
    if hasattr(tbl.c, "bin"):
        bins = Genome.bins(start, end)
        if len(bins) < 100:
            q = q.filter(tbl.c.bin.in_(bins))
    # Gene-style tables use txStart/txEnd; interval tables use chromStart/chromEnd.
    if hasattr(tbl.c, "txStart"):
        return q.filter(tbl.c.txStart <= end).filter(tbl.c.txEnd >= start)
    return q.filter(tbl.c.chromStart <= end).filter(tbl.c.chromEnd >= start)
<SYSTEM_TASK:>
Return k-nearest upstream features
<END_TASK>
<USER_TASK:>
Description:
def upstream(self, table, chrom_or_feat, start=None, end=None, k=1):
    """
    Return k-nearest upstream features

    Parameters
    ----------
    table : str or table
        table against which to query
    chrom_or_feat : str or feat
        either a chromosome, e.g. 'chr3' or a feature with .chrom, .start,
        .end attributes
    start : int
        if `chrom_or_feat` is a chrom, then this must be the integer start
    end : int
        if `chrom_or_feat` is a chrom, then this must be the integer end
    k : int
        number of upstream neighbors to return
    """
    hits = self.knearest(table, chrom_or_feat, start, end, k, "up")
    # when a feature was passed in, take the interval from the feature
    end = getattr(chrom_or_feat, "end", end)
    start = getattr(chrom_or_feat, "start", start)
    if getattr(chrom_or_feat, "strand", "+") == "-":
        # minus strand: upstream lies at higher coordinates
        return [f for f in hits if f.end > start]
    return [f for f in hits if f.start < end]
<SYSTEM_TASK:>
Return k-nearest features
<END_TASK>
<USER_TASK:>
Description:
def knearest(self, table, chrom_or_feat, start=None, end=None, k=1,
             _direction=None):
    """
    Return k-nearest features

    Parameters
    ----------
    table : str or table
        table against which to query
    chrom_or_feat : str or feat
        either a chromosome, e.g. 'chr3' or a feature with .chrom, .start,
        .end attributes
    start : int
        if `chrom_or_feat` is a chrom, then this must be the integer start
    end : int
        if `chrom_or_feat` is a chrom, then this must be the integer end
    k : int
        number of downstream neighbors to return
    _direction : (None, "up", "down")
        internal (don't use this)
    """
    assert _direction in (None, "up", "down")
    # they sent in a feature
    if start is None:
        assert end is None
        chrom, start, end = chrom_or_feat.chrom, chrom_or_feat.start, chrom_or_feat.end
        # if the query is directional and the feature has a strand, flip
        # the direction for minus-strand features.
        # BUGFIX: both branches previously yielded "up", so "up" queries
        # were never flipped to "down".
        if _direction in ("up", "down") and getattr(chrom_or_feat,
                                                    "strand", None) == "-":
            _direction = "up" if _direction == "down" else "down"
    else:
        chrom = chrom_or_feat
    # int() instead of the python-2-only long(); python 3 ints are unbounded
    qstart, qend = int(start), int(end)
    res = self.bin_query(table, chrom, qstart, qend)
    # widen the search window (geometrically) until at least k hits exist
    i, change = 1, 350
    try:
        while res.count() < k:
            if _direction in (None, "up"):
                if qstart == 0 and _direction == "up":
                    break
                qstart = max(0, qstart - change)
            if _direction in (None, "down"):
                qend += change
            i += 1
            change *= (i + 5)
            res = self.bin_query(table, chrom, qstart, qend)
    except BigException:
        return []

    def dist(f):
        # 0 when f overlaps the query interval, otherwise the gap size
        if start > f.end:
            return start - f.end
        if f.start > end:
            return f.start - end
        return 0

    # sort on the distance only: on ties, plain tuple sorting would fall
    # through to comparing feature objects, which raises TypeError on
    # python 3 for unorderable types. Stable sort keeps query order on ties.
    pairs = sorted(((dist(f), f) for f in res), key=lambda p: p[0])
    if not pairs:
        return []
    dists, feats = zip(*pairs)
    if len(feats) == k:
        return feats
    if k > len(feats):  # had to break because of end of chrom
        k = len(feats)
    ndist = dists[k - 1]
    # include all features that are the same distance as the kth closest
    # feature (accounts for ties).
    while k < len(feats) and dists[k] == ndist:
        k += 1
    return feats[:k]
<SYSTEM_TASK:>
annotate a file with a number of tables
<END_TASK>
<USER_TASK:>
Description:
def annotate(self, fname, tables, feature_strand=False, in_memory=False,
             header=None, out=sys.stdout, parallel=False):
    """
    annotate a file with a number of tables

    Parameters
    ----------
    fname : str or file
        file name or file-handle
    tables : list
        list of tables with which to annotate `fname`
    feature_strand : bool
        if this is True, then the up/downstream designations are based on
        the features in `tables` rather than the features in `fname`
    in_memory : bool
        if True, then tables are read into memory. This usually makes the
        annotation much faster if there are more than 500 features in
        `fname` and the number of features in the table is less than 100K.
    header : str
        header to print out (if True, use existing header)
    out : file
        where to print output
    parallel : bool
        if True, use multiprocessing library to execute the annotation of
        each chromosome in parallel. Uses more memory.
    """
    # imported lazily to avoid a circular import at module load time
    from .annotate import annotate as _annotate
    return _annotate(self, fname, tables, feature_strand, in_memory,
                     header=header, out=out, parallel=parallel)
<SYSTEM_TASK:>
Get all the bin numbers for a particular interval defined by
<END_TASK>
<USER_TASK:>
Description:
def bins(start, end):
    """
    Get all the bin numbers for a particular interval defined by
    (start, end]

    Uses the standard UCSC binning scheme (bin sizes 128kb .. 512Mb).
    Raises BigException for intervals >= 2**29 bases, which would need
    the extended scheme.
    """
    # guard clause; the previous version assigned the extended-scheme
    # offsets [4681, 585, 73, 9, 1] *after* raising, i.e. dead code.
    if end - start >= 536870912:
        raise BigException
    offsets = [585, 73, 9, 1]
    bin_first_shift = 17  # 128kb: size of the smallest bin
    bin_next_shift = 3    # 8 bins fold into each next-larger bin
    start = start >> bin_first_shift
    end = (end - 1) >> bin_first_shift
    bins = [1]
    for offset in offsets:
        bins.extend(range(offset + start, offset + end + 1))
        start >>= bin_next_shift
        end >>= bin_next_shift
    return frozenset(bins)
<SYSTEM_TASK:>
Look for filename in all MEDIA_ROOTS, and return the first one found.
<END_TASK>
<USER_TASK:>
Description:
def _find_filepath_in_roots(filename):
    """Look for filename in all MEDIA_ROOTS, and return the first one found."""
    for root in settings.DJANGO_STATIC_MEDIA_ROOTS:
        candidate = _filename2filepath(filename, root)
        if os.path.isfile(candidate):
            return candidate, root
    # not under any DJANGO_STATIC_MEDIA_ROOT; in DEBUG mode fall back to
    # the staticfiles finders of the installed apps
    if settings.DEBUG:
        try:
            from django.contrib.staticfiles import finders
            found = finders.find(filename)
            if found:
                return found, os.path.split(found)[0]
        except ImportError:
            pass
    return None, None
<SYSTEM_TASK:>
Return a new filename to use as the combined file name for a
<END_TASK>
<USER_TASK:>
Description:
def default_combine_filenames_generator(filenames, max_length=40):
    """Return a new filename to use as the combined file name for a
    bunch of files.

    A precondition is that they all have the same file extension.
    Given that the list of files can have different paths, we aim to use the
    most common path.

    Example:
      /somewhere/else/foo.js
      /somewhere/bar.js
      /somewhere/different/too/foobar.js
    The result will be
      /somewhere/foo_bar_foobar.js

    Another thing to note, if the filenames have timestamps in them, combine
    them all and use the highest timestamp.

    Parameters
    ----------
    filenames : list of str
        paths to combine; must all share one file extension
    max_length : int
        the combined stem (before the extension) is truncated to this length

    Raises
    ------
    ValueError
        if the filenames do not all share the same extension
    """
    # matches an embedded 10-digit unix timestamp like ".1285135951."
    # (raw string: '\d' is an invalid escape sequence in a plain literal)
    timestamp_re = re.compile(r'\.\d{10}\.')
    path = None
    names = []
    extension = None
    timestamps = []
    for filename in filenames:
        name = os.path.basename(filename)
        this_ext = os.path.splitext(name)[1]
        # compare against None (not truthiness) so files with *no*
        # extension are still checked for consistency
        if extension is None:
            extension = this_ext
        elif this_ext != extension:
            raise ValueError("Can't combine multiple file extensions")
        for each in timestamp_re.finditer(name):
            timestamps.append(int(each.group().replace('.', '')))
            name = name.replace(each.group(), '.')
        names.append(os.path.splitext(name)[0])
        # keep the shortest directory seen so far as the "most common" path
        dirname = os.path.dirname(filename)
        if path is None or len(dirname) < len(path):
            path = dirname
    new_filename = '_'.join(names)
    if timestamps:
        new_filename += ".%s" % max(timestamps)
    # truncate the stem before re-attaching the extension
    new_filename = new_filename[:max_length] + extension
    return os.path.join(path, new_filename)
<SYSTEM_TASK:>
check for overlap with the other interval
<END_TASK>
<USER_TASK:>
Description:
def overlaps(self, other):
    """
    check for overlap with the other interval
    """
    # intervals on different chromosomes never overlap; touching
    # end-points (start == other.end) do not count as overlap
    return (self.chrom == other.chrom
            and self.start < other.end
            and other.start < self.end)
<SYSTEM_TASK:>
check if this is upstream of the `other` interval taking the strand of
<END_TASK>
<USER_TASK:>
Description:
def is_upstream_of(self, other):
    """
    check if this is upstream of the `other` interval taking the strand of
    the other interval into account
    """
    if self.chrom != other.chrom:
        # different chromosomes: relationship is undefined
        return None
    plus = getattr(other, "strand", None) == "+"
    # on the minus strand (or unstranded), upstream means entirely to
    # the right of `other`
    return self.end <= other.start if plus else self.start >= other.end
<SYSTEM_TASK:>
return a list of features for the gene features of this object.
<END_TASK>
<USER_TASK:>
Description:
def gene_features(self):
    """
    return a list of features for the gene features of this object.
    This would include exons, introns, utrs, etc.
    """
    name, strand, chrom = self.gene_name, self.strand, self.chrom
    rows = [(chrom, self.start, self.end, name, strand, 'gene')]
    for attr in ('introns', 'exons', 'utr5', 'utr3', 'cdss'):
        # singular label: 'exons' -> 'exon'; 'utr5'/'utr3' stay as-is
        label = attr[:-1] if attr.endswith('s') else attr
        spans = getattr(self, attr)
        if spans is None or all(s is None for s in spans):
            continue
        if not isinstance(spans, list):
            spans = [spans]
        for s, e in spans:
            rows.append((chrom, s, e, name, strand, label))
    tss = self.tss(down=1)
    if tss is not None:
        rows.append((chrom, tss[0], tss[1], name, strand, 'tss'))
    prom = self.promoter()
    rows.append((chrom, prom[0], prom[1], name, strand, 'promoter'))
    # order all features by their start coordinate
    return sorted(rows, key=lambda row: row[1])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.