<SYSTEM_TASK:>
Check that the used custom interpreters are still valid.
<END_TASK>
<USER_TASK:>
Description:
def validate_custom_interpreters_list(self):
    """Check that the used custom interpreters are still valid."""
|
custom_list = self.get_option('custom_interpreters_list')
valid_custom_list = []
for value in custom_list:
    if (osp.isfile(value) and programs.is_python_interpreter(value)
            and value != get_python_executable()):
        valid_custom_list.append(value)
self.set_option('custom_interpreters_list', valid_custom_list)
|
<SYSTEM_TASK:>
Fills the panel background using QPalette.
<END_TASK>
<USER_TASK:>
Description:
def paintEvent(self, event):
    """Fills the panel background using QPalette."""
|
if self.isVisible() and self.position != self.Position.FLOATING:
    # fill background
    self._background_brush = QBrush(QColor(
        self.editor.sideareas_color))
    self._foreground_pen = QPen(QColor(
        self.palette().windowText().color()))
    painter = QPainter(self)
    painter.fillRect(event.rect(), self._background_brush)
|
<SYSTEM_TASK:>
Set geometry for floating panels.
<END_TASK>
<USER_TASK:>
Description:
def set_geometry(self, crect):
    """Set geometry for floating panels.

    Normally you don't need to override this method, you should override
    `geometry` instead.
    """
|
x0, y0, width, height = self.geometry()
if width is None:
    width = crect.width()
if height is None:
    height = crect.height()

# Calculate editor coordinates with their offsets
offset = self.editor.contentOffset()
x = self.editor.blockBoundingGeometry(self.editor.firstVisibleBlock())\
    .translated(offset.x(), offset.y()).left() \
    + self.editor.document().documentMargin() \
    + self.editor.panels.margin_size(Panel.Position.LEFT)
y = crect.top() + self.editor.panels.margin_size(Panel.Position.TOP)

self.setGeometry(QRect(x+x0, y+y0, width, height))
|
<SYSTEM_TASK:>
Configure associated namespace browser widget
<END_TASK>
<USER_TASK:>
Description:
def configure_namespacebrowser(self):
    """Configure associated namespace browser widget"""
|
# Update namespace view
self.sig_namespace_view.connect(lambda data:
    self.namespacebrowser.process_remote_view(data))

# Update properties of variables
self.sig_var_properties.connect(lambda data:
    self.namespacebrowser.set_var_properties(data))
|
<SYSTEM_TASK:>
Reimplemented to handle communications between Spyder
<END_TASK>
<USER_TASK:>
Description:
def _handle_execute_reply(self, msg):
    """
    Reimplemented to handle communications between Spyder
    and the kernel
    """
|
msg_id = msg['parent_header']['msg_id']
info = self._request_info['execute'].get(msg_id)
# Unset the reading flag: if execution finished, raw_input can't
# still be pending.
self._reading = False

# Refresh namespacebrowser after the kernel starts running
exec_count = msg['content'].get('execution_count', '')
if exec_count == 0 and self._kernel_is_starting:
    if self.namespacebrowser is not None:
        self.set_namespace_view_settings()
        self.refresh_namespacebrowser()
    self._kernel_is_starting = False
    self.ipyclient.t0 = time.monotonic()

# Handle silent execution of kernel methods
if info and info.kind == 'silent_exec_method' and not self._hidden:
    self.handle_exec_method(msg)
    self._request_info['execute'].pop(msg_id)
else:
    super(NamepaceBrowserWidget, self)._handle_execute_reply(msg)
|
<SYSTEM_TASK:>
Reimplemented to refresh the namespacebrowser after kernel
<END_TASK>
<USER_TASK:>
Description:
def _handle_status(self, msg):
    """
    Reimplemented to refresh the namespacebrowser after kernel
    restarts
    """
|
state = msg['content'].get('execution_state', '')
msg_type = msg['parent_header'].get('msg_type', '')
if state == 'starting':
    # This is needed to show the time a kernel
    # has been alive in each console.
    self.ipyclient.t0 = time.monotonic()
    self.ipyclient.timer.timeout.connect(self.ipyclient.show_time)
    self.ipyclient.timer.start(1000)

    # This handles restarts when the kernel dies
    # unexpectedly
    if not self._kernel_is_starting:
        self._kernel_is_starting = True
elif state == 'idle' and msg_type == 'shutdown_request':
    # This handles restarts asked by the user
    if self.namespacebrowser is not None:
        self.set_namespace_view_settings()
        self.refresh_namespacebrowser()
    self.ipyclient.t0 = time.monotonic()
else:
    super(NamepaceBrowserWidget, self)._handle_status(msg)
|
<SYSTEM_TASK:>
If this is the first time the plugin is shown, perform actions to
<END_TASK>
<USER_TASK:>
Description:
def initialize_plugin_in_mainwindow_layout(self):
    """
    If this is the first time the plugin is shown, perform actions to
    initialize plugin position in Spyder's window layout.

    Use on_first_registration to define the actions to be run
    by your plugin
    """
|
if self.get_option('first_time', True):
    try:
        self.on_first_registration()
    except NotImplementedError:
        return
    self.set_option('first_time', False)
|
<SYSTEM_TASK:>
Update plugin title, i.e. dockwidget or window title
<END_TASK>
<USER_TASK:>
Description:
def update_plugin_title(self):
    """Update plugin title, i.e. dockwidget or window title"""
|
if self.dockwidget is not None:
    win = self.dockwidget
elif self.undocked_window is not None:
    win = self.undocked_window
else:
    return
win.setWindowTitle(self.get_plugin_title())
|
<SYSTEM_TASK:>
Add to parent QMainWindow as a dock widget
<END_TASK>
<USER_TASK:>
Description:
def create_dockwidget(self):
    """Add to parent QMainWindow as a dock widget"""
|
# Creating dock widget
dock = SpyderDockWidget(self.get_plugin_title(), self.main)

# Set properties
dock.setObjectName(self.__class__.__name__ + "_dw")
dock.setAllowedAreas(self.ALLOWED_AREAS)
dock.setFeatures(self.FEATURES)
dock.setWidget(self)
self.update_margins()
dock.visibilityChanged.connect(self.visibility_changed)
dock.topLevelChanged.connect(self.on_top_level_changed)
dock.sig_plugin_closed.connect(self.plugin_closed)
self.dockwidget = dock
if self.shortcut is not None:
    sc = QShortcut(QKeySequence(self.shortcut), self.main,
                   self.switch_to_plugin)
    self.register_shortcut(sc, "_", "Switch to %s" % self.CONF_SECTION)
return (dock, self.LOCATION)
|
<SYSTEM_TASK:>
Create configuration dialog box page widget
<END_TASK>
<USER_TASK:>
Description:
def create_configwidget(self, parent):
    """Create configuration dialog box page widget"""
|
if self.CONFIGWIDGET_CLASS is not None:
    configwidget = self.CONFIGWIDGET_CLASS(self, parent)
    configwidget.initialize()
    return configwidget
|
<SYSTEM_TASK:>
Return plugin font option.
<END_TASK>
<USER_TASK:>
Description:
def get_plugin_font(self, rich_text=False):
    """
    Return plugin font option.

    All plugins in Spyder use a global font. This is a convenience method
    for plugins that need a size delta relative to the default size.
    """
|
if rich_text:
    option = 'rich_font'
    font_size_delta = self.RICH_FONT_SIZE_DELTA
else:
    option = 'font'
    font_size_delta = self.FONT_SIZE_DELTA

return get_font(option=option, font_size_delta=font_size_delta)
|
<SYSTEM_TASK:>
Show message in main window's status bar
<END_TASK>
<USER_TASK:>
Description:
def show_message(self, message, timeout=0):
    """Show message in main window's status bar"""
|
self.main.statusBar().showMessage(message, timeout)
|
<SYSTEM_TASK:>
Associate a toggle view action with each plugin
<END_TASK>
<USER_TASK:>
Description:
def create_toggle_view_action(self):
    """Associate a toggle view action with each plugin"""
|
title = self.get_plugin_title()
if self.CONF_SECTION == 'editor':
    title = _('Editor')
if self.shortcut is not None:
    action = create_action(self, title,
                           toggled=lambda checked: self.toggle_view(checked),
                           shortcut=QKeySequence(self.shortcut),
                           context=Qt.WidgetShortcut)
else:
    action = create_action(self, title, toggled=lambda checked:
                           self.toggle_view(checked))
self.toggle_view_action = action
|
<SYSTEM_TASK:>
Close QMainWindow instance that contains this plugin.
<END_TASK>
<USER_TASK:>
Description:
def close_window(self):
    """Close QMainWindow instance that contains this plugin."""
|
if self.undocked_window is not None:
    self.undocked_window.close()
    self.undocked_window = None

    # Oddly, these actions can appear disabled after the Dock
    # action is pressed
    self.undock_action.setDisabled(False)
    self.close_plugin_action.setDisabled(False)
|
<SYSTEM_TASK:>
Create a QMainWindow instance containing this plugin.
<END_TASK>
<USER_TASK:>
Description:
def create_window(self):
    """Create a QMainWindow instance containing this plugin."""
|
self.undocked_window = window = PluginWindow(self)
window.setAttribute(Qt.WA_DeleteOnClose)
icon = self.get_plugin_icon()
if is_text_string(icon):
    icon = self.get_icon(icon)
window.setWindowIcon(icon)
window.setWindowTitle(self.get_plugin_title())
window.setCentralWidget(self)
window.resize(self.size())
self.refresh_plugin()

self.dockwidget.setFloating(False)
self.dockwidget.setVisible(False)

window.show()
|
<SYSTEM_TASK:>
Actions to perform when a plugin is undocked to be moved.
<END_TASK>
<USER_TASK:>
Description:
def on_top_level_changed(self, top_level):
    """Actions to perform when a plugin is undocked to be moved."""
|
if top_level:
    self.undock_action.setDisabled(True)
else:
    self.undock_action.setDisabled(False)
|
<SYSTEM_TASK:>
Reimplemented Qt Method to avoid removing the header.
<END_TASK>
<USER_TASK:>
Description:
def keyPressEvent(self, event):
    """Reimplemented Qt Method to avoid removing the header."""
|
event, text, key, ctrl, shift = restore_keyevent(event)
cursor_position = self.get_position('cursor')

if cursor_position < self.header_end_pos:
    self.restrict_cursor_position(self.header_end_pos, 'eof')
elif key == Qt.Key_Delete:
    if self.has_selected_text():
        self.remove_text()
    else:
        self.stdkey_clear()
elif key == Qt.Key_Backspace:
    if self.has_selected_text():
        self.remove_text()
    elif self.header_end_pos == cursor_position:
        return
    else:
        self.stdkey_backspace()
elif key == Qt.Key_X and ctrl:
    self.cut()
else:
    CodeEditor.keyPressEvent(self, event)
|
<SYSTEM_TASK:>
Action to take when pressing the submit button.
<END_TASK>
<USER_TASK:>
Description:
def _submit_to_github(self):
    """Action to take when pressing the submit button."""
|
# Get reference to the main window
if self.parent() is not None:
    if getattr(self.parent(), 'main', False):
        # This covers the case when the dialog is attached
        # to the internal console
        main = self.parent().main
    else:
        # Else the dialog is attached to the main window
        # directly
        main = self.parent()
else:
    main = None

# Getting description and traceback
title = self.title.text()
description = self.input_description.toPlainText()
traceback = self.error_traceback[:-1]  # Remove last EOL

# Render issue
if main is not None:
    issue_text = main.render_issue(description=description,
                                   traceback=traceback)
else:
    issue_text = description

try:
    if main is None:
        org = 'ccordoba12'
    else:
        org = 'spyder-ide'
    github_backend = GithubBackend(org, 'spyder', parent_widget=main)
    github_report = github_backend.send_report(title, issue_text)
    if github_report:
        self.close()
except Exception:
    ret = QMessageBox.question(
        self, _('Error'),
        _("An error occurred while trying to send the issue to "
          "Github automatically. Would you like to open it "
          "manually?<br><br>"
          "If so, please make sure to paste your clipboard "
          "into the issue report box that will appear in a new "
          "browser tab before clicking <i>Submit</i> on that "
          "page."))
    if ret in [QMessageBox.Yes, QMessageBox.Ok]:
        QApplication.clipboard().setText(issue_text)
        issue_body = (
            " \n<!--- *** BEFORE SUBMITTING: PASTE CLIPBOARD HERE "
            "TO COMPLETE YOUR REPORT *** ---!>\n")
        if main is not None:
            main.report_issue(body=issue_body, title=title,
                              open_webpage=True)
        else:
            pass
|
<SYSTEM_TASK:>
Checks if there is an unmatched bracket in 'text'.
<END_TASK>
<USER_TASK:>
Description:
def unmatched_brackets_in_line(self, text, closing_brackets_type=None):
    """
    Checks if there is an unmatched bracket in 'text'.

    The bracket type can be general or specified by closing_brackets_type
    (')', ']' or '}').
    """
|
if closing_brackets_type is None:
    opening_brackets = self.BRACKETS_LEFT.values()
    closing_brackets = self.BRACKETS_RIGHT.values()
else:
    closing_brackets = [closing_brackets_type]
    opening_brackets = [{')': '(', '}': '{',
                         ']': '['}[closing_brackets_type]]
block = self.editor.textCursor().block()
line_pos = block.position()
for pos, char in enumerate(text):
    if char in opening_brackets:
        match = self.editor.find_brace_match(line_pos+pos, char,
                                             forward=True)
        if (match is None) or (match > line_pos+len(text)):
            return True
    if char in closing_brackets:
        match = self.editor.find_brace_match(line_pos+pos, char,
                                             forward=False)
        if (match is None) or (match < line_pos):
            return True
return False
|
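The method above delegates the actual matching to Spyder's brace matcher, which works on editor positions. As a standalone conceptual sketch of the same single-line check (not Spyder's implementation; the helper name is hypothetical), a stack-based scan is enough:

def has_unmatched_brackets(text):
    # Map each closing bracket to its opening counterpart
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for char in text:
        if char in pairs.values():
            stack.append(char)
        elif char in pairs:
            if not stack or stack.pop() != pairs[char]:
                return True  # closing bracket without a matching opener
    return bool(stack)  # leftover openers are unmatched

assert has_unmatched_brackets("foo(bar[0]")
assert not has_unmatched_brackets("foo(bar[0])")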
<SYSTEM_TASK:>
Control automatic insertion of brackets in various situations.
<END_TASK>
<USER_TASK:>
Description:
def _autoinsert_brackets(self, key):
    """Control automatic insertion of brackets in various situations."""
|
char = self.BRACKETS_CHAR[key]
pair = self.BRACKETS_PAIR[key]

line_text = self.editor.get_text('sol', 'eol')
line_to_cursor = self.editor.get_text('sol', 'cursor')
cursor = self.editor.textCursor()
trailing_text = self.editor.get_text('cursor', 'eol').strip()

if self.editor.has_selected_text():
    text = self.editor.get_selected_text()
    self.editor.insert_text("{0}{1}{2}".format(pair[0], text, pair[1]))
    # Keep text selected, for inserting multiple brackets
    cursor.movePosition(QTextCursor.Left, QTextCursor.MoveAnchor, 1)
    cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor,
                        len(text))
    self.editor.setTextCursor(cursor)
elif key in self.BRACKETS_LEFT:
    if (not trailing_text or
            trailing_text[0] in self.BRACKETS_RIGHT.values() or
            trailing_text[0] in [',', ':', ';']):
        # Automatic insertion of brackets
        self.editor.insert_text(pair)
        cursor.movePosition(QTextCursor.PreviousCharacter)
        self.editor.setTextCursor(cursor)
    else:
        self.editor.insert_text(char)
    if char in self.editor.signature_completion_characters:
        self.editor.request_signature()
elif key in self.BRACKETS_RIGHT:
    if (self.editor.next_char() == char and
            not self.editor.textCursor().atBlockEnd() and
            not self.unmatched_brackets_in_line(
                cursor.block().text(), char)):
        # Overwrite an existing bracket if all brackets in line are matched
        cursor.movePosition(QTextCursor.NextCharacter,
                            QTextCursor.KeepAnchor, 1)
        cursor.clearSelection()
        self.editor.setTextCursor(cursor)
    else:
        self.editor.insert_text(char)
|
<SYSTEM_TASK:>
Utility for drawing text with line breaks
<END_TASK>
<USER_TASK:>
Description:
def put_text(img, text, org, font_face, font_scale, color, thickness=1, line_type=8, bottom_left_origin=False):
    """Utility for drawing text with line breaks

    :param img: Image.
    :param text: Text string to be drawn.
    :param org: Bottom-left corner of the first line of the text string in the image.
    :param font_face: Font type. One of FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX,
                      FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL,
                      FONT_HERSHEY_SCRIPT_SIMPLEX, or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font IDs
                      can be combined with FONT_ITALIC to get the slanted letters.
    :param font_scale: Font scale factor that is multiplied by the font-specific base size.
    :param color: Text color.
    :param thickness: Thickness of the lines used to draw the text.
    :param line_type: Line type. See cv2.line for details.
    :param bottom_left_origin: When true, the image data origin is at the bottom-left corner.
                               Otherwise, it is at the top-left corner.
    :return: None; image is modified in place
    """
|
# Break out drawing coords
x, y = org

# Break text into list of text lines
text_lines = text.split('\n')

# Get height of text lines in pixels (height of all lines is the same)
_, line_height = cv2.getTextSize('', font_face, font_scale, thickness)[0]

# Set distance between lines in pixels
line_gap = line_height // 3

for i, text_line in enumerate(text_lines):
    # Find total size of text block before this line
    line_y_adjustment = i * (line_gap + line_height)

    # Move text down from original line based on line number
    if not bottom_left_origin:
        line_y = y + line_y_adjustment
    else:
        line_y = y - line_y_adjustment

    # Draw text
    cv2.putText(img,
                text=text_lines[i],
                org=(x, line_y),
                fontFace=font_face,
                fontScale=font_scale,
                color=color,
                thickness=thickness,
                lineType=line_type,
                bottomLeftOrigin=bottom_left_origin)
|
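A minimal usage sketch for put_text, assuming OpenCV and NumPy are installed (the canvas size, text, and output filename are illustrative):

import cv2
import numpy as np

canvas = np.zeros((200, 400, 3), dtype=np.uint8)  # blank 400x200 image
put_text(canvas, "first line\nsecond line", org=(20, 40),
         font_face=cv2.FONT_HERSHEY_SIMPLEX, font_scale=1.0,
         color=(255, 255, 255), thickness=2)
cv2.imwrite("labeled.png", canvas)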
<SYSTEM_TASK:>
Utility for drawing vertically & horizontally centered text with line breaks
<END_TASK>
<USER_TASK:>
Description:
def put_centered_text(img, text, font_face, font_scale, color, thickness=1, line_type=8):
    """Utility for drawing vertically & horizontally centered text with line breaks

    :param img: Image.
    :param text: Text string to be drawn.
    :param font_face: Font type. One of FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX,
                      FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL,
                      FONT_HERSHEY_SCRIPT_SIMPLEX, or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font IDs
                      can be combined with FONT_ITALIC to get the slanted letters.
    :param font_scale: Font scale factor that is multiplied by the font-specific base size.
    :param color: Text color.
    :param thickness: Thickness of the lines used to draw the text.
    :param line_type: Line type. See cv2.line for details.
    :return: None; image is modified in place
    """
|
# Save img dimensions
img_h, img_w = img.shape[:2]

# Break text into list of text lines
text_lines = text.split('\n')

# Get height of text lines in pixels (height of all lines is the same; width differs)
_, line_height = cv2.getTextSize('', font_face, font_scale, thickness)[0]

# Set distance between lines in pixels
line_gap = line_height // 3

# Calculate total text block height for centering
text_block_height = len(text_lines) * (line_height + line_gap)
text_block_height -= line_gap  # There's one less gap than lines

for i, text_line in enumerate(text_lines):
    # Get width of text line in pixels (height of all lines is the same)
    line_width, _ = cv2.getTextSize(text_line, font_face, font_scale, thickness)[0]

    # Center line with image dimensions
    x = (img_w - line_width) // 2
    y = (img_h + line_height) // 2

    # Find total size of text block before this line
    line_adjustment = i * (line_gap + line_height)

    # Adjust line y and re-center relative to total text block height
    y += line_adjustment - text_block_height // 2 + line_gap

    # Draw text
    cv2.putText(img,
                text=text_lines[i],
                org=(x, y),
                fontFace=font_face,
                fontScale=font_scale,
                color=color,
                thickness=thickness,
                lineType=line_type)
|
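Continuing the previous sketch, the centered variant needs no origin at all (again, the canvas and filename are illustrative):

import cv2
import numpy as np

banner = np.zeros((300, 600, 3), dtype=np.uint8)
put_centered_text(banner, "centered\nmulti-line\ntext",
                  font_face=cv2.FONT_HERSHEY_SIMPLEX, font_scale=1.0,
                  color=(0, 255, 0), thickness=2)
cv2.imwrite("banner.png", banner)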
<SYSTEM_TASK:>
Return True if `path` is a directory containing a setup.py file.
<END_TASK>
<USER_TASK:>
Description:
def is_installable_dir(path):
    """Return True if `path` is a directory containing a setup.py file."""
|
if not os.path.isdir(path):
    return False
setup_py = os.path.join(path, "setup.py")
if os.path.isfile(setup_py):
    return True
return False
|
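A pathlib-flavoured equivalent, shown only as a sketch of the same check (the underscored name marks it as a hypothetical helper, not part of the original module):

from pathlib import Path

def _is_installable_dir(path):
    p = Path(path)
    return p.is_dir() and (p / "setup.py").is_file()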
<SYSTEM_TASK:>
Returns a list of requirements for building, as strings
<END_TASK>
<USER_TASK:>
Description:
def get_requires_for_build_wheel(config_settings=None):
    """
    Returns a list of requirements for building, as strings
    """
|
poetry = Poetry.create(".")

main, _ = SdistBuilder.convert_dependencies(poetry.package, poetry.package.requires)

return main
|
<SYSTEM_TASK:>
Builds a wheel, places it in wheel_directory
<END_TASK>
<USER_TASK:>
Description:
def build_wheel(wheel_directory, config_settings=None, metadata_directory=None):
    """Builds a wheel, places it in wheel_directory"""
|
poetry = Poetry.create(".")

return unicode(
    WheelBuilder.make_in(
        poetry, SystemEnv(Path(sys.prefix)), NullIO(), Path(wheel_directory)
    )
)
|
<SYSTEM_TASK:>
Builds an sdist, places it in sdist_directory
<END_TASK>
<USER_TASK:>
Description:
def build_sdist(sdist_directory, config_settings=None):
    """Builds an sdist, places it in sdist_directory"""
|
poetry = Poetry.create(".")

path = SdistBuilder(poetry, SystemEnv(Path(sys.prefix)), NullIO()).build(
    Path(sdist_directory)
)

return unicode(path.name)
|
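These three functions follow the PEP 517 hook signatures, so a build frontend can call them with a Poetry project as the current working directory. A hedged in-process sketch (real frontends usually run the hooks in a subprocess; the temporary directories are illustrative and a valid pyproject.toml must be present):

import tempfile

wheel_dir = tempfile.mkdtemp()
sdist_dir = tempfile.mkdtemp()

print(get_requires_for_build_wheel())  # build requirements, as strings
print(build_wheel(wheel_dir))          # basename of the built wheel
print(build_sdist(sdist_dir))          # basename of the built sdist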
<SYSTEM_TASK:>
Returns all external incompatibilities in this incompatibility's
<END_TASK>
<USER_TASK:>
Description:
def external_incompatibilities(self):  # type: () -> Generator[Incompatibility]
    """
    Returns all external incompatibilities in this incompatibility's
    derivation graph.
    """
|
if isinstance(self._cause, ConflictCause):
    cause = self._cause  # type: ConflictCause
    for incompatibility in cause.conflict.external_incompatibilities:
        yield incompatibility

    for incompatibility in cause.other.external_incompatibilities:
        yield incompatibility
else:
    yield self
|
<SYSTEM_TASK:>
Adds an assignment of package as a decision
<END_TASK>
<USER_TASK:>
Description:
def decide(self, package):  # type: (Package) -> None
    """
    Adds an assignment of package as a decision
    and increments the decision level.
    """
|
# When we make a new decision after backtracking, count an additional
# attempted solution. If we backtrack multiple times in a row, though, we
# only want to count one, since we haven't actually started attempting a
# new solution.
if self._backtracking:
    self._attempted_solutions += 1

self._backtracking = False
self._decisions[package.name] = package

self._assign(
    Assignment.decision(package, self.decision_level, len(self._assignments))
)
|
<SYSTEM_TASK:>
Adds an assignment of package as a derivation.
<END_TASK>
<USER_TASK:>
Description:
def derive(
    self, dependency, is_positive, cause
):  # type: (Dependency, bool, Incompatibility) -> None
    """
    Adds an assignment of package as a derivation.
    """
|
self._assign(
    Assignment.derivation(
        dependency,
        is_positive,
        cause,
        self.decision_level,
        len(self._assignments),
    )
)
|
<SYSTEM_TASK:>
Adds an Assignment to _assignments and _positive or _negative.
<END_TASK>
<USER_TASK:>
Description:
def _assign(self, assignment):  # type: (Assignment) -> None
    """
    Adds an Assignment to _assignments and _positive or _negative.
    """
|
self._assignments.append(assignment)
self._register(assignment)
|
<SYSTEM_TASK:>
Resets the current decision level to decision_level, and removes all
<END_TASK>
<USER_TASK:>
Description:
def backtrack(self, decision_level):  # type: (int) -> None
    """
    Resets the current decision level to decision_level, and removes all
    assignments made after that level.
    """
|
self._backtracking = True

packages = set()
while self._assignments[-1].decision_level > decision_level:
    removed = self._assignments.pop(-1)
    packages.add(removed.dependency.name)
    if removed.is_decision():
        del self._decisions[removed.dependency.name]

# Re-compute _positive and _negative for the packages that were removed.
for package in packages:
    if package in self._positive:
        del self._positive[package]

    if package in self._negative:
        del self._negative[package]

for assignment in self._assignments:
    if assignment.dependency.name in packages:
        self._register(assignment)
|
<SYSTEM_TASK:>
Registers an Assignment in _positive or _negative.
<END_TASK>
<USER_TASK:>
Description:
def _register(self, assignment):  # type: (Assignment) -> None
    """
    Registers an Assignment in _positive or _negative.
    """
|
name = assignment.dependency.name
old_positive = self._positive.get(name)
if old_positive is not None:
    self._positive[name] = old_positive.intersect(assignment)

    return

ref = assignment.dependency.name
negative_by_ref = self._negative.get(name)
old_negative = None if negative_by_ref is None else negative_by_ref.get(ref)
if old_negative is None:
    term = assignment
else:
    term = assignment.intersect(old_negative)

if term.is_positive():
    if name in self._negative:
        del self._negative[name]

    self._positive[name] = term
else:
    if name not in self._negative:
        self._negative[name] = {}

    self._negative[name][ref] = term
|
<SYSTEM_TASK:>
Returns the first Assignment in this solution such that the sublist of
<END_TASK>
<USER_TASK:>
Description:
def satisfier(self, term):  # type: (Term) -> Assignment
    """
    Returns the first Assignment in this solution such that the sublist of
    assignments up to and including that entry collectively satisfies term.
    """
|
assigned_term = None  # type: Term

for assignment in self._assignments:
    if assignment.dependency.name != term.dependency.name:
        continue

    if (
        not assignment.dependency.is_root
        and not assignment.dependency.name == term.dependency.name
    ):
        if not assignment.is_positive():
            continue

        assert not term.is_positive()

        return assignment

    if assigned_term is None:
        assigned_term = assignment
    else:
        assigned_term = assigned_term.intersect(assignment)

    # As soon as we have enough assignments to satisfy term, return them.
    if assigned_term.satisfies(term):
        return assignment

raise RuntimeError("[BUG] {} is not satisfied.".format(term))
|
<SYSTEM_TASK:>
Checks the validity of a configuration
<END_TASK>
<USER_TASK:>
Description:
def check(cls, config, strict=False):  # type: (dict, bool) -> Dict[str, List[str]]
    """
    Checks the validity of a configuration
    """
|
result = {"errors": [], "warnings": []}

# Schema validation errors
validation_errors = validate_object(config, "poetry-schema")

result["errors"] += validation_errors

if strict:
    # If strict, check the file more thoroughly

    # Checking license
    license = config.get("license")
    if license:
        try:
            license_by_id(license)
        except ValueError:
            result["errors"].append("{} is not a valid license".format(license))

    if "dependencies" in config:
        python_versions = config["dependencies"]["python"]
        if python_versions == "*":
            result["warnings"].append(
                "A wildcard Python dependency is ambiguous. "
                "Consider specifying a more explicit one."
            )

    # Checking for scripts with extras
    if "scripts" in config:
        scripts = config["scripts"]
        for name, script in scripts.items():
            if not isinstance(script, dict):
                continue

            extras = script["extras"]
            for extra in extras:
                if extra not in config["extras"]:
                    result["errors"].append(
                        'Script "{}" requires extra "{}" which is not defined.'.format(
                            name, extra
                        )
                    )

return result
|
<SYSTEM_TASK:>
Discover subpackages and data.
<END_TASK>
<USER_TASK:>
Description:
def find_packages(self, include):
    """
    Discover subpackages and data.

    It also retrieves necessary files.
    """
|
pkgdir = None
if include.source is not None:
    pkgdir = str(include.base)

base = str(include.elements[0].parent)

pkg_name = include.package
pkg_data = defaultdict(list)
# Undocumented distutils feature:
# the empty string matches all package names
pkg_data[""].append("*")

packages = [pkg_name]
subpkg_paths = set()

def find_nearest_pkg(rel_path):
    parts = rel_path.split(os.sep)
    for i in reversed(range(1, len(parts))):
        ancestor = "/".join(parts[:i])
        if ancestor in subpkg_paths:
            pkg = ".".join([pkg_name] + parts[:i])

            return pkg, "/".join(parts[i:])

    # Relative to the top-level package
    return pkg_name, Path(rel_path).as_posix()

excluded_files = self.find_excluded_files()

for path, dirnames, filenames in os.walk(str(base), topdown=True):
    if os.path.basename(path) == "__pycache__":
        continue

    from_top_level = os.path.relpath(path, base)
    if from_top_level == ".":
        continue

    is_subpkg = "__init__.py" in filenames
    if is_subpkg:
        subpkg_paths.add(from_top_level)
        parts = from_top_level.split(os.sep)
        packages.append(".".join([pkg_name] + parts))
    else:
        pkg, from_nearest_pkg = find_nearest_pkg(from_top_level)

        data_elements = [
            f.relative_to(self._path)
            for f in Path(path).glob("*")
            if not f.is_dir()
        ]

        data = [e for e in data_elements if not self.is_excluded(e)]
        if not data:
            continue

        if len(data) == len(data_elements):
            pkg_data[pkg].append(pjoin(from_nearest_pkg, "*"))
        else:
            for d in data:
                if d.is_dir():
                    continue

                pkg_data[pkg] += [pjoin(from_nearest_pkg, d.name) for d in data]

# Sort values in pkg_data
pkg_data = {k: sorted(v) for (k, v) in pkg_data.items() if v}

return pkgdir, sorted(packages), pkg_data
|
<SYSTEM_TASK:>
Clean metadata from a TarInfo object to make it more reproducible.
<END_TASK>
<USER_TASK:>
Description:
def clean_tarinfo(cls, tar_info):
    """
    Clean metadata from a TarInfo object to make it more reproducible.

    - Set uid & gid to 0
    - Set uname and gname to ""
    - Normalise permissions to 644 or 755
    - Set mtime if not None
    """
|
ti = copy(tar_info)
ti.uid = 0
ti.gid = 0
ti.uname = ""
ti.gname = ""
ti.mode = normalize_file_permissions(ti.mode)

return ti
|
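A usage sketch with the standard tarfile module, assuming the classmethod lives on a builder class as above (the class name SdistBuilder, the file name, and the archive name are illustrative):

import tarfile

with tarfile.open("dist.tar.gz", "w:gz") as tar:
    info = tar.gettarinfo("example.txt")
    # Strip uid/gid/uname/gname and normalize the mode for reproducibility
    info = SdistBuilder.clean_tarinfo(info)
    with open("example.txt", "rb") as f:
        tar.addfile(info, f)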
<SYSTEM_TASK:>
Search for the specifications that match the given dependency.
<END_TASK>
<USER_TASK:>
Description:
def search_for(self, dependency):  # type: (Dependency) -> List[Package]
    """
    Search for the specifications that match the given dependency.

    The specifications in the returned list will be considered in reverse
    order, so the latest version ought to be last.
    """
|
if dependency.is_root:
    return PackageCollection(dependency, [self._package])

for constraint in self._search_for.keys():
    if (
        constraint.name == dependency.name
        and constraint.constraint.intersect(dependency.constraint)
        == dependency.constraint
    ):
        packages = [
            p
            for p in self._search_for[constraint]
            if dependency.constraint.allows(p.version)
        ]

        packages.sort(
            key=lambda p: (
                not p.is_prerelease() and not dependency.allows_prereleases(),
                p.version,
            ),
            reverse=True,
        )

        return PackageCollection(dependency, packages)

if dependency.is_vcs():
    packages = self.search_for_vcs(dependency)
elif dependency.is_file():
    packages = self.search_for_file(dependency)
elif dependency.is_directory():
    packages = self.search_for_directory(dependency)
else:
    constraint = dependency.constraint

    packages = self._pool.find_packages(
        dependency.name,
        constraint,
        extras=dependency.extras,
        allow_prereleases=dependency.allows_prereleases(),
    )

    packages.sort(
        key=lambda p: (
            not p.is_prerelease() and not dependency.allows_prereleases(),
            p.version,
        ),
        reverse=True,
    )

self._search_for[dependency] = packages

return PackageCollection(dependency, packages)
|
<SYSTEM_TASK:>
Search for the specifications that match the given VCS dependency.
<END_TASK>
<USER_TASK:>
Description:
def search_for_vcs(self, dependency):  # type: (VCSDependency) -> List[Package]
    """
    Search for the specifications that match the given VCS dependency.

    Basically, we clone the repository in a temporary directory
    and get the information we need by checking out the specified reference.
    """
|
if dependency.vcs != "git":
    raise ValueError("Unsupported VCS dependency {}".format(dependency.vcs))

tmp_dir = Path(mkdtemp(prefix="pypoetry-git-{}".format(dependency.name)))

try:
    git = Git()
    git.clone(dependency.source, tmp_dir)
    git.checkout(dependency.reference, tmp_dir)
    revision = git.rev_parse(dependency.reference, tmp_dir).strip()

    if dependency.tag or dependency.rev:
        revision = dependency.reference

    directory_dependency = DirectoryDependency(
        dependency.name,
        tmp_dir,
        category=dependency.category,
        optional=dependency.is_optional(),
    )
    for extra in dependency.extras:
        directory_dependency.extras.append(extra)

    package = self.search_for_directory(directory_dependency)[0]

    package.source_type = "git"
    package.source_url = dependency.source
    package.source_reference = revision
except Exception:
    raise
finally:
    safe_rmtree(str(tmp_dir))

return [package]
|
<SYSTEM_TASK:>
Returns incompatibilities that encapsulate a given package's dependencies,
<END_TASK>
<USER_TASK:>
Description:
def incompatibilities_for(
    self, package
):  # type: (DependencyPackage) -> List[Incompatibility]
    """
    Returns incompatibilities that encapsulate a given package's dependencies,
    or that indicate that it can't be safely selected.

    If multiple subsequent versions of this package have the same
    dependencies, this will return incompatibilities that reflect that. It
    won't return incompatibilities that have already been returned by a
    previous call to _incompatibilities_for().
    """
|
if package.is_root():
    dependencies = package.all_requires
else:
    dependencies = package.requires

    if not package.python_constraint.allows_all(
        self._package.python_constraint
    ):
        intersection = package.python_constraint.intersect(
            package.dependency.transitive_python_constraint
        )
        difference = package.dependency.transitive_python_constraint.difference(
            intersection
        )
        if (
            package.dependency.transitive_python_constraint.is_any()
            or self._package.python_constraint.intersect(
                package.dependency.python_constraint
            ).is_empty()
            or intersection.is_empty()
            or not difference.is_empty()
        ):
            return [
                Incompatibility(
                    [Term(package.to_dependency(), True)],
                    PythonCause(
                        package.python_versions, self._package.python_versions
                    ),
                )
            ]

dependencies = [
    dep
    for dep in dependencies
    if dep.name not in self.UNSAFE_PACKAGES
    and self._package.python_constraint.allows_any(dep.python_constraint)
]

return [
    Incompatibility(
        [Term(package.to_dependency(), True), Term(dep, False)],
        DependencyCause(),
    )
    for dep in dependencies
]
|
<SYSTEM_TASK:>
Finds a set of dependencies that match the root package's constraints,
<END_TASK>
<USER_TASK:>
Description:
def solve(self):  # type: () -> SolverResult
    """
    Finds a set of dependencies that match the root package's constraints,
    or raises an error if no such set is available.
    """
|
start = time.time()
root_dependency = Dependency(self._root.name, self._root.version)
root_dependency.is_root = True

self._add_incompatibility(
    Incompatibility([Term(root_dependency, False)], RootCause())
)

try:
    next = self._root.name
    while next is not None:
        self._propagate(next)
        next = self._choose_package_version()

    return self._result()
except Exception:
    raise
finally:
    self._log(
        "Version solving took {:.3f} seconds.\n"
        "Tried {} solutions.".format(
            time.time() - start, self._solution.attempted_solutions
        )
    )
|
<SYSTEM_TASK:>
Performs unit propagation on incompatibilities transitively
<END_TASK>
<USER_TASK:>
Description:
def _propagate(self, package):  # type: (str) -> None
    """
    Performs unit propagation on incompatibilities transitively
    related to package to derive new assignments for _solution.
    """
|
changed = set()
changed.add(package)

while changed:
    package = changed.pop()

    # Iterate in reverse because conflict resolution tends to produce more
    # general incompatibilities as time goes on. If we look at those first,
    # we can derive stronger assignments sooner and more eagerly find
    # conflicts.
    for incompatibility in reversed(self._incompatibilities[package]):
        result = self._propagate_incompatibility(incompatibility)

        if result is _conflict:
            # If the incompatibility is satisfied by the solution, we use
            # _resolve_conflict() to determine the root cause of the conflict as a
            # new incompatibility.
            #
            # It also backjumps to a point in the solution
            # where that incompatibility will allow us to derive new assignments
            # that avoid the conflict.
            root_cause = self._resolve_conflict(incompatibility)

            # Back jumping erases all the assignments we did at the previous
            # decision level, so we clear [changed] and refill it with the
            # newly-propagated assignment.
            changed.clear()
            changed.add(str(self._propagate_incompatibility(root_cause)))

            break
        elif result is not None:
            changed.add(result)
|
<SYSTEM_TASK:>
If incompatibility is almost satisfied by _solution, adds the
<END_TASK>
<USER_TASK:>
Description:
def _propagate_incompatibility(
    self, incompatibility
):  # type: (Incompatibility) -> Union[str, _conflict, None]
    """
    If incompatibility is almost satisfied by _solution, adds the
    negation of the unsatisfied term to _solution.

    If incompatibility is satisfied by _solution, returns _conflict. If
    incompatibility is almost satisfied by _solution, returns the
    unsatisfied term's package name.

    Otherwise, returns None.
    """
|
# The first entry in incompatibility.terms that's not yet satisfied by
# _solution, if one exists. If we find more than one, _solution is
# inconclusive for incompatibility and we can't deduce anything.
unsatisfied = None

for term in incompatibility.terms:
    relation = self._solution.relation(term)

    if relation == SetRelation.DISJOINT:
        # If term is already contradicted by _solution, then
        # incompatibility is contradicted as well and there's nothing new we
        # can deduce from it.
        return
    elif relation == SetRelation.OVERLAPPING:
        # If more than one term is inconclusive, we can't deduce anything about
        # incompatibility.
        if unsatisfied is not None:
            return

        # If exactly one term in incompatibility is inconclusive, then it's
        # almost satisfied and [term] is the unsatisfied term. We can add the
        # inverse of the term to _solution.
        unsatisfied = term

# If *all* terms in incompatibility are satisfied by _solution, then
# incompatibility is satisfied and we have a conflict.
if unsatisfied is None:
    return _conflict

self._log(
    "derived: {}{}".format(
        "not " if unsatisfied.is_positive() else "", unsatisfied.dependency
    )
)

self._solution.derive(
    unsatisfied.dependency, not unsatisfied.is_positive(), incompatibility
)

return unsatisfied.dependency.name
|
<SYSTEM_TASK:>
Tries to select a version of a required package.
<END_TASK>
<USER_TASK:>
Description:
def _choose_package_version(self):  # type: () -> Union[str, None]
    """
    Tries to select a version of a required package.

    Returns the name of the package whose incompatibilities should be
    propagated by _propagate(), or None indicating that version solving is
    complete and a solution has been found.
    """
|
unsatisfied = self._solution.unsatisfied
if not unsatisfied:
    return

# Prefer packages with as few remaining versions as possible,
# so that if a conflict is necessary it's forced quickly.
def _get_min(dependency):
    if dependency.name in self._use_latest:
        # If we're forced to use the latest version of a package, it effectively
        # only has one version to choose from.
        return 1

    if dependency.name in self._locked:
        return 1

    try:
        return len(self._provider.search_for(dependency))
    except ValueError:
        return 0

if len(unsatisfied) == 1:
    dependency = unsatisfied[0]
else:
    dependency = min(*unsatisfied, key=_get_min)

locked = self._get_locked(dependency.name)
if locked is None or not dependency.constraint.allows(locked.version):
    try:
        packages = self._provider.search_for(dependency)
    except ValueError as e:
        self._add_incompatibility(
            Incompatibility([Term(dependency, True)], PackageNotFoundCause(e))
        )

        return dependency.name

    try:
        version = packages[0]
    except IndexError:
        version = None
else:
    version = locked

if version is None:
    # If there are no versions that satisfy the constraint,
    # add an incompatibility that indicates that.
    self._add_incompatibility(
        Incompatibility([Term(dependency, True)], NoVersionsCause())
    )

    return dependency.name

version = self._provider.complete_package(version)

conflict = False
for incompatibility in self._provider.incompatibilities_for(version):
    self._add_incompatibility(incompatibility)

    # If an incompatibility is already satisfied, then selecting version
    # would cause a conflict.
    #
    # We'll continue adding its dependencies, then go back to
    # unit propagation which will guide us to choose a better version.
    conflict = conflict or all(
        [
            term.dependency.name == dependency.name
            or self._solution.satisfies(term)
            for term in incompatibility.terms
        ]
    )

if not conflict:
    self._solution.decide(version)
    self._log(
        "selecting {} ({})".format(version.name, version.full_pretty_version)
    )

return dependency.name
|
<SYSTEM_TASK:>
Run a command inside the Python environment.
<END_TASK>
<USER_TASK:>
Description:
def run(self, bin, *args, **kwargs):
    """
    Run a command inside the Python environment.
    """
|
bin = self._bin(bin)

cmd = [bin] + list(args)
shell = kwargs.get("shell", False)
call = kwargs.pop("call", False)
input_ = kwargs.pop("input_", None)

if shell:
    cmd = list_to_shell_command(cmd)

try:
    if self._is_windows:
        kwargs["shell"] = True

    if input_:
        p = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            **kwargs
        )
        output = p.communicate(encode(input_))[0]
    elif call:
        return subprocess.call(cmd, stderr=subprocess.STDOUT, **kwargs)
    else:
        output = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT, **kwargs
        )
except CalledProcessError as e:
    raise EnvCommandError(e)

return decode(output)
|
<SYSTEM_TASK:>
Transforms a disjunctive constraint into a proper constraint.
<END_TASK>
<USER_TASK:>
Description:
def format_python_constraint(constraint):
    """
    Transforms a disjunctive constraint into a proper constraint.
    """
|
if isinstance(constraint, Version):
    if constraint.precision >= 3:
        return "=={}".format(str(constraint))

    # Transform 3.6 or 3
    if constraint.precision == 2:
        # 3.6
        constraint = parse_constraint(
            "~{}.{}".format(constraint.major, constraint.minor)
        )
    else:
        constraint = parse_constraint("^{}.0".format(constraint.major))

if not isinstance(constraint, VersionUnion):
    return str(constraint)

formatted = []
accepted = []

for version in PYTHON_VERSION:
    version_constraint = parse_constraint(version)
    matches = constraint.allows_any(version_constraint)
    if not matches:
        formatted.append("!=" + version)
    else:
        accepted.append(version)

# Checking lower bound
low = accepted[0]

formatted.insert(0, ">=" + ".".join(low.split(".")[:2]))

return ", ".join(formatted)
|
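A rough illustration of the transformation, assuming poetry.semver is importable and PYTHON_VERSION enumerates the interpreter series Poetry knows about (the exact output depends on that list):

from poetry.semver import Version

# A two-component version such as 3.6 is first widened to "~3.6",
# then expanded against PYTHON_VERSION into a ">=3.6, !=..." style string.
print(format_python_constraint(Version.parse("3.6")))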
<SYSTEM_TASK:>
Determines if the given package matches this dependency.
<END_TASK>
<USER_TASK:>
Description:
def accepts(self, package):  # type: (poetry.packages.Package) -> bool
    """
    Determines if the given package matches this dependency.
    """
|
return (
    self._name == package.name
    and self._constraint.allows(package.version)
    and (not package.is_prerelease() or self.allows_prereleases())
)
|
<SYSTEM_TASK:>
Set the dependency as optional.
<END_TASK>
<USER_TASK:>
Description:
def deactivate(self):
    """
    Set the dependency as optional.
    """
|
if not self._optional:
    self._optional = True

self._activated = False
|
<SYSTEM_TASK:>
Returns whether this term satisfies another.
<END_TASK>
<USER_TASK:>
Description:
def satisfies(self, other):  # type: (Term) -> bool
    """
    Returns whether this term satisfies another.
    """
|
return (
    self.dependency.name == other.dependency.name
    and self.relation(other) == SetRelation.SUBSET
)
|
<SYSTEM_TASK:>
Returns the relationship between the package versions
<END_TASK>
<USER_TASK:>
Description:
def relation(self, other):  # type: (Term) -> int
    """
    Returns the relationship between the package versions
    allowed by this term and another.
    """
|
if self.dependency.name != other.dependency.name:
    raise ValueError(
        "{} should refer to {}".format(other, self.dependency.name)
    )

other_constraint = other.constraint

if other.is_positive():
    if self.is_positive():
        if not self._compatible_dependency(other.dependency):
            return SetRelation.DISJOINT

        # foo ^1.5.0 is a subset of foo ^1.0.0
        if other_constraint.allows_all(self.constraint):
            return SetRelation.SUBSET

        # foo ^2.0.0 is disjoint with foo ^1.0.0
        if not self.constraint.allows_any(other_constraint):
            return SetRelation.DISJOINT

        return SetRelation.OVERLAPPING
    else:
        if not self._compatible_dependency(other.dependency):
            return SetRelation.OVERLAPPING

        # not foo ^1.0.0 is disjoint with foo ^1.5.0
        if self.constraint.allows_all(other_constraint):
            return SetRelation.DISJOINT

        # not foo ^1.5.0 overlaps foo ^1.0.0
        # not foo ^2.0.0 is a superset of foo ^1.5.0
        return SetRelation.OVERLAPPING
else:
    if self.is_positive():
        if not self._compatible_dependency(other.dependency):
            return SetRelation.SUBSET

        # foo ^2.0.0 is a subset of not foo ^1.0.0
        if not other_constraint.allows_any(self.constraint):
            return SetRelation.SUBSET

        # foo ^1.5.0 is disjoint with not foo ^1.0.0
        if other_constraint.allows_all(self.constraint):
            return SetRelation.DISJOINT

        # foo ^1.0.0 overlaps not foo ^1.5.0
        return SetRelation.OVERLAPPING
    else:
        if not self._compatible_dependency(other.dependency):
            return SetRelation.OVERLAPPING

        # not foo ^1.0.0 is a subset of not foo ^1.5.0
        if self.constraint.allows_all(other_constraint):
            return SetRelation.SUBSET

        # not foo ^2.0.0 overlaps not foo ^1.0.0
        # not foo ^1.5.0 is a superset of not foo ^1.0.0
        return SetRelation.OVERLAPPING
|
<SYSTEM_TASK:>
Returns a Term that represents the packages
<END_TASK>
<USER_TASK:>
Description:
def intersect(self, other):  # type: (Term) -> Union[Term, None]
    """
    Returns a Term that represents the packages
    allowed by both this term and another
    """
|
if self.dependency.name != other.dependency.name:
    raise ValueError(
        "{} should refer to {}".format(other, self.dependency.name)
    )

if self._compatible_dependency(other.dependency):
    if self.is_positive() != other.is_positive():
        # foo ^1.0.0 ∩ not foo ^1.5.0 → foo >=1.0.0 <1.5.0
        positive = self if self.is_positive() else other
        negative = other if self.is_positive() else self

        return self._non_empty_term(
            positive.constraint.difference(negative.constraint), True
        )
    elif self.is_positive():
        # foo ^1.0.0 ∩ foo >=1.5.0 <3.0.0 → foo ^1.5.0
        return self._non_empty_term(
            self.constraint.intersect(other.constraint), True
        )
    else:
        # not foo ^1.0.0 ∩ not foo >=1.5.0 <3.0.0 → not foo >=1.0.0 <3.0.0
        return self._non_empty_term(
            self.constraint.union(other.constraint), False
        )
elif self.is_positive() != other.is_positive():
    return self if self.is_positive() else other
else:
    return
|
<SYSTEM_TASK:>
Finds all files to add to the tarball
<END_TASK>
<USER_TASK:>
Description:
def find_files_to_add(self, exclude_build=True):  # type: (bool) -> list
    """
    Finds all files to add to the tarball
    """
|
to_add = []

for include in self._module.includes:
    for file in include.elements:
        if "__pycache__" in str(file):
            continue

        if file.is_dir():
            continue

        file = file.relative_to(self._path)

        if self.is_excluded(file) and isinstance(include, PackageInclude):
            continue

        if file.suffix == ".pyc":
            continue

        if file in to_add:
            # Skip duplicates
            continue

        self._io.writeln(
            " - Adding: <comment>{}</comment>".format(str(file)),
            verbosity=self._io.VERBOSITY_VERY_VERBOSE,
        )
        to_add.append(file)

# Include project files
self._io.writeln(
    " - Adding: <comment>pyproject.toml</comment>",
    verbosity=self._io.VERBOSITY_VERY_VERBOSE,
)
to_add.append(Path("pyproject.toml"))

# If a license file exists, add it
for license_file in self._path.glob("LICENSE*"):
    self._io.writeln(
        " - Adding: <comment>{}</comment>".format(
            license_file.relative_to(self._path)
        ),
        verbosity=self._io.VERBOSITY_VERY_VERBOSE,
    )
    to_add.append(license_file.relative_to(self._path))

# If a README is specified we need to include it
# to avoid errors
if "readme" in self._poetry.local_config:
    readme = self._path / self._poetry.local_config["readme"]
    if readme.exists():
        self._io.writeln(
            " - Adding: <comment>{}</comment>".format(
                readme.relative_to(self._path)
            ),
            verbosity=self._io.VERBOSITY_VERY_VERBOSE,
        )
        to_add.append(readme.relative_to(self._path))

# If a build script is specified and explicitly required
# we add it to the list of files
if self._package.build and not exclude_build:
    to_add.append(Path(self._package.build))

return sorted(to_add)
|
<SYSTEM_TASK:>
Checks whether the lock file is still up to date with the current hash.
<END_TASK>
<USER_TASK:>
Description:
def is_fresh(self):  # type: () -> bool
    """
    Checks whether the lock file is still up to date with the current hash.
    """
|
lock = self._lock.read()
metadata = lock.get("metadata", {})

if "content-hash" in metadata:
    return self._content_hash == lock["metadata"]["content-hash"]

return False
|
<SYSTEM_TASK:>
Searches and returns a repository of locked packages.
<END_TASK>
<USER_TASK:>
Description:
def locked_repository(
    self, with_dev_reqs=False
):  # type: (bool) -> poetry.repositories.Repository
    """
    Searches and returns a repository of locked packages.
    """
|
if not self.is_locked():
    return poetry.repositories.Repository()

lock_data = self.lock_data
packages = poetry.repositories.Repository()

if with_dev_reqs:
    locked_packages = lock_data["package"]
else:
    locked_packages = [
        p for p in lock_data["package"] if p["category"] == "main"
    ]

if not locked_packages:
    return packages

for info in locked_packages:
    package = poetry.packages.Package(
        info["name"], info["version"], info["version"]
    )
    package.description = info.get("description", "")
    package.category = info["category"]
    package.optional = info["optional"]
    package.hashes = lock_data["metadata"]["hashes"][info["name"]]
    package.python_versions = info["python-versions"]

    if "marker" in info:
        package.marker = parse_marker(info["marker"])
    else:
        # Compatibility for old locks
        if "requirements" in info:
            dep = poetry.packages.Dependency("foo", "0.0.0")
            for name, value in info["requirements"].items():
                if name == "python":
                    dep.python_versions = value
                elif name == "platform":
                    dep.platform = value

            split_dep = dep.to_pep_508(False).split(";")
            if len(split_dep) > 1:
                package.marker = parse_marker(split_dep[1].strip())

    for dep_name, constraint in info.get("dependencies", {}).items():
        if isinstance(constraint, list):
            for c in constraint:
                package.add_dependency(dep_name, c)

            continue

        package.add_dependency(dep_name, constraint)

    if "source" in info:
        package.source_type = info["source"]["type"]
        package.source_url = info["source"]["url"]
        package.source_reference = info["source"]["reference"]

    packages.add_package(package)

return packages
|
<SYSTEM_TASK:>
Returns the sha256 hash of the sorted content of the pyproject file.
<END_TASK>
<USER_TASK:>
Description:
def _get_content_hash(self):  # type: () -> str
    """
    Returns the sha256 hash of the sorted content of the pyproject file.
    """
|
content = self._local_config

relevant_content = {}
for key in self._relevant_keys:
    relevant_content[key] = content.get(key)

content_hash = sha256(
    json.dumps(relevant_content, sort_keys=True).encode()
).hexdigest()

return content_hash
|
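The same idea in standalone form: hash only a stable subset of the config with sorted keys, so irrelevant edits and dict ordering don't invalidate the lock (the keys and config below are illustrative):

import json
from hashlib import sha256

config = {"name": "demo", "version": "0.1.0", "description": "ignored"}
relevant = {k: config.get(k) for k in ("name", "version", "dependencies")}
digest = sha256(json.dumps(relevant, sort_keys=True).encode()).hexdigest()
print(digest)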
<SYSTEM_TASK:>
Load installed packages.
<END_TASK>
<USER_TASK:>
Description:
def load(cls, env):  # type: (Env) -> InstalledRepository
    """
    Load installed packages.

    For now, it uses the pip "freeze" command.
    """
|
repo = cls()

freeze_output = env.run("pip", "freeze")
for line in freeze_output.split("\n"):
    if "==" in line:
        name, version = re.split("={2,3}", line)
        repo.add_package(Package(name, version, version))
    elif line.startswith("-e "):
        line = line[3:].strip()
        if line.startswith("git+"):
            url = line.lstrip("git+")
            if "@" in url:
                url, rev = url.rsplit("@", 1)
            else:
                rev = "master"

            name = url.split("/")[-1].rstrip(".git")
            if "#egg=" in rev:
                rev, name = rev.split("#egg=")

            package = Package(name, "0.0.0")
            package.source_type = "git"
            package.source_url = url
            package.source_reference = rev

            repo.add_package(package)

return repo
|
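A standalone sketch of the same `pip freeze` line parsing (the sample lines are illustrative, not the output of any particular environment):

import re

lines = [
    "tomlkit==0.4.4",
    "requests===2.19.1",
    "-e git+https://github.com/sdispater/poetry.git@master#egg=poetry",
]
for line in lines:
    if "==" in line:
        # "={2,3}" also matches PEP 440 arbitrary equality ("===")
        name, version = re.split("={2,3}", line)
        print(name, version)
    elif line.startswith("-e "):
        url = line[3:].strip().lstrip("git+")
        url, rev = url.rsplit("@", 1) if "@" in url else (url, "master")
        name = url.split("/")[-1].rstrip(".git")
        if "#egg=" in rev:
            rev, name = rev.split("#egg=")
        print(name, url, rev)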
<SYSTEM_TASK:>
Given a package name and optional version,
<END_TASK>
<USER_TASK:>
Description:
def find_best_candidate(
    self,
    package_name,  # type: str
    target_package_version=None,  # type: Union[str, None]
    allow_prereleases=False,  # type: bool
):  # type: (...) -> Union[Package, bool]
    """
    Given a package name and optional version,
    returns the latest Package that matches
    """
|
if target_package_version:
    constraint = parse_constraint(target_package_version)
else:
    constraint = parse_constraint("*")

candidates = self._pool.find_packages(
    package_name, constraint, allow_prereleases=allow_prereleases
)

if not candidates:
    return False

dependency = Dependency(package_name, constraint)

# Select highest version if we have many
package = candidates[0]
for candidate in candidates:
    if candidate.is_prerelease() and not dependency.allows_prereleases():
        continue

    # Select highest version of the two
    if package.version < candidate.version:
        package = candidate

return package
|
<SYSTEM_TASK:>
Retrieve the release information.
<END_TASK>
<USER_TASK:>
Description:
def package(
    self, name, version, extras=None
):  # type: (...) -> poetry.packages.Package
    """
    Retrieve the release information.

    This is a heavy task which takes time.
    We have to download a package to get the dependencies.
    We also need to download every file matching this release
    to get the various hashes.

    Note that this will be cached, so subsequent operations
    should be much faster.
    """
|
try:
    index = self._packages.index(
        poetry.packages.Package(name, version, version)
    )

    return self._packages[index]
except ValueError:
    if extras is None:
        extras = []

    release_info = self.get_release_info(name, version)
    package = poetry.packages.Package(name, version, version)

    if release_info["requires_python"]:
        package.python_versions = release_info["requires_python"]

    package.source_type = "legacy"
    package.source_url = self._url
    package.source_reference = self.name

    requires_dist = release_info["requires_dist"] or []
    for req in requires_dist:
        try:
            dependency = dependency_from_pep_508(req)
        except InvalidMarker:
            # Invalid marker
            # We strip the markers hoping for the best
            req = req.split(";")[0]
            dependency = dependency_from_pep_508(req)
        except ValueError:
            # Likely unable to parse constraint so we skip it
            self._log(
                "Invalid constraint ({}) found in {}-{} dependencies, "
                "skipping".format(req, package.name, package.version),
                level="debug",
            )
            continue

        if dependency.in_extras:
            for extra in dependency.in_extras:
                if extra not in package.extras:
                    package.extras[extra] = []

                package.extras[extra].append(dependency)

        if not dependency.is_optional():
            package.requires.append(dependency)

    # Adding description
    package.description = release_info.get("summary", "")

    # Adding hashes information
    package.hashes = release_info["digests"]

    # Activate extra dependencies
    for extra in extras:
        if extra in package.extras:
            for dep in package.extras[extra]:
                dep.activate()

            package.requires += package.extras[extra]

    self._packages.append(package)

    return package
|
<SYSTEM_TASK:>
Initialize command.
<END_TASK>
<USER_TASK:>
Description:
def run(self, i, o):  # type: () -> int
    """
    Initialize command.
    """
|
self.input = i
self.output = PoetryStyle(i, o)

for logger in self._loggers:
    self.register_logger(logging.getLogger(logger))

return super(BaseCommand, self).run(i, o)
|
<SYSTEM_TASK:>
Register a package to a repository.
<END_TASK>
<USER_TASK:>
Description:
def _register(self, session, url):
    """
    Register a package to a repository.
    """
|
dist = self._poetry.file.parent / "dist"
file = dist / "{}-{}.tar.gz".format(
    self._package.name, normalize_version(self._package.version.text)
)

if not file.exists():
    raise RuntimeError('"{0}" does not exist.'.format(file.name))

data = self.post_data(file)
data.update({":action": "submit", "protocol_version": "1"})

data_to_send = self._prepare_data(data)
encoder = MultipartEncoder(data_to_send)
resp = session.post(
    url,
    data=encoder,
    allow_redirects=False,
    headers={"Content-Type": encoder.content_type},
)

resp.raise_for_status()

return resp
|
<SYSTEM_TASK:>
Find packages on the remote server.
<END_TASK>
<USER_TASK:>
Description:
def find_packages(
    self,
    name,  # type: str
    constraint=None,  # type: Union[VersionConstraint, str, None]
    extras=None,  # type: Union[list, None]
    allow_prereleases=False,  # type: bool
):  # type: (...) -> List[Package]
    """
    Find packages on the remote server.
    """
|
if constraint is None:
    constraint = "*"

if not isinstance(constraint, VersionConstraint):
    constraint = parse_constraint(constraint)

if isinstance(constraint, VersionRange):
    if (
        constraint.max is not None
        and constraint.max.is_prerelease()
        or constraint.min is not None
        and constraint.min.is_prerelease()
    ):
        allow_prereleases = True

info = self.get_package_info(name)

packages = []

for version, release in info["releases"].items():
    if not release:
        # Bad release
        self._log(
            "No release information found for {}-{}, skipping".format(
                name, version
            ),
            level="debug",
        )
        continue

    try:
        package = Package(name, version)
    except ParseVersionError:
        self._log(
            'Unable to parse version "{}" for the {} package, skipping'.format(
                version, name
            ),
            level="debug",
        )
        continue

    if package.is_prerelease() and not allow_prereleases:
        continue

    if not constraint or (constraint and constraint.allows(package.version)):
        if extras is not None:
            package.requires_extras = extras

        packages.append(package)

self._log(
    "{} packages found for {} {}".format(len(packages), name, str(constraint)),
    level="debug",
)

return packages
|
<SYSTEM_TASK:>
Return the package information given its name.
<END_TASK>
<USER_TASK:>
Description:
def get_package_info(self, name):  # type: (str) -> dict
    """
    Return the package information given its name.

    The information is returned from the cache if it exists
    or retrieved from the remote server.
    """
|
if self._disable_cache:
    return self._get_package_info(name)

return self._cache.store("packages").remember_forever(
    name, lambda: self._get_package_info(name)
)
|
<SYSTEM_TASK:>
Return the release information given a package name and a version.
<END_TASK>
<USER_TASK:>
Description:
def get_release_info(self, name, version):  # type: (str, str) -> dict
    """
    Return the release information given a package name and a version.

    The information is returned from the cache if it exists
    or retrieved from the remote server.
    """
|
if self._disable_cache:
    return self._get_release_info(name, version)

cached = self._cache.remember_forever(
    "{}:{}".format(name, version), lambda: self._get_release_info(name, version)
)

cache_version = cached.get("_cache_version", "0.0.0")
if parse_constraint(cache_version) != self.CACHE_VERSION:
    # The cache must be updated
    self._log(
        "The cache for {} {} is outdated. Refreshing.".format(name, version),
        level="debug",
    )
    cached = self._get_release_info(name, version)

    self._cache.forever("{}:{}".format(name, version), cached)

return cached
|
<SYSTEM_TASK:>
Prepare the installer for locking only.
<END_TASK>
<USER_TASK:>
Description:
def lock(self):  # type: () -> Installer
    """
    Prepare the installer for locking only.
    """
|
self.update()
self.execute_operations(False)
self._lock = True

return self
|
<SYSTEM_TASK:>
Execute a given operation.
<END_TASK>
<USER_TASK:>
Description:
def _execute(self, operation):  # type: (Operation) -> None
    """
    Execute a given operation.
    """
|
method = operation.job_type

getattr(self, "_execute_{}".format(method))(operation)
|
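The single line above is a name-based dispatch: the operation's `job_type` selects which `_execute_*` method runs. A self-contained sketch with hypothetical operation kinds:

from collections import namedtuple

Operation = namedtuple('Operation', ['job_type', 'package'])

class Executor:
    def _execute_install(self, operation):
        print('installing', operation.package)

    def _execute_update(self, operation):
        print('updating', operation.package)

    def _execute_uninstall(self, operation):
        print('removing', operation.package)

    def execute(self, operation):
        # 'install' -> _execute_install, and so on
        getattr(self, '_execute_{}'.format(operation.job_type))(operation)

Executor().execute(Operation('install', 'requests'))  # installing requests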
<SYSTEM_TASK:>
Returns all packages required by extras.
<END_TASK>
<USER_TASK:>
Description:
def _get_extra_packages(self, repo):
"""
Returns all packages required by extras.
Maybe we just let the solver handle it?
"""
|
if self._update:
extras = {k: [d.name for d in v] for k, v in self._package.extras.items()}
else:
extras = self._locker.lock_data.get("extras", {})
extra_packages = []
for extra_name, packages in extras.items():
if extra_name not in self._extras:
continue
extra_packages += [Dependency(p, "*") for p in packages]
def _extra_packages(packages):
pkgs = []
for package in packages:
for pkg in repo.packages:
if pkg.name == package.name:
pkgs.append(package)
pkgs += _extra_packages(pkg.requires)
break
return pkgs
return _extra_packages(extra_packages)
|
<SYSTEM_TASK:>
Returns the json representation of the dep graph
<END_TASK>
<USER_TASK:>
Description:
def _fetch_json(self):
"""Returns the json representation of the dep graph"""
|
print("Fetching from url: " + self.graph_url)
resp = urlopen(self.graph_url).read()
return json.loads(resp.decode('utf-8'))
|
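A standalone equivalent of the fetch above, assuming Python 3 imports (the class presumably gets `urlopen` from `urllib.request` or `urllib2` depending on the Python version):

import json
from urllib.request import urlopen

def fetch_json(url):
    resp = urlopen(url).read()          # raw bytes from the endpoint
    return json.loads(resp.decode('utf-8'))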
<SYSTEM_TASK:>
Searches for jobs matching the given ``job_name_prefix``.
<END_TASK>
<USER_TASK:>
Description:
def prefix_search(self, job_name_prefix):
"""Searches for jobs matching the given ``job_name_prefix``."""
|
json = self._fetch_json()
jobs = json['response']
for job in jobs:
if job.startswith(job_name_prefix):
yield self._build_results(jobs, job)
|
<SYSTEM_TASK:>
Searches for jobs matching the given ``status``.
<END_TASK>
<USER_TASK:>
Description:
def status_search(self, status):
"""Searches for jobs matching the given ``status``."""
|
json = self._fetch_json()
jobs = json['response']
for job in jobs:
job_info = jobs[job]
if job_info['status'].lower() == status.lower():
yield self._build_results(jobs, job)
|
<SYSTEM_TASK:>
Move file atomically. If source and destination are located
<END_TASK>
<USER_TASK:>
Description:
def move(self, old_path, new_path, raise_if_exists=False):
"""
Move file atomically. If source and destination are located
on different filesystems, atomicity is approximated
but cannot be guaranteed.
"""
|
if raise_if_exists and os.path.exists(new_path):
raise FileAlreadyExists('Destination exists: %s' % new_path)
d = os.path.dirname(new_path)
if d and not os.path.exists(d):
self.mkdir(d)
try:
os.rename(old_path, new_path)
except OSError as err:
if err.errno == errno.EXDEV:
new_path_tmp = '%s-%09d' % (new_path, random.randint(0, 999999999))
shutil.copy(old_path, new_path_tmp)
os.rename(new_path_tmp, new_path)
os.remove(old_path)
else:
raise err
|
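The fallback branch exists because `os.rename` fails with `errno.EXDEV` when source and destination sit on different filesystems. A trimmed, self-contained sketch of the same strategy:

import errno
import os
import random
import shutil

def atomic_move(src, dst):
    try:
        os.rename(src, dst)             # atomic when on the same filesystem
    except OSError as err:
        if err.errno != errno.EXDEV:
            raise
        tmp = '%s-%09d' % (dst, random.randint(0, 999999999))
        shutil.copy(src, tmp)           # cross-device copy to a temp name
        os.rename(tmp, dst)             # atomic: tmp and dst now share a fs
        os.remove(src)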
<SYSTEM_TASK:>
Create all parent folders if they do not exist.
<END_TASK>
<USER_TASK:>
Description:
def makedirs(self):
"""
Create all parent folders if they do not exist.
"""
|
normpath = os.path.normpath(self.path)
parentfolder = os.path.dirname(normpath)
if parentfolder:
try:
os.makedirs(parentfolder)
except OSError:
pass
|
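The bare `except OSError: pass` swallows the directory-already-exists race but also hides real failures such as permission errors. On Python 3 the narrower spelling would be (a sketch, not a drop-in for Python 2):

import os

def makedirs(path):
    parent = os.path.dirname(os.path.normpath(path))
    if parent:
        os.makedirs(parent, exist_ok=True)  # only 'already exists' is tolerated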
<SYSTEM_TASK:>
Read in the error file from bsub
<END_TASK>
<USER_TASK:>
Description:
def fetch_task_failures(self):
"""
Read in the error file from bsub
"""
|
error_file = os.path.join(self.tmp_dir, "job.err")
if os.path.isfile(error_file):
with open(error_file, "r") as f_err:
errors = f_err.readlines()
else:
errors = []  # keep the return type consistent with the readlines() branch
return errors
|
<SYSTEM_TASK:>
Read in the output file
<END_TASK>
<USER_TASK:>
Description:
def fetch_task_output(self):
"""
Read in the output file
"""
|
# Read in the output file
if os.path.isfile(os.path.join(self.tmp_dir, "job.out")):
with open(os.path.join(self.tmp_dir, "job.out"), "r") as f_out:
outputs = f_out.readlines()
else:
outputs = []  # keep the return type consistent with the readlines() branch
return outputs
|
<SYSTEM_TASK:>
Build the bsub argument list that will run lsf_runner.py on the directory we've specified.
<END_TASK>
<USER_TASK:>
Description:
def _run_job(self):
"""
Build the bsub argument list that will run lsf_runner.py on the directory we've specified.
"""
|
args = []
if isinstance(self.output(), list):
log_output = os.path.split(self.output()[0].path)
else:
log_output = os.path.split(self.output().path)
args += ["bsub", "-q", self.queue_flag]
args += ["-n", str(self.n_cpu_flag)]
args += ["-M", str(self.memory_flag)]
args += ["-R", "rusage[%s]" % self.resource_flag]
args += ["-W", str(self.runtime_flag)]
if self.job_name_flag:
args += ["-J", str(self.job_name_flag)]
args += ["-o", os.path.join(log_output[0], "job.out")]
args += ["-e", os.path.join(log_output[0], "job.err")]
if self.extra_bsub_args:
args += self.extra_bsub_args.split()
# Find where the runner file is
runner_path = os.path.abspath(lsf_runner.__file__)
args += [runner_path]
args += [self.tmp_dir]
# That should do it. Let the world know what we're doing.
LOGGER.info("### LSF SUBMISSION ARGS: %s",
" ".join([str(a) for a in args]))
# Submit the job
run_job_proc = subprocess.Popen(
[str(a) for a in args],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=self.tmp_dir)
output = run_job_proc.communicate()[0]
# ASSUMPTION
# The result will be of the format
# Job <123> is submitted to queue <myqueue>
# So get the number in those first brackets.
# I cannot think of a better workaround that leaves logic on the Task side of things.
LOGGER.info("### JOB SUBMISSION OUTPUT: %s", str(output))
self.job_id = int(output.split("<")[1].split(">")[0])
LOGGER.info(
"Job %ssubmitted as job %s",
self.job_name_flag + ' ',
str(self.job_id)
)
self._track_job()
# If we want to save the job temporaries, then do so
# We'll move them to be next to the job output
if self.save_job_info:
LOGGER.info("Saving up temporary bits")
# dest_dir = self.output().path
shutil.move(self.tmp_dir, "/".join(log_output[0:-1]))
# Now delete the temporaries, if they're there.
self._finish()
|
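The job-id extraction above assumes the exact `Job <123> is submitted to queue <...>` shape and splits on angle brackets. A slightly more defensive sketch of the same parsing, using a regex (a hypothetical helper, not part of the task class):

import re

def parse_bsub_job_id(output):
    # Expected form: "Job <123> is submitted to queue <myqueue>."
    match = re.search(r'Job <(\d+)>', output)
    if match is None:
        raise ValueError('Unexpected bsub output: %r' % output)
    return int(match.group(1))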
<SYSTEM_TASK:>
Returns the target output for this task.
<END_TASK>
<USER_TASK:>
Description:
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`~luigi.target.Target`)
"""
|
return luigi.contrib.hdfs.HdfsTarget('data-matrix', format=luigi.format.Gzip)
|
<SYSTEM_TASK:>
Runs one instance of the API server.
<END_TASK>
<USER_TASK:>
Description:
def run(api_port=8082, address=None, unix_socket=None, scheduler=None):
"""
Runs one instance of the API server.
"""
|
if scheduler is None:
scheduler = Scheduler()
# load scheduler state
scheduler.load()
_init_api(
scheduler=scheduler,
api_port=api_port,
address=address,
unix_socket=unix_socket,
)
# prune work DAG every 60 seconds
pruner = tornado.ioloop.PeriodicCallback(scheduler.prune, 60000)
pruner.start()
def shutdown_handler(signum, frame):
exit_handler()
sys.exit(0)
@atexit.register
def exit_handler():
logger.info("Scheduler instance shutting down")
scheduler.dump()
stop()
signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)
if os.name == 'nt':
signal.signal(signal.SIGBREAK, shutdown_handler)
else:
signal.signal(signal.SIGQUIT, shutdown_handler)
logger.info("Scheduler starting up")
tornado.ioloop.IOLoop.instance().start()
|
<SYSTEM_TASK:>
Gets queried columns names.
<END_TASK>
<USER_TASK:>
Description:
def get_soql_fields(soql):
"""
Gets queried columns names.
"""
|
soql_fields = re.search('(?s)(?<=select)(.*)(?=from)', soql, re.IGNORECASE)  # get fields; global flag moved to the front for Python 3.11+
soql_fields = re.sub(' ', '', soql_fields.group()) # remove extra spaces
soql_fields = re.sub('\t', '', soql_fields) # remove tabs
fields = re.split(',|\n|\r', soql_fields)  # split on commas, newlines and carriage returns
fields = [field for field in fields if field != ''] # remove empty strings
return fields
|
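A quick usage example of the function above (assuming the regex-based parsing as written):

soql = "SELECT Id, Name,\n Email FROM Lead"
print(get_soql_fields(soql))  # ['Id', 'Name', 'Email']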
<SYSTEM_TASK:>
Merges the resulting files of a multi-result batch bulk query.
<END_TASK>
<USER_TASK:>
Description:
def merge_batch_results(self, result_ids):
"""
Merges the resulting files of a multi-result batch bulk query.
"""
|
outfile = open(self.output().path, 'w')
if self.content_type.lower() == 'csv':
for i, result_id in enumerate(result_ids):
with open("%s.%d" % (self.output().path, i), 'r') as f:
header = f.readline()
if i == 0:
outfile.write(header)
for line in f:
outfile.write(line)
else:
raise Exception("Batch result merging not implemented for %s" % self.content_type)
outfile.close()
|
<SYSTEM_TASK:>
Starts a Salesforce session and determines which SF instance to use for future requests.
<END_TASK>
<USER_TASK:>
Description:
def start_session(self):
"""
Starts a Salesforce session and determines which SF instance to use for future requests.
"""
|
if self.has_active_session():
raise Exception("Session already in progress.")
response = requests.post(self._get_login_url(),
headers=self._get_login_headers(),
data=self._get_login_xml())
response.raise_for_status()
root = ET.fromstring(response.text)
for e in root.iter("%ssessionId" % self.SOAP_NS):
if self.session_id:
raise Exception("Invalid login attempt. Multiple session ids found.")
self.session_id = e.text
for e in root.iter("%sserverUrl" % self.SOAP_NS):
if self.server_url:
raise Exception("Invalid login attempt. Multiple server urls found.")
self.server_url = e.text
if not self.has_active_session():
raise Exception("Invalid login attempt resulted in null sessionId [%s] and/or serverUrl [%s]." %
(self.session_id, self.server_url))
self.hostname = urlsplit(self.server_url).hostname
|
<SYSTEM_TASK:>
Return the result of a Salesforce SOQL query as a dict decoded from the Salesforce response JSON payload.
<END_TASK>
<USER_TASK:>
Description:
def query(self, query, **kwargs):
"""
Return the result of a Salesforce SOQL query as a dict decoded from the Salesforce response JSON payload.
:param query: the SOQL query to send to Salesforce, e.g. "SELECT id from Lead WHERE email = '[email protected]'"
"""
|
params = {'q': query}
response = requests.get(self._get_norm_query_url(),
headers=self._get_rest_headers(),
params=params,
**kwargs)
if response.status_code != requests.codes.ok:
raise Exception(response.content)
return response.json()
|
<SYSTEM_TASK:>
Retrieves more results from a query that returned more results
<END_TASK>
<USER_TASK:>
Description:
def query_more(self, next_records_identifier, identifier_is_url=False, **kwargs):
"""
Retrieves more results from a query that returned more results
than the batch maximum. Returns a dict decoded from the Salesforce
response JSON payload.
:param next_records_identifier: either the Id of the next Salesforce
object in the result, or a URL to the
next record in the result.
:param identifier_is_url: True if `next_records_identifier` should be
treated as a URL, False if
`next_records_identifier` should be treated as
an Id.
"""
|
if identifier_is_url:
# Don't use `self.base_url` here because the full URI is provided
url = (u'https://{instance}{next_record_url}'
.format(instance=self.hostname,
next_record_url=next_records_identifier))
else:
url = self._get_norm_query_url() + '{next_record_id}'
url = url.format(next_record_id=next_records_identifier)
response = requests.get(url, headers=self._get_rest_headers(), **kwargs)
response.raise_for_status()
return response.json()
|
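A typical way to drain a paginated result combines `query` with `query_more`; the `done`, `records` and `nextRecordsUrl` fields come from the Salesforce REST API response. A hedged usage sketch, assuming `sf` is an instance of the class above:

def fetch_all(sf, soql):
    result = sf.query(soql)
    records = result['records']
    while not result['done']:
        result = sf.query_more(result['nextRecordsUrl'], identifier_is_url=True)
        records.extend(result['records'])
    return records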
<SYSTEM_TASK:>
Gets all details for existing job
<END_TASK>
<USER_TASK:>
Description:
def get_job_details(self, job_id):
"""
Gets all details for existing job
:param job_id: job_id as returned by 'create_operation_job(...)'
:return: job info as xml
"""
|
response = requests.get(self._get_job_details_url(job_id))
response.raise_for_status()
return response
|
<SYSTEM_TASK:>
Abort an existing job. When a job is aborted, no more records are processed.
<END_TASK>
<USER_TASK:>
Description:
def abort_job(self, job_id):
"""
Abort an existing job. When a job is aborted, no more records are processed.
Changes to data may already have been committed and aren't rolled back.
:param job_id: job_id as returned by 'create_operation_job(...)'
:return: abort response as xml
"""
|
response = requests.post(self._get_abort_job_url(job_id),
headers=self._get_abort_job_headers(),
data=self._get_abort_job_xml())
response.raise_for_status()
return response
|
<SYSTEM_TASK:>
Creates a batch with either a string of data or a file containing data.
<END_TASK>
<USER_TASK:>
Description:
def create_batch(self, job_id, data, file_type):
"""
Creates a batch with either a string of data or a file containing data.
If a file is provided, this will pull the contents of the file_target into memory when running.
That shouldn't be a problem for any file that meets the Salesforce single-batch upload
size limit (10MB); reading into memory is done so compressed files can be uploaded properly.
:param job_id: job_id as returned by 'create_operation_job(...)'
:param data: the batch contents, either a string of data or a file containing data
:return: Returns batch_id
"""
|
if not job_id or not self.has_active_session():
raise Exception("Can not create a batch without a valid job_id and an active session.")
headers = self._get_create_batch_content_headers(file_type)
headers['Content-Length'] = str(len(data))
response = requests.post(self._get_create_batch_url(job_id),
headers=headers,
data=data)
response.raise_for_status()
root = ET.fromstring(response.text)
batch_id = root.find('%sid' % self.API_NS).text
return batch_id
|
<SYSTEM_TASK:>
Get result IDs of a batch that has completed processing.
<END_TASK>
<USER_TASK:>
Description:
def get_batch_result_ids(self, job_id, batch_id):
"""
Get result IDs of a batch that has completed processing.
:param job_id: job_id as returned by 'create_operation_job(...)'
:param batch_id: batch_id as returned by 'create_batch(...)'
:return: list of batch result IDs to be used in 'get_batch_result(...)'
"""
|
response = requests.get(self._get_batch_results_url(job_id, batch_id),
headers=self._get_batch_info_headers())
response.raise_for_status()
root = ET.fromstring(response.text)
result_ids = [r.text for r in root.findall('%sresult' % self.API_NS)]
return result_ids
|
<SYSTEM_TASK:>
Parse "state" column from `qstat` output for given job_id
<END_TASK>
<USER_TASK:>
Description:
def _parse_qstat_state(qstat_out, job_id):
"""Parse "state" column from `qstat` output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`qstat` output is empty or job_id is not found.
"""
|
if qstat_out.strip() == '':
return 'u'
lines = qstat_out.split('\n')
# skip past header
while not lines.pop(0).startswith('---'):
pass
for line in lines:
if line:
job, prior, name, user, state = line.strip().split()[0:5]
if int(job) == int(job_id):
return state
return 'u'
|
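An example run of the parser above against a typical `qstat` listing:

qstat_out = (
    "job-ID  prior   name   user  state\n"
    "-----------------------------------\n"
    "  4711  0.5550  myjob  alice r\n"
)
print(_parse_qstat_state(qstat_out, 4711))  # 'r'
print(_parse_qstat_state(qstat_out, 9999))  # 'u'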
<SYSTEM_TASK:>
Writes data in JSON format into the task's output target.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""
Writes data in JSON format into the task's output target.
The data objects have the following attributes:
* `_id` is the default Elasticsearch id field,
* `text`: the text,
* `date`: the day when the data was created.
"""
|
today = datetime.date.today()
with self.output().open('w') as output:
for i in range(5):
output.write(json.dumps({'_id': i, 'text': 'Hi %s' % i,
'date': str(today)}))
output.write('\n')
|
<SYSTEM_TASK:>
Creates the client as specified in the `luigi.cfg` configuration.
<END_TASK>
<USER_TASK:>
Description:
def get_autoconfig_client(client_cache=_AUTOCONFIG_CLIENT):
"""
Creates the client as specified in the `luigi.cfg` configuration.
"""
|
try:
return client_cache.client
except AttributeError:
configured_client = hdfs_config.get_configured_hdfs_client()
if configured_client == "webhdfs":
client_cache.client = hdfs_webhdfs_client.WebHdfsClient()
elif configured_client == "snakebite":
client_cache.client = hdfs_snakebite_client.SnakebiteHdfsClient()
elif configured_client == "snakebite_with_hadoopcli_fallback":
client_cache.client = luigi.contrib.target.CascadingClient([
hdfs_snakebite_client.SnakebiteHdfsClient(),
hdfs_hadoopcli_clients.create_hadoopcli_client(),
])
elif configured_client == "hadoopcli":
client_cache.client = hdfs_hadoopcli_clients.create_hadoopcli_client()
else:
raise Exception("Unknown hdfs client " + configured_client)
return client_cache.client
|
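The try/except around `client_cache.client` is EAFP-style memoisation: the mutable default argument is created once, so the `AttributeError` branch runs only on first use. The pattern in isolation:

class _ClientCache(object):
    pass

def get_client(client_cache=_ClientCache()):
    try:
        return client_cache.client
    except AttributeError:
        client_cache.client = object()  # stand-in for a real HDFS client
        return client_cache.client

assert get_client() is get_client()    # same instance on every call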
<SYSTEM_TASK:>
Sends notification through AWS SES.
<END_TASK>
<USER_TASK:>
Description:
def send_email_ses(sender, subject, message, recipients, image_png):
"""
Sends notification through AWS SES.
Does not handle access keys. Use either
1/ configuration file
2/ EC2 instance profile
See also https://boto3.readthedocs.io/en/latest/guide/configuration.html.
"""
|
from boto3 import client as boto3_client
client = boto3_client('ses')
msg_root = generate_email(sender, subject, message, recipients, image_png)
response = client.send_raw_email(Source=sender,
Destinations=recipients,
RawMessage={'Data': msg_root.as_string()})
logger.debug(("Message sent to SES.\nMessageId: {},\nRequestId: {},\n"
"HTTPSStatusCode: {}").format(response['MessageId'],
response['ResponseMetadata']['RequestId'],
response['ResponseMetadata']['HTTPStatusCode']))
|
<SYSTEM_TASK:>
Sends notification through AWS SNS. Takes Topic ARN from recipients.
<END_TASK>
<USER_TASK:>
Description:
def send_email_sns(sender, subject, message, topic_ARN, image_png):
"""
Sends notification through AWS SNS. Takes Topic ARN from recipients.
Does not handle access keys. Use either
1/ configuration file
2/ EC2 instance profile
See also https://boto3.readthedocs.io/en/latest/guide/configuration.html.
"""
|
from boto3 import resource as boto3_resource
sns = boto3_resource('sns')
topic = sns.Topic(topic_ARN[0])
# Subject is max 100 chars
if len(subject) > 100:
subject = subject[0:48] + '...' + subject[-49:]
response = topic.publish(Subject=subject, Message=message)
logger.debug(("Message sent to SNS.\nMessageId: {},\nRequestId: {},\n"
"HTTPSStatusCode: {}").format(response['MessageId'],
response['ResponseMetadata']['RequestId'],
response['ResponseMetadata']['HTTPStatusCode']))
|
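The truncation above keeps the subject at exactly the SNS limit: 48 head characters, a 3-character ellipsis, and 49 tail characters sum to 100.

subject = 'x' * 150
truncated = subject[0:48] + '...' + subject[-49:]
assert len(truncated) == 100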
<SYSTEM_TASK:>
Decides whether to send notification. Notification is cancelled if there are
<END_TASK>
<USER_TASK:>
Description:
def send_email(subject, message, sender, recipients, image_png=None):
"""
Decides whether to send notification. Notification is cancelled if there are
no recipients, if stdout is attached to a tty, or if running in debug mode.
Dispatches on config value email.method. Default is 'smtp'.
"""
|
notifiers = {
'ses': send_email_ses,
'sendgrid': send_email_sendgrid,
'smtp': send_email_smtp,
'sns': send_email_sns,
}
subject = _prefix(subject)
if not recipients or recipients == (None,):
return
if _email_disabled_reason():
logger.info("Not sending email to %r because %s",
recipients, _email_disabled_reason())
return
# Clean the recipients lists to allow multiple email addresses, comma
# separated in luigi.cfg
recipients_tmp = []
for r in recipients:
recipients_tmp.extend([a.strip() for a in r.split(',') if a.strip()])
# Replace original recipients with the clean list
recipients = recipients_tmp
logger.info("Sending email to %r", recipients)
# Get appropriate sender and call it to send the notification
email_sender = notifiers[email().method]
email_sender(sender, subject, message, recipients, image_png)
|
<SYSTEM_TASK:>
Sends an email to the configured error email, if it's configured.
<END_TASK>
<USER_TASK:>
Description:
def send_error_email(subject, message, additional_recipients=None):
"""
Sends an email to the configured error email, if it's configured.
"""
|
recipients = _email_recipients(additional_recipients)
sender = email().sender
send_email(
subject=subject,
message=message,
sender=sender,
recipients=recipients
)
|
<SYSTEM_TASK:>
Format a message body for an error email related to a luigi.task.Task
<END_TASK>
<USER_TASK:>
Description:
def format_task_error(headline, task, command, formatted_exception=None):
"""
Format a message body for an error email related to a luigi.task.Task
:param headline: Summary line for the message
:param task: `luigi.task.Task` instance where this error occurred
:param formatted_exception: optional string showing traceback
:return: message body
"""
|
if formatted_exception:
formatted_exception = wrap_traceback(formatted_exception)
else:
formatted_exception = ""
if email().format == 'html':
msg_template = textwrap.dedent('''
<html>
<body>
<h2>{headline}</h2>
<table style="border-top: 1px solid black; border-bottom: 1px solid black">
<thead>
<tr><th>name</th><td>{name}</td></tr>
</thead>
<tbody>
{param_rows}
</tbody>
</table>
<h2>Command line</h2>
<pre>
{command}
</pre>
<h2>Traceback</h2>
{traceback}
</body>
</html>
''')
str_params = task.to_str_params()
params = '\n'.join('<tr><th>{}</th><td>{}</td></tr>'.format(*items) for items in str_params.items())
body = msg_template.format(headline=headline, name=task.task_family, param_rows=params,
command=command, traceback=formatted_exception)
else:
msg_template = textwrap.dedent('''\
{headline}
Name: {name}
Parameters:
{params}
Command line:
{command}
{traceback}
''')
str_params = task.to_str_params()
max_width = max([0] + [len(x) for x in str_params.keys()])
params = '\n'.join(' {:{width}}: {}'.format(*items, width=max_width) for items in str_params.items())
body = msg_template.format(headline=headline, name=task.task_family, params=params,
command=command, traceback=formatted_exception)
return body
|
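In the plain-text branch, `max_width` pads every parameter name to the longest key so the colons line up. A quick illustration with hypothetical parameters:

str_params = {'date': '2024-01-01', 'worker_id': '5'}
max_width = max([0] + [len(x) for x in str_params.keys()])
print('\n'.join(' {:{width}}: {}'.format(*items, width=max_width)
                for items in str_params.items()))
# Output (names padded to the longest key):
#  date     : 2024-01-01
#  worker_id: 5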
<SYSTEM_TASK:>
Returns true if the path exists and false otherwise.
<END_TASK>
<USER_TASK:>
Description:
def exists(self, path):
"""
Returns true if the path exists and false otherwise.
"""
|
import hdfs
try:
self.client.status(path)
return True
except hdfs.util.HdfsError as e:
if str(e).startswith('File does not exist: '):
return False
else:
raise e
|
<SYSTEM_TASK:>
Open the target for reading or writing
<END_TASK>
<USER_TASK:>
Description:
def open(self, mode):
"""
Open the target for reading or writing
:param char mode:
'r' for reading and 'w' for writing.
'b' is not supported; passing any other mode raises ValueError. For binary mode, use `format`
:return:
* :class:`.ReadableAzureBlobFile` if 'r'
* :class:`.AtomicAzureBlobFile` if 'w'
"""
|
if mode not in ('r', 'w'):
raise ValueError("Unsupported open mode '%s'" % mode)
if mode == 'r':
return self.format.pipe_reader(ReadableAzureBlobFile(self.container, self.blob, self.client, self.download_when_reading, **self.azure_blob_options))
else:
return self.format.pipe_writer(AtomicAzureBlobFile(self.container, self.blob, self.client, **self.azure_blob_options))
|
<SYSTEM_TASK:>
Returns a temporary file path based on an MD5 hash generated from the task's name and its arguments
<END_TASK>
<USER_TASK:>
Description:
def get_path(self):
"""
Returns a temporary file path based on an MD5 hash generated from the task's name and its arguments
"""
|
md5_hash = hashlib.md5(self.task_id.encode()).hexdigest()
logger.debug('Hash %s corresponds to task %s', md5_hash, self.task_id)
return os.path.join(self.temp_dir, str(self.unique.value), md5_hash)
|
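What `get_path()` computes, in isolation, with a hypothetical task id and marker directory:

import hashlib
import os

task_id = 'MyTask(date=2024-01-01)'
md5_hash = hashlib.md5(task_id.encode()).hexdigest()
print(os.path.join('/tmp/luigi-markers', '1', md5_hash))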
<SYSTEM_TASK:>
Creates temporary file to mark the task as `done`
<END_TASK>
<USER_TASK:>
Description:
def done(self):
"""
Creates temporary file to mark the task as `done`
"""
|
logger.info('Marking %s as done', self)
fn = self.get_path()
try:
os.makedirs(os.path.dirname(fn))
except OSError:
pass
open(fn, 'w').close()
|
<SYSTEM_TASK:>
Does provided path exist on S3?
<END_TASK>
<USER_TASK:>
Description:
def exists(self, path):
"""
Does provided path exist on S3?
"""
|
(bucket, key) = self._path_to_bucket_and_key(path)
# root always exists
if self._is_root(key):
return True
# file
if self._exists(bucket, key):
return True
if self.isdir(path):
return True
logger.debug('Path %s does not exist', path)
return False
|
<SYSTEM_TASK:>
Returns the object summary at the path
<END_TASK>
<USER_TASK:>
Description:
def get_key(self, path):
"""
Returns the object summary at the path
"""
|
(bucket, key) = self._path_to_bucket_and_key(path)
if self._exists(bucket, key):
return self.s3.ObjectSummary(bucket, key)
|
<SYSTEM_TASK:>
Get an object stored in S3 and write it to a local path.
<END_TASK>
<USER_TASK:>
Description:
def get(self, s3_path, destination_local_path):
"""
Get an object stored in S3 and write it to a local path.
"""
|
(bucket, key) = self._path_to_bucket_and_key(s3_path)
# download the file
self.s3.meta.client.download_file(bucket, key, destination_local_path)
|
<SYSTEM_TASK:>
Get the contents of an object stored in S3 as bytes
<END_TASK>
<USER_TASK:>
Description:
def get_as_bytes(self, s3_path):
"""
Get the contents of an object stored in S3 as bytes
:param s3_path: URL for target S3 location
:return: File contents as pure bytes
"""
|
(bucket, key) = self._path_to_bucket_and_key(s3_path)
obj = self.s3.Object(bucket, key)
contents = obj.get()['Body'].read()
return contents
|