sentence1 | sentence2 | label |
---|---|---|
def _improve_method_docs(obj, name, lines):
"""Improve the documentation of various methods.
:param obj: the instance of the method to document.
:param name: full dotted path to the object.
:param lines: the documentation lines, modified in place.
"""
if not lines:
# Not doing obj.__module__ lookups to avoid performance issues.
if name.endswith('_display'):
match = RE_GET_FOO_DISPLAY.search(name)
if match is not None:
# Django get_..._display method
lines.append("**Autogenerated:** Shows the label of the :attr:`{field}`".format(
field=match.group('field')
))
elif '.get_next_by_' in name:
match = RE_GET_NEXT_BY.search(name)
if match is not None:
lines.append("**Autogenerated:** Finds next instance"
" based on :attr:`{field}`.".format(
field=match.group('field')
))
elif '.get_previous_by_' in name:
match = RE_GET_PREVIOUS_BY.search(name)
if match is not None:
lines.append("**Autogenerated:** Finds previous instance"
" based on :attr:`{field}`.".format(
field=match.group('field')
)) | Improve the documentation of various methods.
:param obj: the instance of the method to document.
:param name: full dotted path to the object.
:param lines: the documentation lines, modified in place. | entailment |
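The helper above relies on module-level regular expressions that are not shown in this excerpt. A minimal sketch of what they might look like, assuming Django's autogenerated method names; the real module's patterns may differ:

import re

# Illustrative assumptions, not the module's actual definitions.
RE_GET_FOO_DISPLAY = re.compile(r'\.get_(?P<field>\w+)_display$')
RE_GET_NEXT_BY = re.compile(r'\.get_next_by_(?P<field>\w+)$')
RE_GET_PREVIOUS_BY = re.compile(r'\.get_previous_by_(?P<field>\w+)$')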
def attr_names(cls) -> List[str]:
"""
Returns annotated attribute names
:return: List[str]
"""
return list(cls.attr_types().keys()) | Returns annotated attribute names
:return: List[str] | entailment |
def elliptic_fourier_descriptors(contour, order=10, normalize=False):
"""Calculate elliptical Fourier descriptors for a contour.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:param int order: The order of Fourier coefficients to calculate.
:param bool normalize: If the coefficients should be normalized;
see references for details.
:return: A ``[order x 4]`` array of Fourier coefficients.
:rtype: :py:class:`numpy.ndarray`
"""
dxy = np.diff(contour, axis=0)
dt = np.sqrt((dxy ** 2).sum(axis=1))
t = np.concatenate([([0., ]), np.cumsum(dt)])
T = t[-1]
phi = (2 * np.pi * t) / T
coeffs = np.zeros((order, 4))
for n in _range(1, order + 1):
const = T / (2 * n * n * np.pi * np.pi)
phi_n = phi * n
d_cos_phi_n = np.cos(phi_n[1:]) - np.cos(phi_n[:-1])
d_sin_phi_n = np.sin(phi_n[1:]) - np.sin(phi_n[:-1])
a_n = const * np.sum((dxy[:, 0] / dt) * d_cos_phi_n)
b_n = const * np.sum((dxy[:, 0] / dt) * d_sin_phi_n)
c_n = const * np.sum((dxy[:, 1] / dt) * d_cos_phi_n)
d_n = const * np.sum((dxy[:, 1] / dt) * d_sin_phi_n)
coeffs[n - 1, :] = a_n, b_n, c_n, d_n
if normalize:
coeffs = normalize_efd(coeffs)
return coeffs | Calculate elliptical Fourier descriptors for a contour.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:param int order: The order of Fourier coefficients to calculate.
:param bool normalize: If the coefficients should be normalized;
see references for details.
:return: A ``[order x 4]`` array of Fourier coefficients.
:rtype: :py:class:`numpy.ndarray` | entailment |
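A minimal usage sketch for elliptic_fourier_descriptors; the closed square contour is illustrative:

import numpy as np

# A closed square contour; the last point repeats the first.
square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]])
coeffs = elliptic_fourier_descriptors(square, order=8)
print(coeffs.shape)  # (8, 4): one (a_n, b_n, c_n, d_n) row per harmonic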
def normalize_efd(coeffs, size_invariant=True):
"""Normalizes an array of Fourier coefficients.
See [#a]_ and [#b]_ for details.
:param numpy.ndarray coeffs: A ``[n x 4]`` Fourier coefficient array.
:param bool size_invariant: If size invariance normalizing should be done as well.
Default is ``True``.
:return: The normalized ``[n x 4]`` Fourier coefficient array.
:rtype: :py:class:`numpy.ndarray`
"""
# Make the coefficients have a zero phase shift from
# the first major axis. Theta_1 is that shift angle.
theta_1 = 0.5 * np.arctan2(
2 * ((coeffs[0, 0] * coeffs[0, 1]) + (coeffs[0, 2] * coeffs[0, 3])),
((coeffs[0, 0] ** 2) - (coeffs[0, 1] ** 2) + (coeffs[0, 2] ** 2) - (coeffs[0, 3] ** 2)))
# Rotate all coefficients by theta_1.
for n in _range(1, coeffs.shape[0] + 1):
coeffs[n - 1, :] = np.dot(
np.array([[coeffs[n - 1, 0], coeffs[n - 1, 1]],
[coeffs[n - 1, 2], coeffs[n - 1, 3]]]),
np.array([[np.cos(n * theta_1), -np.sin(n * theta_1)],
[np.sin(n * theta_1), np.cos(n * theta_1)]])).flatten()
# Make the coefficients rotation invariant by rotating so that
# the semi-major axis is parallel to the x-axis.
psi_1 = np.arctan2(coeffs[0, 2], coeffs[0, 0])
psi_rotation_matrix = np.array([[np.cos(psi_1), np.sin(psi_1)],
[-np.sin(psi_1), np.cos(psi_1)]])
# Rotate all coefficients by -psi_1.
for n in _range(1, coeffs.shape[0] + 1):
coeffs[n - 1, :] = psi_rotation_matrix.dot(
np.array([[coeffs[n - 1, 0], coeffs[n - 1, 1]],
[coeffs[n - 1, 2], coeffs[n - 1, 3]]])).flatten()
if size_invariant:
# Obtain size-invariance by normalizing.
coeffs /= np.abs(coeffs[0, 0])
return coeffs | Normalizes an array of Fourier coefficients.
See [#a]_ and [#b]_ for details.
:param numpy.ndarray coeffs: A ``[n x 4]`` Fourier coefficient array.
:param bool size_invariant: If size invariance normalizing should be done as well.
Default is ``True``.
:return: The normalized ``[n x 4]`` Fourier coefficient array.
:rtype: :py:class:`numpy.ndarray` | entailment |
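A sketch of a sanity check for the normalization: the descriptors of a rotated contour should match those of the original, up to the sign ambiguity left by the arctan2-based angles (hence the comparison of absolute values):

import numpy as np

rect = np.array([[0., 0.], [2., 0.], [2., 1.], [0., 1.], [0., 0.]])
theta = np.pi / 5
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta), np.cos(theta)]])
a = elliptic_fourier_descriptors(rect, order=8, normalize=True)
b = elliptic_fourier_descriptors(rect.dot(rot.T), order=8, normalize=True)
print(np.allclose(np.abs(a), np.abs(b), atol=1e-3))  # expected: True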
def calculate_dc_coefficients(contour):
"""Calculate the :math:`A_0` and :math:`C_0` coefficients of the elliptic Fourier series.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:return: The :math:`A_0` and :math:`C_0` coefficients.
:rtype: tuple
"""
dxy = np.diff(contour, axis=0)
dt = np.sqrt((dxy ** 2).sum(axis=1))
t = np.concatenate([([0., ]), np.cumsum(dt)])
T = t[-1]
xi = np.cumsum(dxy[:, 0]) - (dxy[:, 0] / dt) * t[1:]
A0 = (1 / T) * np.sum(((dxy[:, 0] / (2 * dt)) * np.diff(t ** 2)) + xi * dt)
delta = np.cumsum(dxy[:, 1]) - (dxy[:, 1] / dt) * t[1:]
C0 = (1 / T) * np.sum(((dxy[:, 1] / (2 * dt)) * np.diff(t ** 2)) + delta * dt)
# A0 and C0 relate to the first point of the contour array as origin.
# Add those values to the coefficients to make them relate to the true origin.
return contour[0, 0] + A0, contour[0, 1] + C0 | Calculate the :math:`A_0` and :math:`C_0` coefficients of the elliptic Fourier series.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:return: The :math:`A_0` and :math:`C_0` coefficients.
:rtype: tuple | entailment |
def plot_efd(coeffs, locus=(0., 0.), image=None, contour=None, n=300):
"""Plot a ``[2 x (N / 2)]`` grid of successive truncations of the series.
.. note::
Requires `matplotlib <http://matplotlib.org/>`_!
:param numpy.ndarray coeffs: ``[N x 4]`` Fourier coefficient array.
:param list, tuple or numpy.ndarray locus:
The :math:`A_0` and :math:`C_0` elliptic locus in [#a]_ and [#b]_.
:param int n: Number of points to use for plotting of Fourier series.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
print("Cannot plot: matplotlib was not installed.")
return
N = coeffs.shape[0]
N_half = int(np.ceil(N / 2))
n_rows = 2
t = np.linspace(0, 1.0, n)
xt = np.ones((n,)) * locus[0]
yt = np.ones((n,)) * locus[1]
for n in _range(coeffs.shape[0]):
xt += (coeffs[n, 0] * np.cos(2 * (n + 1) * np.pi * t)) + \
(coeffs[n, 1] * np.sin(2 * (n + 1) * np.pi * t))
yt += (coeffs[n, 2] * np.cos(2 * (n + 1) * np.pi * t)) + \
(coeffs[n, 3] * np.sin(2 * (n + 1) * np.pi * t))
ax = plt.subplot2grid((n_rows, N_half), (n // N_half, n % N_half))
ax.set_title(str(n + 1))
if contour is not None:
ax.plot(contour[:, 1], contour[:, 0], 'c--', linewidth=2)
ax.plot(yt, xt, 'r', linewidth=2)
if image is not None:
ax.imshow(image, plt.cm.gray)
plt.show() | Plot a ``[2 x (N / 2)]`` grid of successive truncations of the series.
.. note::
Requires `matplotlib <http://matplotlib.org/>`_!
:param numpy.ndarray coeffs: ``[N x 4]`` Fourier coefficient array.
:param list, tuple or numpy.ndarray locus:
The :math:`A_0` and :math:`C_0` elliptic locus in [#a]_ and [#b]_.
:param int n: Number of points to use for plotting of Fourier series. | entailment |
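Putting the three functions together, a sketch of the full pipeline: compute the coefficients, recover the DC offset so the reconstruction sits at the contour's true origin, and plot the successive truncations (requires matplotlib):

import numpy as np

contour = np.array([[0., 0.], [2., 0.], [2., 1.], [0., 1.], [0., 0.]])
coeffs = elliptic_fourier_descriptors(contour, order=10)
locus = calculate_dc_coefficients(contour)
plot_efd(coeffs, locus=locus, contour=contour)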
def _errcheck(result, func, arguments):
"""
Error checker for functions returning an integer indicating
success (0) / failure (1).
Raises an XdoException in case of error, otherwise just
returns ``None`` (returning the original code, 0, would be
useless anyway..)
"""
if result != 0:
raise XdoException(
'Function {0} returned error code {1}'
.format(func.__name__, result))
return None | Error checker for functions returning an integer indicating
success (0) / failure (1).
Raises an XdoException in case of error, otherwise just
returns ``None`` (returning the original code, 0, would be
useless anyway..) | entailment |
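For context, a sketch of how an errcheck like this is typically wired up in ctypes: assign it to the foreign function object and ctypes invokes it after every call. The soname and argtypes below are assumptions, not taken from the module:

import ctypes

_libxdo = ctypes.CDLL('libxdo.so.3')  # assumed soname
_libxdo.xdo_move_mouse.argtypes = [
    ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_libxdo.xdo_move_mouse.errcheck = _errcheck  # raises XdoException on nonzero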
def _gen_input_mask(mask):
"""Generate input mask from bytemask"""
return input_mask(
shift=bool(mask & MOD_Shift),
lock=bool(mask & MOD_Lock),
control=bool(mask & MOD_Control),
mod1=bool(mask & MOD_Mod1),
mod2=bool(mask & MOD_Mod2),
mod3=bool(mask & MOD_Mod3),
mod4=bool(mask & MOD_Mod4),
mod5=bool(mask & MOD_Mod5)) | Generate input mask from bytemask | entailment |
def move_mouse(self, x, y, screen=0):
"""
Move the mouse to a specific location.
:param x: the target X coordinate on the screen in pixels.
:param y: the target Y coordinate on the screen in pixels.
:param screen: the screen (number) you want to move on.
"""
# todo: apparently the "screen" argument is not behaving properly
# and sometimes even making the interpreter crash..
# Figure out why (changed API / using wrong header?)
# >>> xdo.move_mouse(3000,200,1)
# X Error of failed request: BadWindow (invalid Window parameter)
# Major opcode of failed request: 41 (X_WarpPointer)
# Resource id in failed request: 0x2a4fca0
# Serial number of failed request: 25
# Current serial number in output stream: 26
# Just to be safe..
# screen = 0
x = ctypes.c_int(x)
y = ctypes.c_int(y)
screen = ctypes.c_int(screen)
_libxdo.xdo_move_mouse(self._xdo, x, y, screen) | Move the mouse to a specific location.
:param x: the target X coordinate on the screen in pixels.
:param y: the target Y coordinate on the screen in pixels.
:param screen: the screen (number) you want to move on. | entailment |
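A minimal usage sketch, assuming the wrapper class is exposed as Xdo in an xdo module:

from xdo import Xdo  # assumed import path

xdo = Xdo()
xdo.move_mouse(100, 200)  # jump to (100, 200) on screen 0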
def move_mouse_relative_to_window(self, window, x, y):
"""
Move the mouse to a specific location relative to the top-left corner
of a window.
:param x: the target X coordinate on the screen in pixels.
:param y: the target Y coordinate on the screen in pixels.
"""
_libxdo.xdo_move_mouse_relative_to_window(
self._xdo, ctypes.c_ulong(window), x, y) | Move the mouse to a specific location relative to the top-left corner
of a window.
:param x: the target X coordinate on the screen in pixels.
:param y: the target Y coordinate on the screen in pixels. | entailment |
def move_mouse_relative(self, x, y):
"""
Move the mouse relative to its current position.
:param x: the distance in pixels to move on the X axis.
:param y: the distance in pixels to move on the Y axis.
"""
_libxdo.xdo_move_mouse_relative(self._xdo, x, y) | Move the mouse relative to its current position.
:param x: the distance in pixels to move on the X axis.
:param y: the distance in pixels to move on the Y axis. | entailment |
def mouse_down(self, window, button):
"""
Send a mouse press (aka mouse down) for a given button at
the current mouse location.
:param window:
The window you want to send the event to or CURRENTWINDOW
:param button:
The mouse button. Generally, 1 is left, 2 is middle, 3 is
right, 4 is wheel up, 5 is wheel down.
"""
_libxdo.xdo_mouse_down(
self._xdo, ctypes.c_ulong(window), ctypes.c_int(button)) | Send a mouse press (aka mouse down) for a given button at
the current mouse location.
:param window:
The window you want to send the event to or CURRENTWINDOW
:param button:
The mouse button. Generally, 1 is left, 2 is middle, 3 is
right, 4 is wheel up, 5 is wheel down. | entailment |
def mouse_up(self, window, button):
"""
Send a mouse release (aka mouse up) for a given button at
the current mouse location.
:param window:
The window you want to send the event to or CURRENTWINDOW
:param button:
The mouse button. Generally, 1 is left, 2 is middle, 3 is
right, 4 is wheel up, 5 is wheel down.
"""
_libxdo.xdo_mouse_up(
self._xdo, ctypes.c_ulong(window), ctypes.c_int(button)) | Send a mouse release (aka mouse up) for a given button at
the current mouse location.
:param window:
The window you want to send the event to or CURRENTWINDOW
:param button:
The mouse button. Generally, 1 is left, 2 is middle, 3 is
right, 4 is wheel up, 5 is wheel down. | entailment |
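Combining press, relative movement and release gives a simple drag gesture. A sketch, assuming CURRENTWINDOW is 0 as in libxdo and the wrapper is exposed as Xdo:

import time
from xdo import Xdo  # assumed import path

CURRENTWINDOW = 0  # libxdo's "window under the cursor" sentinel

xdo = Xdo()
xdo.move_mouse(100, 100)
xdo.mouse_down(CURRENTWINDOW, 1)  # press the left button
xdo.move_mouse_relative(50, 0)    # drag 50 px to the right
time.sleep(0.1)                   # give the target time to see the motion
xdo.mouse_up(CURRENTWINDOW, 1)    # release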
def get_mouse_location(self):
"""
Get the current mouse location (coordinates and screen number).
:return: a namedtuple with ``x``, ``y`` and ``screen_num`` fields
"""
x = ctypes.c_int(0)
y = ctypes.c_int(0)
screen_num = ctypes.c_int(0)
_libxdo.xdo_get_mouse_location(
self._xdo, ctypes.byref(x), ctypes.byref(y),
ctypes.byref(screen_num))
return mouse_location(x.value, y.value, screen_num.value) | Get the current mouse location (coordinates and screen number).
:return: a namedtuple with ``x``, ``y`` and ``screen_num`` fields | entailment |
def get_window_at_mouse(self):
"""
Get the window the mouse is currently over
"""
window_ret = ctypes.c_ulong(0)
_libxdo.xdo_get_window_at_mouse(self._xdo, ctypes.byref(window_ret))
return window_ret.value | Get the window the mouse is currently over | entailment |
def get_mouse_location2(self):
"""
Get all mouse location-related data.
:return: a namedtuple with ``x``, ``y``, ``screen_num``
and ``window`` fields
"""
x = ctypes.c_int(0)
y = ctypes.c_int(0)
screen_num_ret = ctypes.c_ulong(0)
window_ret = ctypes.c_ulong(0)
_libxdo.xdo_get_mouse_location2(
self._xdo, ctypes.byref(x), ctypes.byref(y),
ctypes.byref(screen_num_ret), ctypes.byref(window_ret))
return mouse_location2(x.value, y.value, screen_num_ret.value,
window_ret.value) | Get all mouse location-related data.
:return: a namedtuple with ``x``, ``y``, ``screen_num``
and ``window`` fields | entailment |
def wait_for_mouse_move_from(self, origin_x, origin_y):
"""
Wait for the mouse to move from a location. This function will block
until the condition has been satisfied.
:param origin_x: the X position you expect the mouse to move from
:param origin_y: the Y position you expect the mouse to move from
"""
_libxdo.xdo_wait_for_mouse_move_from(self._xdo, origin_x, origin_y) | Wait for the mouse to move from a location. This function will block
until the condition has been satisfied.
:param origin_x: the X position you expect the mouse to move from
:param origin_y: the Y position you expect the mouse to move from | entailment |
def wait_for_mouse_move_to(self, dest_x, dest_y):
"""
Wait for the mouse to move to a location. This function will block
until the condition has been satisfied.
:param dest_x: the X position you expect the mouse to move to
:param dest_y: the Y position you expect the mouse to move to
"""
_libxdo.xdo_wait_for_mouse_move_to(self._xdo, dest_x, dest_y) | Wait for the mouse to move to a location. This function will block
until the condition has been satisfied.
:param dest_x: the X position you expect the mouse to move to
:param dest_y: the Y position you expect the mouse to move to | entailment |
def click_window(self, window, button):
"""
Send a click for a specific mouse button at the current mouse location.
:param window:
The window you want to send the event to or CURRENTWINDOW
:param button:
The mouse button. Generally, 1 is left, 2 is middle, 3 is
right, 4 is wheel up, 5 is wheel down.
"""
_libxdo.xdo_click_window(self._xdo, window, button) | Send a click for a specific mouse button at the current mouse location.
:param window:
The window you want to send the event to or CURRENTWINDOW
:param button:
The mouse button. Generally, 1 is left, 2 is middle, 3 is
right, 4 is wheel up, 5 is wheel down. | entailment |
def click_window_multiple(self, window, button, repeat=2, delay=100000):
"""
Send one or more clicks for a specific mouse button at the
current mouse location.
:param window:
The window you want to send the event to or CURRENTWINDOW
:param button:
The mouse button. Generally, 1 is left, 2 is middle, 3 is
right, 4 is wheel up, 5 is wheel down.
:param repeat: number of repetitions (default: 2)
:param delay: delay between clicks, in microseconds (default: 100k)
"""
_libxdo.xdo_click_window_multiple(
self._xdo, window, button, repeat, delay) | Send one or more clicks for a specific mouse button at the
current mouse location.
:param window:
The window you want to send the event to or CURRENTWINDOW
:param button:
The mouse button. Generally, 1 is left, 2 is middle, 3 is
right, 4 is wheel up, 5 is wheel down.
:param repeat: number of repetitions (default: 2)
:param delay: delay between clicks, in microseconds (default: 100k) | entailment |
def enter_text_window(self, window, string, delay=12000):
"""
Type a string to the specified window.
If you want to send a specific key or key sequence, such as
"alt+l", you want instead ``send_keysequence_window(...)``.
:param window:
The window you want to send keystrokes to or CURRENTWINDOW
:param string:
The string to type, like "Hello world!"
:param delay:
The delay between keystrokes in microseconds.
12000 is a decent choice if you don't have other plans.
"""
return _libxdo.xdo_enter_text_window(self._xdo, window, string, delay) | Type a string to the specified window.
If you want to send a specific key or key sequence, such as
"alt+l", you want instead ``send_keysequence_window(...)``.
:param window:
The window you want to send keystrokes to or CURRENTWINDOW
:param string:
The string to type, like "Hello world!"
:param delay:
The delay between keystrokes in microseconds.
12000 is a decent choice if you don't have other plans. | entailment |
def send_keysequence_window(self, window, keysequence, delay=12000):
"""
Send a keysequence to the specified window.
This allows you to send keysequences by symbol name. Any combination
of X11 KeySym names separated by '+' are valid. Single KeySym names
are valid, too.
Examples:
"l"
"semicolon"
"alt+Return"
"Alt_L+Tab"
If you want to type a string, such as "Hello world." you want to
instead use xdo_enter_text_window.
:param window: The window you want to send the keysequence to or
CURRENTWINDOW
:param keysequence: The string keysequence to send.
:param delay: The delay between keystrokes in microseconds.
"""
_libxdo.xdo_send_keysequence_window(
self._xdo, window, keysequence, delay) | Send a keysequence to the specified window.
This allows you to send keysequences by symbol name. Any combination
of X11 KeySym names separated by '+' are valid. Single KeySym names
are valid, too.
Examples:
"l"
"semicolon"
"alt+Return"
"Alt_L+Tab"
If you want to type a string, such as "Hello world." you want to
instead use xdo_enter_text_window.
:param window: The window you want to send the keysequence to or
CURRENTWINDOW
:param keysequence: The string keysequence to send.
:param delay: The delay between keystrokes in microseconds. | entailment |
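A sketch contrasting the two entry points: enter_text_window for literal text, send_keysequence_window for named KeySym combos. The byte strings are an assumption based on the ctypes layer underneath:

from xdo import Xdo  # assumed import path

xdo = Xdo()
win = xdo.get_active_window()
xdo.enter_text_window(win, b'Hello world!')  # typed character by character
xdo.send_keysequence_window(win, b'ctrl+s')  # a chord, not literal text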
def send_keysequence_window_up(self, window, keysequence, delay=12000):
"""Send key release (up) events for the given key sequence"""
_libxdo.xdo_send_keysequence_window_up(
self._xdo, window, keysequence, ctypes.c_ulong(delay)) | Send key release (up) events for the given key sequence | entailment |
def send_keysequence_window_down(self, window, keysequence, delay=12000):
"""Send key press (down) events for the given key sequence"""
_libxdo.xdo_send_keysequence_window_down(
self._xdo, window, keysequence, ctypes.c_ulong(delay)) | Send key press (down) events for the given key sequence | entailment |
def send_keysequence_window_list_do(
self, window, keys, pressed=1, modifier=None, delay=120000):
"""
Send a series of keystrokes.
:param window: The window to send events to or CURRENTWINDOW
:param keys: The array of charcodemap_t entities to send.
:param pressed: 1 for key press, 0 for key release.
:param modifier:
Pointer to integer to record the modifiers
activated by the keys being pressed. If NULL, we don't save
the modifiers.
:param delay:
The delay between keystrokes in microseconds.
"""
# todo: how to properly use charcodes_t in a nice way?
_libxdo.xdo_send_keysequence_window_list_do(
self._xdo, window, keys, len(keys), pressed, modifier, delay) | Send a series of keystrokes.
:param window: The window to send events to or CURRENTWINDOW
:param keys: The array of charcodemap_t entities to send.
:param pressed: 1 for key press, 0 for key release.
:param modifier:
Pointer to integer to record the modifiers
activated by the keys being pressed. If NULL, we don't save
the modifiers.
:param delay:
The delay between keystrokes in microseconds. | entailment |
def get_active_keys_to_keycode_list(self):
"""Get a list of active keys. Uses XQueryKeymap"""
try:
_libxdo.xdo_get_active_keys_to_keycode_list
except AttributeError:
# Apparently, this was implemented in a later version..
raise NotImplementedError()
keys = ctypes.pointer(charcodemap_t())
nkeys = ctypes.c_int(0)
_libxdo.xdo_get_active_keys_to_keycode_list(
self._xdo, ctypes.byref(keys), ctypes.byref(nkeys))
# Return the keys as a list of charcodemap_t, as get_active_modifiers does
return [keys[i] for i in range(nkeys.value)] | Get a list of active keys. Uses XQueryKeymap | entailment |
def wait_for_window_map_state(self, window, state):
"""
Wait for a window to have a specific map state.
State possibilities:
IsUnmapped - window is not displayed.
IsViewable - window is mapped and shown (though may be
clipped by windows on top of it)
IsUnviewable - window is mapped but a parent window is unmapped.
:param window: the window you want to wait for.
:param state: the state to wait for.
"""
_libxdo.xdo_wait_for_window_map_state(self._xdo, window, state) | Wait for a window to have a specific map state.
State possibilities:
IsUnmapped - window is not displayed.
IsViewable - window is mapped and shown (though may be
clipped by windows on top of it)
IsUnviewable - window is mapped but a parent window is unmapped.
:param window: the window you want to wait for.
:param state: the state to wait for. | entailment |
def move_window(self, window, x, y):
"""
Move a window to a specific location.
The top left corner of the window will be moved to the x,y coordinate.
:param window: the window to move
:param x: the X coordinate to move to.
:param y: the Y coordinate to move to.
"""
_libxdo.xdo_move_window(self._xdo, window, x, y) | Move a window to a specific location.
The top left corner of the window will be moved to the x,y coordinate.
:param window: the window to move
:param x: the X coordinate to move to.
:param y: the Y coordinate to move to. | entailment |
def translate_window_with_sizehint(self, window, width, height):
"""
Apply a window's sizing hints (if any) to a given width and height.
This function wraps XGetWMNormalHints() and applies any
resize increment and base size to your given width and height values.
:param window: the window to use
:param width: the unit width you want to translate
:param height: the unit height you want to translate
:return: (width, height)
"""
width_ret = ctypes.c_uint(0)
height_ret = ctypes.c_uint(0)
_libxdo.xdo_translate_window_with_sizehint(
self._xdo, window, width, height,
ctypes.byref(width_ret),
ctypes.byref(height_ret))
return width_ret.value, height_ret.value | Apply a window's sizing hints (if any) to a given width and height.
This function wraps XGetWMNormalHints() and applies any
resize increment and base size to your given width and height values.
:param window: the window to use
:param width: the unit width you want to translate
:param height: the unit height you want to translate
:return: (width, height) | entailment |
def set_window_size(self, window, w, h, flags=0):
"""
Change the window size.
:param window: the window to resize
:param w: the new desired width
:param h: the new desired height
:param flags: if 0, use pixels for units. If SIZE_USEHINTS, then
the units will be relative to the window size hints.
"""
_libxdo.xdo_set_window_size(self._xdo, window, w, h, flags) | Change the window size.
:param window: the window to resize
:param w: the new desired width
:param h: the new desired height
:param flags: if 0, use pixels for units. If SIZE_USEHINTS, then
the units will be relative to the window size hints. | entailment |
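A sketch of the size-hints flag in practice: resizing a terminal in character cells rather than pixels. SIZE_USEHINTS mirrors libxdo's constant (1 << 0) and is an assumption here:

from xdo import Xdo  # assumed import path

SIZE_USEHINTS = 1 << 0  # assumed to match libxdo's #define

xdo = Xdo()
win = xdo.get_active_window()
xdo.set_window_size(win, 80, 24, flags=SIZE_USEHINTS)  # 80x24 cells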
def set_window_property(self, window, name, value):
"""
Change a window property.
Example properties you can change are WM_NAME, WM_ICON_NAME, etc.
:param window: The window to change a property of.
:param name: the string name of the property.
:param value: the string value of the property.
"""
_libxdo.xdo_set_window_property(self._xdo, window, name, value) | Change a window property.
Example properties you can change are WM_NAME, WM_ICON_NAME, etc.
:param window: The window to change a property of.
:param name: the string name of the property.
:param value: the string value of the property. | entailment |
def set_window_class(self, window, name, class_):
"""
Change the window's class name and/or class.
:param name: The new class name. If ``None``, no change.
:param class_: The new class. If ``None``, no change.
"""
_libxdo.xdo_set_window_class(self._xdo, window, name, class_) | Change the window's class name and/or class.
:param name: The new class name. If ``None``, no change.
:param class_: The new class. If ``None``, no change. | entailment |
def set_window_urgency(self, window, urgency):
"""Sets the urgency hint for a window"""
_libxdo.xdo_set_window_urgency(self._xdo, window, urgency) | Sets the urgency hint for a window | entailment |
def set_window_override_redirect(self, window, override_redirect):
"""
Set the override_redirect value for a window. This generally means
whether or not a window manager will manage this window.
If you set it to 1, the window manager will usually not draw
borders on the window, etc. If you set it to 0, the window manager
will see it like a normal application window.
"""
_libxdo.xdo_set_window_override_redirect(
self._xdo, window, override_redirect) | Set the override_redirect value for a window. This generally means
whether or not a window manager will manage this window.
If you set it to 1, the window manager will usually not draw
borders on the window, etc. If you set it to 0, the window manager
will see it like a normal application window. | entailment |
def get_focused_window(self):
"""
Get the window currently having focus.
:return: the window currently having focus.
"""
window_ret = window_t(0)
_libxdo.xdo_get_focused_window(self._xdo, ctypes.byref(window_ret))
return window_ret.value | Get the window currently having focus.
:return: the window currently having focus. | entailment |
def wait_for_window_focus(self, window, want_focus):
"""
Wait for a window to have or lose focus.
:param window: The window to wait on
:param want_focus: If 1, wait for focus. If 0, wait for loss of focus.
"""
_libxdo.xdo_wait_for_window_focus(self._xdo, window, want_focus) | Wait for a window to have or lose focus.
:param window: The window to wait on
:param want_focus: If 1, wait for focus. If 0, wait for loss of focus. | entailment |
def get_focused_window_sane(self):
"""
Like xdo_get_focused_window, but return the first ancestor-or-self
window having a property of WM_CLASS. This allows you to get
the "real" or top-level-ish window having focus rather than something
you may not expect to be the window having focus.
:return: the window currently having focus.
"""
window_ret = window_t(0)
_libxdo.xdo_get_focused_window_sane(
self._xdo, ctypes.byref(window_ret))
return window_ret.value | Like xdo_get_focused_window, but return the first ancestor-or-self
window * having a property of WM_CLASS. This allows you to get
the "real" or top-level-ish window having focus rather than something
you may not expect to be the window having focused.
:param window_ret:
Pointer to a window where the currently-focused window
will be stored. | entailment |
def wait_for_window_active(self, window, active=1):
"""
Wait for a window to be active or not active.
Requires your window manager to support this.
Uses _NET_ACTIVE_WINDOW from the EWMH spec.
:param window: the window to wait on
:param active: If 1, wait for active. If 0, wait for inactive.
"""
_libxdo.xdo_wait_for_window_active(self._xdo, window, active) | Wait for a window to be active or not active.
Requires your window manager to support this.
Uses _NET_ACTIVE_WINDOW from the EWMH spec.
:param window: the window to wait on
:param active: If 1, wait for active. If 0, wait for inactive. | entailment |
def reparent_window(self, window_source, window_target):
"""
Reparents a window
:param window_source: the window to reparent
:param window_target: the new parent window
"""
_libxdo.xdo_reparent_window(self._xdo, window_source, window_target) | Reparents a window
:param window_source: the window to reparent
:param window_target: the new parent window | entailment |
def get_window_location(self, window):
"""
Get a window's location.
"""
screen_ret = Screen()
x_ret = ctypes.c_int(0)
y_ret = ctypes.c_int(0)
_libxdo.xdo_get_window_location(
self._xdo, window, ctypes.byref(x_ret), ctypes.byref(y_ret),
ctypes.byref(screen_ret))
return window_location(x_ret.value, y_ret.value, screen_ret) | Get a window's location. | entailment |
def get_window_size(self, window):
"""
Get a window's size.
"""
w_ret = ctypes.c_uint(0)
h_ret = ctypes.c_uint(0)
_libxdo.xdo_get_window_size(self._xdo, window, ctypes.byref(w_ret),
ctypes.byref(h_ret))
return window_size(w_ret.value, h_ret.value) | Get a window's size. | entailment |
def get_active_window(self):
"""
Get the currently-active window.
Requires your window manager to support this.
Uses ``_NET_ACTIVE_WINDOW`` from the EWMH spec.
"""
window_ret = window_t(0)
_libxdo.xdo_get_active_window(self._xdo, ctypes.byref(window_ret))
return window_ret.value | Get the currently-active window.
Requires your window manager to support this.
Uses ``_NET_ACTIVE_WINDOW`` from the EWMH spec. | entailment |
def select_window_with_click(self):
"""
Get a window ID by clicking on it.
This function blocks until a selection is made.
"""
window_ret = window_t(0)
_libxdo.xdo_select_window_with_click(
self._xdo, ctypes.byref(window_ret))
return window_ret.value | Get a window ID by clicking on it.
This function blocks until a selection is made. | entailment |
def get_number_of_desktops(self):
"""
Get the current number of desktops.
Uses ``_NET_NUMBER_OF_DESKTOPS`` of the EWMH spec.
:return: the current number of desktops.
"""
ndesktops = ctypes.c_long(0)
_libxdo.xdo_get_number_of_desktops(self._xdo, ctypes.byref(ndesktops))
return ndesktops.value | Get the current number of desktops.
Uses ``_NET_NUMBER_OF_DESKTOPS`` of the EWMH spec.
:return: the current number of desktops. | entailment |
def get_current_desktop(self):
"""
Get the current desktop.
Uses ``_NET_CURRENT_DESKTOP`` of the EWMH spec.
"""
desktop = ctypes.c_long(0)
_libxdo.xdo_get_current_desktop(self._xdo, ctypes.byref(desktop))
return desktop.value | Get the current desktop.
Uses ``_NET_CURRENT_DESKTOP`` of the EWMH spec. | entailment |
def set_desktop_for_window(self, window, desktop):
"""
Move a window to another desktop
Uses _NET_WM_DESKTOP of the EWMH spec.
:param window: the window to move
:param desktop: the desktop destination for the window
"""
_libxdo.xdo_set_desktop_for_window(self._xdo, window, desktop) | Move a window to another desktop
Uses _NET_WM_DESKTOP of the EWMH spec.
:param window: the window to move
:param desktop: the desktop destination for the window | entailment |
def get_desktop_for_window(self, window):
"""
Get the desktop a window is on.
Uses _NET_WM_DESKTOP of the EWMH spec.
If your window manager does not support ``_NET_WM_DESKTOP``, the
returned desktop value remains unmodified.
:param window: the window to query
"""
desktop = ctypes.c_long(0)
_libxdo.xdo_get_desktop_for_window(
self._xdo, window, ctypes.byref(desktop))
return desktop.value | Get the desktop a window is on.
Uses _NET_WM_DESKTOP of the EWMH spec.
If your window manager does not support ``_NET_WM_DESKTOP``, the
returned desktop value remains unmodified.
:param window: the window to query | entailment |
def search_windows(
self, winname=None, winclass=None, winclassname=None,
pid=None, only_visible=False, screen=None, require=False,
searchmask=0, desktop=None, limit=0, max_depth=-1):
"""
Search for windows.
:param winname:
Regexp to be matched against window name
:param winclass:
Regexp to be matched against window class
:param winclassname:
Regexp to be matched against window class name
:param pid:
Only return windows from this PID
:param only_visible:
If True, only return visible windows
:param screen:
Search only windows on this screen
:param require:
If True, will match ALL conditions. Otherwise, windows matching
ANY condition will be returned.
:param searchmask:
Search mask, for advanced usage. Leave this alone if you
don't know what you are doing.
:param limit:
Maximum number of windows to list. Zero means no limit.
:param max_depth:
Maximum depth to return. Defaults to -1, meaning "no limit".
:return:
A list of window ids matching query.
"""
windowlist_ret = ctypes.pointer(window_t(0))
nwindows_ret = ctypes.c_uint(0)
search = xdo_search_t(searchmask=searchmask)
if winname is not None:
search.winname = winname
search.searchmask |= SEARCH_NAME
if winclass is not None:
search.winclass = winclass
search.searchmask |= SEARCH_CLASS
if winclassname is not None:
search.winclassname = winclassname
search.searchmask |= SEARCH_CLASSNAME
if pid is not None:
search.pid = pid
search.searchmask |= SEARCH_PID
if only_visible:
search.only_visible = True
search.searchmask |= SEARCH_ONLYVISIBLE
if screen is not None:
search.screen = screen
search.searchmask |= SEARCH_SCREEN
if desktop is not None:
search.desktop = desktop
search.searchmask |= SEARCH_DESKTOP
search.limit = limit
search.max_depth = max_depth
_libxdo.xdo_search_windows(
self._xdo, search,
ctypes.byref(windowlist_ret),
ctypes.byref(nwindows_ret))
return [windowlist_ret[i] for i in range(nwindows_ret.value)] | Search for windows.
:param winname:
Regexp to be matched against window name
:param winclass:
Regexp to be matched against window class
:param winclassname:
Regexp to be matched against window class name
:param pid:
Only return windows from this PID
:param only_visible:
If True, only return visible windows
:param screen:
Search only windows on this screen
:param require:
If True, will match ALL conditions. Otherwise, windows matching
ANY condition will be returned.
:param searchmask:
Search mask, for advanced usage. Leave this alone if you
don't know what you are doing.
:param limit:
Maximum number of windows to list. Zero means no limit.
:param max_depth:
Maximum depth to return. Defaults to -1, meaning "no limit".
:return:
A list of window ids matching query. | entailment |
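A usage sketch: find visible windows whose name matches a regexp and inspect the first hit. The byte-string pattern is an assumption based on the ctypes layer:

from xdo import Xdo  # assumed import path

xdo = Xdo()
windows = xdo.search_windows(winname=b'Firefox', only_visible=True)
if windows:
    print(xdo.get_window_name(windows[0]))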
def get_symbol_map(self):
"""
If you need the symbol map, use this method.
The symbol map is an array of string pairs mapping common tokens
to X Keysym strings, such as "alt" to "Alt_L"
:return: array of strings.
"""
# todo: make sure we return a list of strings!
sm = _libxdo.xdo_get_symbol_map()
# Return value is like:
# ['alt', 'Alt_L', ..., None, None, None, ...]
# We want to return only values up to the first None.
# todo: any better solution than this?
i = 0
ret = []
while True:
c = sm[i]
if c is None:
return ret
ret.append(c)
i += 1 | If you need the symbol map, use this method.
The symbol map is an array of string pairs mapping common tokens
to X Keysym strings, such as "alt" to "Alt_L"
:return: array of strings. | entailment |
def get_active_modifiers(self):
"""
Get a list of active modifier keys. Uses XQueryKeymap.
:return: list of charcodemap_t instances
"""
keys = ctypes.pointer(charcodemap_t())
nkeys = ctypes.c_int(0)
_libxdo.xdo_get_active_modifiers(
self._xdo, ctypes.byref(keys), ctypes.byref(nkeys))
return [keys[i] for i in range(nkeys.value)] | Get a list of active modifier keys. Uses XQueryKeymap.
:return: list of charcodemap_t instances | entailment |
def get_window_name(self, win_id):
"""
Get a window's name, if any.
"""
window = window_t(win_id)
name_ptr = ctypes.c_char_p()
name_len = ctypes.c_int(0)
name_type = ctypes.c_int(0)
_libxdo.xdo_get_window_name(
self._xdo, window, ctypes.byref(name_ptr),
ctypes.byref(name_len), ctypes.byref(name_type))
name = name_ptr.value
_libX11.XFree(name_ptr) # Free the string allocated by Xlib
return name | Get a window's name, if any. | entailment |
def import_metadata(module_paths):
"""Import all the given modules"""
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.insert(0, cwd)
modules = []
try:
for path in module_paths:
modules.append(import_module(path))
except ImportError as e:
err = RuntimeError('Could not import {}: {}'.format(path, str(e)))
raise_from(err, e)
return modules | Import all the given modules | entailment |
def load_metadata(stream):
"""Load JSON metadata from opened stream."""
try:
metadata = json.load(
stream, encoding='utf8', object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
err = RuntimeError('Error parsing {}: {}'.format(stream.name, e))
raise_from(err, e)
else:
# convert changelog keys back to ints for sorting
for group in metadata:
if group == '$version':
continue
apis = metadata[group]['apis']
for api in apis.values():
int_changelog = OrderedDict()
for version, log in api.get('changelog', {}).items():
int_changelog[int(version)] = log
api['changelog'] = int_changelog
finally:
stream.close()
return metadata | Load JSON metadata from opened stream. | entailment |
def strip_punctuation_space(value):
"Strip excess whitespace prior to punctuation."
def strip_punctuation(string):
replacement_list = (
(' .', '.'),
(' :', ':'),
('( ', '('),
(' )', ')'),
)
for match, replacement in replacement_list:
string = string.replace(match, replacement)
return string
if value is None:
return None
if isinstance(value, list):
return [strip_punctuation(v) for v in value]
return strip_punctuation(value) | Strip excess whitespace prior to punctuation. | entailment |
def join_sentences(string1, string2, glue='.'):
"concatenate two sentences together with punctuation glue"
if not string1:
return string2
if not string2:
return string1
# both are strings, continue joining them together with the glue and whitespace
new_string = string1.rstrip()
if not new_string.endswith(glue):
new_string += glue
new_string += ' ' + string2.lstrip()
return new_string | concatenate two sentences together with punctuation glue | entailment |
def coerce_to_int(val, default=0xDEADBEEF):
"""Attempts to cast given value to an integer, return the original value if failed or the default if one provided."""
try:
return int(val)
except (TypeError, ValueError):
if default != 0xDEADBEEF:
return default
return val | Attempts to cast given value to an integer, returning the original value if it fails, or the default if one was provided. | entailment |
def nullify(function):
"Decorator. If empty list, returns None, else list."
def wrapper(*args, **kwargs):
value = function(*args, **kwargs)
if isinstance(value, list) and len(value) == 0:
return None
return value
return wrapper | Decorator. If empty list, returns None, else list. | entailment |
def strippen(function):
"Decorator. Strip excess whitespace from return value."
def wrapper(*args, **kwargs):
return strip_strings(function(*args, **kwargs))
return wrapper | Decorator. Strip excess whitespace from return value. | entailment |
def inten(function):
"Decorator. Attempts to convert return value to int"
def wrapper(*args, **kwargs):
return coerce_to_int(function(*args, **kwargs))
return wrapper | Decorator. Attempts to convert return value to int | entailment |
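A sketch of how these decorators compose on a parser function; extract_keywords below is a hypothetical example:

@nullify
def extract_keywords(text):
    # hypothetical parser: split on commas, drop empty fragments
    return [word for word in text.split(',') if word]

extract_keywords('a,b')  # -> ['a', 'b']
extract_keywords('')     # -> None, the empty list is nullified

strippen and inten stack the same way, e.g. @strippen above a function returning scraped text.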
def date_struct(year, month, day, tz = "UTC"):
"""
Given year, month and day numeric values and a timezone
convert to structured date object
"""
ymdtz = (year, month, day, tz)
if None in ymdtz:
#logger.debug("a year, month, day or tz value was empty: %s" % str(ymdtz))
return None # return early if we have a bad value
try:
return time.strptime("%s-%s-%s %s" % ymdtz, "%Y-%m-%d %Z")
except(TypeError, ValueError):
#logger.debug("date failed to convert: %s" % str(ymdtz))
pass | Given year, month and day numeric values and a timezone
convert to structured date object | entailment |
def date_struct_nn(year, month, day, tz="UTC"):
"""
Assemble a date object but if day or month is none set them to 1
to make it easier to deal with partial dates
"""
if not day:
day = 1
if not month:
month = 1
return date_struct(year, month, day, tz) | Assemble a date object but if day or month is none set them to 1
to make it easier to deal with partial dates | entailment |
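A sketch of the difference between the two helpers (parsing "UTC" via %Z works on most platforms):

date_struct(2012, 6, 1)           # time.struct_time for 2012-06-01 UTC
date_struct(2012, None, None)     # None: a value was empty
date_struct_nn(2012, None, None)  # month and day default to 1 -> 2012-01-01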
def doi_uri_to_doi(value):
"Strip the uri schema from the start of DOI URL strings"
if value is None:
return value
replace_values = ['http://dx.doi.org/', 'https://dx.doi.org/',
'http://doi.org/', 'https://doi.org/']
for replace_value in replace_values:
value = value.replace(replace_value, '')
return value | Strip the URI scheme from the start of DOI URL strings | entailment |
def remove_doi_paragraph(tags):
"Given a list of tags, only return those whose text doesn't start with 'DOI:'"
p_tags = list(filter(lambda tag: not starts_with_doi(tag), tags))
p_tags = list(filter(lambda tag: not paragraph_is_only_doi(tag), p_tags))
return p_tags | Given a list of tags, only return those whose text doesn't start with 'DOI:' | entailment |
def orcid_uri_to_orcid(value):
"Strip the uri schema from the start of ORCID URL strings"
if value is None:
return value
replace_values = ['http://orcid.org/', 'https://orcid.org/']
for replace_value in replace_values:
value = value.replace(replace_value, '')
return value | Strip the URI scheme from the start of ORCID URL strings | entailment |
def component_acting_parent_tag(parent_tag, tag):
"""
Only intended for use in getting components: look for a parent tag
named fig-group and, if found, use the first fig tag inside it as the acting parent tag
"""
if parent_tag.name == "fig-group":
if (len(tag.find_previous_siblings("fig")) > 0):
acting_parent_tag = first(extract_nodes(parent_tag, "fig"))
else:
# Do not return the first fig as parent of itself
return None
else:
acting_parent_tag = parent_tag
return acting_parent_tag | Only intended for use in getting components: look for a parent tag
named fig-group and, if found, use the first fig tag inside it as the acting parent tag | entailment |
def extract_nodes(soup, nodename, attr=None, value=None):
"""
Returns a list of tags (nodes) from the given soup matching the given nodename.
If an optional attribute and value are given, these are used to filter the results
further."""
tags = soup.find_all(nodename)
if attr is not None and value is not None:
return list(filter(lambda tag: tag.get(attr) == value, tags))
return list(tags) | Returns a list of tags (nodes) from the given soup matching the given nodename.
If an optional attribute and value are given, these are used to filter the results
further. | entailment |
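A sketch of extract_nodes on a small fragment, assuming BeautifulSoup with an XML parser:

from bs4 import BeautifulSoup

soup = BeautifulSoup(
    '<root><fig id="f1"/><fig id="f2"/><media id="m1"/></root>', 'lxml-xml')
extract_nodes(soup, 'fig')              # both <fig> tags
extract_nodes(soup, 'fig', 'id', 'f2')  # only the tag with id="f2"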
def node_contents_str(tag):
"""
Return the contents of a tag, including its children, as a string.
Does not include the root/parent of the tag.
"""
if not tag:
return None
tag_string = ''
for child_tag in tag.children:
if isinstance(child_tag, Comment):
# BeautifulSoup does not preserve comment tags, add them back
tag_string += '<!--%s-->' % unicode_value(child_tag)
else:
tag_string += unicode_value(child_tag)
return tag_string if tag_string != '' else None | Return the contents of a tag, including its children, as a string.
Does not include the root/parent of the tag. | entailment |
def first_parent(tag, nodename):
"""
Given a beautiful soup tag, look at its parents and return the first
tag name that matches nodename or the list nodename
"""
if nodename is not None and isinstance(nodename, str):
nodename = [nodename]
return first(list(filter(lambda tag: tag.name in nodename, tag.parents))) | Given a beautiful soup tag, look at its parents and return the first
tag name that matches nodename or the list nodename | entailment |
def tag_fig_ordinal(tag):
"""
Meant for finding the position of fig tags with respect to whether
they are for a main figure or a child figure
"""
if 'specific-use' not in tag.attrs:
# Look for tags with no "specific-use" attribute
return len(list(filter(lambda tag: 'specific-use' not in tag.attrs,
tag.find_all_previous(tag.name)))) + 1 | Meant for finding the position of fig tags with respect to whether
they are for a main figure or a child figure | entailment |
def tag_limit_sibling_ordinal(tag, stop_tag_name):
"""
Count previous tags of the same name until it
reaches a tag name of type stop_tag, then stop counting
"""
tag_count = 1
for prev_tag in tag.previous_elements:
if prev_tag.name == tag.name:
tag_count += 1
if prev_tag.name == stop_tag_name:
break
return tag_count | Count previous tags of the same name until it
reaches a tag name of type stop_tag, then stop counting | entailment |
def tag_media_sibling_ordinal(tag):
"""
Count sibling ordinal differently depending on if the
mimetype is video or not
"""
if hasattr(tag, 'name') and tag.name != 'media':
return None
nodenames = ['fig','supplementary-material','sub-article']
first_parent_tag = first_parent(tag, nodenames)
sibling_ordinal = None
if first_parent_tag:
# Start counting at 0
sibling_ordinal = 0
for media_tag in first_parent_tag.find_all(tag.name):
if 'mimetype' in tag.attrs and tag['mimetype'] == 'video':
# Count all video type media tags
if 'mimetype' in media_tag.attrs and media_tag['mimetype'] == 'video':
sibling_ordinal += 1
if media_tag == tag:
break
else:
# Count all non-video type media tags
if (('mimetype' not in media_tag.attrs)
or ('mimetype' in media_tag.attrs and media_tag['mimetype'] != 'video')):
sibling_ordinal += 1
if media_tag == tag:
break
else:
# Start counting at 1
sibling_ordinal = 1
for prev_tag in tag.find_all_previous(tag.name):
if not first_parent(prev_tag, nodenames):
if 'mimetype' in tag.attrs and tag['mimetype'] == 'video':
# Count all video type media tags
if supp_asset(prev_tag) == supp_asset(tag) and 'mimetype' in prev_tag.attrs:
sibling_ordinal += 1
else:
if supp_asset(prev_tag) == supp_asset(tag) and 'mimetype' not in prev_tag.attrs:
sibling_ordinal += 1
return sibling_ordinal | Count sibling ordinal differently depending on if the
mimetype is video or not | entailment |
def tag_supplementary_material_sibling_ordinal(tag):
"""
Strategy is to count the previous supplementary-material tags
having the same asset value to get its sibling ordinal.
The result is its position inside any parent tag, counting
only those of the same asset type
"""
if hasattr(tag, 'name') and tag.name != 'supplementary-material':
return None
nodenames = ['fig','media','sub-article']
first_parent_tag = first_parent(tag, nodenames)
sibling_ordinal = 1
if first_parent_tag:
# Within the parent tag of interest, count the tags
# having the same asset value
for supp_tag in first_parent_tag.find_all(tag.name):
if tag == supp_tag:
# Stop once we reach the same tag we are checking
break
if supp_asset(supp_tag) == supp_asset(tag):
sibling_ordinal += 1
else:
# Look in all previous elements that do not have a parent
# and count the tags having the same asset value
for prev_tag in tag.find_all_previous(tag.name):
if not first_parent(prev_tag, nodenames):
if supp_asset(prev_tag) == supp_asset(tag):
sibling_ordinal += 1
return sibling_ordinal | Strategy is to count the previous supplementary-material tags
having the same asset value to get its sibling ordinal.
The result is its position inside any parent tag, counting
only those of the same asset type | entailment |
def supp_asset(tag):
"""
Given a supplementary-material tag, the asset value depends on
its label text. This also informs its ordinal, which depends on
how many of each type are present
"""
# Default
asset = 'supp'
if first(extract_nodes(tag, "label")):
label_text = node_text(first(extract_nodes(tag, "label"))).lower()
# Keyword match the label
if label_text.find('code') > 0:
asset = 'code'
elif label_text.find('data') > 0:
asset = 'data'
return asset | Given a supplementary-material tag, the asset value depends on
its label text. This also informs its ordinal, which depends on
how many of each type are present | entailment |
def text_to_title(value):
"""when a title is required, generate one from the value"""
title = None
if not value:
return title
words = value.split(" ")
keep_words = []
for word in words:
if word.endswith(".") or word.endswith(":"):
keep_words.append(word)
if len(word) > 1 and "<italic>" not in word and "<i>" not in word:
break
else:
keep_words.append(word)
if len(keep_words) > 0:
title = " ".join(keep_words)
if title.split(" ")[-1] != "spp.":
title = title.rstrip(" .:")
return title | when a title is required, generate one from the value | entailment |
def escape_unmatched_angle_brackets(string, allowed_tag_fragments=()):
"""
In order to make an XML string less malformed, escape
unmatched angle brackets that are not part of an allowed tag
Note: Very, very basic, and do not try regex \1 style replacements
on unicode ever again! Instead this uses string replace
allowed_tag_fragments is a tuple of tag name matches for use with startswith()
"""
if not string:
return string
# Split string on tags
tags = re.split('(<.*?>)', string)
for i, val in enumerate(tags):
# Use angle bracket character counts to find unmatched tags
# as well as our allowed_tags list to ignore good tags
if val.count('<') == val.count('>') and not val.startswith(allowed_tag_fragments):
val = val.replace('<', '&lt;')
val = val.replace('>', '&gt;')
else:
# Count how many unmatched tags we have
while val.count('<') != val.count('>'):
if val.count('<') != val.count('>') and val.count('<') > 0:
val = val.replace('<', '&lt;', 1)
elif val.count('<') != val.count('>') and val.count('>') > 0:
val = val.replace('>', '&gt;', 1)
if val.count('<') == val.count('>') and not val.startswith(allowed_tag_fragments):
# Send it through again in case there are nested unmatched tags
val = escape_unmatched_angle_brackets(val, allowed_tag_fragments)
tags[i] = val
return ''.join(tags) | In order to make an XML string less malformed, escape
unmatched angle brackets that are not part of an allowed tag
Note: Very, very basic, and do not try regex \1 style replacements
on unicode ever again! Instead this uses string replace
allowed_tag_fragments is a tuple of tag name matches for use with startswith() | entailment |
def escape_ampersand(string):
"""
Quick convert unicode ampersand characters not associated with
a numbered entity or not starting with allowed characters to a plain &amp;
"""
if not string:
return string
start_with_match = r"(\#x(....);|lt;|gt;|amp;)"
# The pattern below matches & that is not immediately followed by an allowed continuation
string = re.sub(r"&(?!" + start_with_match + ")", '&amp;', string)
return string | Quick convert unicode ampersand characters not associated with
a numbered entity or not starting with allowed characters to a plain &amp; | entailment |
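A sketch of the two escapers on malformed input; allowed tag fragments keep real markup intact:

escape_unmatched_angle_brackets(
    'a < b and <italic>c</italic>', ('<italic', '</italic'))
# -> 'a &lt; b and <italic>c</italic>'

escape_ampersand('AT&T &amp; &#x00FC;')
# -> 'AT&amp;T &amp; &#x00FC;' (existing entities are left alone)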
def parse(filename, return_doctype_dict=False):
"""
to extract the doctype details from the file when parsed and return the data
for later use, set return_doctype_dict to True
"""
doctype_dict = {}
# check the python version: doctype in ElementTree is deprecated in 3.2 and above
if sys.version_info < (3,2):
parser = CustomXMLParser(html=0, target=None, encoding='utf-8')
else:
# Python 3.2 or greater: get the doctype from the TreeBuilder
tree_builder = CustomTreeBuilder()
parser = ElementTree.XMLParser(html=0, target=tree_builder, encoding='utf-8')
tree = ElementTree.parse(filename, parser)
root = tree.getroot()
if sys.version_info < (3,2):
doctype_dict = parser.doctype_dict
else:
doctype_dict = tree_builder.doctype_dict
if return_doctype_dict is True:
return root, doctype_dict
else:
return root | to extract the doctype details from the file when parsed and return the data
for later use, set return_doctype_dict to True | entailment |
def add_tag_before(tag_name, tag_text, parent_tag, before_tag_name):
"""
Helper function to refactor the adding of new tags
especially for when converting text to role tags
"""
new_tag = Element(tag_name)
new_tag.text = tag_text
if get_first_element_index(parent_tag, before_tag_name):
parent_tag.insert(get_first_element_index(parent_tag, before_tag_name) - 1, new_tag)
return parent_tag | Helper function to refactor the adding of new tags
especially for when converting text to role tags | entailment |
def get_first_element_index(root, tag_name):
"""
In order to use Element.insert() in a convenient way,
this function will find the first child tag with tag_name
and return its index position
The index can then be used to insert an element before or after the
found tag using Element.insert()
"""
tag_index = 1
for tag in root:
if tag.tag == tag_name:
# Return the first one found if there is a match
return tag_index
tag_index = tag_index + 1
# Default
return None | In order to use Element.insert() in a convenient way,
this function will find the first child tag with tag_name
and return its index position
The index can then be used to insert an element before or after the
found tag using Element.insert() | entailment |
def rewrite_subject_group(root, subjects, subject_group_type, overwrite=True):
"add or rewrite subject tags inside subj-group tags"
parent_tag_name = 'subj-group'
tag_name = 'subject'
wrap_tag_name = 'article-categories'
tag_attribute = 'subj-group-type'
# the parent tag where it should be found
xpath_parent = './/front/article-meta/article-categories'
# the wrapping tag in case article-categories does not exist
xpath_article_meta = './/front/article-meta'
# the xpath to find the subject tags we are interested in
xpath = './/{parent_tag_name}[@{tag_attribute}="{group_type}"]'.format(
parent_tag_name=parent_tag_name,
tag_attribute=tag_attribute,
group_type=subject_group_type)
count = 0
# get the parent tag
parent_tag = root.find(xpath_parent)
if parent_tag is None:
# parent tag not found, add one
wrap_tag = root.find(xpath_article_meta)
article_categories_tag = SubElement(wrap_tag, wrap_tag_name)
parent_tag = article_categories_tag
insert_index = 0
# iterate all tags to find the index of the first tag we are interested in
if parent_tag is not None:
for tag_index, tag in enumerate(parent_tag.findall('*')):
if tag.tag == parent_tag_name and tag.get(tag_attribute) == subject_group_type:
insert_index = tag_index
if overwrite is True:
# if overwriting use the first one found
break
# if not overwriting, use the last one found + 1
if overwrite is not True:
insert_index += 1
# remove the tag if overwriting the existing values
if overwrite is True:
# remove all the tags
for tag in root.findall(xpath):
parent_tag.remove(tag)
# add the subjects
for subject in subjects:
subj_group_tag = Element(parent_tag_name)
subj_group_tag.set(tag_attribute, subject_group_type)
subject_tag = SubElement(subj_group_tag, tag_name)
subject_tag.text = subject
parent_tag.insert(insert_index, subj_group_tag)
count += 1
insert_index += 1
return count | add or rewrite subject tags inside subj-group tags | entailment |
def build_doctype(qualifiedName, publicId=None, systemId=None, internalSubset=None):
"""
Instantiate an ElifeDocumentType, a subclass of minidom.DocumentType, with
some properties so it is more testable
"""
doctype = ElifeDocumentType(qualifiedName)
doctype._identified_mixin_init(publicId, systemId)
if internalSubset:
doctype.internalSubset = internalSubset
return doctype | Instantiate an ElifeDocumentType, a subclass of minidom.DocumentType, with
some properties so it is more testable | entailment |
def append_minidom_xml_to_elementtree_xml(parent, xml, recursive=False, attributes=None):
"""
Recursively,
Given an ElementTree.Element as parent, and a minidom instance as xml,
append the tags and content from xml to parent
Used primarily for adding a snippet of XML with <italic> tags
attributes: a list of attribute names to copy
"""
# Get the root tag name
if recursive is False:
tag_name = xml.documentElement.tagName
node = xml.getElementsByTagName(tag_name)[0]
new_elem = SubElement(parent, tag_name)
if attributes:
for attribute in attributes:
if xml.documentElement.getAttribute(attribute):
new_elem.set(attribute, xml.documentElement.getAttribute(attribute))
else:
node = xml
tag_name = node.tagName
new_elem = parent
i = 0
for child_node in node.childNodes:
if child_node.nodeName == '#text':
if not new_elem.text and i <= 0:
new_elem.text = child_node.nodeValue
elif not new_elem.text and i > 0:
new_elem_sub.tail = child_node.nodeValue
else:
new_elem_sub.tail = child_node.nodeValue
elif child_node.childNodes is not None:
new_elem_sub = SubElement(new_elem, child_node.tagName)
new_elem_sub = append_minidom_xml_to_elementtree_xml(new_elem_sub, child_node,
True, attributes)
i = i + 1
return parent | Recursively,
Given an ElementTree.Element as parent, and a minidom instance as xml,
append the tags and content from xml to parent
Used primarily for adding a snippet of XML with <italic> tags
attributes: a list of attribute names to copy | entailment |
def rewrite_json(rewrite_type, soup, json_content):
"""
    Due to XML content that does not conform to the strict JSON schema validation rules,
    rewrite the JSON to make it valid; this is applied to eLife articles only
"""
if not soup:
return json_content
if not elifetools.rawJATS.doi(soup) or not elifetools.rawJATS.journal_id(soup):
return json_content
# Hook only onto elife articles for rewriting currently
journal_id_tag = elifetools.rawJATS.journal_id(soup)
doi_tag = elifetools.rawJATS.doi(soup)
journal_id = elifetools.utils.node_text(journal_id_tag)
doi = elifetools.utils.doi_uri_to_doi(elifetools.utils.node_text(doi_tag))
if journal_id.lower() == "elife":
function_name = rewrite_function_name(journal_id, rewrite_type)
if function_name:
try:
json_content = globals()[function_name](json_content, doi)
except KeyError:
pass
return json_content | Due to XML content that does not conform to the strict JSON schema validation rules,
rewrite the JSON to make it valid; this is applied to eLife articles only | entailment
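# The globals() lookup above is dispatch-by-name; rewrite_function_name is
# assumed to return names such as 'rewrite_elife_references_json'. A
# standalone sketch of the same pattern:
def rewrite_demo_json(json_content, doi):
    json_content['seen_doi'] = doi
    return json_content

demo_function_name = 'rewrite_demo_json'
demo_content = globals()[demo_function_name]({}, '10.7554/eLife.00001')
print(demo_content)  # {'seen_doi': '10.7554/eLife.00001'}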
def rewrite_elife_references_json(json_content, doi):
""" this does the work of rewriting elife references json """
references_rewrite_json = elife_references_rewrite_json()
if doi in references_rewrite_json:
json_content = rewrite_references_json(json_content, references_rewrite_json[doi])
# Edge case delete one reference
if doi == "10.7554/eLife.12125":
for i, ref in enumerate(json_content):
if ref.get("id") and ref.get("id") == "bib11":
                # break after deleting to avoid skipping items while enumerating
                del json_content[i]
                break
return json_content | this does the work of rewriting elife references json | entailment |
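# Usage sketch for the bib11 edge case above; the elifetools helper modules
# must be importable because elife_references_rewrite_json() uses them.
demo_refs = [{"id": "bib10"}, {"id": "bib11"}, {"id": "bib12"}]
demo_refs = rewrite_elife_references_json(demo_refs, "10.7554/eLife.12125")
print([ref["id"] for ref in demo_refs])  # ['bib10', 'bib12']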
def rewrite_references_json(json_content, rewrite_json):
""" general purpose references json rewriting by matching the id value """
for ref in json_content:
if ref.get("id") and ref.get("id") in rewrite_json:
for key, value in iteritems(rewrite_json.get(ref.get("id"))):
ref[key] = value
return json_content | general purpose references json rewriting by matching the id value | entailment |
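# Usage sketch for the id-matching rewrite:
demo_refs = [{"id": "bib25", "date": "2011"}]
demo_rules = {"bib25": {"date": "2012"}}
print(rewrite_references_json(demo_refs, demo_rules))
# [{'id': 'bib25', 'date': '2012'}]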
def elife_references_rewrite_json():
""" Here is the DOI and references json replacements data for elife """
references_rewrite_json = {}
references_rewrite_json["10.7554/eLife.00051"] = {"bib25": {"date": "2012"}}
references_rewrite_json["10.7554/eLife.00278"] = {"bib11": {"date": "2013"}}
references_rewrite_json["10.7554/eLife.00444"] = {"bib2": {"date": "2013"}}
references_rewrite_json["10.7554/eLife.00569"] = {"bib74": {"date": "1996"}}
references_rewrite_json["10.7554/eLife.00592"] = {"bib8": {"date": "2013"}}
references_rewrite_json["10.7554/eLife.00633"] = {"bib38": {"date": "2004"}}
references_rewrite_json["10.7554/eLife.00646"] = {"bib1": {"date": "2012"}}
references_rewrite_json["10.7554/eLife.00813"] = {"bib33": {"date": "2007"}}
references_rewrite_json["10.7554/eLife.01355"] = {"bib9": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.01530"] = {"bib12": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.01681"] = {"bib5": {"date": "2000"}}
references_rewrite_json["10.7554/eLife.01917"] = {"bib35": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.02030"] = {"bib53": {"date": "2013"}, "bib56": {"date": "2013"}}
references_rewrite_json["10.7554/eLife.02076"] = {"bib93a": {"date": "1990"}}
references_rewrite_json["10.7554/eLife.02217"] = {"bib27": {"date": "2009"}}
references_rewrite_json["10.7554/eLife.02535"] = {"bib12": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.02862"] = {"bib8": {"date": "2010"}}
references_rewrite_json["10.7554/eLife.03711"] = {"bib35": {"date": "2012"}}
references_rewrite_json["10.7554/eLife.03819"] = {"bib37": {"date": "2008"}}
references_rewrite_json["10.7554/eLife.04069"] = {"bib8": {"date": "2011"}}
references_rewrite_json["10.7554/eLife.04247"] = {"bib19a": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.04333"] = {
"bib3": {"date": "1859"},
"bib37": {"date": "1959"}}
references_rewrite_json["10.7554/eLife.04478"] = {"bib49": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.04580"] = {"bib139": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.05042"] = {"bib78": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.05323"] = {"bib102": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.05423"] = {"bib102": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.05503"] = {"bib94": {"date": "2016"}}
references_rewrite_json["10.7554/eLife.05849"] = {"bib82": {"date": "2005"}}
references_rewrite_json["10.7554/eLife.06072"] = {"bib17": {"date": "2003"}}
references_rewrite_json["10.7554/eLife.06315"] = {"bib19": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.06426"] = {"bib39": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.07361"] = {"bib76": {"date": "2011"}}
references_rewrite_json["10.7554/eLife.07460"] = {
"bib1": {"date": "2013"},
"bib2": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.08500"] = {"bib55": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.09066"] = {"bib46": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.09100"] = {"bib50": {"date": "2011"}}
references_rewrite_json["10.7554/eLife.09148"] = {
"bib47": {"articleTitle": "97–104"},
"bib59": {"articleTitle": "1913–1918"}}
references_rewrite_json["10.7554/eLife.09186"] = {
"bib31": {"date": "2015"},
"bib54": {"date": "2014"},
"bib56": {"date": "2014"},
"bib65": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.09215"] = {"bib5": {"date": "2012"}}
references_rewrite_json["10.7554/eLife.09520"] = {
"bib35": OrderedDict([
("conference", OrderedDict([
("name", ["WHO Expert Committee on Malaria"])
])),
("articleTitle", "WHO Expert Committee on Malaria [meeting held in Geneva from 19 to 30 October 1970]: fifteenth report"),
("publisher", OrderedDict([
("name", ["World Health Organization"]),
("address", OrderedDict([
("formatted", ["Geneva"]),
("components", OrderedDict([
("locality", ["Geneva"])
])),
])),
])),
])
}
references_rewrite_json["10.7554/eLife.09579"] = {
"bib19": {"date": "2007"},
"bib49": {"date": "2002"}}
references_rewrite_json["10.7554/eLife.09600"] = {"bib13": {"date": "2009"}}
references_rewrite_json["10.7554/eLife.09672"] = {
"bib25": {"conference": {"name": ["Seventeenth Meeting of the RBM Partnership Monitoring and Evaluation Reference Group (MERG)"]}}}
references_rewrite_json["10.7554/eLife.09771"] = {"bib22": {"date": "2012"}}
references_rewrite_json["10.7554/eLife.09972"] = {"bib61": {"date": "2007", "discriminator": "a"}}
references_rewrite_json["10.7554/eLife.09977"] = {"bib41": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.10032"] = {"bib45": {"date": "2016"}}
references_rewrite_json["10.7554/eLife.10042"] = {"bib14": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.10070"] = {"bib15": {"date": "2015"}, "bib38": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.10222"] = {"bib30": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.10670"] = {"bib7": {"date": "2015"}, "bib8": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.10781"] = {"bib32": {"date": "2003"}}
references_rewrite_json["10.7554/eLife.11273"] = {"bib43": {"date": "2004"}}
references_rewrite_json["10.7554/eLife.11305"] = {"bib68": {"date": "2000"}}
references_rewrite_json["10.7554/eLife.11416"] = {"bib22": {"date": "1997"}}
references_rewrite_json["10.7554/eLife.11860"] = {"bib48": {"title": "Light-switchable gene expression system"}}
references_rewrite_json["10.7554/eLife.12401"] = {"bib25": {"date": "2011"}}
references_rewrite_json["10.7554/eLife.12366"] = {"bib10": {"date": "2008"}}
references_rewrite_json["10.7554/eLife.12703"] = {"bib27": {"date": "2013"}}
references_rewrite_json["10.7554/eLife.12735"] = {"bib35": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.12830"] = {"bib118": {"date": "1982"}}
references_rewrite_json["10.7554/eLife.13133"] = {"bib11": {"date": "2011"}}
references_rewrite_json["10.7554/eLife.13152"] = {"bib25": {"date": "2000"}}
references_rewrite_json["10.7554/eLife.13195"] = {"bib6": {"date": "2013"}, "bib12": {"date": "2003"}}
references_rewrite_json["10.7554/eLife.13479"] = {"bib5": {"date": "2016"}}
references_rewrite_json["10.7554/eLife.13463"] = {"bib15": {"date": "2016"}}
references_rewrite_json["10.7554/eLife.14119"] = {"bib40": {"date": "2007"}}
references_rewrite_json["10.7554/eLife.14169"] = {"bib6": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.14523"] = {"bib7": {"date": "2013"}}
references_rewrite_json["10.7554/eLife.15272"] = {"bib78": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.15504"] = {"bib67": {"isbn": "9780198524304"}}
references_rewrite_json["10.7554/eLife.16105"] = {"bib2": {"date": "2013"}}
references_rewrite_json["10.7554/eLife.16349"] = {"bib68": {"date": "2005"}}
references_rewrite_json["10.7554/eLife.16394"] = {
"bib6": {"type": "thesis",
"author": {"type": "person", "name": {"preferred": "B Berret","index": "Berret, B" }},
"publisher": {"name": ["Université de Bourgogne"]}}}
references_rewrite_json["10.7554/eLife.16443"] = {"bib58": {"date": "1987"}}
references_rewrite_json["10.7554/eLife.16764"] = {"bib4": {"date": "2013"}}
references_rewrite_json["10.7554/eLife.17092"] = {"bib102": {"date": "1980"}}
references_rewrite_json["10.7554/eLife.18044"] = {"bib25": {"date": "2005"}}
references_rewrite_json["10.7554/eLife.18370"] = {"bib1": {"date": "2006"}}
references_rewrite_json["10.7554/eLife.18425"] = {"bib54": {"date": "2014"}}
references_rewrite_json["10.7554/eLife.18683"] = {"bib47": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.19532"] = {"bib27": {"date": "2015"}}
references_rewrite_json["10.7554/eLife.19545"] = {"bib51": {"date": "1996"}}
references_rewrite_json["10.7554/eLife.19571"] = {"bib56": {"date": "2016"}}
references_rewrite_json["10.7554/eLife.20352"] = {"bib53": {"country": "United States"}}
references_rewrite_json["10.7554/eLife.21864"] = {"bib2": {"date": "2016-10-24"}}
references_rewrite_json["10.7554/eLife.20522"] = {
"bib42": {"date": "2016"},
"bib110": {"date": "1996"}}
references_rewrite_json["10.7554/eLife.22053"] = {"bib123": {"date": "2016"}}
# Reference authors data to replace, processed further below into json
references_authors = []
references_authors.append(("10.7554/eLife.00036", "bib8", "authors", [
{"surname": "Butler", "given-names": "H"},
{"surname": "Juurlink", "given-names": "BHJ"}
]))
references_authors.append(("10.7554/eLife.00036", "bib30", "authors", [
{"surname": "Joyner", "given-names": "AL"}
]))
references_authors.append(("10.7554/eLife.00048", "bib15", "authors", [
{"surname": "Guthrie", "given-names": "C"},
{"surname": "Fink", "given-names": "GR"}
]))
references_authors.append(("10.7554/eLife.00051", "bib21", "authors", [
{"surname": "Jamison", "given-names": "DT"},
{"surname": "Breman", "given-names": "JG"},
{"surname": "Measham", "given-names": "AR"},
{"surname": "Alleyne", "given-names": "G"},
{"surname": "Claeson", "given-names": "M"},
{"surname": "Evans", "given-names": "DB"},
{"surname": "Jha", "given-names": "P"},
{"surname": "Mills", "given-names": "A"},
{"surname": "Musgrove", "given-names": "P"}
]))
references_authors.append(("10.7554/eLife.00051", "bib36", "authors", [
{"surname": "Rogers", "given-names": "RG"},
{"surname": "Crimmins", "given-names": "EM"}
]))
references_authors.append(("10.7554/eLife.00668", "bib39", "authors", [
{"surname": "Rice", "given-names": "SA"}
]))
references_authors.append(("10.7554/eLife.01730", "bib75", "authors", [
{"collab": "Look AHEAD Research Group"}
]))
references_authors.append(("10.7554/eLife.03714", "bib64", "authors", [
{"surname": "Otwinowski", "given-names": "Z"},
{"surname": "Minor", "given-names": "W"}
]))
references_authors.append(("10.7554/eLife.04220", "bib31", "authors", [
{"surname": "Tishby", "given-names": "N"},
{"surname": "Polani", "given-names": "D"}
]))
references_authors.append(("10.7554/eLife.04395", "bib67", "authors", [
{"surname": "King", "given-names": "AMQ"},
{"surname": "Adams", "given-names": "MJ"},
{"surname": "Carstens", "given-names": "EB"},
{"surname": "Lefkowitz", "given-names": "E"}
]))
references_authors.append(("10.7554/eLife.04449", "bib62", "authors", [
{"surname": "Shaham", "given-names": "S"}
]))
references_authors.append(("10.7554/eLife.04659", "bib57", "authors", [
{"surname": "Sambrook", "given-names": "J"},
{"surname": "Russell", "given-names": "TW"}
]))
references_authors.append(("10.7554/eLife.05423", "bib4", "authors", [
{"surname": "Birkhead", "given-names": "TR"},
{"surname": "Møller", "given-names": "AP"}
]))
references_authors.append(("10.7554/eLife.05423", "bib5", "authors", [
{"surname": "Birkhead", "given-names": "TR"},
{"surname": "Møller", "given-names": "AP"}
]))
references_authors.append(("10.7554/eLife.05423", "bib90", "authors", [
{"surname": "Smith", "given-names": "RL"}
]))
references_authors.append(("10.7554/eLife.05564", "bib39", "authors", [
{"surname": "Pattyn", "given-names": "S"}
]))
references_authors.append(("10.7554/eLife.05959", "bib76", "authors", [
{"surname": "Macholán", "given-names": "M"},
{"surname": "Baird", "given-names": "SJE"},
{"surname": "Munclinger", "given-names": "P"},
{"surname": "Piálek", "given-names": "J"}
]))
references_authors.append(("10.7554/eLife.06565", "bib1", "authors", [
{"surname": "Ahringer", "given-names": "J"}
]))
references_authors.append(("10.7554/eLife.06576", "bib57", "authors", [
{"surname": "Moller", "given-names": "AR"}
]))
references_authors.append(("10.7554/eLife.06813", "bib54", "authors", [
{"surname": "King", "given-names": "JA"}
]))
references_authors.append(("10.7554/eLife.06813", "bib55", "authors", [
{"surname": "Kirkland", "given-names": "Gl"},
{"surname": "Layne", "given-names": "JN"}
]))
references_authors.append(("10.7554/eLife.07460", "bib1", "authors", [
{"surname": "Rallapalli", "given-names": "Ghanasyam"}
]))
references_authors.append(("10.7554/eLife.07460", "bib2", "authors", [
{"surname": "Bazyl", "given-names": "Steven"}
]))
references_authors.append(("10.7554/eLife.07847", "bib40", "authors", [
{"collab": "Nature Immunology"}
]))
references_authors.append(("10.7554/eLife.09666", "bib9", "authors", [
{"surname": "Schüler", "given-names": "D"}
]))
references_authors.append(("10.7554/eLife.09868", "bib5", "authors", [
{"surname": "Barlow", "given-names": "HB"}
]))
references_authors.append(("10.7554/eLife.10222", "bib30", "authors", [
{"collab": "PharmaMar"}
]))
references_authors.append(("10.7554/eLife.11860", "bib48", "authors", [
{"surname": "Yang", "given-names": "Y"},
{"surname": "Wang", "given-names": "X"},
{"surname": "Chen", "given-names": "X"},
]))
references_authors.append(("10.7554/eLife.11945", "bib23", "authors", [
{"surname": "Glimcher", "given-names": "P"},
{"surname": "Fehr", "given-names": "E"}
]))
references_authors.append(("10.7554/eLife.13135", "bib26", "authors", [
{"surname": "Ivanova", "given-names": "S"},
{"surname": "Herbreteau", "given-names": "B"},
{"surname": "Blasdell", "given-names": "K"},
{"surname": "Chaval", "given-names": "Y"},
{"surname": "Buchy", "given-names": "P"},
{"surname": "Guillard", "given-names": "B"},
{"surname": "Morand", "given-names": "S"},
]))
references_authors.append(("10.7554/eLife.13135", "bib27", "authors", [
{"surname": "King", "given-names": "AMQ"},
{"surname": "Adams", "given-names": "J"},
{"surname": "Carstens", "given-names": "EB"},
{"surname": "Lefkowitz", "given-names": "EJ"}
]))
references_authors.append(("10.7554/eLife.14188", "bib1", "authors", [
{"collab": "Avisoft Bioacoustics"}
]))
references_authors.append(("10.7554/eLife.17716", "bib7", "authors", [
{"collab": "World Health Organization"}
]))
references_authors.append(("10.7554/eLife.17956", "bib4", "authors", [
{"surname": "Barrett", "given-names": "SCH"}
]))
references_authors.append(("10.7554/eLife.18109", "bib39", "authors", [
{"surname": "Weber", "given-names": "EH"}
]))
# Now turn the authors data into the json
for author_row in references_authors:
ref_json = OrderedDict()
doi, id, author_type, authors = author_row
ref_json[id] = OrderedDict()
ref_json[id][author_type] = []
for ref_author in authors:
if "collab" in ref_author:
author_json = elifetools.utils_html.references_author_collab(ref_author)
else:
author_json = elifetools.utils.references_author_person(ref_author)
if author_json:
ref_json[id][author_type].append(author_json)
        # Add to the json map, and do not overwrite an existing rule for a specific bib id (if present)
if doi not in references_rewrite_json:
references_rewrite_json[doi] = ref_json
else:
for key, value in iteritems(ref_json):
if key not in references_rewrite_json[doi]:
references_rewrite_json[doi][key] = value
else:
# Append dict items
for k, v in iteritems(value):
references_rewrite_json[doi][key][k] = v
return references_rewrite_json | Here is the DOI and references json replacements data for elife | entailment |
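# A quick check of the assembled rules (the author rows above require the
# elifetools helper functions, so elifetools must be importable):
demo_rules = elife_references_rewrite_json()
print(demo_rules["10.7554/eLife.00051"]["bib25"])  # {'date': '2012'}
print("authors" in demo_rules["10.7554/eLife.00051"]["bib21"])  # True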
def rewrite_elife_body_json(json_content, doi):
""" rewrite elife body json """
# Edge case add an id to a section
if doi == "10.7554/eLife.00013":
if (json_content and len(json_content) > 0):
if (json_content[0].get("type") and json_content[0].get("type") == "section"
and json_content[0].get("title") and json_content[0].get("title") =="Introduction"
and not json_content[0].get("id")):
json_content[0]["id"] = "s1"
# Edge case remove an extra section
if doi == "10.7554/eLife.04232":
if (json_content and len(json_content) > 0):
for outer_block in json_content:
if outer_block.get("id") and outer_block.get("id") == "s4":
for mid_block in outer_block.get("content"):
if mid_block.get("id") and mid_block.get("id") == "s4-6":
for inner_block in mid_block.get("content"):
if inner_block.get("content") and not inner_block.get("title"):
mid_block["content"] = inner_block.get("content")
# Edge case remove unwanted sections
if doi == "10.7554/eLife.04871":
if (json_content and len(json_content) > 0):
for i, outer_block in enumerate(json_content):
if (outer_block.get("id") and outer_block.get("id") in ["s7", "s8"]
and not outer_block.get("title")):
if outer_block.get("content"):
json_content[i] = outer_block.get("content")[0]
# Edge case remove an extra section
if doi == "10.7554/eLife.05519":
if (json_content and len(json_content) > 0):
for outer_block in json_content:
if outer_block.get("id") and outer_block.get("id") == "s4":
for mid_block in outer_block.get("content"):
if mid_block.get("content") and not mid_block.get("id"):
new_blocks = []
for inner_block in mid_block.get("content"):
new_blocks.append(inner_block)
outer_block["content"] = new_blocks
# Edge case add a title to a section
if doi == "10.7554/eLife.07157":
if (json_content and len(json_content) > 0):
if (json_content[0].get("type") and json_content[0].get("type") == "section"
and json_content[0].get("id") and json_content[0].get("id") == "s1"):
json_content[0]["title"] = "Main text"
# Edge case remove a section with no content
if doi == "10.7554/eLife.09977":
if (json_content and len(json_content) > 0):
i_index = j_index = None
for i, outer_block in enumerate(json_content):
if (outer_block.get("id") and outer_block.get("id") == "s4"
and outer_block.get("content")):
# We have i
i_index = i
break
if i_index is not None:
for j, inner_block in enumerate(json_content[i_index].get("content")):
if (inner_block.get("id") and inner_block.get("id") == "s4-11"
and inner_block.get("content") is None):
# Now we have i and j for deletion outside of the loop
j_index = j
break
# Do the deletion on the original json
if i_index is not None and j_index is not None:
del json_content[i_index]["content"][j_index]
# Edge case wrap sections differently
if doi == "10.7554/eLife.12844":
if (json_content and len(json_content) > 0 and json_content[0].get("type")
and json_content[0]["type"] == "section"):
new_body = OrderedDict()
for i, tag_block in enumerate(json_content):
if i == 0:
tag_block["title"] = "Main text"
new_body = tag_block
elif i > 0:
new_body["content"].append(tag_block)
json_content = [new_body]
return json_content | rewrite elife body json | entailment |
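# Usage sketch for the first edge case, adding a missing section id:
demo_body = [{"type": "section", "title": "Introduction", "content": []}]
demo_body = rewrite_elife_body_json(demo_body, "10.7554/eLife.00013")
print(demo_body[0].get("id"))  # s1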
def rewrite_elife_funding_awards(json_content, doi):
""" rewrite elife funding awards """
# remove a funding award
if doi == "10.7554/eLife.00801":
for i, award in enumerate(json_content):
if "id" in award and award["id"] == "par-2":
                del json_content[i]
                break
# add funding award recipient
if doi == "10.7554/eLife.04250":
recipients_for_04250 = [{"type": "person", "name": {"preferred": "Eric Jonas", "index": "Jonas, Eric"}}]
for i, award in enumerate(json_content):
if "id" in award and award["id"] in ["par-2", "par-3", "par-4"]:
if "recipients" not in award:
json_content[i]["recipients"] = recipients_for_04250
# add funding award recipient
if doi == "10.7554/eLife.06412":
recipients_for_06412 = [{"type": "person", "name": {"preferred": "Adam J Granger", "index": "Granger, Adam J"}}]
for i, award in enumerate(json_content):
if "id" in award and award["id"] == "par-1":
if "recipients" not in award:
json_content[i]["recipients"] = recipients_for_06412
return json_content | rewrite elife funding awards | entailment |
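# Usage sketch: the award with id par-2 is dropped for this article:
demo_awards = [{"id": "par-1"}, {"id": "par-2"}]
demo_awards = rewrite_elife_funding_awards(demo_awards, "10.7554/eLife.00801")
print([award["id"] for award in demo_awards])  # ['par-1']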
def rewrite_elife_authors_json(json_content, doi):
""" this does the work of rewriting elife authors json """
# Convert doi from testing doi if applicable
article_doi = elifetools.utils.convert_testing_doi(doi)
# Edge case fix an affiliation name
if article_doi == "10.7554/eLife.06956":
for i, ref in enumerate(json_content):
if ref.get("orcid") and ref.get("orcid") == "0000-0001-6798-0064":
json_content[i]["affiliations"][0]["name"] = ["Cambridge"]
# Edge case fix an ORCID
if article_doi == "10.7554/eLife.09376":
for i, ref in enumerate(json_content):
if ref.get("orcid") and ref.get("orcid") == "000-0001-7224-925X":
json_content[i]["orcid"] = "0000-0001-7224-925X"
# Edge case competing interests
if article_doi == "10.7554/eLife.00102":
for i, ref in enumerate(json_content):
if not ref.get("competingInterests"):
if ref["name"]["index"].startswith("Chen,"):
json_content[i]["competingInterests"] = "ZJC: Reviewing Editor, <i>eLife</i>"
elif ref["name"]["index"].startswith("Li,"):
json_content[i]["competingInterests"] = "The remaining authors have no competing interests to declare."
if article_doi == "10.7554/eLife.00270":
for i, ref in enumerate(json_content):
if not ref.get("competingInterests"):
if ref["name"]["index"].startswith("Patterson,"):
json_content[i]["competingInterests"] = "MP: Managing Executive Editor, <i>eLife</i>"
# Remainder of competing interests rewrites
elife_author_competing_interests = {}
elife_author_competing_interests["10.7554/eLife.00133"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.00190"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.00230"] = "The authors have declared that no competing interests exist"
elife_author_competing_interests["10.7554/eLife.00288"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.00352"] = "The author declares that no competing interest exist"
elife_author_competing_interests["10.7554/eLife.00362"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.00475"] = "The remaining authors have no competing interests to declare."
elife_author_competing_interests["10.7554/eLife.00592"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.00633"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.02725"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.02935"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.04126"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.04878"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.05322"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.06011"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.06416"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.07383"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.08421"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.08494"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.08648"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.08924"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.09083"] = "The other authors declare that no competing interests exists."
elife_author_competing_interests["10.7554/eLife.09102"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.09460"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.09591"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.09600"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.10113"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.10230"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.10453"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.10635"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.11407"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.11473"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.11750"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.12217"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.12620"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.12724"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.13023"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.13732"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.14116"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.14258"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.14694"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.15085"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.15312"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.16011"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.16940"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17023"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17092"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17218"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17267"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17523"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17556"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17769"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17834"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.18101"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.18515"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.18544"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.18648"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.19071"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.19334"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.19510"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.20183"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.20242"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.20375"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.20797"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.21454"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.21491"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.22187"] = "The authors declare that no competing interests exist."
if article_doi in elife_author_competing_interests:
for i, ref in enumerate(json_content):
if not ref.get("competingInterests"):
json_content[i]["competingInterests"] = elife_author_competing_interests[article_doi]
# Rewrite "other authors declare" ... competing interests statements using a string match
for i, ref in enumerate(json_content):
if (ref.get("competingInterests") and (
ref.get("competingInterests").startswith("The other author") or
ref.get("competingInterests").startswith("The others author") or
ref.get("competingInterests").startswith("The remaining authors") or
ref.get("competingInterests").startswith("The remaining have declared")
)):
json_content[i]["competingInterests"] = "No competing interests declared."
return json_content | this does the work of rewriting elife authors json | entailment |
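# Usage sketch of the final string-match normalisation; elifetools must be
# importable because the function calls elifetools.utils.convert_testing_doi
# (assumed to return a non-testing DOI unchanged).
demo_authors = [{"name": {"index": "Doe, J"},
                 "competingInterests": "The other authors declare nothing."}]
demo_authors = rewrite_elife_authors_json(demo_authors, "10.7554/eLife.99999")
print(demo_authors[0]["competingInterests"])  # No competing interests declared.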
def rewrite_elife_datasets_json(json_content, doi):
""" this does the work of rewriting elife datasets json """
# Add dates in bulk
elife_dataset_dates = []
elife_dataset_dates.append(("10.7554/eLife.00348", "used", "dataro17", u"2010"))
elife_dataset_dates.append(("10.7554/eLife.01179", "used", "dataro4", u"2016"))
elife_dataset_dates.append(("10.7554/eLife.01603", "used", "dataro2", u"2012"))
elife_dataset_dates.append(("10.7554/eLife.02304", "used", "dataro15", u"2005"))
elife_dataset_dates.append(("10.7554/eLife.02935", "used", "dataro2", u"2014"))
elife_dataset_dates.append(("10.7554/eLife.03583", "used", "dataro5", u"2013"))
    if doi in [dataset_row[0] for dataset_row in elife_dataset_dates]:
for (match_doi, used_or_generated, id, dataset_date) in elife_dataset_dates:
if doi == match_doi:
if json_content.get(used_or_generated):
for dataset in json_content[used_or_generated]:
if dataset.get("id") and dataset["id"] == id:
if not dataset.get("date"):
dataset["date"] = dataset_date
# Continue with individual article JSON rewriting
if doi == "10.7554/eLife.01311":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] in ["dataro3", "dataro4", "dataro5"]:
if not dataset.get("date"):
dataset["date"] = u"2012"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Duke"}]
if dataset.get("id") and dataset["id"] == "dataro6":
if not dataset.get("date"):
dataset["date"] = u"2011"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "FlyBase"}]
if dataset.get("id") and dataset["id"] == "dataro7":
if not dataset.get("date"):
dataset["date"] = u"2011"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Baylor College of Medicine (BCM)"}]
if dataset.get("id") and dataset["id"] in ["dataro8", "dataro9"]:
if not dataset.get("date"):
dataset["date"] = u"2012"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "University of California, Berkeley"}]
if doi == "10.7554/eLife.01440":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "EnsemblMetazoa"}]
if doi == "10.7554/eLife.01535":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date") == "2000, 2005":
dataset["date"] = u"2000"
if doi == "10.7554/eLife.02304":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro11":
if not dataset.get("title"):
dataset["title"] = u"T.gondii LDH1 ternary complex with APAD+ and oxalate"
if doi == "10.7554/eLife.03574":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("date"):
dataset["date"] = u"2006"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Riley,M."}, {"type": "group", "name": "Abe,T."}, {"type": "group", "name": "Arnaud,M.B."}, {"type": "group", "name": "Berlyn,M.K."}, {"type": "group", "name": "Blattner,F.R."}, {"type": "group", "name": "Chaudhuri,R.R."}, {"type": "group", "name": "Glasner,J.D."}, {"type": "group", "name": "Horiuchi,T."}, {"type": "group", "name": "Keseler,I.M."}, {"type": "group", "name": "Kosuge,T."}, {"type": "group", "name": "Mori,H."}, {"type": "group", "name": "Perna,N.T."}, {"type": "group", "name": "Plunkett,G. III"}, {"type": "group", "name": "Rudd,K.E."}, {"type": "group", "name": "Serres,M.H."}, {"type": "group", "name": "Thomas,G.H."}, {"type": "group", "name": "Thomson,N.R."}, {"type": "group", "name": "Wishart,D."}, {"type": "group", "name": "Wanner,B.L."}]
if doi == "10.7554/eLife.03676":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro4":
if not dataset.get("date"):
dataset["date"] = u"2013"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Human Gene Sequencing Center"}]
if doi == "10.7554/eLife.03971":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Vanderperre B."}]
if doi == "10.7554/eLife.04660":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date") == "2014-2015":
dataset["date"] = u"2014"
if doi == "10.7554/eLife.06421":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if dataset.get("date") and dataset.get("date") == "NA":
dataset["date"] = u"2006"
if doi == "10.7554/eLife.08445":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "data-ro1":
if not dataset.get("date"):
dataset["date"] = u"2006"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "BDTNP SELEX"}]
if doi == "10.7554/eLife.08916":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if dataset.get("date") and dataset.get("date") == "2008, updated 2014":
dataset["date"] = u"2008"
if dataset.get("id") and dataset["id"] == "dataro3":
if dataset.get("date") and dataset.get("date") == "2013, updated 2014":
dataset["date"] = u"2013"
if doi == "10.7554/eLife.08955":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Kurdistani S"}, {"type": "group", "name": "Marrban C"}, {"type": "group", "name": "Su T"}]
if doi == "10.7554/eLife.09207":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Prostate Cancer Genome Sequencing Project"}]
if doi == "10.7554/eLife.10607":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "data-ro4":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Authors"}]
if doi == "10.7554/eLife.10670":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "data-ro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "HIVdb"}]
# Add dates, authors, other details
if doi == "10.7554/eLife.10856":
if json_content.get("generated"):
datasets_authors_for_10856 = [{"type": "group", "name": "Dagdas YF"}, {"type": "group", "name": "Belhaj K"}, {"type": "group", "name": "Maqbool A"}, {"type": "group", "name": "Chaparro-Garcia A"}, {"type": "group", "name": "Pandey P"}, {"type": "group", "name": "Petre B"}, {"type": "group", "name": "Tabassum N"}, {"type": "group", "name": "Cruz-Mireles N"}, {"type": "group", "name": "Hughes RK"}, {"type": "group", "name": "Sklenar J"}, {"type": "group", "name": "Win J"}, {"type": "group", "name": "Menke F"}, {"type": "group", "name": "Findlay K"}, {"type": "group", "name": "Banfield MJ"}, {"type": "group", "name": "Kamoun S"}, {"type": "group", "name": "Bozkurt TO"}]
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro7":
if not dataset.get("date"):
dataset["date"] = u"2016"
if not dataset.get("title"):
dataset["title"] = u"An effector of the Irish potato famine pathogen antagonizes a host autophagy cargo receptor"
if not dataset.get("authors"):
dataset["authors"] = datasets_authors_for_10856
if dataset.get("uri") and dataset["uri"] == "http://www.ncbi.nlm.nih.":
dataset["uri"] = "https://www.ncbi.nlm.nih.gov/nuccore/976151098/"
if dataset.get("id") and dataset["id"] == "dataro8":
if not dataset.get("date"):
dataset["date"] = u"2015"
if not dataset.get("title"):
dataset["title"] = u"An effector of the Irish potato famine pathogen antagonizes a host autophagy cargo receptor"
if not dataset.get("authors"):
dataset["authors"] = datasets_authors_for_10856
if dataset.get("uri") and dataset["uri"] == "http://www.ncbi.nlm.nih.":
dataset["uri"] = "https://www.ncbi.nlm.nih.gov/nuccore/976151096/"
if dataset.get("id") and dataset["id"] == "dataro9":
if not dataset.get("authors"):
dataset["authors"] = datasets_authors_for_10856
if doi == "10.7554/eLife.10877":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("title"):
dataset["title"] = u"Oct4 ChIP-Seq at G1 and G2/M phase of cell cycle in mouse embryonic stem cells"
if doi == "10.7554/eLife.10921":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Floor SN"}, {"type": "group", "name": "Doudna JA"}]
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Sidrauski C"}, {"type": "group", "name": "McGeachy A"}, {"type": "group", "name": "Ingolia N"}, {"type": "group", "name": "Walter P"}]
if doi == "10.7554/eLife.11117":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro14":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Authors"}]
if doi == "10.7554/eLife.12204":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Rhodes DR"}, {"type": "group", "name": "Kalyana-Sundaram S"}, {"type": "group", "name": "Mahavisno V"}, {"type": "group", "name": "Varambally R"}, {"type": "group", "name": "Yu J"}, {"type": "group", "name": "Briggs BB"}, {"type": "group", "name": "Barrette TR"}, {"type": "group", "name": "Anstet MJ"}, {"type": "group", "name": "Kincead-Beal C"}, {"type": "group", "name": "Kulkarni P"}, {"type": "group", "name": "Varambally S"}, {"type": "group", "name": "Ghosh D"}, {"type": "group", "name": "Chinnaiyan AM."}]
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Gaspar C"}, {"type": "group", "name": "Cardoso J"}, {"type": "group", "name": "Franken P"}, {"type": "group", "name": "Molenaar L"}, {"type": "group", "name": "Morreau H"}, {"type": "group", "name": "Möslein G"}, {"type": "group", "name": "Sampson J"}, {"type": "group", "name": "Boer JM"}, {"type": "group", "name": "de Menezes RX"}, {"type": "group", "name": "Fodde R."}]
if dataset.get("id") and dataset["id"] == "dataro3":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Graudens E"}, {"type": "group", "name": "Boulanger V"}, {"type": "group", "name": "Mollard C"}, {"type": "group", "name": "Mariage-Samson R"}, {"type": "group", "name": "Barlet X"}, {"type": "group", "name": "Grémy G"}, {"type": "group", "name": "Couillault C"}, {"type": "group", "name": "Lajémi M"}, {"type": "group", "name": "Piatier-Tonneau D"}, {"type": "group", "name": "Zaborski P"}, {"type": "group", "name": "Eveno E"}, {"type": "group", "name": "Auffray C"}, {"type": "group", "name": "Imbeaud S."}]
if dataset.get("id") and dataset["id"] == "dataro4":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Hong Y"}, {"type": "group", "name": "Downey T"}, {"type": "group", "name": "Eu KW"}, {"type": "group", "name": "Koh PK"},{"type": "group", "name": "Cheah PY"}]
if dataset.get("id") and dataset["id"] == "dataro5":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Kaiser S"}, {"type": "group", "name": "Park YK"}, {"type": "group", "name": "Franklin JL"}, {"type": "group", "name": "Halberg RB"}, {"type": "group", "name": "Yu M"}, {"type": "group", "name": "Jessen WJ"}, {"type": "group", "name": "Freudenberg J"}, {"type": "group", "name": "Chen X"}, {"type": "group", "name": "Haigis K"}, {"type": "group", "name": "Jegga AG"}, {"type": "group", "name": "Kong S"}, {"type": "group", "name": "Sakthivel B"}, {"type": "group", "name": "Xu H"}, {"type": "group", "name": "Reichling T"}, {"type": "group", "name": "Azhar M"}, {"type": "group", "name": "Boivin GP"}, {"type": "group", "name": "Roberts RB"}, {"type": "group", "name": "Bissahoyo AC"}, {"type": "group", "name": "Gonzales F"}, {"type": "group", "name": "Bloom GC"}, {"type": "group", "name": "Eschrich S"}, {"type": "group", "name": "Carter SL"}, {"type": "group", "name": "Aronow JE"}, {"type": "group", "name": "Kleimeyer J"}, {"type": "group", "name": "Kleimeyer M"}, {"type": "group", "name": "Ramaswamy V"}, {"type": "group", "name": "Settle SH"}, {"type": "group", "name": "Boone B"}, {"type": "group", "name": "Levy S"}, {"type": "group", "name": "Graff JM"}, {"type": "group", "name": "Doetschman T"}, {"type": "group", "name": "Groden J"}, {"type": "group", "name": "Dove WF"}, {"type": "group", "name": "Threadgill DW"}, {"type": "group", "name": "Yeatman TJ"}, {"type": "group", "name": "Coffey RJ Jr"}, {"type": "group", "name": "Aronow BJ."}]
if dataset.get("id") and dataset["id"] == "dataro6":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Muzny DM et al"}]
if dataset.get("id") and dataset["id"] == "dataro7":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Skrzypczak M"}, {"type": "group", "name": "Goryca K"}, {"type": "group", "name": "Rubel T"}, {"type": "group", "name": "Paziewska A"}, {"type": "group", "name": "Mikula M"}, {"type": "group", "name": "Jarosz D"}, {"type": "group", "name": "Pachlewski J"}, {"type": "group", "name": "Oledzki J"}, {"type": "group", "name": "Ostrowski J."}]
if dataset.get("id") and dataset["id"] == "dataro8":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Cancer Genome Atlas Network"}]
if doi == "10.7554/eLife.12876":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Department of Human Genetics, University of Utah"}]
if doi == "10.7554/eLife.13195":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Microbial Ecology Group, Colorado State University"}]
if doi == "10.7554/eLife.14158":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "data-ro1":
if not dataset.get("title"):
dataset["title"] = u"Bacterial initiation protein"
if dataset.get("id") and dataset["id"] == "data-ro2":
if not dataset.get("title"):
dataset["title"] = u"Bacterial initiation protein in complex with Phage inhibitor protein"
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro3":
if not dataset.get("date"):
dataset["date"] = u"2007"
if doi == "10.7554/eLife.14243":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Tramantano M"}, {"type": "group", "name": "Sun L"}, {"type": "group", "name": "Au C"}, {"type": "group", "name": "Labuz D"}, {"type": "group", "name": "Liu Z"}, {"type": "group", "name": "Chou M"}, {"type": "group", "name": "Shen C"}, {"type": "group", "name": "Luk E"}]
if doi == "10.7554/eLife.16078":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date") == "current manuscript":
dataset["date"] = u"2016"
if doi == "10.7554/eLife.17082":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "data-ro4":
if not dataset.get("date"):
dataset["date"] = u"2012"
if dataset.get("id") and dataset["id"] == "data-ro5":
if not dataset.get("date"):
dataset["date"] = u"2014"
if dataset.get("id") and dataset["id"] == "data-ro6":
if not dataset.get("date"):
dataset["date"] = u"2014"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "The Cancer Genome Atlas (TCGA)"}]
if doi == "10.7554/eLife.17473":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date").startswith("Release date"):
dataset["date"] = u"2016"
return json_content | this does the work of rewriting elife datasets json | entailment |
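# Usage sketch for the bulk date fill at the top of the function:
demo_datasets = {"used": [{"id": "dataro17"}]}
demo_datasets = rewrite_elife_datasets_json(demo_datasets, "10.7554/eLife.00348")
print(demo_datasets["used"][0]["date"])  # 2010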
def rewrite_elife_editors_json(json_content, doi):
""" this does the work of rewriting elife editors json """
    # Remove the affiliations list when any affiliation has no name value
    for i, ref in enumerate(json_content):
        if ref.get("affiliations"):
            for aff in ref.get("affiliations"):
                if "name" not in aff:
                    del json_content[i]["affiliations"]
                    break
# Add editor role
editor_roles = {}
editor_roles["10.7554/eLife.00534"] = "Reviewing Editor"
editor_roles["10.7554/eLife.09376"] = "Reviewing Editor"
editor_roles["10.7554/eLife.10056"] = "Reviewing Editor"
editor_roles["10.7554/eLife.11031"] = "Reviewing Editor"
editor_roles["10.7554/eLife.12081"] = "Reviewing Editor"
editor_roles["10.7554/eLife.12241"] = "Reviewing Editor"
editor_roles["10.7554/eLife.12509"] = "Reviewing Editor"
editor_roles["10.7554/eLife.13023"] = "Reviewing Editor"
editor_roles["10.7554/eLife.13053"] = "Reviewing Editor"
editor_roles["10.7554/eLife.13426"] = "Reviewing Editor"
editor_roles["10.7554/eLife.13620"] = "Reviewing Editor"
editor_roles["10.7554/eLife.13810"] = "Reviewing Editor"
editor_roles["10.7554/eLife.13828"] = "Reviewing Editor"
editor_roles["10.7554/eLife.13887"] = "Reviewing Editor"
editor_roles["10.7554/eLife.13905"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14000"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14155"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14170"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14226"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14277"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14315"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14316"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14530"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14601"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14618"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14749"] = "Reviewing Editor"
editor_roles["10.7554/eLife.14814"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15266"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15275"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15292"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15316"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15470"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15545"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15716"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15747"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15828"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15833"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15915"] = "Reviewing Editor"
editor_roles["10.7554/eLife.15986"] = "Reviewing Editor"
editor_roles["10.7554/eLife.16088"] = "Reviewing Editor"
editor_roles["10.7554/eLife.16093"] = "Reviewing Editor"
editor_roles["10.7554/eLife.16127"] = "Reviewing Editor"
editor_roles["10.7554/eLife.16159"] = "Reviewing Editor"
editor_roles["10.7554/eLife.16178"] = "Reviewing Editor"
editor_roles["10.7554/eLife.16309"] = "Reviewing Editor"
editor_roles["10.7554/eLife.16777"] = "Reviewing Editor"
editor_roles["10.7554/eLife.16793"] = "Reviewing Editor"
editor_roles["10.7554/eLife.16950"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17101"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17180"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17240"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17262"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17282"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17463"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17551"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17667"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17681"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17978"] = "Reviewing Editor"
editor_roles["10.7554/eLife.17985"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18103"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18207"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18246"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18249"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18432"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18447"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18458"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18491"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18541"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18542"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18579"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18605"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18633"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18657"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18919"] = "Reviewing Editor"
editor_roles["10.7554/eLife.18970"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19027"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19088"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19089"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19295"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19377"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19406"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19466"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19484"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19505"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19535"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19568"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19573"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19662"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19671"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19686"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19695"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19720"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19749"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19766"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19804"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19809"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19887"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19976"] = "Reviewing Editor"
editor_roles["10.7554/eLife.19991"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20010"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20054"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20070"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20183"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20185"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20214"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20236"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20309"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20343"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20357"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20362"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20365"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20390"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20417"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20515"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20533"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20607"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20640"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20667"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20718"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20722"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20777"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20782"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20787"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20797"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20799"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20813"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20954"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20958"] = "Reviewing Editor"
editor_roles["10.7554/eLife.20985"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21032"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21049"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21052"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21170"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21172"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21290"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21330"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21394"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21397"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21455"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21481"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21491"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21589"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21598"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21616"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21635"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21728"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21771"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21776"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21855"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21886"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21920"] = "Reviewing Editor"
editor_roles["10.7554/eLife.21989"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22028"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22053"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22170"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22177"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22280"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22409"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22429"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22431"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22467"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22472"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22502"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22771"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22784"] = "Reviewing Editor"
editor_roles["10.7554/eLife.22866"] = "Reviewing Editor"
editor_roles["10.7554/eLife.23156"] = "Reviewing Editor"
editor_roles["10.7554/eLife.23352"] = "Reviewing Editor"
editor_roles["10.7554/eLife.23804"] = "Reviewing Editor"
# Edge case: fix editor role values
if doi in editor_roles:
for i, ref in enumerate(json_content):
if not ref.get("role"):
json_content[i]["role"] = editor_roles[doi]
elif ref.get("role"):
json_content[i]["role"] = "Reviewing Editor"
else:
# Fix capitalisation on existing role values
for i, ref in enumerate(json_content):
if ref.get("role") == "Reviewing editor":
json_content[i]["role"] = "Reviewing Editor"
# Remove duplicates
editors_kept = []
for i, ref in enumerate(json_content):
editor_values = OrderedDict()
editor_values["role"] = ref.get("role")
if ref.get("name"):
editor_values["name"] = ref.get("name").get("index")
if editor_values in editors_kept:
# remove this record if an identical one has already been kept
del json_content[i]
else:
editors_kept.append(editor_values)
# Merge two role values
role_replacements = [
{
"role_from": ["Senior Editor", "Reviewing Editor"],
"role_to": "Senior and Reviewing Editor"}
]
for replace_rule in role_replacements:
same_name_map = person_same_name_map(json_content, replace_rule.get('role_from'))
role_is_set = None
for same_id_list in same_name_map.values():
if not same_id_list or len(same_id_list) <= 1:
# no more than one name match, nothing to replace
continue
deleted_count = 0
for same_id in same_id_list:
if not role_is_set:
# set the merged role on the first matching person record
json_content[same_id]["role"] = replace_rule.get("role_to")
role_is_set = True
else:
# first one is already set, remove the duplicates
del json_content[same_id-deleted_count]
deleted_count += 1
return json_content | this does the work of rewriting elife editors json | entailment |
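A short usage sketch of the rewrite above. The function name, its (json_content, doi) signature, and the import path are assumptions inferred from the sibling rewrite_elife_title_prefix_json below; the editor-list shape follows what the code reads.

from elifetools.json_rewrite import rewrite_elife_editors_json  # assumed path

editors = [
    {"name": {"index": "Smith, Jane"}},  # no role: filled from the DOI table
    {"role": "Reviewing editor", "name": {"index": "Smith, Jane"}},
]
editors = rewrite_elife_editors_json(editors, "10.7554/eLife.20183")
# Both records get role "Reviewing Editor", after which the duplicate
# (same role and same name index) is removed, leaving a single editor.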
def person_same_name_map(json_content, role_from):
"to merge multiple editors into one record, filter by role values and group by name"
matched_editors = [(i, person) for i, person in enumerate(json_content)
if person.get('role') in role_from]
same_name_map = {}
for i, editor in matched_editors:
if not editor.get("name"):
continue
# compare name of each
name = editor.get("name").get("index")
if name not in same_name_map:
same_name_map[name] = []
same_name_map[name].append(i)
return same_name_map | to merge multiple editors into one record, filter by role values and group by name | entailment |
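person_same_name_map is a pure function, so its behaviour can be pinned down with a small self-contained example (the names are invented for illustration):

people = [
    {"role": "Senior Editor", "name": {"index": "Smith, Jane"}},
    {"role": "Reviewing Editor", "name": {"index": "Smith, Jane"}},
    {"role": "Reviewing Editor", "name": {"index": "Doe, John"}},
    {"role": "Reviewing Editor"},  # no name at all: skipped
]
same_name_map = person_same_name_map(
    people, ["Senior Editor", "Reviewing Editor"])
# {'Smith, Jane': [0, 1], 'Doe, John': [2]}
# 'Smith, Jane' appears under both roles, so her records are merge candidates.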
def rewrite_elife_title_prefix_json(json_content, doi):
""" this does the work of rewriting elife title prefix json values"""
if not json_content:
return json_content
# title prefix rewrites by article DOI
title_prefix_values = {}
title_prefix_values["10.7554/eLife.00452"] = "Point of View"
title_prefix_values["10.7554/eLife.00615"] = "Point of View"
title_prefix_values["10.7554/eLife.00639"] = "Point of View"
title_prefix_values["10.7554/eLife.00642"] = "Point of View"
title_prefix_values["10.7554/eLife.00856"] = "Point of View"
title_prefix_values["10.7554/eLife.01061"] = "Point of View"
title_prefix_values["10.7554/eLife.01138"] = "Point of View"
title_prefix_values["10.7554/eLife.01139"] = "Point of View"
title_prefix_values["10.7554/eLife.01820"] = "Animal Models of Disease"
title_prefix_values["10.7554/eLife.02576"] = "Point of View"
title_prefix_values["10.7554/eLife.04902"] = "Point of View"
title_prefix_values["10.7554/eLife.05614"] = "Point of View"
title_prefix_values["10.7554/eLife.05635"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.05826"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.05835"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.05849"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.05861"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.05959"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.06024"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.06100"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.06793"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.06813"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.06956"] = "The Natural History of Model Organisms"
title_prefix_values["10.7554/eLife.09305"] = "Point of View"
title_prefix_values["10.7554/eLife.10825"] = "Point of View"
title_prefix_values["10.7554/eLife.11628"] = "Living Science"
title_prefix_values["10.7554/eLife.12708"] = "Point of View"
title_prefix_values["10.7554/eLife.12844"] = "Point of View"
title_prefix_values["10.7554/eLife.13035"] = "Point of View"
title_prefix_values["10.7554/eLife.14258"] = "Cutting Edge"
title_prefix_values["10.7554/eLife.14424"] = "Point of View"
title_prefix_values["10.7554/eLife.14511"] = "Cell Proliferation"
title_prefix_values["10.7554/eLife.14721"] = "Intracellular Bacteria"
title_prefix_values["10.7554/eLife.14790"] = "Decision Making"
title_prefix_values["10.7554/eLife.14830"] = "Progenitor Cells"
title_prefix_values["10.7554/eLife.14953"] = "Gene Expression"
title_prefix_values["10.7554/eLife.14973"] = "Breast Cancer"
title_prefix_values["10.7554/eLife.15352"] = "Autoimmune Disorders"
title_prefix_values["10.7554/eLife.15438"] = "Motor Circuits"
title_prefix_values["10.7554/eLife.15591"] = "Protein Tagging"
title_prefix_values["10.7554/eLife.15928"] = "Point of View"
title_prefix_values["10.7554/eLife.15938"] = "Cancer Metabolism"
title_prefix_values["10.7554/eLife.15957"] = "Stem Cells"
title_prefix_values["10.7554/eLife.15963"] = "Prediction Error"
title_prefix_values["10.7554/eLife.16019"] = "Social Networks"
title_prefix_values["10.7554/eLife.16076"] = "mRNA Decay"
title_prefix_values["10.7554/eLife.16207"] = "Cardiac Development"
title_prefix_values["10.7554/eLife.16209"] = "Neural Coding"
title_prefix_values["10.7554/eLife.16393"] = "Neural Circuits"
title_prefix_values["10.7554/eLife.16598"] = "RNA Localization"
title_prefix_values["10.7554/eLife.16758"] = "Adaptive Evolution"
title_prefix_values["10.7554/eLife.16800"] = "Point of View"
title_prefix_values["10.7554/eLife.16846"] = "Living Science"
title_prefix_values["10.7554/eLife.16931"] = "Point of View"
title_prefix_values["10.7554/eLife.16964"] = "Ion Channels"
title_prefix_values["10.7554/eLife.17224"] = "Host-virus Interactions"
title_prefix_values["10.7554/eLife.17293"] = "Ion Channels"
title_prefix_values["10.7554/eLife.17393"] = "Point of View"
title_prefix_values["10.7554/eLife.17394"] = "p53 Family Proteins"
title_prefix_values["10.7554/eLife.18203"] = "Antibody Engineering"
title_prefix_values["10.7554/eLife.18243"] = "Host-virus Interactions"
title_prefix_values["10.7554/eLife.18365"] = "DNA Repair"
title_prefix_values["10.7554/eLife.18431"] = "Unfolded Protein Response"
title_prefix_values["10.7554/eLife.18435"] = "Long Distance Transport"
title_prefix_values["10.7554/eLife.18721"] = "Decision Making"
title_prefix_values["10.7554/eLife.18753"] = "Resource Competition"
title_prefix_values["10.7554/eLife.18871"] = "Mathematical Modeling"
title_prefix_values["10.7554/eLife.18887"] = "Sensorimotor Transformation"
title_prefix_values["10.7554/eLife.19285"] = "Genetic Screen"
title_prefix_values["10.7554/eLife.19351"] = "Motor Control"
title_prefix_values["10.7554/eLife.19405"] = "Membrane Structures"
title_prefix_values["10.7554/eLife.19733"] = "Focal Adhesions"
title_prefix_values["10.7554/eLife.20043"] = "Amyloid-beta Peptides"
title_prefix_values["10.7554/eLife.20314"] = "Plant Reproduction"
title_prefix_values["10.7554/eLife.20468"] = "Endoplasmic Reticulum"
title_prefix_values["10.7554/eLife.20516"] = "Innate Like Lymphocytes"
title_prefix_values["10.7554/eLife.21070"] = "Scientific Publishing"
title_prefix_values["10.7554/eLife.21236"] = "Developmental Neuroscience"
title_prefix_values["10.7554/eLife.21522"] = "Developmental Neuroscience"
title_prefix_values["10.7554/eLife.21723"] = "Living Science"
title_prefix_values["10.7554/eLife.21863"] = "Genetic Screening"
title_prefix_values["10.7554/eLife.21864"] = "Evolutionary Biology"
title_prefix_values["10.7554/eLife.22073"] = "Unfolded Protein Response"
title_prefix_values["10.7554/eLife.22186"] = "Point of View"
title_prefix_values["10.7554/eLife.22215"] = "Neural Wiring"
title_prefix_values["10.7554/eLife.22256"] = "Molecular Communication"
title_prefix_values["10.7554/eLife.22471"] = "Point of View"
title_prefix_values["10.7554/eLife.22661"] = "Reproducibility in Cancer Biology"
title_prefix_values["10.7554/eLife.22662"] = "Reproducibility in Cancer Biology"
title_prefix_values["10.7554/eLife.22735"] = "Motor Networks"
title_prefix_values["10.7554/eLife.22850"] = "Heat Shock Response"
title_prefix_values["10.7554/eLife.22915"] = "Reproducibility in Cancer Biology"
title_prefix_values["10.7554/eLife.22926"] = "Skeletal Stem Cells"
title_prefix_values["10.7554/eLife.23375"] = "Social Evolution"
title_prefix_values["10.7554/eLife.23383"] = "Reproducibility in Cancer Biology"
title_prefix_values["10.7554/eLife.23447"] = "Genetic Rearrangement"
title_prefix_values["10.7554/eLife.23693"] = "Reproducibility in Cancer Biology"
title_prefix_values["10.7554/eLife.23804"] = "Point of View"
title_prefix_values["10.7554/eLife.24038"] = "Cell Division"
title_prefix_values["10.7554/eLife.24052"] = "DNA Replication"
title_prefix_values["10.7554/eLife.24106"] = "Germ Granules"
title_prefix_values["10.7554/eLife.24238"] = "Tumor Angiogenesis"
title_prefix_values["10.7554/eLife.24276"] = "Stem Cells"
title_prefix_values["10.7554/eLife.24611"] = "Point of View"
title_prefix_values["10.7554/eLife.24896"] = "Visual Behavior"
title_prefix_values["10.7554/eLife.25000"] = "Chromatin Mapping"
title_prefix_values["10.7554/eLife.25001"] = "Cell Cycle"
title_prefix_values["10.7554/eLife.25159"] = "Ion Channels"
title_prefix_values["10.7554/eLife.25358"] = "Cell Division"
title_prefix_values["10.7554/eLife.25375"] = "Membrane Phase Separation"
title_prefix_values["10.7554/eLife.25408"] = "Plain-language Summaries of Research"
title_prefix_values["10.7554/eLife.25410"] = "Plain-language Summaries of Research"
title_prefix_values["10.7554/eLife.25411"] = "Plain-language Summaries of Research"
title_prefix_values["10.7554/eLife.25412"] = "Plain-language Summaries of Research"
title_prefix_values["10.7554/eLife.25431"] = "Genetic Diversity"
title_prefix_values["10.7554/eLife.25654"] = "Systems Biology"
title_prefix_values["10.7554/eLife.25669"] = "Paternal Effects"
title_prefix_values["10.7554/eLife.25700"] = "TOR Signaling"
title_prefix_values["10.7554/eLife.25835"] = "Cutting Edge"
title_prefix_values["10.7554/eLife.25858"] = "Developmental Biology"
title_prefix_values["10.7554/eLife.25956"] = "Point of View"
title_prefix_values["10.7554/eLife.25996"] = "Cancer Therapeutics"
title_prefix_values["10.7554/eLife.26295"] = "Point of View"
title_prefix_values["10.7554/eLife.26401"] = "Object Recognition"
title_prefix_values["10.7554/eLife.26775"] = "Human Evolution"
title_prefix_values["10.7554/eLife.26787"] = "Cutting Edge"
title_prefix_values["10.7554/eLife.26942"] = "Alzheimer’s Disease"
title_prefix_values["10.7554/eLife.27085"] = "Translational Control"
title_prefix_values["10.7554/eLife.27198"] = "Cell Signaling"
title_prefix_values["10.7554/eLife.27438"] = "Point of View"
title_prefix_values["10.7554/eLife.27467"] = "Evolutionary Developmental Biology"
title_prefix_values["10.7554/eLife.27605"] = "Population Genetics"
title_prefix_values["10.7554/eLife.27933"] = "Ion Channels"
title_prefix_values["10.7554/eLife.27982"] = "Living Science"
title_prefix_values["10.7554/eLife.28339"] = "Oncogene Regulation"
title_prefix_values["10.7554/eLife.28514"] = "Maternal Behavior"
title_prefix_values["10.7554/eLife.28699"] = "Point of View"
title_prefix_values["10.7554/eLife.28757"] = "Mitochondrial Homeostasis"
title_prefix_values["10.7554/eLife.29056"] = "Gene Variation"
title_prefix_values["10.7554/eLife.29104"] = "Cardiac Hypertrophy"
title_prefix_values["10.7554/eLife.29502"] = "Meiotic Recombination"
title_prefix_values["10.7554/eLife.29586"] = "Virus Evolution"
title_prefix_values["10.7554/eLife.29942"] = "Post-translational Modifications"
title_prefix_values["10.7554/eLife.30076"] = "Scientific Publishing"
title_prefix_values["10.7554/eLife.30183"] = "Point of View"
title_prefix_values["10.7554/eLife.30194"] = "Organ Development"
title_prefix_values["10.7554/eLife.30249"] = "Tissue Regeneration"
title_prefix_values["10.7554/eLife.30280"] = "Adverse Drug Reactions"
title_prefix_values["10.7554/eLife.30599"] = "Living Science"
title_prefix_values["10.7554/eLife.30865"] = "Stone Tool Use"
title_prefix_values["10.7554/eLife.31106"] = "Sensory Neurons"
title_prefix_values["10.7554/eLife.31328"] = "Drought Stress"
title_prefix_values["10.7554/eLife.31697"] = "Scientific Publishing"
title_prefix_values["10.7554/eLife.31808"] = "Tissue Engineering"
title_prefix_values["10.7554/eLife.31816"] = "Sound Processing"
title_prefix_values["10.7554/eLife.32011"] = "Peer Review"
title_prefix_values["10.7554/eLife.32012"] = "Peer Review"
title_prefix_values["10.7554/eLife.32014"] = "Peer Review"
title_prefix_values["10.7554/eLife.32015"] = "Peer Review"
title_prefix_values["10.7554/eLife.32016"] = "Peer Review"
title_prefix_values["10.7554/eLife.32715"] = "Point of View"
# Edge case fix title prefix values
if doi in title_prefix_values:
# Sanity check: only replace when the lowercase comparison is equal,
# so a value that has since been changed to something else is left alone
if json_content.lower() == title_prefix_values[doi].lower():
json_content = title_prefix_values[doi]
return json_content | this does the work of rewriting elife title prefix json values | entailment |
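Because of the lowercase sanity check, only case-insensitive matches are rewritten to the canonical casing; anything else passes through untouched:

rewrite_elife_title_prefix_json("point of view", "10.7554/eLife.00452")
# -> "Point of View" (case-insensitive match, canonical casing applied)
rewrite_elife_title_prefix_json("Some other prefix", "10.7554/eLife.00452")
# -> "Some other prefix" (no match, value deliberately left untouched)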
def metadata_lint(old, new, locations):
"""Run the linter over the new metadata, comparing to the old."""
# ensure we don't modify the metadata
old = old.copy()
new = new.copy()
# remove version info
old.pop('$version', None)
new.pop('$version', None)
for old_group_name in old:
if old_group_name not in new:
yield LintError('', 'api group removed', api_name=old_group_name)
for group_name, new_group in new.items():
old_group = old.get(group_name, {'apis': {}})
for name, api in new_group['apis'].items():
old_api = old_group['apis'].get(name, {})
api_locations = locations[name]
for message in lint_api(name, old_api, api, api_locations):
message.api_name = name
if message.location is None:
message.location = api_locations['api']
yield message | Run the linter over the new metadata, comparing to the old. | entailment |
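A minimal sketch of driving the linter; the import path is an assumption, and the metadata shapes follow only what the code above reads. api_name and location are safe to print because the loop above demonstrably sets both on every yielded message.

from acceptable.lint import metadata_lint  # assumed import path

old = {"$version": 3, "group": {"apis": {"frob": {
    "introduced_at": 1, "url": "/frob", "methods": ["GET"]}}}}
new = {"$version": 4, "group": {"apis": {"frob": {
    "introduced_at": 2, "url": "/frob", "methods": ["GET"]}}}}
locations = {"frob": {"api": "api.py:10", "request_schema": None,
                      "response_schema": None, "changelog": {},
                      "view": None}}

for message in metadata_lint(old, new, locations):
    # yields a missing-doc warning and an introduced_at-changed error
    print(message.api_name, message.location)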
def lint_api(api_name, old, new, locations):
"""Lint an acceptable api metadata."""
is_new_api = not old
api_location = locations['api']
changelog = new.get('changelog', {})
changelog_location = api_location
if locations['changelog']:
changelog_location = list(locations['changelog'].values())[0]
# missing documentation is an error for new apis, a warning otherwise
if not new.get('doc'):
msg_type = LintError if is_new_api else LintWarning
yield msg_type(
'doc',
'missing docstring documentation',
api_name=api_name,
location=locations.get('view', api_location)
)
introduced_at = new.get('introduced_at')
if introduced_at is None:
yield LintError(
'introduced_at',
'missing introduced_at field',
location=api_location,
)
if not is_new_api:
# cannot change introduced_at if we already have it
old_introduced_at = old.get('introduced_at')
if old_introduced_at is not None:
if old_introduced_at != introduced_at:
yield LintError(
'introduced_at',
'introduced_at changed from {} to {}',
old_introduced_at,
introduced_at,
api_name=api_name,
location=api_location,
)
# cannot change url
if new['url'] != old.get('url', new['url']):
yield LintError(
'url',
'url changed from {} to {}',
old['url'],
new['url'],
api_name=api_name,
location=api_location,
)
# cannot remove HTTP methods
for removed in set(old.get('methods', [])) - set(new['methods']):
yield LintError(
'methods',
'HTTP method {} removed',
removed,
api_name=api_name,
location=api_location,
)
for schema in ['request_schema', 'response_schema']:
new_schema = new.get(schema)
if new_schema is None:
continue
schema_location = locations[schema]
old_schema = old.get(schema, {})
for message in walk_schema(
schema, old_schema, new_schema, root=True, new_api=is_new_api):
if isinstance(message, CheckChangelog):
if message.revision not in changelog:
yield LintFixit(
message.name,
'No changelog entry for revision {}',
message.revision,
location=changelog_location,
)
else:
# set the location here, which saves passing it down the recursive call
message.location = schema_location
yield message | Lint an acceptable api metadata. | entailment |
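lint_api can also be exercised directly; with empty old metadata the missing-docstring message escalates from a warning to an error. A sketch under the same assumed import path:

from acceptable.lint import lint_api, LintError  # assumed import path

messages = list(lint_api(
    "frob",
    {},  # empty old metadata marks this as a brand-new API
    {"introduced_at": 1, "url": "/frob", "methods": ["GET"]},
    {"api": "api.py:10", "changelog": {}},
))
assert len(messages) == 1 and isinstance(messages[0], LintError)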
def bind(self, flask_app, service, group=None):
"""Bind the service API urls to a flask app."""
if group not in self.services[service]:
raise RuntimeError(
'API group {} does not exist in service {}'.format(
group, service)
)
for name, api in self.services[service][group].items():
# only bind APIs that have views associated with them
if api.view_fn is None:
continue
if name not in flask_app.view_functions:
flask_app.add_url_rule(
api.url, name, view_func=api.view_fn, **api.options) | Bind the service API urls to a flask app. | entailment |
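A sketch of wiring a service onto a flask app; `metadata` stands in for whatever registry instance owns this bind method (the instance name is an assumption):

from flask import Flask

app = Flask(__name__)
metadata.bind(app, "my-service")  # group=None binds the service's default group
# Every registered API with a view_fn now answers at its configured url.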
def serialize(self):
"""Serialize into JSONable dict, and associated locations data."""
api_metadata = OrderedDict()
# $ char makes this come first in sort ordering
api_metadata['$version'] = self.current_version
locations = {}
for svc_name, group in self.groups():
group_apis = OrderedDict()
group_metadata = OrderedDict()
group_metadata['apis'] = group_apis
group_metadata['title'] = group.title
api_metadata[group.name] = group_metadata
if group.docs is not None:
group_metadata['docs'] = group.docs
for name, api in group.items():
group_apis[name] = OrderedDict()
group_apis[name]['service'] = svc_name
group_apis[name]['api_group'] = group.name
group_apis[name]['api_name'] = api.name
group_apis[name]['introduced_at'] = api.introduced_at
group_apis[name]['methods'] = api.methods
group_apis[name]['request_schema'] = api.request_schema
group_apis[name]['response_schema'] = api.response_schema
group_apis[name]['doc'] = api.docs
group_apis[name]['changelog'] = api._changelog
if api.title:
group_apis[name]['title'] = api.title
else:
title = name.replace('-', ' ').replace('_', ' ').title()
group_apis[name]['title'] = title
group_apis[name]['url'] = api.resolve_url()
if api.undocumented:
group_apis[name]['undocumented'] = True
if api.deprecated_at is not None:
group_apis[name]['deprecated_at'] = api.deprecated_at
locations[name] = {
'api': api.location,
'request_schema': api._request_schema_location,
'response_schema': api._response_schema_location,
'changelog': api._changelog_locations,
'view': api.view_fn_location,
}
return api_metadata, locations | Serialize into JSONable dict, and associated locations data. | entailment |
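Since serialize returns plain (ordered) dicts, the metadata half dumps straight to JSON; a sketch, again assuming a metadata registry instance:

import json

api_metadata, locations = metadata.serialize()
assert "$version" in api_metadata  # the $ prefix sorts it first
print(json.dumps(api_metadata, indent=2))  # OrderedDicts dump as plain objects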
def api(self,
url,
name,
introduced_at=None,
undocumented=False,
deprecated_at=None,
title=None,
**options):
"""Add an API to the service.
:param url: This is the url that the API should be registered at.
:param name: This is the name of the api; it is the name the api
will be registered under with flask apps.
Other keyword arguments may be used, and they will be passed to the
flask application when initialised. Of particular interest is the
'methods' keyword argument, which can be used to specify the HTTP
method the URL will be added for.
"""
location = get_callsite_location()
api = AcceptableAPI(
self,
name,
url,
introduced_at,
options,
undocumented=undocumented,
deprecated_at=deprecated_at,
title=title,
location=location,
)
self.metadata.register_api(self.name, self.group, api)
return api | Add an API to the service.
:param url: This is the url that the API should be registered at.
:param name: This is the name of the api; it is the name the api
will be registered under with flask apps.
Other keyword arguments may be used, and they will be passed to the
flask application when initialised. Of particular interest is the
'methods' keyword argument, which can be used to specify the HTTP
method the URL will be added for. | entailment |
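A registration sketch using only the signature shown above; `service` stands in for the instance exposing this method:

foo_api = service.api(
    "/foo", "foo_api",
    introduced_at=1,
    methods=["GET", "POST"],  # passed through to flask_app.add_url_rule()
    title="Foo API",
)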
def django_api(
self,
name,
introduced_at,
undocumented=False,
deprecated_at=None,
title=None,
**options):
"""Add a django API handler to the service.
:param name: This is the name of the django url to use.
The 'methods' parameter can be supplied as normal; you can also use
the @api.handler decorator to link this API to its handler.
"""
from acceptable.djangoutil import DjangoAPI
location = get_callsite_location()
api = DjangoAPI(
self,
name,
introduced_at,
options,
location=location,
undocumented=undocumented,
deprecated_at=deprecated_at,
title=title,
)
self.metadata.register_api(self.name, self.group, api)
return api | Add a django API handler to the service.
:param name: This is the name of the django url to use.
The 'methods' parameter can be supplied as normal; you can also use
the @api.handler decorator to link this API to its handler. | entailment |
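A sketch of the django variant; the bare @api.handler form is taken from the docstring above, though its exact signature is an assumption, and 'polls' must match an existing django url name:

poll_api = service.django_api("polls", introduced_at=3, methods=["POST"])

@poll_api.handler
def polls_handler(request):
    ...  # a normal django view body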
def bind(self, flask_app):
"""Bind the service API urls to a flask app."""
self.metadata.bind(flask_app, self.name, self.group) | Bind the service API urls to a flask app. | entailment |