body_hash (string, 64 chars) | body (string, 23–109k chars) | docstring (string, 1–57k chars) | path (string, 4–198 chars) | name (string, 1–115 chars) | repository_name (string, 7–111 chars) | repository_stars (float64, 0–191k) | lang (string, 1 class: python) | body_without_docstring (string, 14–108k chars) | unified (string, 45–133k chars)
---|---|---|---|---|---|---|---|---|---|
9fe1680508af573fd1ade4b87f0fe908f5f8e011810b0c294a72cd0001b455b1
|
def r(line):
'\n Selects rho from a given line.\n '
(r, _) = line
return r
|
Selects rho from a given line.
|
stitch/lineutils.py
|
r
|
KnorpelSenf/bladestitching
| 1 |
python
|
def r(line):
'\n \n '
(r, _) = line
return r
|
def r(line):
'\n \n '
(r, _) = line
return r<|docstring|>Selects rho from a given line.<|endoftext|>
|
121a4b4b80593f826f9bc9a2321919f85b098b3acdbd6eea7d883bebf25e1c72
|
def t(line):
'\n Selects theta from a given line.\n '
(_, t) = line
return t
|
Selects theta from a given line.
|
stitch/lineutils.py
|
t
|
KnorpelSenf/bladestitching
| 1 |
python
|
def t(line):
'\n \n '
(_, t) = line
return t
|
def t(line):
'\n \n '
(_, t) = line
return t<|docstring|>Selects theta from a given line.<|endoftext|>
|
5db8d2e01c12714c4b14f7cd13ec1e22f189ebb3a6d7c28e3a1f180d77150427
|
def x(line):
'\n Selects the x coordinate of the foot point from a given line.\n '
(r, t) = line
return (r * np.cos(t))
|
Selects the x coordinate of the foot point from a given line.
|
stitch/lineutils.py
|
x
|
KnorpelSenf/bladestitching
| 1 |
python
|
def x(line):
'\n \n '
(r, t) = line
return (r * np.cos(t))
|
def x(line):
'\n \n '
(r, t) = line
return (r * np.cos(t))<|docstring|>Selects the x coordinate of the foot point from a given line.<|endoftext|>
|
89e4ddc2c2d8e0b10b141197c6d5160e90574ec546b13c3c8a7b5cd08fb11986
|
def y(line):
'\n Selects the y coordinate of the foot point from a given line.\n '
(r, t) = line
return (r * np.sin(t))
|
Selects the y coordinate of the foot point from a given line.
|
stitch/lineutils.py
|
y
|
KnorpelSenf/bladestitching
| 1 |
python
|
def y(line):
'\n \n '
(r, t) = line
return (r * np.sin(t))
|
def y(line):
'\n \n '
(r, t) = line
return (r * np.sin(t))<|docstring|>Selects the y coordinate of the foot point from a given line.<|endoftext|>
|
08168078e74808ed230c17fa01107d448be66e6d3d78f3b7256942849ca839af
|
def eq(line):
'\n Turns a line into a nice string representation.\n '
(rho, theta) = line
r = '{:6.2f}'.format(float(rho))
t = '{:4.4f}'.format(float(theta))
    return (((((r + ' = x * cos(') + t) + ') + y * sin(') + t) + ')')
|
Turns a line into a nice string representation.
|
stitch/lineutils.py
|
eq
|
KnorpelSenf/bladestitching
| 1 |
python
|
def eq(line):
'\n \n '
(rho, theta) = line
r = '{:6.2f}'.format(float(rho))
t = '{:4.4f}'.format(float(theta))
    return (((((r + ' = x * cos(') + t) + ') + y * sin(') + t) + ')')
|
def eq(line):
'\n \n '
(rho, theta) = line
r = '{:6.2f}'.format(float(rho))
t = '{:4.4f}'.format(float(theta))
    return (((((r + ' = x * cos(') + t) + ') + y * sin(') + t) + ')')<|docstring|>Turns a line into a nice string representation.<|endoftext|>
|
72651bf86bd16dfbfc409ce56dee72a36ab4bfb77d28f11e419ddb650d4b56d1
|
def xy(line):
'\n Turns a line into a nice string representation of its foot point.\n '
    return 'FP({:4.0f}, {:4.0f})'.format(x(line), y(line))
|
Turns a line into a nice string representation of its foot point.
|
stitch/lineutils.py
|
xy
|
KnorpelSenf/bladestitching
| 1 |
python
|
def xy(line):
'\n \n '
    return 'FP({:4.0f}, {:4.0f})'.format(x(line), y(line))
|
def xy(line):
'\n \n '
    return 'FP({:4.0f}, {:4.0f})'.format(x(line), y(line))<|docstring|>Turns a line into a nice string representation of its foot point.<|endoftext|>
|
f5c3bcccbc029c8823faa3549fb6024880bbd7a5f958856de4158a40f08da005
|
def are_lines_similar(r, s, max_rho=30, max_theta=0.1):
'\n Returns true if two given normalized lines\n do not deviate too far from each other\n as specified by the parameters,\n and false otherwise.\n '
(rho_r, theta_r) = r
(rho_s, theta_s) = s
diff_t = abs((theta_r - theta_s))
similar = ((abs((rho_r - rho_s)) < max_rho) and (diff_t < max_theta))
similar_inverted = ((abs((rho_r + rho_s)) < max_rho) and (abs((diff_t - np.pi)) < max_theta))
return (similar or similar_inverted)
|
Returns true if two given normalized lines
do not deviate too far from each other
as specified by the parameters,
and false otherwise.
|
stitch/lineutils.py
|
are_lines_similar
|
KnorpelSenf/bladestitching
| 1 |
python
|
def are_lines_similar(r, s, max_rho=30, max_theta=0.1):
'\n Returns true if two given normalized lines\n do not deviate too far from each other\n as specified by the parameters,\n and false otherwise.\n '
(rho_r, theta_r) = r
(rho_s, theta_s) = s
diff_t = abs((theta_r - theta_s))
similar = ((abs((rho_r - rho_s)) < max_rho) and (diff_t < max_theta))
similar_inverted = ((abs((rho_r + rho_s)) < max_rho) and (abs((diff_t - np.pi)) < max_theta))
return (similar or similar_inverted)
|
def are_lines_similar(r, s, max_rho=30, max_theta=0.1):
'\n Returns true if two given normalized lines\n do not deviate too far from each other\n as specified by the parameters,\n and false otherwise.\n '
(rho_r, theta_r) = r
(rho_s, theta_s) = s
diff_t = abs((theta_r - theta_s))
similar = ((abs((rho_r - rho_s)) < max_rho) and (diff_t < max_theta))
similar_inverted = ((abs((rho_r + rho_s)) < max_rho) and (abs((diff_t - np.pi)) < max_theta))
return (similar or similar_inverted)<|docstring|>Returns true if two given normalized lines
do not deviate too far from each other
as specified by the parameters,
and false otherwise.<|endoftext|>
|
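To make the similarity test concrete, here is a minimal runnable sketch of `are_lines_similar` with the record's logic copied inline (lightly reformatted); the sample lines are illustrative values, not data from the repository.

```python
import numpy as np

def are_lines_similar(r, s, max_rho=30, max_theta=0.1):
    (rho_r, theta_r), (rho_s, theta_s) = r, s
    diff_t = abs(theta_r - theta_s)
    similar = abs(rho_r - rho_s) < max_rho and diff_t < max_theta
    # (rho, theta) and (-rho, theta + pi) describe the same line,
    # so the inverted representation is accepted as well.
    similar_inverted = abs(rho_r + rho_s) < max_rho and abs(diff_t - np.pi) < max_theta
    return similar or similar_inverted

assert are_lines_similar((100.0, 0.5), (110.0, 0.55))         # close in rho and theta
assert not are_lines_similar((100.0, 0.5), (200.0, 0.5))      # rho differs by 100 > 30
assert are_lines_similar((100.0, 0.5), (-95.0, 0.5 + np.pi))  # same line, inverted form
```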
9be59430ca95aab7a7e71a3a0f81430d5b6a7fce39abc290e50edce056f54c42
|
def is_line_left(line, x, y):
"\n Returns true if the given line is right of the given point\n in the sense that the line's foot point\n would be in the first or fourth quadrant\n if the given point would be the origin.\n "
return (not is_line_right(line, x, y))
|
Returns true if the given line is left of the given point
in the sense that the line's foot point
would be in the second or third quadrant
if the given point were the origin.
|
stitch/lineutils.py
|
is_line_left
|
KnorpelSenf/bladestitching
| 1 |
python
|
def is_line_left(line, x, y):
"\n Returns true if the given line is right of the given point\n in the sense that the line's foot point\n would be in the first or fourth quadrant\n if the given point would be the origin.\n "
return (not is_line_right(line, x, y))
|
def is_line_left(line, x, y):
"\n Returns true if the given line is right of the given point\n in the sense that the line's foot point\n would be in the first or fourth quadrant\n if the given point would be the origin.\n "
return (not is_line_right(line, x, y))<|docstring|>Returns true if the given line is right of the given point
in the sense that the line's foot point
would be in the first or fourth quadrant
if the given point would be the origin.<|endoftext|>
|
c69d407c3a9f1dc11f90a35090699c419673a721e85aef74fa573297a432f35f
|
def is_line_right(line, x, y):
"\n Returns true if the given line is left of the given point\n in the sense that the line's foot point\n would be in the second or third quadrant\n if the given point would be the origin.\n "
halfpi = (np.pi / 2)
return ((- halfpi) <= t(move_origin(line, x, y, norm=True)) < halfpi)
|
Returns true if the given line is right of the given point
in the sense that the line's foot point
would be in the first or fourth quadrant
if the given point were the origin.
|
stitch/lineutils.py
|
is_line_right
|
KnorpelSenf/bladestitching
| 1 |
python
|
def is_line_right(line, x, y):
"\n Returns true if the given line is left of the given point\n in the sense that the line's foot point\n would be in the second or third quadrant\n if the given point would be the origin.\n "
halfpi = (np.pi / 2)
return ((- halfpi) <= t(move_origin(line, x, y, norm=True)) < halfpi)
|
def is_line_right(line, x, y):
"\n Returns true if the given line is left of the given point\n in the sense that the line's foot point\n would be in the second or third quadrant\n if the given point would be the origin.\n "
halfpi = (np.pi / 2)
    return ((- halfpi) <= t(move_origin(line, x, y, norm=True)) < halfpi)<|docstring|>Returns true if the given line is right of the given point
in the sense that the line's foot point
would be in the first or fourth quadrant
if the given point were the origin.<|endoftext|>
|
a260fad2ddc360e1f51c641168abb55d3ce1e1fcab996ea8738e7e8109748e3a
|
def translate(line, x=0, y=0, norm=True):
'\n Translates a line by the given distance in x and y direction.\n '
return move_origin(line, (- x), (- y), norm=norm)
|
Translates a line by the given distance in x and y direction.
|
stitch/lineutils.py
|
translate
|
KnorpelSenf/bladestitching
| 1 |
python
|
def translate(line, x=0, y=0, norm=True):
'\n \n '
return move_origin(line, (- x), (- y), norm=norm)
|
def translate(line, x=0, y=0, norm=True):
'\n \n '
return move_origin(line, (- x), (- y), norm=norm)<|docstring|>Translates a line by the given distance in x and y direction.<|endoftext|>
|
799df462e46cdae5f97137735796978dca53f58814ec78abb694c07797ec8da8
|
def move_origin(line, x=0, y=0, norm=True):
"\n Transforms a line's representation by moving the origin as specified.\n "
(rho, theta) = line
dist = np.sqrt(((x * x) + (y * y)))
alpha = np.arctan2(y, x)
omega = (theta - alpha)
rho_prime = (rho - (dist * np.cos(omega)))
line = (rho_prime, theta)
return (normalize(line) if norm else line)
|
Transforms a line's representation by moving the origin as specified.
|
stitch/lineutils.py
|
move_origin
|
KnorpelSenf/bladestitching
| 1 |
python
|
def move_origin(line, x=0, y=0, norm=True):
"\n \n "
(rho, theta) = line
dist = np.sqrt(((x * x) + (y * y)))
alpha = np.arctan2(y, x)
omega = (theta - alpha)
rho_prime = (rho - (dist * np.cos(omega)))
line = (rho_prime, theta)
return (normalize(line) if norm else line)
|
def move_origin(line, x=0, y=0, norm=True):
"\n \n "
(rho, theta) = line
dist = np.sqrt(((x * x) + (y * y)))
alpha = np.arctan2(y, x)
omega = (theta - alpha)
rho_prime = (rho - (dist * np.cos(omega)))
line = (rho_prime, theta)
return (normalize(line) if norm else line)<|docstring|>Transforms a line's representation by moving the origin as specified.<|endoftext|>
|
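A worked example for `move_origin`, with the normalization step omitted (the `norm=False` behaviour) so the snippet stays self-contained; the chosen line and origin are illustrative.

```python
import numpy as np

def move_origin_unnormalized(line, x=0, y=0):
    # The record's core computation, without the final normalize() call.
    rho, theta = line
    dist = np.sqrt(x * x + y * y)   # distance from the old origin to the new one
    alpha = np.arctan2(y, x)        # direction of the new origin
    return (rho - dist * np.cos(theta - alpha), theta)

# The vertical line x = 5 is (rho=5, theta=0); seen from a new origin at
# (5, 0) it passes through that origin, so rho drops to 0.
print(move_origin_unnormalized((5.0, 0.0), x=5, y=0))  # (0.0, 0.0)
```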
a143a3675d8f35f536660ae38b94dea5b1c08bc05cf135e8458dddbbadf70198
|
def rotate(line, theta, x=0, y=0, norm=True):
'\n Rotates a line around the origin\n or optionally around a given coordinate\n by the specified angle.\n '
custom_anchor = ((x != 0) or (y != 0))
if custom_anchor:
line = move_origin(line, x, y, norm=False)
(r, t) = line
t += theta
line = (r, t)
if custom_anchor:
line = move_origin(line, (- x), (- y), norm=False)
return (normalize(line) if norm else line)
|
Rotates a line around the origin
or optionally around a given coordinate
by the specified angle.
|
stitch/lineutils.py
|
rotate
|
KnorpelSenf/bladestitching
| 1 |
python
|
def rotate(line, theta, x=0, y=0, norm=True):
'\n Rotates a line around the origin\n or optionally around a given coordinate\n by the specified angle.\n '
custom_anchor = ((x != 0) or (y != 0))
if custom_anchor:
line = move_origin(line, x, y, norm=False)
(r, t) = line
t += theta
line = (r, t)
if custom_anchor:
line = move_origin(line, (- x), (- y), norm=False)
return (normalize(line) if norm else line)
|
def rotate(line, theta, x=0, y=0, norm=True):
'\n Rotates a line around the origin\n or optionally around a given coordinate\n by the specified angle.\n '
custom_anchor = ((x != 0) or (y != 0))
if custom_anchor:
line = move_origin(line, x, y, norm=False)
(r, t) = line
t += theta
line = (r, t)
if custom_anchor:
line = move_origin(line, (- x), (- y), norm=False)
return (normalize(line) if norm else line)<|docstring|>Rotates a line around the origin
or optionally around a given coordinate
by the specified angle.<|endoftext|>
|
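A reduced sketch of `rotate` for the origin-anchored case only (the record also supports an arbitrary anchor via `move_origin`); `normalize` is inlined from the record below so the snippet runs.

```python
import numpy as np

def normalize(line):
    r, t = line
    if r < 0:
        r, t = -r, np.pi + t
    while t < -np.pi:
        t += 2 * np.pi
    while t >= np.pi:
        t -= 2 * np.pi
    return (r, t)

def rotate_about_origin(line, theta, norm=True):
    r, t = line
    line = (r, t + theta)  # rotating about the origin only shifts theta
    return normalize(line) if norm else line

# Rotating the vertical line x = 5, i.e. (rho=5, theta=0), by pi/2
# yields the horizontal line y = 5, i.e. (rho=5, theta=pi/2).
print(rotate_about_origin((5.0, 0.0), np.pi / 2))  # (5.0, 1.5707...)
```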
3f8dfae70a949a91011c571810a576ea0a0e8eea6bd40187155e59fb8ec36c43
|
def normalize(line):
'\n Normalizes a line such that rho is positive and -pi <= theta < pi holds true.\n '
(r, t) = line
if (r < 0):
(r, t) = ((- r), (np.pi + t))
while (t < (- np.pi)):
t += (2 * np.pi)
while (t >= np.pi):
t -= (2 * np.pi)
return (r, t)
|
Normalizes a line such that rho is positive and -pi <= theta < pi holds true.
|
stitch/lineutils.py
|
normalize
|
KnorpelSenf/bladestitching
| 1 |
python
|
def normalize(line):
'\n \n '
(r, t) = line
if (r < 0):
(r, t) = ((- r), (np.pi + t))
while (t < (- np.pi)):
t += (2 * np.pi)
while (t >= np.pi):
t -= (2 * np.pi)
return (r, t)
|
def normalize(line):
'\n \n '
(r, t) = line
if (r < 0):
(r, t) = ((- r), (np.pi + t))
while (t < (- np.pi)):
t += (2 * np.pi)
while (t >= np.pi):
t -= (2 * np.pi)
return (r, t)<|docstring|>Normalizes a line such that rho is positive and -pi <= theta < pi holds true.<|endoftext|>
|
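Two quick checks of `normalize`, copied inline so they run; each input describes the same line as its normalized output.

```python
import numpy as np

def normalize(line):
    r, t = line
    if r < 0:
        r, t = -r, np.pi + t  # flip the sign of rho by turning theta by pi
    while t < -np.pi:         # then wrap theta into [-pi, pi)
        t += 2 * np.pi
    while t >= np.pi:
        t -= 2 * np.pi
    return (r, t)

print(normalize((-3.0, 0.0)))       # (3.0, -3.1415...): still the line x = -3
print(normalize((2.0, 5 * np.pi)))  # (2.0, -3.1415...): theta wrapped into range
```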
1b3efb332905fa7ce97f364be219ff6d0d9f4097fca6b574c3c46795c0b10c37
|
def get_bisecting_line(l, r):
'\n Takes two lines and returns their bisecting line.\n This implementation works well for parallel lines\n as it does not rely on the intersection point of the input lines.\n As a result, it also works well for almost parallel lines. It introduces\n (almost) no errors due to imprecision of floating point operations.\n '
(rho_l, theta_l) = l
(rho_r, theta_r) = r
theta = ((theta_l + theta_r) / 2)
(x_l, y_l) = ((rho_l * np.cos(theta_l)), (rho_l * np.sin(theta_l)))
(x_r, y_r) = ((rho_r * np.cos(theta_r)), (rho_r * np.sin(theta_r)))
alpha_l = ((np.pi / 2) + theta_l)
alpha_r = ((np.pi / 2) + theta_r)
intersect_l = (np.tan((theta - theta_l)) * rho_l)
intersect_r = (np.tan((theta - theta_r)) * rho_r)
xn_l = (x_l + (intersect_l * np.cos(alpha_l)))
yn_l = (y_l + (intersect_l * np.sin(alpha_l)))
xn_r = (x_r + (intersect_r * np.cos(alpha_r)))
yn_r = (y_r + (intersect_r * np.sin(alpha_r)))
(x, y) = (((xn_l + xn_r) / 2), ((yn_l + yn_r) / 2))
rho = np.sqrt(((x * x) + (y * y)))
return (rho, theta)
|
Takes two lines and returns their bisecting line.
This implementation works well for parallel lines
as it does not rely on the intersection point of the input lines.
As a result, it also works well for almost parallel lines. It introduces
(almost) no errors due to imprecision of floating point operations.
|
stitch/lineutils.py
|
get_bisecting_line
|
KnorpelSenf/bladestitching
| 1 |
python
|
def get_bisecting_line(l, r):
'\n Takes two lines and returns their bisecting line.\n This implementation works well for parallel lines\n as it does not rely on the intersection point of the input lines.\n As a result, it also works well for almost parallel lines. It introduces\n (almost) no errors due to imprecision of floating point operations.\n '
(rho_l, theta_l) = l
(rho_r, theta_r) = r
theta = ((theta_l + theta_r) / 2)
(x_l, y_l) = ((rho_l * np.cos(theta_l)), (rho_l * np.sin(theta_l)))
(x_r, y_r) = ((rho_r * np.cos(theta_r)), (rho_r * np.sin(theta_r)))
alpha_l = ((np.pi / 2) + theta_l)
alpha_r = ((np.pi / 2) + theta_r)
intersect_l = (np.tan((theta - theta_l)) * rho_l)
intersect_r = (np.tan((theta - theta_r)) * rho_r)
xn_l = (x_l + (intersect_l * np.cos(alpha_l)))
yn_l = (y_l + (intersect_l * np.sin(alpha_l)))
xn_r = (x_r + (intersect_r * np.cos(alpha_r)))
yn_r = (y_r + (intersect_r * np.sin(alpha_r)))
(x, y) = (((xn_l + xn_r) / 2), ((yn_l + yn_r) / 2))
rho = np.sqrt(((x * x) + (y * y)))
return (rho, theta)
|
def get_bisecting_line(l, r):
'\n Takes two lines and returns their bisecting line.\n This implementation works well for parallel lines\n as it does not rely on the intersection point of the input lines.\n As a result, it also works well for almost parallel lines. It introduces\n (almost) no errors due to imprecision of floating point operations.\n '
(rho_l, theta_l) = l
(rho_r, theta_r) = r
theta = ((theta_l + theta_r) / 2)
(x_l, y_l) = ((rho_l * np.cos(theta_l)), (rho_l * np.sin(theta_l)))
(x_r, y_r) = ((rho_r * np.cos(theta_r)), (rho_r * np.sin(theta_r)))
alpha_l = ((np.pi / 2) + theta_l)
alpha_r = ((np.pi / 2) + theta_r)
intersect_l = (np.tan((theta - theta_l)) * rho_l)
intersect_r = (np.tan((theta - theta_r)) * rho_r)
xn_l = (x_l + (intersect_l * np.cos(alpha_l)))
yn_l = (y_l + (intersect_l * np.sin(alpha_l)))
xn_r = (x_r + (intersect_r * np.cos(alpha_r)))
yn_r = (y_r + (intersect_r * np.sin(alpha_r)))
(x, y) = (((xn_l + xn_r) / 2), ((yn_l + yn_r) / 2))
rho = np.sqrt(((x * x) + (y * y)))
return (rho, theta)<|docstring|>Takes two lines and returns their bisecting line.
This implementation works well for parallel lines
as it does not rely on the intersection point of the input lines.
As a result, it also works well for almost parallel lines. It introduces
(almost) no errors due to imprecision of floating point operations.<|endoftext|>
|
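A sanity check of `get_bisecting_line` on two parallel lines, with the record's computation copied inline (lightly reformatted); for x = 4 and x = 6 the bisector should be x = 5.

```python
import numpy as np

def get_bisecting_line(l, r):
    (rho_l, theta_l), (rho_r, theta_r) = l, r
    theta = (theta_l + theta_r) / 2
    # Foot points of both input lines.
    x_l, y_l = rho_l * np.cos(theta_l), rho_l * np.sin(theta_l)
    x_r, y_r = rho_r * np.cos(theta_r), rho_r * np.sin(theta_r)
    # Walk along each line to where it meets the bisecting direction.
    alpha_l, alpha_r = np.pi / 2 + theta_l, np.pi / 2 + theta_r
    intersect_l = np.tan(theta - theta_l) * rho_l
    intersect_r = np.tan(theta - theta_r) * rho_r
    xn_l = x_l + intersect_l * np.cos(alpha_l)
    yn_l = y_l + intersect_l * np.sin(alpha_l)
    xn_r = x_r + intersect_r * np.cos(alpha_r)
    yn_r = y_r + intersect_r * np.sin(alpha_r)
    x, y = (xn_l + xn_r) / 2, (yn_l + yn_r) / 2
    return (np.sqrt(x * x + y * y), theta)

print(get_bisecting_line((4.0, 0.0), (6.0, 0.0)))  # ~(5.0, 0.0), the line x = 5
```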
dddfbf8b18312bc024d298708590345acea9335cdfe998d55228c42eeccf0d5a
|
def vertical_distance(line0, line1):
"\n Computes the distance `line1` needs to moved vertically\n such that its foot point lies on `line0`. If `line0` is\n a vertical line (its theta value is either `0` or `pi`),\n this distance is either 0 (if `line1`'s foot point is on `line0`)\n or it cannot be defined (if `line1`'s foot point is not on `line0`).\n In both cases, `0` is returned.\n "
beta = (- t(line0))
sinbeta = np.sin(beta)
if (not sinbeta):
return 0
(x0, y0) = (x(line0), y(line0))
(x1, y1) = (x(line1), y(line1))
dist_x = (x0 - x1)
dist_y = (y0 - y1)
if (not dist_x):
return dist_y
b = np.sqrt(((dist_x * dist_x) + (dist_y * dist_y)))
gamma = ((np.pi / 2) + np.arctan2(dist_y, dist_x))
alpha = ((np.pi - beta) - gamma)
return ((np.sin(alpha) * b) / sinbeta)
|
Computes the distance `line1` needs to be moved vertically
such that its foot point lies on `line0`. If `line0` is
a vertical line (its theta value is either `0` or `pi`),
this distance is either 0 (if `line1`'s foot point is on `line0`)
or it cannot be defined (if `line1`'s foot point is not on `line0`).
In both cases, `0` is returned.
|
stitch/lineutils.py
|
vertical_distance
|
KnorpelSenf/bladestitching
| 1 |
python
|
def vertical_distance(line0, line1):
"\n Computes the distance `line1` needs to moved vertically\n such that its foot point lies on `line0`. If `line0` is\n a vertical line (its theta value is either `0` or `pi`),\n this distance is either 0 (if `line1`'s foot point is on `line0`)\n or it cannot be defined (if `line1`'s foot point is not on `line0`).\n In both cases, `0` is returned.\n "
beta = (- t(line0))
sinbeta = np.sin(beta)
if (not sinbeta):
return 0
(x0, y0) = (x(line0), y(line0))
(x1, y1) = (x(line1), y(line1))
dist_x = (x0 - x1)
dist_y = (y0 - y1)
if (not dist_x):
return dist_y
b = np.sqrt(((dist_x * dist_x) + (dist_y * dist_y)))
gamma = ((np.pi / 2) + np.arctan2(dist_y, dist_x))
alpha = ((np.pi - beta) - gamma)
return ((np.sin(alpha) * b) / sinbeta)
|
def vertical_distance(line0, line1):
"\n Computes the distance `line1` needs to moved vertically\n such that its foot point lies on `line0`. If `line0` is\n a vertical line (its theta value is either `0` or `pi`),\n this distance is either 0 (if `line1`'s foot point is on `line0`)\n or it cannot be defined (if `line1`'s foot point is not on `line0`).\n In both cases, `0` is returned.\n "
beta = (- t(line0))
sinbeta = np.sin(beta)
if (not sinbeta):
return 0
(x0, y0) = (x(line0), y(line0))
(x1, y1) = (x(line1), y(line1))
dist_x = (x0 - x1)
dist_y = (y0 - y1)
if (not dist_x):
return dist_y
b = np.sqrt(((dist_x * dist_x) + (dist_y * dist_y)))
gamma = ((np.pi / 2) + np.arctan2(dist_y, dist_x))
alpha = ((np.pi - beta) - gamma)
    return ((np.sin(alpha) * b) / sinbeta)<|docstring|>Computes the distance `line1` needs to be moved vertically
such that its foot point lies on `line0`. If `line0` is
a vertical line (its theta value is either `0` or `pi`),
this distance is either 0 (if `line1`'s foot point is on `line0`)
or it cannot be defined (if `line1`'s foot point is not on `line0`).
In both cases, `0` is returned.<|endoftext|>
|
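A worked example for `vertical_distance`, with the record and its small selectors inlined. For a horizontal `line0` at y = 5 and a `line1` whose foot point is (3, 2), the magnitude is the vertical gap |5 - 2| = 3; the sign here follows the code's angle convention.

```python
import numpy as np

def t(line): return line[1]
def x(line): return line[0] * np.cos(line[1])
def y(line): return line[0] * np.sin(line[1])

def vertical_distance(line0, line1):
    beta = -t(line0)
    sinbeta = np.sin(beta)
    if not sinbeta:
        return 0  # line0 is vertical: fall back to 0
    x0, y0 = x(line0), y(line0)
    x1, y1 = x(line1), y(line1)
    dist_x, dist_y = x0 - x1, y0 - y1
    if not dist_x:
        return dist_y
    b = np.sqrt(dist_x * dist_x + dist_y * dist_y)
    gamma = np.pi / 2 + np.arctan2(dist_y, dist_x)
    alpha = np.pi - beta - gamma
    return np.sin(alpha) * b / sinbeta

line0 = (5.0, np.pi / 2)                    # the horizontal line y = 5
line1 = (np.hypot(3, 2), np.arctan2(2, 3))  # foot point at (3, 2)
print(vertical_distance(line0, line1))      # ~ -3.0
```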
8fe565769fc2348cf7bbaac1f57b58353ad6b8187e7217996a92648469bd2e38
|
def root(line):
    '\n    Assume `cos(t(line)) != 0`. Let `f` be the linear function\n    that describes `line`.\n\n    This function then solves `f(x) = 0` for `x` and returns `x`.\n    In other words, it returns the `x` value of the intersection point\n    between the given line and the x-axis.\n\n    Crashes on `cos(t(line)) == 0` (division by zero).\n    '
(rho, theta) = line
return (rho / np.cos(theta))
|
Assume `cos(t(line)) != 0`. Let `f` be the linear function
that describes `line`.
This function then solves `f(x) = 0` for `x` and returns `x`.
In other words, it returns the `x` value of the intersection point
between the given line and the x-axis.
Crashes on `cos(t(line)) == 0` (division by zero).
|
stitch/lineutils.py
|
root
|
KnorpelSenf/bladestitching
| 1 |
python
|
def root(line):
'\n Assume `cos(t(line)) != 0`. Be `f` the linear function\n that describes `line`.\n\n This function then solves `f(x) = 0` for `x` and returns `x`.\n In other words, it returns the `x` value of the intersection point\n between the given line and the x-axis.\n\n Crashes on `cos(t(line)) == 0` (division by zero).\n '
(rho, theta) = line
return (rho / np.cos(theta))
|
def root(line):
    '\n    Assume `cos(t(line)) != 0`. Let `f` be the linear function\n    that describes `line`.\n\n    This function then solves `f(x) = 0` for `x` and returns `x`.\n    In other words, it returns the `x` value of the intersection point\n    between the given line and the x-axis.\n\n    Crashes on `cos(t(line)) == 0` (division by zero).\n    '
(rho, theta) = line
    return (rho / np.cos(theta))<|docstring|>Assume `cos(t(line)) != 0`. Let `f` be the linear function
that describes `line`.
This function then solves `f(x) = 0` for `x` and returns `x`.
In other words, it returns the `x` value of the intersection point
between the given line and the x-axis.
Crashes on `cos(t(line)) == 0` (division by zero).<|endoftext|>
|
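A numeric check of `root`: for (rho=2, theta=pi/3) the line is 2 = x*cos(pi/3) + y*sin(pi/3), so at y = 0 the intercept is x = 2 / cos(pi/3) = 4.

```python
import numpy as np

def root(line):
    rho, theta = line
    return rho / np.cos(theta)  # x-intercept; undefined when cos(theta) == 0

print(root((2.0, np.pi / 3)))   # 4.0
```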
2a0c8b9d4e3430fdbbcc222bf8fb5a9af86731c7c2be686ec818c63cf24d7da3
|
def verify_boot_variable(device, boot_images, output=None):
" Verifies given boot_images are set to the next-reload BOOT vars\n\n Args:\n device (obj): The device to execute on.\n\n boot_images (list): The images that are expected to be configured\n as the boot variable for the next reload.\n\n output (str, optional): The device output from 'show boot'. If not\n provided the API will gather it from the device automatically.\n Defaults to None.\n\n Returns:\n True - if the expected images are configured\n False - if the expected images are NOT configured\n\n Raises:\n N/A\n "
next_boot_variables = device.api.get_boot_variables(boot_var='next', output=output)
if (len(next_boot_variables) != len(boot_images)):
return False
for (index, expected_image) in enumerate(boot_images):
configured_image = next_boot_variables[index]
if ((expected_image.startswith('bootflash:') or expected_image.startswith('flash:')) and (configured_image.startswith('bootflash:') or configured_image.startswith('flash:'))):
log.info("On cat9k platforms, the 'flash:' and 'bootflash:' directories are the same. Ignoring these directories during comparison.")
if (expected_image.split(':')[(- 1)] != configured_image.split(':')[(- 1)]):
log.warning('The boot variables on the device {} do not equal the expected images {}'.format(next_boot_variables, boot_images))
return False
elif (expected_image != next_boot_variables[index]):
log.warning('The boot variables on the device {} do not equal the expected images {}'.format(next_boot_variables, boot_images))
return False
log.info('The boot variables on the device {} equal the expected images {}'.format(next_boot_variables, boot_images))
return True
|
Verifies given boot_images are set to the next-reload BOOT vars
Args:
device (obj): The device to execute on.
boot_images (list): The images that are expected to be configured
as the boot variable for the next reload.
output (str, optional): The device output from 'show boot'. If not
provided the API will gather it from the device automatically.
Defaults to None.
Returns:
True - if the expected images are configured
False - if the expected images are NOT configured
Raises:
N/A
|
pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/cat9k/platform/verify.py
|
verify_boot_variable
|
jbronikowski/genielibs
| 94 |
python
|
def verify_boot_variable(device, boot_images, output=None):
" Verifies given boot_images are set to the next-reload BOOT vars\n\n Args:\n device (obj): The device to execute on.\n\n boot_images (list): The images that are expected to be configured\n as the boot variable for the next reload.\n\n output (str, optional): The device output from 'show boot'. If not\n provided the API will gather it from the device automatically.\n Defaults to None.\n\n Returns:\n True - if the expected images are configured\n False - if the expected images are NOT configured\n\n Raises:\n N/A\n "
next_boot_variables = device.api.get_boot_variables(boot_var='next', output=output)
if (len(next_boot_variables) != len(boot_images)):
return False
for (index, expected_image) in enumerate(boot_images):
configured_image = next_boot_variables[index]
if ((expected_image.startswith('bootflash:') or expected_image.startswith('flash:')) and (configured_image.startswith('bootflash:') or configured_image.startswith('flash:'))):
log.info("On cat9k platforms, the 'flash:' and 'bootflash:' directories are the same. Ignoring these directories during comparison.")
if (expected_image.split(':')[(- 1)] != configured_image.split(':')[(- 1)]):
log.warning('The boot variables on the device {} do not equal the expected images {}'.format(next_boot_variables, boot_images))
return False
elif (expected_image != next_boot_variables[index]):
log.warning('The boot variables on the device {} do not equal the expected images {}'.format(next_boot_variables, boot_images))
return False
log.info('The boot variables on the device {} equal the expected images {}'.format(next_boot_variables, boot_images))
return True
|
def verify_boot_variable(device, boot_images, output=None):
" Verifies given boot_images are set to the next-reload BOOT vars\n\n Args:\n device (obj): The device to execute on.\n\n boot_images (list): The images that are expected to be configured\n as the boot variable for the next reload.\n\n output (str, optional): The device output from 'show boot'. If not\n provided the API will gather it from the device automatically.\n Defaults to None.\n\n Returns:\n True - if the expected images are configured\n False - if the expected images are NOT configured\n\n Raises:\n N/A\n "
next_boot_variables = device.api.get_boot_variables(boot_var='next', output=output)
if (len(next_boot_variables) != len(boot_images)):
return False
for (index, expected_image) in enumerate(boot_images):
configured_image = next_boot_variables[index]
if ((expected_image.startswith('bootflash:') or expected_image.startswith('flash:')) and (configured_image.startswith('bootflash:') or configured_image.startswith('flash:'))):
log.info("On cat9k platforms, the 'flash:' and 'bootflash:' directories are the same. Ignoring these directories during comparison.")
if (expected_image.split(':')[(- 1)] != configured_image.split(':')[(- 1)]):
log.warning('The boot variables on the device {} do not equal the expected images {}'.format(next_boot_variables, boot_images))
return False
elif (expected_image != next_boot_variables[index]):
log.warning('The boot variables on the device {} do not equal the expected images {}'.format(next_boot_variables, boot_images))
return False
log.info('The boot variables on the device {} equal the expected images {}'.format(next_boot_variables, boot_images))
return True<|docstring|>Verifies given boot_images are set to the next-reload BOOT vars
Args:
device (obj): The device to execute on.
boot_images (list): The images that are expected to be configured
as the boot variable for the next reload.
output (str, optional): The device output from 'show boot'. If not
provided the API will gather it from the device automatically.
Defaults to None.
Returns:
True - if the expected images are configured
False - if the expected images are NOT configured
Raises:
N/A<|endoftext|>
|
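The record needs a live pyATS device, so here is a hedged, device-free distillation of just its comparison logic; `images_match` is a hypothetical helper written for this note, not part of the genielibs API.

```python
def images_match(expected, configured):
    # Hypothetical standalone version of the loop in verify_boot_variable:
    # on cat9k, 'flash:' and 'bootflash:' refer to the same directory.
    if len(expected) != len(configured):
        return False
    flash = ('bootflash:', 'flash:')
    for exp, conf in zip(expected, configured):
        if exp.startswith(flash) and conf.startswith(flash):
            if exp.split(':')[-1] != conf.split(':')[-1]:
                return False
        elif exp != conf:
            return False
    return True

assert images_match(['flash:cat9k_iosxe.bin'], ['bootflash:cat9k_iosxe.bin'])
assert not images_match(['flash:a.bin'], ['flash:b.bin'])
```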
361ff2f59c1b249129b4964da0b80652af57d416dc8c23ab22c277dd2bc9085f
|
def get_and_check_entity_basics(hass, default_mock_hap, entity_id, entity_name, device_model):
'Get and test basic device.'
ha_entity = hass.states.get(entity_id)
assert (ha_entity is not None)
assert (ha_entity.attributes['model_type'] == device_model)
assert (ha_entity.name == entity_name)
hmip_device = default_mock_hap.home.template.search_mock_device_by_id(ha_entity.attributes['id'])
assert (hmip_device is not None)
return (ha_entity, hmip_device)
|
Get and test basic device.
|
tests/components/homematicip_cloud/helper.py
|
get_and_check_entity_basics
|
SoldierCorp/home-assistant
| 2 |
python
|
def get_and_check_entity_basics(hass, default_mock_hap, entity_id, entity_name, device_model):
ha_entity = hass.states.get(entity_id)
assert (ha_entity is not None)
assert (ha_entity.attributes['model_type'] == device_model)
assert (ha_entity.name == entity_name)
hmip_device = default_mock_hap.home.template.search_mock_device_by_id(ha_entity.attributes['id'])
assert (hmip_device is not None)
return (ha_entity, hmip_device)
|
def get_and_check_entity_basics(hass, default_mock_hap, entity_id, entity_name, device_model):
ha_entity = hass.states.get(entity_id)
assert (ha_entity is not None)
assert (ha_entity.attributes['model_type'] == device_model)
assert (ha_entity.name == entity_name)
hmip_device = default_mock_hap.home.template.search_mock_device_by_id(ha_entity.attributes['id'])
assert (hmip_device is not None)
return (ha_entity, hmip_device)<|docstring|>Get and test basic device.<|endoftext|>
|
5cec8ee2aaccceb22e77b17c7ddf4ba8b96ae32204953ba324d40fec2df810c8
|
async def async_manipulate_test_data(hass, hmip_device, attribute, new_value, channel=1):
'Set new value on hmip device.'
if (channel == 1):
setattr(hmip_device, attribute, new_value)
functional_channel = hmip_device.functionalChannels[channel]
setattr(functional_channel, attribute, new_value)
hmip_device.fire_update_event()
(await hass.async_block_till_done())
|
Set new value on hmip device.
|
tests/components/homematicip_cloud/helper.py
|
async_manipulate_test_data
|
SoldierCorp/home-assistant
| 2 |
python
|
async def async_manipulate_test_data(hass, hmip_device, attribute, new_value, channel=1):
if (channel == 1):
setattr(hmip_device, attribute, new_value)
functional_channel = hmip_device.functionalChannels[channel]
setattr(functional_channel, attribute, new_value)
hmip_device.fire_update_event()
(await hass.async_block_till_done())
|
async def async_manipulate_test_data(hass, hmip_device, attribute, new_value, channel=1):
if (channel == 1):
setattr(hmip_device, attribute, new_value)
functional_channel = hmip_device.functionalChannels[channel]
setattr(functional_channel, attribute, new_value)
hmip_device.fire_update_event()
(await hass.async_block_till_done())<|docstring|>Set new value on hmip device.<|endoftext|>
|
23e8c91b6d6152c9a2fc65d83159900153334b9df449b9b340db6e52e9502499
|
def _get_mock(instance):
'Create a mock and copy instance attributes over mock.'
mock = Mock(spec=instance, wraps=instance)
mock.__dict__.update(instance.__dict__)
return mock
|
Create a mock and copy instance attributes over mock.
|
tests/components/homematicip_cloud/helper.py
|
_get_mock
|
SoldierCorp/home-assistant
| 2 |
python
|
def _get_mock(instance):
mock = Mock(spec=instance, wraps=instance)
mock.__dict__.update(instance.__dict__)
return mock
|
def _get_mock(instance):
mock = Mock(spec=instance, wraps=instance)
mock.__dict__.update(instance.__dict__)
return mock<|docstring|>Create a mock and copy instance attributes over mock.<|endoftext|>
|
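The `spec`/`wraps` combination in `_get_mock` is worth seeing in isolation: `wraps` forwards method calls to the real instance, while the `__dict__` copy turns plain attributes into real values instead of child mocks. The `Sensor` class is a made-up stand-in.

```python
from unittest.mock import Mock

class Sensor:
    def __init__(self):
        self.id = 'abc'
    def read(self):
        return 42

sensor = Sensor()
mock = Mock(spec=sensor, wraps=sensor)
mock.__dict__.update(sensor.__dict__)  # copy plain attributes onto the mock

print(mock.read())              # 42: forwarded to the wrapped instance
print(mock.id)                  # 'abc': a real value, not a child Mock
mock.read.assert_called_once()  # call tracking still works
```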
94fa77cfdd8524bf2462172651661679b168ad8ea28feb0c243c542f497525ee
|
def __init__(self, connection=None):
'Init template with connection.'
super().__init__(connection=connection)
self.mock_devices = []
self.mock_groups = []
|
Init template with connection.
|
tests/components/homematicip_cloud/helper.py
|
__init__
|
SoldierCorp/home-assistant
| 2 |
python
|
def __init__(self, connection=None):
super().__init__(connection=connection)
self.mock_devices = []
self.mock_groups = []
|
def __init__(self, connection=None):
super().__init__(connection=connection)
self.mock_devices = []
self.mock_groups = []<|docstring|>Init template with connection.<|endoftext|>
|
1534db049d74012ab42f4dfaec97107844b0f873aae99cb7ebe96156985998c7
|
def init_home(self, json_path=HOME_JSON):
'Init template with json.'
json_state = json.loads(load_fixture(HOME_JSON), encoding='UTF-8')
self.update_home(json_state=json_state, clearConfig=True)
self._generate_mocks()
return self
|
Init template with json.
|
tests/components/homematicip_cloud/helper.py
|
init_home
|
SoldierCorp/home-assistant
| 2 |
python
|
def init_home(self, json_path=HOME_JSON):
json_state = json.loads(load_fixture(HOME_JSON), encoding='UTF-8')
self.update_home(json_state=json_state, clearConfig=True)
self._generate_mocks()
return self
|
def init_home(self, json_path=HOME_JSON):
json_state = json.loads(load_fixture(HOME_JSON), encoding='UTF-8')
self.update_home(json_state=json_state, clearConfig=True)
self._generate_mocks()
return self<|docstring|>Init template with json.<|endoftext|>
|
ac805dee40f82fca5c478f0ad8393528cee6bfa1d413814cde3ece4e538d4394
|
def _generate_mocks(self):
'Generate mocks for groups and devices.'
for device in self.devices:
self.mock_devices.append(_get_mock(device))
for group in self.groups:
self.mock_groups.append(_get_mock(group))
|
Generate mocks for groups and devices.
|
tests/components/homematicip_cloud/helper.py
|
_generate_mocks
|
SoldierCorp/home-assistant
| 2 |
python
|
def _generate_mocks(self):
for device in self.devices:
self.mock_devices.append(_get_mock(device))
for group in self.groups:
self.mock_groups.append(_get_mock(group))
|
def _generate_mocks(self):
for device in self.devices:
self.mock_devices.append(_get_mock(device))
for group in self.groups:
self.mock_groups.append(_get_mock(group))<|docstring|>Generate mocks for groups and devices.<|endoftext|>
|
a220ecbb55750d4170bc614fa7a52ece97073b13eb9e1f37671cafd1b02496c8
|
def search_mock_device_by_id(self, device_id):
'Search a device by given id.'
for device in self.mock_devices:
if (device.id == device_id):
return device
return None
|
Search a device by given id.
|
tests/components/homematicip_cloud/helper.py
|
search_mock_device_by_id
|
SoldierCorp/home-assistant
| 2 |
python
|
def search_mock_device_by_id(self, device_id):
for device in self.mock_devices:
if (device.id == device_id):
return device
return None
|
def search_mock_device_by_id(self, device_id):
for device in self.mock_devices:
if (device.id == device_id):
return device
return None<|docstring|>Search a device by given id.<|endoftext|>
|
2a0bdca85e408eb903d239fb02a4a86c31607d677e8578ef7f9c527d48636e59
|
def search_mock_group_by_id(self, group_id):
'Search a group by given id.'
for group in self.mock_groups:
if (group.id == group_id):
return group
return None
|
Search a group by given id.
|
tests/components/homematicip_cloud/helper.py
|
search_mock_group_by_id
|
SoldierCorp/home-assistant
| 2 |
python
|
def search_mock_group_by_id(self, group_id):
for group in self.mock_groups:
if (group.id == group_id):
return group
return None
|
def search_mock_group_by_id(self, group_id):
for group in self.mock_groups:
if (group.id == group_id):
return group
return None<|docstring|>Search a group by given id.<|endoftext|>
|
323218d661688a5b3a66988216f8016a05fa49e71164e843110e46e9a4c50785
|
def get_async_home_mock(self):
    '\n    Create Mock for Async_Home based on template to be used for testing.\n\n    It adds collections of mocked devices and groups to the home objects,\n    and sets required attributes.\n    '
mock_home = Mock(check_connection=self._connection, id=HAPID, connected=True, dutyCycle=self.dutyCycle, devices=self.mock_devices, groups=self.mock_groups, weather=self.weather, location=self.location, label='home label', template=self, spec=AsyncHome)
mock_home.name = ''
return mock_home
|
Create Mock for Async_Home based on template to be used for testing.
It adds collections of mocked devices and groups to the home objects,
and sets required attributes.
|
tests/components/homematicip_cloud/helper.py
|
get_async_home_mock
|
SoldierCorp/home-assistant
| 2 |
python
|
def get_async_home_mock(self):
    '\n    Create Mock for Async_Home based on template to be used for testing.\n\n    It adds collections of mocked devices and groups to the home objects,\n    and sets required attributes.\n    '
mock_home = Mock(check_connection=self._connection, id=HAPID, connected=True, dutyCycle=self.dutyCycle, devices=self.mock_devices, groups=self.mock_groups, weather=self.weather, location=self.location, label='home label', template=self, spec=AsyncHome)
    mock_home.name = ''
return mock_home
|
def get_async_home_mock(self):
    '\n    Create Mock for Async_Home based on template to be used for testing.\n\n    It adds collections of mocked devices and groups to the home objects,\n    and sets required attributes.\n    '
mock_home = Mock(check_connection=self._connection, id=HAPID, connected=True, dutyCycle=self.dutyCycle, devices=self.mock_devices, groups=self.mock_groups, weather=self.weather, location=self.location, label='home label', template=self, spec=AsyncHome)
    mock_home.name = ''
    return mock_home<|docstring|>Create Mock for Async_Home based on template to be used for testing.
It adds collections of mocked devices and groups to the home objects,
and sets required attributes.<|endoftext|>
|
b4caaf442c44473f24a47daee29af536ff7389e60c040e7cd8c606d10f50b47e
|
def _get_vendor_specific_argv(self, username, host, port=None, subsystem=None, command=None):
'Return arguments to pass to rbssh.\n Args:\n username (unicode):\n The username to connect with.\n host (unicode):\n The hostname to connect to.\n port (int, optional):\n The custom port to connect to.\n subsystem (unicode, optional):\n The SSH subsystem to use.\n command (unicode, optional):\n The command to invoke through the SSH connection.\n Returns:\n list of unicode:\n The list of arguments to pass to :command:`rbssh`.\n '
args = [self.executable_path]
if (port is not None):
args.extend(['-p', six.text_type(port)])
if (username is not None):
args.extend(['-l', username])
if (subsystem is not None):
args.extend(['-s', host, subsystem])
else:
args.extend(([host] + command))
return args
|
Return arguments to pass to rbssh.
Args:
username (unicode):
The username to connect with.
host (unicode):
The hostname to connect to.
port (int, optional):
The custom port to connect to.
subsystem (unicode, optional):
The SSH subsystem to use.
command (unicode, optional):
The command to invoke through the SSH connection.
Returns:
list of unicode:
The list of arguments to pass to :command:`rbssh`.
|
reviewboard/scmtools/bzr/plugins/bzrlib/plugins/rbssh.py
|
_get_vendor_specific_argv
|
BarracudaPff/code-golf-data-pythpn
| 0 |
python
|
def _get_vendor_specific_argv(self, username, host, port=None, subsystem=None, command=None):
'Return arguments to pass to rbssh.\n Args:\n username (unicode):\n The username to connect with.\n host (unicode):\n The hostname to connect to.\n port (int, optional):\n The custom port to connect to.\n subsystem (unicode, optional):\n The SSH subsystem to use.\n command (unicode, optional):\n The command to invoke through the SSH connection.\n Returns:\n list of unicode:\n The list of arguments to pass to :command:`rbssh`.\n '
args = [self.executable_path]
if (port is not None):
args.extend(['-p', six.text_type(port)])
if (username is not None):
args.extend(['-l', username])
if (subsystem is not None):
args.extend(['-s', host, subsystem])
else:
args.extend(([host] + command))
return args
|
def _get_vendor_specific_argv(self, username, host, port=None, subsystem=None, command=None):
'Return arguments to pass to rbssh.\n Args:\n username (unicode):\n The username to connect with.\n host (unicode):\n The hostname to connect to.\n port (int, optional):\n The custom port to connect to.\n subsystem (unicode, optional):\n The SSH subsystem to use.\n command (unicode, optional):\n The command to invoke through the SSH connection.\n Returns:\n list of unicode:\n The list of arguments to pass to :command:`rbssh`.\n '
args = [self.executable_path]
if (port is not None):
args.extend(['-p', six.text_type(port)])
if (username is not None):
args.extend(['-l', username])
if (subsystem is not None):
args.extend(['-s', host, subsystem])
else:
args.extend(([host] + command))
return args<|docstring|>Return arguments to pass to rbssh.
Args:
username (unicode):
The username to connect with.
host (unicode):
The hostname to connect to.
port (int, optional):
The custom port to connect to.
subsystem (unicode, optional):
The SSH subsystem to use.
command (unicode, optional):
The command to invoke through the SSH connection.
Returns:
list of unicode:
The list of arguments to pass to :command:`rbssh`.<|endoftext|>
|
d557a79aae67b16a0a9ccb11f8e988cf03a89f8dd8e1f6101ca9ed1c45a2b028
|
def scaled_dot_product_attention(query: tf.Tensor, key: tf.Tensor, value: tf.Tensor, mask: tf.Tensor):
    'Calculate the attention weights.\n\n q (query), k (key), v (value) must have matching leading dimensions.\n k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.\n The mask has different shapes depending on its type (padding or look ahead)\n but it must be broadcastable for addition.\n\n Args:\n query: Query feature vectors, shape == (..., seq_len_q, depth).\n key: Key feature vectors, shape == (..., seq_len_k, depth).\n value: Value feature vectors, shape == (..., seq_len_v, depth_v).\n mask: Float tensor with shape broadcastable\n to (..., seq_len_q, seq_len_k). Defaults to None.\n Returns:\n output: The output attention vectors.\n attention_weights: The attention weights.\n '
matmul_qk = tf.matmul(query, key, transpose_b=True)
ftr_dim = tf.cast(tf.shape(key)[(- 1)], tf.float32)
scaled_attention_logits = (matmul_qk / tf.math.sqrt(ftr_dim))
if (mask is not None):
scaled_attention_logits += (mask * (- 1000000000.0))
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=(- 1))
output = tf.matmul(attention_weights, value)
return (output, attention_weights)
|
Calculate the attention weights.
q (query), k (key), v (value) must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type (padding or look ahead)
but it must be broadcastable for addition.
Args:
query: Query feature vectors, shape == (..., seq_len_q, depth).
key: Key feature vectors, shape == (..., seq_len_k, depth).
value: Value feature vectors, shape == (..., seq_len_v, depth_v).
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output: The output attention vectors.
attention_weights: The attention weights.
|
utils/models/transformer_models.py
|
scaled_dot_product_attention
|
zhuchen03/federated
| 0 |
python
|
def scaled_dot_product_attention(query: tf.Tensor, key: tf.Tensor, value: tf.Tensor, mask: tf.Tensor):
    'Calculate the attention weights.\n\n q (query), k (key), v (value) must have matching leading dimensions.\n k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.\n The mask has different shapes depending on its type (padding or look ahead)\n but it must be broadcastable for addition.\n\n Args:\n query: Query feature vectors, shape == (..., seq_len_q, depth).\n key: Key feature vectors, shape == (..., seq_len_k, depth).\n value: Value feature vectors, shape == (..., seq_len_v, depth_v).\n mask: Float tensor with shape broadcastable\n to (..., seq_len_q, seq_len_k). Defaults to None.\n Returns:\n output: The output attention vectors.\n attention_weights: The attention weights.\n '
matmul_qk = tf.matmul(query, key, transpose_b=True)
ftr_dim = tf.cast(tf.shape(key)[(- 1)], tf.float32)
scaled_attention_logits = (matmul_qk / tf.math.sqrt(ftr_dim))
if (mask is not None):
scaled_attention_logits += (mask * (- 1000000000.0))
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=(- 1))
output = tf.matmul(attention_weights, value)
return (output, attention_weights)
|
def scaled_dot_product_attention(query: tf.Tensor, key: tf.Tensor, value: tf.Tensor, mask: tf.Tensor):
    'Calculate the attention weights.\n\n q (query), k (key), v (value) must have matching leading dimensions.\n k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.\n The mask has different shapes depending on its type (padding or look ahead)\n but it must be broadcastable for addition.\n\n Args:\n query: Query feature vectors, shape == (..., seq_len_q, depth).\n key: Key feature vectors, shape == (..., seq_len_k, depth).\n value: Value feature vectors, shape == (..., seq_len_v, depth_v).\n mask: Float tensor with shape broadcastable\n to (..., seq_len_q, seq_len_k). Defaults to None.\n Returns:\n output: The output attention vectors.\n attention_weights: The attention weights.\n '
matmul_qk = tf.matmul(query, key, transpose_b=True)
ftr_dim = tf.cast(tf.shape(key)[(- 1)], tf.float32)
scaled_attention_logits = (matmul_qk / tf.math.sqrt(ftr_dim))
if (mask is not None):
scaled_attention_logits += (mask * (- 1000000000.0))
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=(- 1))
output = tf.matmul(attention_weights, value)
return (output, attention_weights)<|docstring|>Calculate the attention weights.
q (query), k (key), v (value) must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type (padding or look ahead)
but it must be broadcastable for addition.
Args:
query: Query feature vectors, shape == (..., seq_len_q, depth).
key: Key feature vectors, shape == (..., seq_len_k, depth).
value: Value feature vectors, shape == (..., seq_len_v, depth_v).
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output: The output attention vectors.
attention_weights: The attention weights.<|endoftext|>
|
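A runnable usage sketch of the attention record on toy tensors; the function is copied inline (lightly reformatted) and the shapes are illustrative.

```python
import tensorflow as tf

def scaled_dot_product_attention(query, key, value, mask):
    matmul_qk = tf.matmul(query, key, transpose_b=True)  # (..., seq_q, seq_k)
    ftr_dim = tf.cast(tf.shape(key)[-1], tf.float32)
    logits = matmul_qk / tf.math.sqrt(ftr_dim)           # scale by sqrt(depth)
    if mask is not None:
        logits += mask * -1e9                            # masked positions get ~0 weight
    weights = tf.nn.softmax(logits, axis=-1)
    return tf.matmul(weights, value), weights

q = tf.random.normal((1, 4, 8))    # (batch, seq_len_q, depth)
k = tf.random.normal((1, 6, 8))    # (batch, seq_len_k, depth)
v = tf.random.normal((1, 6, 16))   # (batch, seq_len_v == seq_len_k, depth_v)
out, w = scaled_dot_product_attention(q, k, v, mask=None)
print(out.shape, w.shape)          # (1, 4, 16) (1, 4, 6); weight rows sum to 1
```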
0bdf7bd4befbcefccca8b9e3372b0e9f0e7584460f00499fdcf0dd18d00a0b78
|
def point_wise_feed_forward_network(d_model, dff):
    'Returns a point-wise feed-forward network.\n Args:\n d_model: Dimension of the input feature.\n dff: Dimension of the hidden layer.\n Returns:\n `tf.keras.Sequential`: A one-hidden-layer MLP.\n '
return tf.keras.Sequential([tf.keras.layers.Dense(dff, activation='relu'), tf.keras.layers.Dense(d_model)])
|
Returns a point-wise feed-forward network.
Args:
d_model: Dimension of the input feature.
dff: Dimension of the hidden layer.
Returns:
`tf.keras.Sequential`: A one-hidden-layer MLP.
|
utils/models/transformer_models.py
|
point_wise_feed_forward_network
|
zhuchen03/federated
| 0 |
python
|
def point_wise_feed_forward_network(d_model, dff):
    'Returns a point-wise feed-forward network.\n Args:\n d_model: Dimension of the input feature.\n dff: Dimension of the hidden layer.\n Returns:\n `tf.keras.Sequential`: A one-hidden-layer MLP.\n '
return tf.keras.Sequential([tf.keras.layers.Dense(dff, activation='relu'), tf.keras.layers.Dense(d_model)])
|
def point_wise_feed_forward_network(d_model, dff):
    'Returns a point-wise feed-forward network.\n Args:\n d_model: Dimension of the input feature.\n dff: Dimension of the hidden layer.\n Returns:\n `tf.keras.Sequential`: A one-hidden-layer MLP.\n '
    return tf.keras.Sequential([tf.keras.layers.Dense(dff, activation='relu'), tf.keras.layers.Dense(d_model)])<|docstring|>Returns a point-wise feed-forward network.
Args:
d_model: Dimension of the input feature.
dff: Dimension of the hidden layer.
Returns:
`tf.keras.Sequential`: A one-hidden-layer MLP.<|endoftext|>
|
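A one-line shape check for the feed-forward record (copied inline; the dimensions are illustrative).

```python
import tensorflow as tf

def point_wise_feed_forward_network(d_model, dff):
    return tf.keras.Sequential([
        tf.keras.layers.Dense(dff, activation='relu'),  # expand to dff
        tf.keras.layers.Dense(d_model),                 # project back to d_model
    ])

ffn = point_wise_feed_forward_network(d_model=8, dff=32)
print(ffn(tf.random.normal((2, 5, 8))).shape)  # (2, 5, 8): shape is preserved
```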
8548d49361eb96961882f97e15f8462c93e133fe7b4cf52c041c1b67f273397a
|
def positional_encoding(position, d_model):
'Returns all the possible positional encodings. #add one sentence about why we need positional encoding, probably link to the equation in paper?\n Args:\n position: Maximum number of positions.\n d_model: Dimension of features of MultiHeadAttention layers.\n Returns:\n `tf.Tensor`: The position encodings of the input sequence.\n '
def get_angles(pos, i, d_model):
angle_rates = (1 / np.power(position, ((2 * (i // 2)) / np.float32(d_model))))
return (pos * angle_rates)
    angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
|
Returns all the possible positional encodings. #add one sentence about why we need positional encoding, probably link to the equation in paper?
Args:
position: Maximum number of positions.
d_model: Dimension of features of MultiHeadAttention layers.
Returns:
`tf.Tensor`: The position encodings of the input sequence.
|
utils/models/transformer_models.py
|
positional_encoding
|
zhuchen03/federated
| 0 |
python
|
def positional_encoding(position, d_model):
'Returns all the possible positional encodings. #add one sentence about why we need positional encoding, probably link to the equation in paper?\n Args:\n position: Maximum number of positions.\n d_model: Dimension of features of MultiHeadAttention layers.\n Returns:\n `tf.Tensor`: The position encodings of the input sequence.\n '
def get_angles(pos, i, d_model):
angle_rates = (1 / np.power(position, ((2 * (i // 2)) / np.float32(d_model))))
return (pos * angle_rates)
    angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
|
def positional_encoding(position, d_model):
'Returns all the possible positional encodings. #add one sentence about why we need positional encoding, probably link to the equation in paper?\n Args:\n position: Maximum number of positions.\n d_model: Dimension of features of MultiHeadAttention layers.\n Returns:\n `tf.Tensor`: The position encodings of the input sequence.\n '
def get_angles(pos, i, d_model):
angle_rates = (1 / np.power(position, ((2 * (i // 2)) / np.float32(d_model))))
return (pos * angle_rates)
    angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)<|docstring|>Returns all the possible positional encodings. #add one sentence about why we need positional encoding, probably link to the equation in paper?
Args:
position: Maximum number of positions.
d_model: Dimension of features of MultiHeadAttention layers.
Returns:
`tf.Tensor`: The position encodings of the input sequence.<|endoftext|>
|
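The record with the indexing repaired, plus a shape check. Note that, as written in the source, `get_angles` uses `position` rather than the Transformer paper's constant 10000 as the frequency base; that choice is kept here.

```python
import numpy as np
import tensorflow as tf

def positional_encoding(position, d_model):
    def get_angles(pos, i, d_model):
        return pos * (1 / np.power(position, (2 * (i // 2)) / np.float32(d_model)))
    angle_rads = get_angles(np.arange(position)[:, np.newaxis],
                            np.arange(d_model)[np.newaxis, :], d_model)
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])  # sine on even feature indices
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])  # cosine on odd feature indices
    return tf.cast(angle_rads[np.newaxis, ...], dtype=tf.float32)

pe = positional_encoding(position=50, d_model=16)
print(pe.shape)  # (1, 50, 16): one encoding row per position
```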
d1619a57bceda6ce7a6b2421bfd5a138cf36eacb7233f1d8bacab39a51a598ce
|
def create_transformer_lm(vocab_size=10000, num_oov_buckets=1, d_embed=96, d_model=512, dff=2048, num_heads=8, num_layers=1, max_position_encoding=10000, dropout=0.1, name='transformer_lm'):
'Create the transformer-based language model for next-token prediction.\n Args:\n vocab_size: Vocab size for normal tokens.\n num_oov_buckets: Number of out of vocabulary buckets.\n d_embed: Dimension of the token embeddings.\n d_model: Dimension of features of MultiHeadAttention layers.\n dff: Dimension of hidden layers of the FFN.\n num_heads: Number of attention heads.\n num_layers: Number of Transformer blocks.\n max_position_encoding: Maximum number of positions for position embeddings.\n dropout: Dropout rate.\n name: Name of the model.\n Returns:\n `tf.keras.Model`.\n '
extended_vocab_size = ((vocab_size + 3) + num_oov_buckets)
inputs = tf.keras.layers.Input(shape=(None,))
transformer = TransformerLM(num_layers, d_embed, d_model, num_heads, dff, extended_vocab_size, max_position_encoding, rate=dropout)
features = transformer(inputs)
transpose_embedding = TransposableEmbedding(transformer.embedding)
logits = transpose_embedding(features)
return tf.keras.Model(inputs=inputs, outputs=logits, name=name)
|
Create the transformer-based language model for next-token prediction.
Args:
vocab_size: Vocab size for normal tokens.
num_oov_buckets: Number of out of vocabulary buckets.
d_embed: Dimension of the token embeddings.
d_model: Dimension of features of MultiHeadAttention layers.
dff: Dimension of hidden layers of the FFN.
num_heads: Number of attention heads.
num_layers: Number of Transformer blocks.
max_position_encoding: Maximum number of positions for position embeddings.
dropout: Dropout rate.
name: Name of the model.
Returns:
`tf.keras.Model`.
|
utils/models/transformer_models.py
|
create_transformer_lm
|
zhuchen03/federated
| 0 |
python
|
def create_transformer_lm(vocab_size=10000, num_oov_buckets=1, d_embed=96, d_model=512, dff=2048, num_heads=8, num_layers=1, max_position_encoding=10000, dropout=0.1, name='transformer_lm'):
'Create the transformer-based language model for next-token prediction.\n Args:\n vocab_size: Vocab size for normal tokens.\n num_oov_buckets: Number of out of vocabulary buckets.\n d_embed: Dimension of the token embeddings.\n d_model: Dimension of features of MultiHeadAttention layers.\n dff: Dimension of hidden layers of the FFN.\n num_heads: Number of attention heads.\n num_layers: Number of Transformer blocks.\n max_position_encoding: Maximum number of positions for position embeddings.\n dropout: Dropout rate.\n name: Name of the model.\n Returns:\n `tf.keras.Model`.\n '
extended_vocab_size = ((vocab_size + 3) + num_oov_buckets)
inputs = tf.keras.layers.Input(shape=(None,))
transformer = TransformerLM(num_layers, d_embed, d_model, num_heads, dff, extended_vocab_size, max_position_encoding, rate=dropout)
features = transformer(inputs)
transpose_embedding = TransposableEmbedding(transformer.embedding)
logits = transpose_embedding(features)
return tf.keras.Model(inputs=inputs, outputs=logits, name=name)
|
def create_transformer_lm(vocab_size=10000, num_oov_buckets=1, d_embed=96, d_model=512, dff=2048, num_heads=8, num_layers=1, max_position_encoding=10000, dropout=0.1, name='transformer_lm'):
'Create the transformer-based language model for next-token prediction.\n Args:\n vocab_size: Vocab size for normal tokens.\n num_oov_buckets: Number of out of vocabulary buckets.\n d_embed: Dimension of the token embeddings.\n d_model: Dimension of features of MultiHeadAttention layers.\n dff: Dimension of hidden layers of the FFN.\n num_heads: Number of attention heads.\n num_layers: Number of Transformer blocks.\n max_position_encoding: Maximum number of positions for position embeddings.\n dropout: Dropout rate.\n name: Name of the model.\n Returns:\n `tf.keras.Model`.\n '
extended_vocab_size = ((vocab_size + 3) + num_oov_buckets)
inputs = tf.keras.layers.Input(shape=(None,))
transformer = TransformerLM(num_layers, d_embed, d_model, num_heads, dff, extended_vocab_size, max_position_encoding, rate=dropout)
features = transformer(inputs)
transpose_embedding = TransposableEmbedding(transformer.embedding)
logits = transpose_embedding(features)
return tf.keras.Model(inputs=inputs, outputs=logits, name=name)<|docstring|>Create the transformer-based language model for next-token prediction.
Args:
vocab_size: Vocab size for normal tokens.
num_oov_buckets: Number of out of vocabulary buckets.
d_embed: Dimension of the token embeddings.
d_model: Dimension of features of MultiHeadAttention layers.
dff: Dimension of hidden layers of the FFN.
num_heads: Number of attention heads.
num_layers: Number of Transformer blocks.
max_position_encoding: Maximum number of positions for position embeddings.
dropout: Dropout rate.
name: Name of the model.
Returns:
`tf.keras.Model`.<|endoftext|>
|
739e58b202c66e7508d67e648f037cc90a7430a8c7538e0c8ae7a7ae36bf6f5e
|
def split_heads(self, x, batch_size):
'Split the last dimension into (num_heads, depth).\n Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)\n '
x = tf.reshape(x, (batch_size, (- 1), self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
|
Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
|
utils/models/transformer_models.py
|
split_heads
|
zhuchen03/federated
| 0 |
python
|
def split_heads(self, x, batch_size):
'Split the last dimension into (num_heads, depth).\n Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)\n '
x = tf.reshape(x, (batch_size, (- 1), self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
|
def split_heads(self, x, batch_size):
'Split the last dimension into (num_heads, depth).\n Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)\n '
x = tf.reshape(x, (batch_size, (- 1), self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])<|docstring|>Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)<|endoftext|>
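A shape walkthrough of the same reshape/transpose, assuming d_model = num_heads * depth (here 512 = 8 * 64):

import tensorflow as tf

batch_size, seq_len, num_heads, depth = 2, 10, 8, 64
x = tf.random.uniform((batch_size, seq_len, num_heads * depth))  # (2, 10, 512)
x = tf.reshape(x, (batch_size, -1, num_heads, depth))            # (2, 10, 8, 64)
x = tf.transpose(x, perm=[0, 2, 1, 3])                           # (2, 8, 10, 64)
print(x.shape)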
|
b1915d6f629907b3f62c97e6819b5aeabbe6793441f60eeb2cd919a539351df0
|
def download_source_ligatures():
    '\n\tDownloads the Fira Code OpenType fonts (version 3.1), which contain the source ligatures.\n\n\tRemarks:\n\t\tThe Fira Code font version downloaded is the latest available that can be applied. For more information see the following issue: https://github.com/tonsky/FiraCode/issues/1100\n\t'
if (not path.isdir(LIGATURES_SOURCE)):
makedirs(LIGATURES_SOURCE, exist_ok=True)
with open(request.urlretrieve('https://api.github.com/repos/tonsky/FiraCode/contents/distr/otf?ref=e9943d2d631a4558613d7a77c58ed1d3cb790992')[0], 'r') as stream:
json = load(stream)
for entry in json:
github.download_file(entry['download_url'], path.join(LIGATURES_SOURCE, path.basename(entry['path'])))
|
Downloads the Fira Code OpenType fonts (version 3.1), which contain the source ligatures.
Remarks:
The Fira Code font version downloaded is the latest available that can be applied. For more information see the following issue: https://github.com/tonsky/FiraCode/issues/1100
|
helpers/fonts.py
|
download_source_ligatures
|
lperezperez/font-patcher-helper
| 0 |
python
|
def download_source_ligatures():
    '\n\tDownloads the Fira Code OpenType fonts (version 3.1), which contain the source ligatures.\n\n\tRemarks:\n\t\tThe Fira Code font version downloaded is the latest available that can be applied. For more information see the following issue: https://github.com/tonsky/FiraCode/issues/1100\n\t'
if (not path.isdir(LIGATURES_SOURCE)):
makedirs(LIGATURES_SOURCE, exist_ok=True)
with open(request.urlretrieve('https://api.github.com/repos/tonsky/FiraCode/contents/distr/otf?ref=e9943d2d631a4558613d7a77c58ed1d3cb790992')[0], 'r') as stream:
json = load(stream)
for entry in json:
github.download_file(entry['download_url'], path.join(LIGATURES_SOURCE, path.basename(entry['path'])))
|
def download_source_ligatures():
    '\n\tDownloads the Fira Code OpenType fonts (version 3.1), which contain the source ligatures.\n\n\tRemarks:\n\t\tThe Fira Code font version downloaded is the latest available that can be applied. For more information see the following issue: https://github.com/tonsky/FiraCode/issues/1100\n\t'
if (not path.isdir(LIGATURES_SOURCE)):
makedirs(LIGATURES_SOURCE, exist_ok=True)
with open(request.urlretrieve('https://api.github.com/repos/tonsky/FiraCode/contents/distr/otf?ref=e9943d2d631a4558613d7a77c58ed1d3cb790992')[0], 'r') as stream:
json = load(stream)
for entry in json:
            github.download_file(entry['download_url'], path.join(LIGATURES_SOURCE, path.basename(entry['path'])))<|docstring|>Downloads the Fira Code OpenType fonts (version 3.1), which contain the source ligatures.
Remarks:
The Fira Code font version downloaded is the latest available that can be applied. For more information see the following issue: https://github.com/tonsky/FiraCode/issues/1100<|endoftext|>
|
1759bf23343f81088130fcd7c56ffc0c5aaff489233c7bf54349aefc46c374c1
|
def get_font_files(paths: list):
    '\n\tGet fonts from the specified paths.\n\n\tArguments:\n\t\tpaths (list): A list of paths from which to retrieve fonts.\n\t'
font_files = []
for font_path in paths:
if (path.isfile(font_path) and (path.splitext(font_path)[(- 1)] in EXTENSIONS)):
font_files.append(font_path)
elif path.isdir(font_path):
for (root, folder_names, file_names) in walk(font_path):
for file_name in file_names:
if (path.splitext(file_name)[(- 1)] in EXTENSIONS):
font_files.append(path.join(root, file_name))
else:
stderr.write(f'Cannot retrieve path {font_path}')
return font_files
|
Get fonts from the specified paths.
Arguments:
paths (list): A list of paths from which to retrieve fonts.
|
helpers/fonts.py
|
get_font_files
|
lperezperez/font-patcher-helper
| 0 |
python
|
def get_font_files(paths: list):
    '\n\tGet fonts from the specified paths.\n\n\tArguments:\n\t\tpaths (list): A list of paths from which to retrieve fonts.\n\t'
font_files = []
for font_path in paths:
if (path.isfile(font_path) and (path.splitext(font_path)[(- 1)] in EXTENSIONS)):
font_files.append(font_path)
elif path.isdir(font_path):
for (root, folder_names, file_names) in walk(font_path):
for file_name in file_names:
if (path.splitext(file_name)[(- 1)] in EXTENSIONS):
font_files.append(path.join(root, file_name))
else:
stderr.write(f'Cannot retrieve path {font_path}')
return font_files
|
def get_font_files(paths: list):
    '\n\tGet fonts from the specified paths.\n\n\tArguments:\n\t\tpaths (list): A list of paths from which to retrieve fonts.\n\t'
font_files = []
for font_path in paths:
if (path.isfile(font_path) and (path.splitext(font_path)[(- 1)] in EXTENSIONS)):
font_files.append(font_path)
elif path.isdir(font_path):
for (root, folder_names, file_names) in walk(font_path):
for file_name in file_names:
if (path.splitext(file_name)[(- 1)] in EXTENSIONS):
font_files.append(path.join(root, file_name))
else:
stderr.write(f'Cannot retrieve path {font_path}')
return font_files<|docstring|>Get fonts from the specified paths.
Arguments:
paths (list): A list of paths from which to retrieve fonts.<|endoftext|>
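A minimal usage sketch; the paths are hypothetical, and EXTENSIONS is the module-level collection of accepted font file suffixes:

# one explicit font file plus a folder that is walked recursively
for font_file in get_font_files(['/fonts/Hack-Regular.ttf', '/fonts/otf']):
    print(font_file)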
|
cd35b7be653d5748cb40f393e37665be6e209725b7476e290972a242c2a965c3
|
def normalize_styles(font_style: str):
    '\n\tNormalizes font styles and converts weights, widths and optical sizes to one camel-case word.\n\n\tArguments:\n\t\tfont_style (str): The original font styles.\n\t'
return font_style.replace('Hairline', 'Thin').replace('Extra Light', 'ExtraLight').replace('Ultra Light', 'ExtraLight').replace('XLight', 'ExtraLight').replace('Book', 'Regular').replace('Demi Bold', 'SemiBold').replace('Semi Bold', 'SemiBold').replace('Extra Bold', 'ExtraBold').replace('Ultra Bold', 'ExtraBold').replace('Heavy', 'Black').replace('XNarrow', 'ExtraNarrow').replace('SSm', 'ScreenSmart')
|
Normalizes font styles and converts weights, widths and optical sizes to one camel-case word.
Arguments:
font_style (str): The original font styles.
|
helpers/fonts.py
|
normalize_styles
|
lperezperez/font-patcher-helper
| 0 |
python
|
def normalize_styles(font_style: str):
    '\n\tNormalizes font styles and converts weights, widths and optical sizes to one camel-case word.\n\n\tArguments:\n\t\tfont_style (str): The original font styles.\n\t'
return font_style.replace('Hairline', 'Thin').replace('Extra Light', 'ExtraLight').replace('Ultra Light', 'ExtraLight').replace('XLight', 'ExtraLight').replace('Book', 'Regular').replace('Demi Bold', 'SemiBold').replace('Semi Bold', 'SemiBold').replace('Extra Bold', 'ExtraBold').replace('Ultra Bold', 'ExtraBold').replace('Heavy', 'Black').replace('XNarrow', 'ExtraNarrow').replace('SSm', 'ScreenSmart')
|
def normalize_styles(font_style: str):
    '\n\tNormalizes font styles and converts weights, widths and optical sizes to one camel-case word.\n\n\tArguments:\n\t\tfont_style (str): The original font styles.\n\t'
    return font_style.replace('Hairline', 'Thin').replace('Extra Light', 'ExtraLight').replace('Ultra Light', 'ExtraLight').replace('XLight', 'ExtraLight').replace('Book', 'Regular').replace('Demi Bold', 'SemiBold').replace('Semi Bold', 'SemiBold').replace('Extra Bold', 'ExtraBold').replace('Ultra Bold', 'ExtraBold').replace('Heavy', 'Black').replace('XNarrow', 'ExtraNarrow').replace('SSm', 'ScreenSmart')<|docstring|>Normalizes font styles and converts weights, widths and optical sizes to one camel-case word.
Arguments:
font_style (str): The original font styles.<|endoftext|>
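Two illustrative calls (the input names are hypothetical); note that 'Book' maps to 'Regular' and 'SSm' to 'ScreenSmart':

print(normalize_styles('Fira Sans Extra Light'))   # Fira Sans ExtraLight
print(normalize_styles('Operator Mono SSm Book'))  # Operator Mono ScreenSmart Regular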
|
21dce7beb0275f69c0d4edef485e618fcead539c3b1b25ffd935b21ee522b184
|
def get_style_abbreviated(font_name: str):
'\n\tGets the specified `font_name` with the style abbreviations recommended in the Adobe Tech note #5088 (http://wwwimages.adobe.com/content/dam/acom/en/devnet/font/pdfs/5088.FontNames.pdf)\n\n\tArguments:\n\t\tfont_name (str): The font name to abbreviate.\n\t'
return font_name.replace('Bold', 'Bd').replace('Book', 'Bk').replace('Black', 'Blk').replace('Compressed', 'Cm').replace('Condensed', 'Cn').replace('Compact', 'Ct').replace('Demi', 'Dm').replace('Display', 'Ds').replace('Extended', 'Ex').replace('Heavy', 'Hv').replace('Inclined', 'Ic').replace('Italic', 'It').replace('Kursiv', 'Ks').replace('Light', 'Lt').replace('Medium', 'Md').replace('Nord', 'Nd').replace('Narrow', 'Nr').replace('Oblique', 'Obl').replace('Poster', 'Po').replace('Regular', 'Rg').replace('Slanted', 'Sl').replace('Semi', 'Sm').replace('Super', 'Su').replace('Thin', 'Th').replace('Ultra', 'Ult').replace('Upright', 'Up').replace('Extra', 'X')
|
Gets the specified `font_name` with the style abbreviations recommended in the Adobe Tech note #5088 (http://wwwimages.adobe.com/content/dam/acom/en/devnet/font/pdfs/5088.FontNames.pdf)
Arguments:
font_name (str): The font name to abbreviate.
|
helpers/fonts.py
|
get_style_abbreviated
|
lperezperez/font-patcher-helper
| 0 |
python
|
def get_style_abbreviated(font_name: str):
'\n\tGets the specified `font_name` with the style abbreviations recommended in the Adobe Tech note #5088 (http://wwwimages.adobe.com/content/dam/acom/en/devnet/font/pdfs/5088.FontNames.pdf)\n\n\tArguments:\n\t\tfont_name (str): The font name to abbreviate.\n\t'
return font_name.replace('Bold', 'Bd').replace('Book', 'Bk').replace('Black', 'Blk').replace('Compressed', 'Cm').replace('Condensed', 'Cn').replace('Compact', 'Ct').replace('Demi', 'Dm').replace('Display', 'Ds').replace('Extended', 'Ex').replace('Heavy', 'Hv').replace('Inclined', 'Ic').replace('Italic', 'It').replace('Kursiv', 'Ks').replace('Light', 'Lt').replace('Medium', 'Md').replace('Nord', 'Nd').replace('Narrow', 'Nr').replace('Oblique', 'Obl').replace('Poster', 'Po').replace('Regular', 'Rg').replace('Slanted', 'Sl').replace('Semi', 'Sm').replace('Super', 'Su').replace('Thin', 'Th').replace('Ultra', 'Ult').replace('Upright', 'Up').replace('Extra', 'X')
|
def get_style_abbreviated(font_name: str):
'\n\tGets the specified `font_name` with the style abbreviations recommended in the Adobe Tech note #5088 (http://wwwimages.adobe.com/content/dam/acom/en/devnet/font/pdfs/5088.FontNames.pdf)\n\n\tArguments:\n\t\tfont_name (str): The font name to abbreviate.\n\t'
return font_name.replace('Bold', 'Bd').replace('Book', 'Bk').replace('Black', 'Blk').replace('Compressed', 'Cm').replace('Condensed', 'Cn').replace('Compact', 'Ct').replace('Demi', 'Dm').replace('Display', 'Ds').replace('Extended', 'Ex').replace('Heavy', 'Hv').replace('Inclined', 'Ic').replace('Italic', 'It').replace('Kursiv', 'Ks').replace('Light', 'Lt').replace('Medium', 'Md').replace('Nord', 'Nd').replace('Narrow', 'Nr').replace('Oblique', 'Obl').replace('Poster', 'Po').replace('Regular', 'Rg').replace('Slanted', 'Sl').replace('Semi', 'Sm').replace('Super', 'Su').replace('Thin', 'Th').replace('Ultra', 'Ult').replace('Upright', 'Up').replace('Extra', 'X')<|docstring|>Gets the specified `font_name` with the style abbreviations recommended in the Adobe Tech note #5088 (http://wwwimages.adobe.com/content/dam/acom/en/devnet/font/pdfs/5088.FontNames.pdf)
Arguments:
font_name (str): The font name to abbreviate.<|endoftext|>
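An illustrative call with a hypothetical 35-character PostScript name; rename_fontforge below relies on this to stay within the 31-character limit:

name = 'SomeFamily-CondensedExtraBoldItalic'  # hypothetical, 35 characters
print(get_style_abbreviated(name))            # SomeFamily-CnXBdIt (18 characters)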
|
beac9c05a4a0f122776415fd4455b194746f757955107827855ca7cba76cc05d
|
def remove_wws_styles(font_name: str):
'\n\tRemove font WWS (weight, width, and slope) styles from `font_name`\n\n\tArguments:\n\t\tfont_name (str): The font name from which the styles will be removed.\n\t'
return font_name.replace('Thin', '').replace('ExtraLight', '').replace('Light', '').replace('Regular', '').replace('Medium', '').replace('SemiBold', '').replace('ExtraBold', '').replace('Bold', '').replace('Black', '').replace('Italic', '').strip()
|
Remove font WWS (weight, width, and slope) styles from `font_name`
Arguments:
font_name (str): The font name from which the styles will be removed.
|
helpers/fonts.py
|
remove_wws_styles
|
lperezperez/font-patcher-helper
| 0 |
python
|
def remove_wws_styles(font_name: str):
'\n\tRemove font WWS (weight, width, and slope) styles from `font_name`\n\n\tArguments:\n\t\tfont_name (str): The font name from which the styles will be removed.\n\t'
    return font_name.replace('Thin', '').replace('ExtraLight', '').replace('Light', '').replace('Regular', '').replace('Medium', '').replace('SemiBold', '').replace('ExtraBold', '').replace('Bold', '').replace('Black', '').replace('Italic', '').strip()
|
def remove_wws_styles(font_name: str):
'\n\tRemove font WWS (weight, width, and slope) styles from `font_name`\n\n\tArguments:\n\t\tfont_name (str): The font name from which the styles will be removed.\n\t'
    return font_name.replace('Thin', '').replace('ExtraLight', '').replace('Light', '').replace('Regular', '').replace('Medium', '').replace('SemiBold', '').replace('ExtraBold', '').replace('Bold', '').replace('Black', '').replace('Italic', '').strip()<|docstring|>Remove font WWS (weight, width, and slope) styles from `font_name`
Arguments:
font_name (str): The font name from which the styles will be removed.<|endoftext|>
|
f5473862050403695d7c877c908eca1034eae3f17c31f4dfc2f2e6c308a76b64
|
def remove_styles(font_name: str):
'\n\tRemove font styles from `font_name`\n\n\tArguments:\n\t\tfont_name (str): The font name from which the styles will be removed.\n\t'
return remove_wws_styles(font_name).replace('Condensed', '').replace('ExtraNarrow', '').replace('Narrow', '').replace('ScreenSmart', '').replace('Mono', '').strip()
|
Remove font styles from `font_name`
Arguments:
font_name (str): The font name from which the styles will be removed.
|
helpers/fonts.py
|
remove_styles
|
lperezperez/font-patcher-helper
| 0 |
python
|
def remove_styles(font_name: str):
'\n\tRemove font styles from `font_name`\n\n\tArguments:\n\t\tfont_name (str): The font name from which the styles will be removed.\n\t'
    return remove_wws_styles(font_name).replace('Condensed', '').replace('ExtraNarrow', '').replace('Narrow', '').replace('ScreenSmart', '').replace('Mono', '').strip()
|
def remove_styles(font_name: str):
'\n\tRemove font styles from `font_name`\n\n\tArguments:\n\t\tfont_name (str): The font name from which the styles will be removed.\n\t'
    return remove_wws_styles(font_name).replace('Condensed', '').replace('ExtraNarrow', '').replace('Narrow', '').replace('ScreenSmart', '').replace('Mono', '').strip()<|docstring|>Remove font styles from `font_name`
Arguments:
font_name (str): The font name from which the styles will be removed.<|endoftext|>
|
d7040e0e8847551c74d2e4b742b0f6f8b40cf786c3bdac068d8fd0e2aad0b53e
|
def get_name_id(font: fontforge.font, name_id: str):
    '\n\tGets the value of the specified `name_id` for the `font`.\n\n\tArguments:\n\t\tfont (fontforge.font): A FontForge loaded font.\n\t\tname_id (str): An Open Type Name ID.\n\t'
for sfnt_name in font.sfnt_names:
if (sfnt_name[1] == name_id):
return sfnt_name[2]
|
Gets the value of the specified `name_id` for the `font`.
Arguments:
font (fontforge.font): A FontForge loaded font.
name_id (str): An Open Type Name ID.
|
helpers/fonts.py
|
get_name_id
|
lperezperez/font-patcher-helper
| 0 |
python
|
def get_name_id(font: fontforge.font, name_id: str):
    '\n\tGets the value of the specified `name_id` for the `font`.\n\n\tArguments:\n\t\tfont (fontforge.font): A FontForge loaded font.\n\t\tname_id (str): An Open Type Name ID.\n\t'
for sfnt_name in font.sfnt_names:
if (sfnt_name[1] == name_id):
return sfnt_name[2]
|
def get_name_id(font: fontforge.font, name_id: str):
    '\n\tGets the value of the specified `name_id` for the `font`.\n\n\tArguments:\n\t\tfont (fontforge.font): A FontForge loaded font.\n\t\tname_id (str): An Open Type Name ID.\n\t'
for sfnt_name in font.sfnt_names:
if (sfnt_name[1] == name_id):
            return sfnt_name[2]<|docstring|>Gets the value of the specified `name_id` for the `font`.
Arguments:
font (fontforge.font): A FontForge loaded font.
name_id (str): An Open Type Name ID.<|endoftext|>
|
f8d86d308338f5119f2b6d4470ccb215cdfc8a94d88ce02d8e68b30debbdbb00
|
def set_name_id(font: fontforge.font, name_id: str, value: str):
'\n\tSets the specified `value` for the `font` `name_id`.\n\n\tArguments:\n\t\tfont (fontforge.font): A FontForge loaded font.\n\t\tname_id (str): An Open Type Name ID.\n\t\tvalue (str): A value to set for the `font` `name_id`.\n\t'
font.sfnt_names = tuple((((row[0], row[1], value) if (row[1] == name_id) else row) for row in font.sfnt_names))
|
Sets the specified `value` for the `font` `name_id`.
Arguments:
font (fontforge.font): A FontForge loaded font.
name_id (str): An Open Type Name ID.
value (str): A value to set for the `font` `name_id`.
|
helpers/fonts.py
|
set_name_id
|
lperezperez/font-patcher-helper
| 0 |
python
|
def set_name_id(font: fontforge.font, name_id: str, value: str):
'\n\tSets the specified `value` for the `font` `name_id`.\n\n\tArguments:\n\t\tfont (fontforge.font): A FontForge loaded font.\n\t\tname_id (str): An Open Type Name ID.\n\t\tvalue (str): A value to set for the `font` `name_id`.\n\t'
font.sfnt_names = tuple((((row[0], row[1], value) if (row[1] == name_id) else row) for row in font.sfnt_names))
|
def set_name_id(font: fontforge.font, name_id: str, value: str):
'\n\tSets the specified `value` for the `font` `name_id`.\n\n\tArguments:\n\t\tfont (fontforge.font): A FontForge loaded font.\n\t\tname_id (str): An Open Type Name ID.\n\t\tvalue (str): A value to set for the `font` `name_id`.\n\t'
font.sfnt_names = tuple((((row[0], row[1], value) if (row[1] == name_id) else row) for row in font.sfnt_names))<|docstring|>Sets the specified `value` for the `font` `name_id`.
Arguments:
font (fontforge.font): A FontForge loaded font.
name_id (str): An Open Type Name ID.
value (str): A value to set for the `font` `name_id`.<|endoftext|>
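A minimal sketch pairing set_name_id with get_name_id; the font file is hypothetical, and 'License' is one of FontForge's English SFNT name strings:

import fontforge

font = fontforge.open('MyFont-Regular.ttf')  # hypothetical file
set_name_id(font, 'License', 'SIL Open Font License 1.1')
print(get_name_id(font, 'License'))          # SIL Open Font License 1.1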
|
10380f29401c425f034358df894291d33089abe5a479e61aa184470b56b0fbc7
|
def rename_fontforge(font: fontforge.font):
'\n\tTries to rename `font` naming table based on the OpenType specifications (https://docs.microsoft.com/typography/opentype/spec/name#name-ids)\n\n\tArguments:\n\t\tfont (fontforge.font): The font to rename.\n\t'
font.fullname = normalize_styles(font.fullname.replace(NERD_FONT_SUFFIX, ''))
font.familyname = remove_styles(font.fullname)
subfamilyname = font.fullname.replace(font.familyname, '').strip()
font.fullname = f'{font.familyname} {subfamilyname}'
font.fontname = font.fullname.replace(' ', '-')
if (len(font.fontname) > 31):
font.fontname = get_style_abbreviated(font.fontname)[:31]
wws_family = remove_wws_styles(font.fullname)
version_groups = VERSION_PATTERN.match(font.version).groups()
if (version_groups[1] is None):
font.version = f'Version {float(version_groups[0])}'
else:
font.version = f'Version {float(version_groups[0])};{version_groups[1]} {version_groups[2]}'
set_name_id(font, 'Family', font.familyname)
set_name_id(font, 'SubFamily', subfamilyname)
set_name_id(font, 'UniqueID', ((font.fontname + ';') + font.version.replace('Version ', '').replace('Nerd Fonts ', 'NF')).replace(' ', '-'))
set_name_id(font, 'Fullname', font.fullname)
set_name_id(font, 'Version', font.version)
set_name_id(font, 'PostScriptName', font.fontname)
set_name_id(font, 'Preferred Family', font.familyname)
set_name_id(font, 'Preferred Styles', subfamilyname)
set_name_id(font, 'Compatible Full', font.fullname)
set_name_id(font, 'Sample Text', 'Kuba harpisto ŝajnis amuziĝi facilege ĉe via ĵaŭda ĥoro')
set_name_id(font, 'WWS Family', wws_family)
set_name_id(font, 'WWS SubFamily', font.fullname.replace(wws_family, '').strip())
|
Tries to rename `font` naming table based on the OpenType specifications (https://docs.microsoft.com/typography/opentype/spec/name#name-ids)
Arguments:
font (fontforge.font): The font to rename.
|
helpers/fonts.py
|
rename_fontforge
|
lperezperez/font-patcher-helper
| 0 |
python
|
def rename_fontforge(font: fontforge.font):
'\n\tTries to rename `font` naming table based on the OpenType specifications (https://docs.microsoft.com/typography/opentype/spec/name#name-ids)\n\n\tArguments:\n\t\tfont (fontforge.font): The font to rename.\n\t'
    font.fullname = normalize_styles(font.fullname.replace(NERD_FONT_SUFFIX, ''))
font.familyname = remove_styles(font.fullname)
    subfamilyname = font.fullname.replace(font.familyname, '').strip()
font.fullname = f'{font.familyname} {subfamilyname}'
font.fontname = font.fullname.replace(' ', '-')
if (len(font.fontname) > 31):
font.fontname = get_style_abbreviated(font.fontname)[:31]
wws_family = remove_wws_styles(font.fullname)
version_groups = VERSION_PATTERN.match(font.version).groups()
if (version_groups[1] is None):
font.version = f'Version {float(version_groups[0])}'
else:
font.version = f'Version {float(version_groups[0])};{version_groups[1]} {version_groups[2]}'
set_name_id(font, 'Family', font.familyname)
set_name_id(font, 'SubFamily', subfamilyname)
    set_name_id(font, 'UniqueID', ((font.fontname + ';') + font.version.replace('Version ', '').replace('Nerd Fonts ', 'NF')).replace(' ', '-'))
set_name_id(font, 'Fullname', font.fullname)
set_name_id(font, 'Version', font.version)
set_name_id(font, 'PostScriptName', font.fontname)
set_name_id(font, 'Preferred Family', font.familyname)
set_name_id(font, 'Preferred Styles', subfamilyname)
set_name_id(font, 'Compatible Full', font.fullname)
set_name_id(font, 'Sample Text', 'Kuba harpisto ŝajnis amuziĝi facilege ĉe via ĵaŭda ĥoro')
set_name_id(font, 'WWS Family', wws_family)
    set_name_id(font, 'WWS SubFamily', font.fullname.replace(wws_family, '').strip())
|
def rename_fontforge(font: fontforge.font):
'\n\tTries to rename `font` naming table based on the OpenType specifications (https://docs.microsoft.com/typography/opentype/spec/name#name-ids)\n\n\tArguments:\n\t\tfont (fontforge.font): The font to rename.\n\t'
    font.fullname = normalize_styles(font.fullname.replace(NERD_FONT_SUFFIX, ''))
font.familyname = remove_styles(font.fullname)
    subfamilyname = font.fullname.replace(font.familyname, '').strip()
font.fullname = f'{font.familyname} {subfamilyname}'
font.fontname = font.fullname.replace(' ', '-')
if (len(font.fontname) > 31):
font.fontname = get_style_abbreviated(font.fontname)[:31]
wws_family = remove_wws_styles(font.fullname)
version_groups = VERSION_PATTERN.match(font.version).groups()
if (version_groups[1] is None):
font.version = f'Version {float(version_groups[0])}'
else:
font.version = f'Version {float(version_groups[0])};{version_groups[1]} {version_groups[2]}'
set_name_id(font, 'Family', font.familyname)
set_name_id(font, 'SubFamily', subfamilyname)
    set_name_id(font, 'UniqueID', ((font.fontname + ';') + font.version.replace('Version ', '').replace('Nerd Fonts ', 'NF')).replace(' ', '-'))
set_name_id(font, 'Fullname', font.fullname)
set_name_id(font, 'Version', font.version)
set_name_id(font, 'PostScriptName', font.fontname)
set_name_id(font, 'Preferred Family', font.familyname)
set_name_id(font, 'Preferred Styles', subfamilyname)
set_name_id(font, 'Compatible Full', font.fullname)
set_name_id(font, 'Sample Text', 'Kuba harpisto ŝajnis amuziĝi facilege ĉe via ĵaŭda ĥoro')
set_name_id(font, 'WWS Family', wws_family)
    set_name_id(font, 'WWS SubFamily', font.fullname.replace(wws_family, '').strip())<|docstring|>Tries to rename `font` naming table based on the OpenType specifications (https://docs.microsoft.com/typography/opentype/spec/name#name-ids)
Arguments:
font (fontforge.font): The font to rename.<|endoftext|>
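A minimal sketch, assuming a locally available patched font whose version string matches the module-level VERSION_PATTERN (the file name is hypothetical):

import fontforge

font = fontforge.open('Hack Regular Nerd Font Complete.ttf')  # hypothetical file
rename_fontforge(font)
print(font.familyname, '|', font.fullname, '|', font.fontname)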
|
823d72cdcbb26cdc38f25e814453b50a0f8ccf5cb5587df2b40db239039cdb3e
|
def rename_font(font_file_path: str, output_folder: str=RENAMED_FONTS_PATH):
    '\n\tRenames the font located in the specified `font_file_path` and stores it in `output_folder`.\n\n\tArguments:\n\t\tfont_file_path (str): The path of the font file.\n\t\toutput_folder (str): The folder in which to store the renamed font file.\n\t'
if (not path.isfile(font_file_path)):
        stderr.write(f'Cannot retrieve font at {font_file_path}')
        return
font = fontforge.open(font_file_path)
rename_fontforge(font)
output_folder = path.join(output_folder, font.familyname)
makedirs(output_folder, exist_ok=True)
font.generate(path.join(output_folder, (font.fullname + path.splitext(font_file_path)[(- 1)])))
|
Renames the font located in the specified `font_file_path` and stores it in `output_folder`.
Arguments:
font_file_path (str): The path of the font file.
output_folder (str): The folder in which to store the renamed font file.
|
helpers/fonts.py
|
rename_font
|
lperezperez/font-patcher-helper
| 0 |
python
|
def rename_font(font_file_path: str, output_folder: str=RENAMED_FONTS_PATH):
    '\n\tRenames the font located in the specified `font_file_path` and stores it in `output_folder`.\n\n\tArguments:\n\t\tfont_file_path (str): The path of the font file.\n\t\toutput_folder (str): The folder in which to store the renamed font file.\n\t'
if (not path.isfile(font_file_path)):
        stderr.write(f'Cannot retrieve font at {font_file_path}')
        return
font = fontforge.open(font_file_path)
rename_fontforge(font)
output_folder = path.join(output_folder, font.familyname)
makedirs(output_folder, exist_ok=True)
font.generate(path.join(output_folder, (font.fullname + path.splitext(font_file_path)[(- 1)])))
|
def rename_font(font_file_path: str, output_folder: str=RENAMED_FONTS_PATH):
    '\n\tRenames the font located in the specified `font_file_path` and stores it in `output_folder`.\n\n\tArguments:\n\t\tfont_file_path (str): The path of the font file.\n\t\toutput_folder (str): The folder in which to store the renamed font file.\n\t'
if (not path.isfile(font_file_path)):
        stderr.write(f'Cannot retrieve font at {font_file_path}')
        return
font = fontforge.open(font_file_path)
rename_fontforge(font)
output_folder = path.join(output_folder, font.familyname)
makedirs(output_folder, exist_ok=True)
    font.generate(path.join(output_folder, (font.fullname + path.splitext(font_file_path)[(- 1)])))<|docstring|>Renames the font located in the specified `font_file_path` and stores it in `output_folder`.
Arguments:
font_file_path (str): The path of the font file.
output_folder (str): The folder in which to store the renamed font file.<|endoftext|>
|
939bcc75c26a31b79bc45009e0af7d5d3c7e1edb637053469eabb20208d33912
|
def ligaturize(font_file_path: str, output_folder: str=LIGATURIZED_FONTS_PATH):
    '\n\tLigaturizes the font in `font_file_path` using a compatible weight Fira Code source font and stores the result in `output_folder`.\n\n\tArguments:\n\t\tfont_file_path (str): The font file path to ligaturize.\n\t\toutput_folder (str): The folder in which to store the ligaturized font file.\n\t'
font = fontforge.open(font_file_path)
rename_fontforge(font)
output_folder = path.join(output_folder, font.familyname)
output_file_path = path.join(output_folder, path.basename(font_file_path))
if path.isfile(output_file_path):
stderr.write(f'File "{output_file_path}" already exists.')
return
ligaturizer = Ligaturizer(font)
def ligature_length(lig):
return len(lig['chars'])
for lig_spec in sorted(LIGATURES, key=ligature_length):
try:
ligaturizer.add_ligature(lig_spec['chars'], lig_spec['firacode_ligature_name'])
except Exception:
stderr.write(f'Cannot add ligature {lig_spec} to {font_file_path}')
return
font.upos += font.uwidth
makedirs(output_folder, exist_ok=True)
font.generate(output_file_path)
|
Ligaturizes the font in `font_file_path` using a compatible weight Fira Code source font and stores the result in `output_folder`.
Arguments:
font_file_path (str): The font file path to ligaturize.
output_folder (str): The folder in which to store the ligaturized font file.
|
helpers/fonts.py
|
ligaturize
|
lperezperez/font-patcher-helper
| 0 |
python
|
def ligaturize(font_file_path: str, output_folder: str=LIGATURIZED_FONTS_PATH):
    '\n\tLigaturizes the font in `font_file_path` using a compatible weight Fira Code source font and stores the result in `output_folder`.\n\n\tArguments:\n\t\tfont_file_path (str): The font file path to ligaturize.\n\t\toutput_folder (str): The folder in which to store the ligaturized font file.\n\t'
font = fontforge.open(font_file_path)
rename_fontforge(font)
output_folder = path.join(output_folder, font.familyname)
output_file_path = path.join(output_folder, path.basename(font_file_path))
if path.isfile(output_file_path):
stderr.write(f'File "{output_file_path}" already exists.')
return
ligaturizer = Ligaturizer(font)
def ligature_length(lig):
return len(lig['chars'])
for lig_spec in sorted(LIGATURES, key=ligature_length):
try:
ligaturizer.add_ligature(lig_spec['chars'], lig_spec['firacode_ligature_name'])
except Exception:
stderr.write(f'Cannot add ligature {lig_spec} to {font_file_path}')
return
font.upos += font.uwidth
makedirs(output_folder, exist_ok=True)
font.generate(output_file_path)
|
def ligaturize(font_file_path: str, output_folder: str=LIGATURIZED_FONTS_PATH):
    '\n\tLigaturizes the font in `font_file_path` using a compatible weight Fira Code source font and stores the result in `output_folder`.\n\n\tArguments:\n\t\tfont_file_path (str): The font file path to ligaturize.\n\t\toutput_folder (str): The folder in which to store the ligaturized font file.\n\t'
font = fontforge.open(font_file_path)
rename_fontforge(font)
output_folder = path.join(output_folder, font.familyname)
output_file_path = path.join(output_folder, path.basename(font_file_path))
if path.isfile(output_file_path):
stderr.write(f'File "{output_file_path}" already exists.')
return
ligaturizer = Ligaturizer(font)
def ligature_length(lig):
return len(lig['chars'])
for lig_spec in sorted(LIGATURES, key=ligature_length):
try:
ligaturizer.add_ligature(lig_spec['chars'], lig_spec['firacode_ligature_name'])
except Exception:
stderr.write(f'Cannot add ligature {lig_spec} to {font_file_path}')
return
font.upos += font.uwidth
makedirs(output_folder, exist_ok=True)
    font.generate(output_file_path)<|docstring|>Ligaturizes the font in `font_file_path` using a compatible weight Fira Code source font and stores the result in `output_folder`.
Arguments:
font_file_path (str): The font file path to ligaturize.
output_folder (str): The folder in which to store the ligaturized font file.<|endoftext|>
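A minimal sketch, assuming download_source_ligatures() has already populated LIGATURES_SOURCE with the Fira Code OTFs (the input path is hypothetical):

ligaturize('/fonts/renamed/Hack/Hack Regular.ttf')  # output lands under LIGATURIZED_FONTS_PATH/<family>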
|
82405a160ff253156a6593a7234b9af060b307e8339978e50e606d70e31f85fa
|
def download_nerd_fonts(nerd_font_family: str, url: str=''):
'\n\tDownloads the patched fonts under the specified URL of a Nerd fonts patched font family subfolder.\n\n\tArguments:\n\t\tnerd_font_family (str): The patched family name.\n\t\turl (str): URL of a patched font family subfolder. If not specified, then uses the base URL for the specified `nerd_font_family`.\n\t'
if (not url):
url = ('https://github.com/ryanoasis/nerd-fonts/tree/master/patched-fonts/' + nerd_font_family)
branch = REPO_BRANCH.search(url)
response = request.urlretrieve(((((url[:branch.start()].replace('github.com', 'api.github.com/repos', 1) + '/contents/') + url[branch.end():]) + '?ref=') + branch.group(2)))
with open(response[0], 'r') as stream:
json = load(stream)
for entry in json:
if (entry['download_url'] is not None):
file_parts = path.splitext(path.basename(entry['path']))
if ((file_parts[(- 1)] in EXTENSIONS) and file_parts[0].endswith(NERD_FONT_SUFFIX)):
github.download_file(entry['download_url'], path.join(PATCHED_FONTS_PATH, nerd_font_family, (file_parts[0].replace(NERD_FONT_SUFFIX, '') + file_parts[(- 1)])))
else:
download_nerd_fonts(nerd_font_family, entry['html_url'])
|
Downloads the patched fonts under the specified URL of a Nerd fonts patched font family subfolder.
Arguments:
nerd_font_family (str): The patched family name.
url (str): URL of a patched font family subfolder. If not specified, then uses the base URL for the specified `nerd_font_family`.
|
helpers/fonts.py
|
download_nerd_fonts
|
lperezperez/font-patcher-helper
| 0 |
python
|
def download_nerd_fonts(nerd_font_family: str, url: str=''):
'\n\tDownloads the patched fonts under the specified URL of a Nerd fonts patched font family subfolder.\n\n\tArguments:\n\t\tnerd_font_family (str): The patched family name.\n\t\turl (str): URL of a patched font family subfolder. If not specified, then uses the base URL for the specified `nerd_font_family`.\n\t'
if (not url):
url = ('https://github.com/ryanoasis/nerd-fonts/tree/master/patched-fonts/' + nerd_font_family)
branch = REPO_BRANCH.search(url)
response = request.urlretrieve(((((url[:branch.start()].replace('github.com', 'api.github.com/repos', 1) + '/contents/') + url[branch.end():]) + '?ref=') + branch.group(2)))
with open(response[0], 'r') as stream:
json = load(stream)
for entry in json:
if (entry['download_url'] is not None):
file_parts = path.splitext(path.basename(entry['path']))
if ((file_parts[(- 1)] in EXTENSIONS) and file_parts[0].endswith(NERD_FONT_SUFFIX)):
                github.download_file(entry['download_url'], path.join(PATCHED_FONTS_PATH, nerd_font_family, (file_parts[0].replace(NERD_FONT_SUFFIX, '') + file_parts[(- 1)])))
else:
download_nerd_fonts(nerd_font_family, entry['html_url'])
|
def download_nerd_fonts(nerd_font_family: str, url: str=''):
'\n\tDownloads the patched fonts under the specified URL of a Nerd fonts patched font family subfolder.\n\n\tArguments:\n\t\tnerd_font_family (str): The patched family name.\n\t\turl (str): URL of a patched font family subfolder. If not specified, then uses the base URL for the specified `nerd_font_family`.\n\t'
if (not url):
url = ('https://github.com/ryanoasis/nerd-fonts/tree/master/patched-fonts/' + nerd_font_family)
branch = REPO_BRANCH.search(url)
response = request.urlretrieve(((((url[:branch.start()].replace('github.com', 'api.github.com/repos', 1) + '/contents/') + url[branch.end():]) + '?ref=') + branch.group(2)))
with open(response[0], 'r') as stream:
json = load(stream)
for entry in json:
if (entry['download_url'] is not None):
file_parts = path.splitext(path.basename(entry['path']))
if ((file_parts[(- 1)] in EXTENSIONS) and file_parts[0].endswith(NERD_FONT_SUFFIX)):
                github.download_file(entry['download_url'], path.join(PATCHED_FONTS_PATH, nerd_font_family, (file_parts[0].replace(NERD_FONT_SUFFIX, '') + file_parts[(- 1)])))
else:
download_nerd_fonts(nerd_font_family, entry['html_url'])<|docstring|>Downloads the patched fonts under the specified URL of a Nerd fonts patched font family subfolder.
Arguments:
nerd_font_family (str): The patched family name.
url (str): URL of a patched font family subfolder. If not specified, then uses the base URL for the specified `nerd_font_family`.<|endoftext|>
|
071b2cbd2a5bbbe383c6c7a34892ea8e4e34148d17e175d0873e36ddc84515e2
|
def run_patcher(font_file_path: str, output_folder: str=PATCHED_FONTS_PATH):
'\n\tRuns the Nerd fonts patcher.\n\n\tArguments:\n\t\tfont_file_path (str): The font file path to patch.\n\t\toutput_folder (str): The output folder where the patched font will be stored.\n\t'
makedirs(output_folder, exist_ok=True)
system(f'./font-patcher -w -c "{font_file_path}" -out "{output_folder}"')
|
Runs the Nerd fonts patcher.
Arguments:
font_file_path (str): The font file path to patch.
output_folder (str): The output folder where the patched font will be stored.
|
helpers/fonts.py
|
run_patcher
|
lperezperez/font-patcher-helper
| 0 |
python
|
def run_patcher(font_file_path: str, output_folder: str=PATCHED_FONTS_PATH):
'\n\tRuns the Nerd fonts patcher.\n\n\tArguments:\n\t\tfont_file_path (str): The font file path to patch.\n\t\toutput_folder (str): The output folder where the patched font will be stored.\n\t'
makedirs(output_folder, exist_ok=True)
system(f'./font-patcher -w -c "{font_file_path}" -out "{output_folder}"')
|
def run_patcher(font_file_path: str, output_folder: str=PATCHED_FONTS_PATH):
'\n\tRuns the Nerd fonts patcher.\n\n\tArguments:\n\t\tfont_file_path (str): The font file path to patch.\n\t\toutput_folder (str): The output folder where the patched font will be stored.\n\t'
makedirs(output_folder, exist_ok=True)
system(f'./font-patcher -w -c "{font_file_path}" -out "{output_folder}"')<|docstring|>Runs the Nerd fonts patcher.
Arguments:
font_file_path (str): The font file path to patch.
output_folder (str): The output folder where the patched font will be stored.<|endoftext|>
|
75a2f28451d0372c4fe25c250ea0a8d916a22575f668def8d8553c87063f4f33
|
def run_in_parallel(paths: list, target, args: tuple=()):
    '\n\tRuns multiple processes in parallel mode.\n\n\tArguments:\n\t\tpaths (list): The list of paths from which to retrieve fonts.\n\t\ttarget (function): The function to run in parallel mode.\n\t\targs (tuple): The target function arguments.\n\t'
processes = []
process_count = cpu_count()
for font_file in get_font_files(paths):
if (len(processes) == process_count):
for process in processes:
process.join()
processes = []
processes.append(Process(target=target, args=((font_file,) + args)))
processes[(- 1)].start()
if (len(processes) > 0):
for process in processes:
process.join()
|
Runs multiple processes in parallel mode.
Arguments:
paths (list): The list of paths from which to retrieve fonts.
target (function): The function to run in parallel mode.
args (tuple): The target function arguments.
|
helpers/fonts.py
|
run_in_parallel
|
lperezperez/font-patcher-helper
| 0 |
python
|
def run_in_parallel(paths: list, target, args: tuple=()):
    '\n\tRuns multiple processes in parallel mode.\n\n\tArguments:\n\t\tpaths (list): The list of paths from which to retrieve fonts.\n\t\ttarget (function): The function to run in parallel mode.\n\t\targs (tuple): The target function arguments.\n\t'
processes = []
process_count = cpu_count()
for font_file in get_font_files(paths):
if (len(processes) == process_count):
for process in processes:
process.join()
processes = []
processes.append(Process(target=target, args=((font_file,) + args)))
processes[(- 1)].start()
if (len(processes) > 0):
for process in processes:
process.join()
|
def run_in_parallel(paths: list, target, args: tuple=()):
    '\n\tRuns multiple processes in parallel mode.\n\n\tArguments:\n\t\tpaths (list): The list of paths from which to retrieve fonts.\n\t\ttarget (function): The function to run in parallel mode.\n\t\targs (tuple): The target function arguments.\n\t'
processes = []
process_count = cpu_count()
for font_file in get_font_files(paths):
if (len(processes) == process_count):
for process in processes:
process.join()
processes = []
processes.append(Process(target=target, args=((font_file,) + args)))
processes[(- 1)].start()
if (len(processes) > 0):
for process in processes:
process.join()<|docstring|>Runs multiple processes in parallel mode.
Arguments:
paths (list): The list of paths from which to retrieve fonts.
target (function): The function to run in parallel mode.
args (tuple): The target function arguments.<|endoftext|>
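A minimal sketch renaming every font found under a hypothetical folder, one worker per CPU core; the extra tuple entries become trailing arguments of the target:

if __name__ == '__main__':  # guard required when multiprocessing uses the spawn start method
    run_in_parallel(['/fonts/patched'], rename_font, args=('/fonts/renamed',))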
|
847aded5b291a5369b1c655680f4814bf0880735ea149d60b569e5153c74b0b0
|
def __init__(self, font, scale_character_glyphs_threshold=0.1, copy_character_glyphs=False):
    'Initializes a new instance of the `Ligaturizer` class.'
self.font = font
self.firacode = fontforge.open(path.join(LIGATURES_SOURCE, f'FiraCode-{self.get_seamless_font_weight()}.otf'))
self.scale_character_glyphs_threshold = scale_character_glyphs_threshold
self.should_copy_character_glyphs = copy_character_glyphs
self._lig_counter = 0
self.firacode.em = self.font.em
self.emwidth = self.font[ord('m')].width
|
Initializes a new instance of the `Ligaturizer` class.
|
helpers/fonts.py
|
__init__
|
lperezperez/font-patcher-helper
| 0 |
python
|
def __init__(self, font, scale_character_glyphs_threshold=0.1, copy_character_glyphs=False):
self.font = font
self.firacode = fontforge.open(path.join(LIGATURES_SOURCE, f'FiraCode-{self.get_seamless_font_weight()}.otf'))
self.scale_character_glyphs_threshold = scale_character_glyphs_threshold
self.should_copy_character_glyphs = copy_character_glyphs
self._lig_counter = 0
self.firacode.em = self.font.em
self.emwidth = self.font[ord('m')].width
|
def __init__(self, font, scale_character_glyphs_threshold=0.1, copy_character_glyphs=False):
self.font = font
self.firacode = fontforge.open(path.join(LIGATURES_SOURCE, f'FiraCode-{self.get_seamless_font_weight()}.otf'))
self.scale_character_glyphs_threshold = scale_character_glyphs_threshold
self.should_copy_character_glyphs = copy_character_glyphs
self._lig_counter = 0
self.firacode.em = self.font.em
    self.emwidth = self.font[ord('m')].width<|docstring|>Initializes a new instance of the `Ligaturizer` class.<|endoftext|>
|
3fc852b89d2063abc12ca2df5f34fd27fa324dd53a16f71c6c466152982b1fa3
|
def copy_ligature_from_source(self, ligature_name):
'Tries to copy the specified ligature_name.'
try:
self.firacode.selection.none()
self.firacode.selection.select(ligature_name)
self.firacode.copy()
return True
except ValueError:
return False
|
Tries to copy the specified ligature_name.
|
helpers/fonts.py
|
copy_ligature_from_source
|
lperezperez/font-patcher-helper
| 0 |
python
|
def copy_ligature_from_source(self, ligature_name):
try:
self.firacode.selection.none()
self.firacode.selection.select(ligature_name)
self.firacode.copy()
return True
except ValueError:
return False
|
def copy_ligature_from_source(self, ligature_name):
try:
self.firacode.selection.none()
self.firacode.selection.select(ligature_name)
self.firacode.copy()
return True
except ValueError:
return False<|docstring|>Tries to copy the specified ligature_name.<|endoftext|>
|
87a655318da4e18aa7a4acfb255173cdcdfc08757e4288a33cc57f0af31fc84e
|
def copy_character_glyphs(self, chars):
'Copy individual (non-ligature) characters from the ligature font.'
if (not self.should_copy_character_glyphs):
return
for char in chars:
self.firacode.selection.none()
self.firacode.selection.select(char)
self.firacode.copy()
self.font.selection.none()
self.font.selection.select(char)
self.font.paste()
self.correct_character_width(self.font[ord(CHAR_DICTIONARY[char])])
|
Copy individual (non-ligature) characters from the ligature font.
|
helpers/fonts.py
|
copy_character_glyphs
|
lperezperez/font-patcher-helper
| 0 |
python
|
def copy_character_glyphs(self, chars):
if (not self.should_copy_character_glyphs):
return
for char in chars:
self.firacode.selection.none()
self.firacode.selection.select(char)
self.firacode.copy()
self.font.selection.none()
self.font.selection.select(char)
self.font.paste()
self.correct_character_width(self.font[ord(CHAR_DICTIONARY[char])])
|
def copy_character_glyphs(self, chars):
if (not self.should_copy_character_glyphs):
return
for char in chars:
self.firacode.selection.none()
self.firacode.selection.select(char)
self.firacode.copy()
self.font.selection.none()
self.font.selection.select(char)
self.font.paste()
self.correct_character_width(self.font[ord(CHAR_DICTIONARY[char])])<|docstring|>Copy individual (non-ligature) characters from the ligature font.<|endoftext|>
|
f12f3cdb5fb621d33071078248d24b74d4f784db683ddf4dbc8be19c28e883e8
|
def correct_ligature_width(self, glyph):
'Correct the horizontal advance and scale of a ligature.'
if (glyph.width == self.emwidth):
return
glyph.transform(scale((float(self.emwidth) / glyph.width), 1.0))
glyph.width = self.emwidth
|
Correct the horizontal advance and scale of a ligature.
|
helpers/fonts.py
|
correct_ligature_width
|
lperezperez/font-patcher-helper
| 0 |
python
|
def correct_ligature_width(self, glyph):
if (glyph.width == self.emwidth):
return
glyph.transform(scale((float(self.emwidth) / glyph.width), 1.0))
glyph.width = self.emwidth
|
def correct_ligature_width(self, glyph):
if (glyph.width == self.emwidth):
return
glyph.transform(scale((float(self.emwidth) / glyph.width), 1.0))
glyph.width = self.emwidth<|docstring|>Correct the horizontal advance and scale of a ligature.<|endoftext|>
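A worked instance of the scale factor, assuming the module-level scale used above is psMat.scale, as is typical in FontForge scripts; the widths are illustrative:

import psMat

emwidth, glyph_width = 600, 750                          # illustrative advance widths
matrix = psMat.scale(float(emwidth) / glyph_width, 1.0)  # shrink x by 0.8, keep y as-is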
|
31e3d25cfc23b699a561423612f93b2882d4584c187c0329b4d1457fd8f651a2
|
def add_ligature(self, input_chars, firacode_ligature_name):
'Adds a ligature from Fira Code font.'
if (firacode_ligature_name is None):
self.copy_character_glyphs(input_chars)
return
if (not self.copy_ligature_from_source(firacode_ligature_name)):
return
self._lig_counter += 1
ligature_name = f'lig.{self._lig_counter}'
self.font.createChar((- 1), ligature_name)
self.font.selection.none()
self.font.selection.select(ligature_name)
self.font.paste()
self.correct_ligature_width(self.font[ligature_name])
self.font.selection.none()
self.font.selection.select('space')
self.font.copy()
def lookup_name(i):
return f'lookup.{self._lig_counter}.{i}'
def lookup_sub_name(i):
return f'lookup.sub.{self._lig_counter}.{i}'
def cr_name(i):
return f'CR.{self._lig_counter}.{i}'
for (i, char) in enumerate(input_chars):
self.font.addLookup(lookup_name(i), 'gsub_single', (), ())
self.font.addLookupSubtable(lookup_name(i), lookup_sub_name(i))
if (char not in self.font):
self.font[ord(CHAR_DICTIONARY[char])].glyphname = char
if (i < (len(input_chars) - 1)):
self.font.createChar((- 1), cr_name(i))
self.font.selection.none()
self.font.selection.select(cr_name(i))
self.font.paste()
self.font[char].addPosSub(lookup_sub_name(i), cr_name(i))
else:
self.font[char].addPosSub(lookup_sub_name(i), ligature_name)
calt_lookup_name = f'calt.{self._lig_counter}'
self.font.addLookup(calt_lookup_name, 'gsub_contextchain', (), (('calt', (('DFLT', ('dflt',)), ('arab', ('dflt',)), ('armn', ('dflt',)), ('cyrl', ('SRB ', 'dflt')), ('geor', ('dflt',)), ('grek', ('dflt',)), ('lao ', ('dflt',)), ('latn', ('CAT ', 'ESP ', 'GAL ', 'ISM ', 'KSM ', 'LSM ', 'MOL ', 'NSM ', 'ROM ', 'SKS ', 'SSM ', 'dflt')), ('math', ('dflt',)), ('thai', ('dflt',)))),))
for (i, char) in enumerate(input_chars):
self.add_calt(calt_lookup_name, f'calt.{self._lig_counter}.{i}', '{prev} | {cur} @<{lookup}> | {next}', prev=' '.join((cr_name(j) for j in range(i))), cur=char, lookup=lookup_name(i), next=' '.join(input_chars[(i + 1):]))
self.add_calt(calt_lookup_name, f'calt.{self._lig_counter}.{(i + 1)}', '| {first} | {rest} {last}', first=input_chars[0], rest=' '.join(input_chars[1:]), last=input_chars[(- 1)])
self.add_calt(calt_lookup_name, f'calt.{self._lig_counter}.{(i + 2)}', '{first} | {first} | {rest}', first=input_chars[0], rest=' '.join(input_chars[1:]))
|
Adds a ligature from Fira Code font.
|
helpers/fonts.py
|
add_ligature
|
lperezperez/font-patcher-helper
| 0 |
python
|
def add_ligature(self, input_chars, firacode_ligature_name):
if (firacode_ligature_name is None):
self.copy_character_glyphs(input_chars)
return
if (not self.copy_ligature_from_source(firacode_ligature_name)):
return
self._lig_counter += 1
ligature_name = f'lig.{self._lig_counter}'
self.font.createChar((- 1), ligature_name)
self.font.selection.none()
self.font.selection.select(ligature_name)
self.font.paste()
self.correct_ligature_width(self.font[ligature_name])
self.font.selection.none()
self.font.selection.select('space')
self.font.copy()
def lookup_name(i):
return f'lookup.{self._lig_counter}.{i}'
def lookup_sub_name(i):
return f'lookup.sub.{self._lig_counter}.{i}'
def cr_name(i):
return f'CR.{self._lig_counter}.{i}'
for (i, char) in enumerate(input_chars):
self.font.addLookup(lookup_name(i), 'gsub_single', (), ())
self.font.addLookupSubtable(lookup_name(i), lookup_sub_name(i))
if (char not in self.font):
self.font[ord(CHAR_DICTIONARY[char])].glyphname = char
if (i < (len(input_chars) - 1)):
self.font.createChar((- 1), cr_name(i))
self.font.selection.none()
self.font.selection.select(cr_name(i))
self.font.paste()
self.font[char].addPosSub(lookup_sub_name(i), cr_name(i))
else:
self.font[char].addPosSub(lookup_sub_name(i), ligature_name)
calt_lookup_name = f'calt.{self._lig_counter}'
self.font.addLookup(calt_lookup_name, 'gsub_contextchain', (), (('calt', (('DFLT', ('dflt',)), ('arab', ('dflt',)), ('armn', ('dflt',)), ('cyrl', ('SRB ', 'dflt')), ('geor', ('dflt',)), ('grek', ('dflt',)), ('lao ', ('dflt',)), ('latn', ('CAT ', 'ESP ', 'GAL ', 'ISM ', 'KSM ', 'LSM ', 'MOL ', 'NSM ', 'ROM ', 'SKS ', 'SSM ', 'dflt')), ('math', ('dflt',)), ('thai', ('dflt',)))),))
for (i, char) in enumerate(input_chars):
self.add_calt(calt_lookup_name, f'calt.{self._lig_counter}.{i}', '{prev} | {cur} @<{lookup}> | {next}', prev=' '.join((cr_name(j) for j in range(i))), cur=char, lookup=lookup_name(i), next=' '.join(input_chars[(i + 1):]))
self.add_calt(calt_lookup_name, f'calt.{self._lig_counter}.{(i + 1)}', '| {first} | {rest} {last}', first=input_chars[0], rest=' '.join(input_chars[1:]), last=input_chars[(- 1)])
self.add_calt(calt_lookup_name, f'calt.{self._lig_counter}.{(i + 2)}', '{first} | {first} | {rest}', first=input_chars[0], rest=' '.join(input_chars[1:]))
|
def add_ligature(self, input_chars, firacode_ligature_name):
if (firacode_ligature_name is None):
self.copy_character_glyphs(input_chars)
return
if (not self.copy_ligature_from_source(firacode_ligature_name)):
return
self._lig_counter += 1
ligature_name = f'lig.{self._lig_counter}'
self.font.createChar((- 1), ligature_name)
self.font.selection.none()
self.font.selection.select(ligature_name)
self.font.paste()
self.correct_ligature_width(self.font[ligature_name])
self.font.selection.none()
self.font.selection.select('space')
self.font.copy()
def lookup_name(i):
return f'lookup.{self._lig_counter}.{i}'
def lookup_sub_name(i):
return f'lookup.sub.{self._lig_counter}.{i}'
def cr_name(i):
return f'CR.{self._lig_counter}.{i}'
for (i, char) in enumerate(input_chars):
self.font.addLookup(lookup_name(i), 'gsub_single', (), ())
self.font.addLookupSubtable(lookup_name(i), lookup_sub_name(i))
if (char not in self.font):
self.font[ord(CHAR_DICTIONARY[char])].glyphname = char
if (i < (len(input_chars) - 1)):
self.font.createChar((- 1), cr_name(i))
self.font.selection.none()
self.font.selection.select(cr_name(i))
self.font.paste()
self.font[char].addPosSub(lookup_sub_name(i), cr_name(i))
else:
self.font[char].addPosSub(lookup_sub_name(i), ligature_name)
calt_lookup_name = f'calt.{self._lig_counter}'
self.font.addLookup(calt_lookup_name, 'gsub_contextchain', (), (('calt', (('DFLT', ('dflt',)), ('arab', ('dflt',)), ('armn', ('dflt',)), ('cyrl', ('SRB ', 'dflt')), ('geor', ('dflt',)), ('grek', ('dflt',)), ('lao ', ('dflt',)), ('latn', ('CAT ', 'ESP ', 'GAL ', 'ISM ', 'KSM ', 'LSM ', 'MOL ', 'NSM ', 'ROM ', 'SKS ', 'SSM ', 'dflt')), ('math', ('dflt',)), ('thai', ('dflt',)))),))
for (i, char) in enumerate(input_chars):
self.add_calt(calt_lookup_name, f'calt.{self._lig_counter}.{i}', '{prev} | {cur} @<{lookup}> | {next}', prev=' '.join((cr_name(j) for j in range(i))), cur=char, lookup=lookup_name(i), next=' '.join(input_chars[(i + 1):]))
self.add_calt(calt_lookup_name, f'calt.{self._lig_counter}.{(i + 1)}', '| {first} | {rest} {last}', first=input_chars[0], rest=' '.join(input_chars[1:]), last=input_chars[(- 1)])
self.add_calt(calt_lookup_name, f'calt.{self._lig_counter}.{(i + 2)}', '{first} | {first} | {rest}', first=input_chars[0], rest=' '.join(input_chars[1:]))<|docstring|>Adds a ligature from Fira Code font.<|endoftext|>
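An illustrative call; 'equal' and 'greater' are standard glyph names, and 'equal_greater.liga' follows Fira Code's naming convention for the '=>' ligature (an assumption about the LIGATURES table):

ligaturizer = Ligaturizer(font)  # font: a fontforge.font opened earlier
ligaturizer.add_ligature(['equal', 'greater'], 'equal_greater.liga')  # '=>'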
|
615c42878fb513175dbb5414a699108e46c9111b35839dff8f65dc44e3ab3ca6
|
def draw_bbox(rect, im=None, values=True, black=True, width=1):
'\n rect: [x, y, x, y]\n two points (x, y), (x, y)\n values: bool\n draw values\n black: bool\n draw grid and numbers in black or white\n '
color = (0, 0, 0)
if (not black):
color = (255, 255, 255)
if (im is None):
im = Image.new('RGB', (100, 100), color='grey')
draw = ImageDraw.Draw(im)
draw.rectangle(rect, outline=color, width=width)
if values:
draw.text((rect[0], rect[1]), text=f'({rect[0]}x, {rect[1]}y)', fill=color)
draw.text((rect[0], rect[3]), text=f'({rect[0]}x, {rect[3]}y)', fill=color)
draw.text((rect[2], rect[1]), text=f'({rect[2]}x, {rect[1]}y)', fill=color)
draw.text((rect[2], rect[3]), text=f'({rect[2]}x, {rect[3]}y)', fill=color)
draw.text((((rect[0] + rect[2]) / 2), ((rect[1] + rect[3]) / 2)), text=f'{rect}', fill=color)
return im
|
rect: [x, y, x, y]
two points (x, y), (x, y)
values: bool
draw values
black: bool
draw grid and numbers in black or white
|
ipyannotator/datasets/generators.py
|
draw_bbox
|
EnriqueMoran/ipyannotator
| 19 |
python
|
def draw_bbox(rect, im=None, values=True, black=True, width=1):
'\n rect: [x, y, x, y]\n two points (x, y), (x, y)\n values: bool\n draw values\n black: bool\n draw grid and numbers in black or white\n '
color = (0, 0, 0)
if (not black):
color = (255, 255, 255)
if (im is None):
im = Image.new('RGB', (100, 100), color='grey')
draw = ImageDraw.Draw(im)
draw.rectangle(rect, outline=color, width=width)
if values:
draw.text((rect[0], rect[1]), text=f'({rect[0]}x, {rect[1]}y)', fill=color)
draw.text((rect[0], rect[3]), text=f'({rect[0]}x, {rect[3]}y)', fill=color)
draw.text((rect[2], rect[1]), text=f'({rect[2]}x, {rect[1]}y)', fill=color)
draw.text((rect[2], rect[3]), text=f'({rect[2]}x, {rect[3]}y)', fill=color)
draw.text((((rect[0] + rect[2]) / 2), ((rect[1] + rect[3]) / 2)), text=f'{rect}', fill=color)
return im
|
def draw_bbox(rect, im=None, values=True, black=True, width=1):
'\n rect: [x, y, x, y]\n two points (x, y), (x, y)\n values: bool\n draw values\n black: bool\n draw grid and numbers in black or white\n '
color = (0, 0, 0)
if (not black):
color = (255, 255, 255)
if (im is None):
im = Image.new('RGB', (100, 100), color='grey')
draw = ImageDraw.Draw(im)
draw.rectangle(rect, outline=color, width=width)
if values:
draw.text((rect[0], rect[1]), text=f'({rect[0]}x, {rect[1]}y)', fill=color)
draw.text((rect[0], rect[3]), text=f'({rect[0]}x, {rect[3]}y)', fill=color)
draw.text((rect[2], rect[1]), text=f'({rect[2]}x, {rect[1]}y)', fill=color)
draw.text((rect[2], rect[3]), text=f'({rect[2]}x, {rect[3]}y)', fill=color)
draw.text((((rect[0] + rect[2]) / 2), ((rect[1] + rect[3]) / 2)), text=f'{rect}', fill=color)
return im<|docstring|>rect: [x, y, x, y]
two points (x, y), (x, y)
values: bool
draw values
black: bool
draw grid and numbers in black or white<|endoftext|>
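A minimal sketch drawing one labeled box on the default grey 100x100 canvas (the output path is hypothetical):

im = draw_bbox([20, 30, 80, 70], values=True, black=True, width=2)
im.save('bbox_demo.png')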
|
ed527e45f7907ebda9201299aeeba31901d9cb2af187ec29972dbf57fac73bca
|
def overlap(boxA, boxB, verbose=False):
'\n Returns the max relative overlap between two bboxs.\n '
(interArea, boxAArea, boxBArea, _) = bbox_intersection(boxA, boxB)
return max((interArea / float(boxAArea)), (interArea / float(boxBArea)))
|
Returns the max relative overlap between two bboxs.
|
ipyannotator/datasets/generators.py
|
overlap
|
EnriqueMoran/ipyannotator
| 19 |
python
|
def overlap(boxA, boxB, verbose=False):
'\n \n '
(interArea, boxAArea, boxBArea, _) = bbox_intersection(boxA, boxB)
return max((interArea / float(boxAArea)), (interArea / float(boxBArea)))
|
def overlap(boxA, boxB, verbose=False):
'\n \n '
(interArea, boxAArea, boxBArea, _) = bbox_intersection(boxA, boxB)
return max((interArea / float(boxAArea)), (interArea / float(boxBArea)))<|docstring|>Returns the max relative overlap between two bboxs.<|endoftext|>
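A worked example, assuming bbox_intersection returns (intersection area, area of boxA, area of boxB, ...) for corner-format boxes; the exact value can shift slightly depending on its inclusive/exclusive pixel convention:

boxA = (0, 0, 10, 10)  # area 100
boxB = (5, 5, 10, 10)  # area 25, fully contained in boxA
# intersection area is 25, so max(25 / 100, 25 / 25) == 1.0
print(overlap(boxA, boxB))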
|
bf127ad432dcf44f84839bf82226177badfbc4972037828a9a8833d398828707
|
def sample_bbox(bboxs=(), canvas_size=(100, 100), diag=(0.3, 0.3), ratio=(1, 1), max_iou=0.0, max_overlap=0.0, max_tries=1000, random_seed=None):
    '\n bboxs: [(x, y, x, y), ..., (x, y, x, y)]\n List of existing bboxs.\n canvas_size: (int, int)\n Size of the canvas (width, height) on which to position the new bbox.\n max_iou: float [0, 1]\n Maximum acceptable intersection over union between any two bboxs.\n max_overlap: float [0, 1]\n Maximum overlap between any two bboxs.\n diag: (float, float) or float\n Range of acceptable diagonal length relative to canvas diagonal.\n ratio: (float, float) or float\n Range of acceptable width / height ratios of the new bbox.\n max_tries: int\n Number of random tries to create a valid bbox\n '
rng = np.random.RandomState(random_seed)
(width, height) = canvas_size
canvas_diag = np.sqrt(((width ** 2) + (height ** 2)))
for i in range(max_tries):
        s_diag = (rng.uniform(*diag) * canvas_diag)
        s_ratio = rng.uniform(*ratio)
        s_height = np.sqrt(((s_diag ** 2) / (1.0 + (s_ratio ** 2))))
        s_width = (s_ratio * s_height)
        cx = rng.randint((s_width / 2), (width - (s_width / 2)))
        cy = rng.randint((s_height / 2), (height - (s_height / 2)))
bbox = ((cx - (s_width / 2)), (cy - (s_height / 2)), (cx + (s_width / 2)), (cy + (s_height / 2)))
bbox = tuple((int(v) for v in bbox))
if (len(bboxs) == 0):
return bbox
violation = False
for b in bboxs:
iou = bb_intersection_over_union(b, bbox)
b_overlap = overlap(b, bbox)
if ((iou > max_iou) or (b_overlap > max_overlap)):
violation = True
if (not violation):
return bbox
return None
|
bboxs: [(x, y, x, y), ..., (x, y, x, y)]
List of existing bboxs.
canvas_size: (int, int)
Size of the canvas (width, height) on which to position the new bbox.
max_iou: float [0, 1]
Maximum acceptable intersection over union between any two bboxs.
max_overlap: float [0, 1]
Maximum overlap between any two bboxs.
diag: (float, float) or float
Range of acceptable diagonal length relative to canvas diagonal.
ratio: (float, float) or float
Range of acceptable width / height ratios of the new bbox.
max_tries: int
Number of random tries to create a valid bbox
|
ipyannotator/datasets/generators.py
|
sample_bbox
|
EnriqueMoran/ipyannotator
| 19 |
python
|
def sample_bbox(bboxs=(), canvas_size=(100, 100), diag=(0.3, 0.3), ratio=(1, 1), max_iou=0.0, max_overlap=0.0, max_tries=1000, random_seed=None):
    '\n bboxs: [(x, y, x, y), ..., (x, y, x, y)]\n List of existing bboxes.\n canvas_size: (int, int)\n Size of the canvas (width, height) on which to position the new bbox.\n max_iou: float [0, 1]\n Maximum acceptable intersection over union between any two bboxes.\n max_overlap: float [0, 1]\n Maximum overlap between any two bboxes.\n diag: (float, float) or float\n Range of acceptable diagonal length relative to canvas diagonal.\n ratio: (float, float) or float\n Range of acceptable width / height ratios of the new bbox.\n max_tries: int\n Number of random tries to create a valid bbox\n '
rng = np.random.RandomState(random_seed)
(width, height) = canvas_size
canvas_diag = np.sqrt(((width ** 2) + (height ** 2)))
for i in range(max_tries):
        s_diag = (rng.uniform(*diag) * canvas_diag)  # use the seeded rng so random_seed takes effect
        s_ratio = rng.uniform(*ratio)
        s_height = np.sqrt(((s_diag ** 2) / (1.0 + (s_ratio ** 2))))
        s_width = (s_ratio * s_height)
        cx = rng.randint((s_width / 2), (width - (s_width / 2)))
        cy = rng.randint((s_height / 2), (height - (s_height / 2)))
bbox = ((cx - (s_width / 2)), (cy - (s_height / 2)), (cx + (s_width / 2)), (cy + (s_height / 2)))
bbox = tuple((int(v) for v in bbox))
if (len(bboxs) == 0):
return bbox
violation = False
for b in bboxs:
iou = bb_intersection_over_union(b, bbox)
b_overlap = overlap(b, bbox)
if ((iou > max_iou) or (b_overlap > max_overlap)):
violation = True
if (not violation):
return bbox
return None
|
def sample_bbox(bboxs=(), canvas_size=(100, 100), diag=(0.3, 0.3), ratio=(1, 1), max_iou=0.0, max_overlap=0.0, max_tries=1000, random_seed=None):
    '\n bboxs: [(x, y, x, y), ..., (x, y, x, y)]\n List of existing bboxes.\n canvas_size: (int, int)\n Size of the canvas (width, height) on which to position the new bbox.\n max_iou: float [0, 1]\n Maximum acceptable intersection over union between any two bboxes.\n max_overlap: float [0, 1]\n Maximum overlap between any two bboxes.\n diag: (float, float) or float\n Range of acceptable diagonal length relative to canvas diagonal.\n ratio: (float, float) or float\n Range of acceptable width / height ratios of the new bbox.\n max_tries: int\n Number of random tries to create a valid bbox\n '
rng = np.random.RandomState(random_seed)
(width, height) = canvas_size
canvas_diag = np.sqrt(((width ** 2) + (height ** 2)))
for i in range(max_tries):
        s_diag = (rng.uniform(*diag) * canvas_diag)  # use the seeded rng so random_seed takes effect
        s_ratio = rng.uniform(*ratio)
        s_height = np.sqrt(((s_diag ** 2) / (1.0 + (s_ratio ** 2))))
        s_width = (s_ratio * s_height)
        cx = rng.randint((s_width / 2), (width - (s_width / 2)))
        cy = rng.randint((s_height / 2), (height - (s_height / 2)))
bbox = ((cx - (s_width / 2)), (cy - (s_height / 2)), (cx + (s_width / 2)), (cy + (s_height / 2)))
bbox = tuple((int(v) for v in bbox))
if (len(bboxs) == 0):
return bbox
violation = False
for b in bboxs:
iou = bb_intersection_over_union(b, bbox)
b_overlap = overlap(b, bbox)
if ((iou > max_iou) or (b_overlap > max_overlap)):
violation = True
if (not violation):
return bbox
    return None<|docstring|>bboxs: [(x, y, x, y), ..., (x, y, x, y)]
    List of existing bboxes.
canvas_size: (int, int)
    Size of the canvas (width, height) on which to position the new bbox.
max_iou: float [0, 1]
    Maximum acceptable intersection over union between any two bboxes.
max_overlap: float [0, 1]
    Maximum overlap between any two bboxes.
diag: (float, float) or float
    Range of acceptable diagonal length relative to canvas diagonal.
ratio: (float, float) or float
    Range of acceptable width / height ratios of the new bbox.
max_tries: int
    Number of random tries to create a valid bbox<|endoftext|>
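
A minimal usage sketch (hypothetical sizes; assumes sample_bbox and its helpers bb_intersection_over_union and overlap are in scope):

boxes = []
for _ in range(3):
    bbox = sample_bbox(bboxs=boxes, canvas_size=(200, 200), diag=(0.2, 0.4))
    if bbox is None:
        break  # no valid placement found within max_tries
    boxes.append(bbox)
# boxes now holds up to three disjoint (x, y, x, y) tuples, since the
# default max_iou=0.0 and max_overlap=0.0 forbid any intersection.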
|
29ad643ffdc9d7719811f532e6d55930ad93d8d362c943e470aa513ecbace8ac
|
def vertices_nao_adjacentes(self):
    '\n Provides a list of the non-adjacent vertices in the graph. The list will have the following format: [X-Z, X-W, ...]\n Where X, Z and W are vertices of the graph that have no edge between them.\n :return: A list with the pairs of non-adjacent vertices\n '
verticesNaoAdj = list()
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i < j) and (self.M[i][j] == {})):
verticesNaoAdj.append(f'{self.N[i]}-{self.N[j]}')
return verticesNaoAdj
|
Provides a list of the non-adjacent vertices in the graph. The list will have the following format: [X-Z, X-W, ...]
Where X, Z and W are vertices of the graph that have no edge between them.
:return: A list with the pairs of non-adjacent vertices
|
Roteiro 6/meu_grafo_matriz_adjacencia_nao_dir.py
|
vertices_nao_adjacentes
|
JhonatanGuilherme/GraphTheory
| 0 |
python
|
def vertices_nao_adjacentes(self):
    '\n Provides a list of the non-adjacent vertices in the graph. The list will have the following format: [X-Z, X-W, ...]\n Where X, Z and W are vertices of the graph that have no edge between them.\n :return: A list with the pairs of non-adjacent vertices\n '
verticesNaoAdj = list()
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i < j) and (self.M[i][j] == {})):
verticesNaoAdj.append(f'{self.N[i]}-{self.N[j]}')
return verticesNaoAdj
|
def vertices_nao_adjacentes(self):
    '\n Provides a list of the non-adjacent vertices in the graph. The list will have the following format: [X-Z, X-W, ...]\n Where X, Z and W are vertices of the graph that have no edge between them.\n :return: A list with the pairs of non-adjacent vertices\n '
verticesNaoAdj = list()
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i < j) and (self.M[i][j] == {})):
verticesNaoAdj.append(f'{self.N[i]}-{self.N[j]}')
    return verticesNaoAdj<|docstring|>Provides a list of the non-adjacent vertices in the graph. The list will have the following format: [X-Z, X-W, ...]
Where X, Z and W are vertices of the graph that have no edge between them.
:return: A list with the pairs of non-adjacent vertices<|endoftext|>
|
546de9c317c45a1ec23384e5516d4d8b1b3a002938d4db6a73e35e4ddecf3c93
|
def ha_laco(self):
    '\n Checks whether there is any self-loop in the graph.\n :return: A boolean value indicating whether any self-loop exists.\n '
for i in range(len(self.M)):
if (self.M[i][i] != {}):
return True
return False
|
Checks whether there is any self-loop in the graph.
:return: A boolean value indicating whether any self-loop exists.
|
Roteiro 6/meu_grafo_matriz_adjacencia_nao_dir.py
|
ha_laco
|
JhonatanGuilherme/GraphTheory
| 0 |
python
|
def ha_laco(self):
    '\n Checks whether there is any self-loop in the graph.\n :return: A boolean value indicating whether any self-loop exists.\n '
for i in range(len(self.M)):
if (self.M[i][i] != {}):
return True
return False
|
def ha_laco(self):
    '\n Checks whether there is any self-loop in the graph.\n :return: A boolean value indicating whether any self-loop exists.\n '
for i in range(len(self.M)):
if (self.M[i][i] != {}):
return True
    return False<|docstring|>Checks whether there is any self-loop in the graph.
:return: A boolean value indicating whether any self-loop exists.<|endoftext|>
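
A sketch of the data layout these matrix methods assume (hypothetical stand-in; in the source repo N and M are built by the graph constructor): N is the list of vertex labels and M is an upper-triangular matrix whose cells are dicts keyed by edge label, with {} meaning "no edge".

class _Grafo:
    def __init__(self, N, M):
        self.N = N  # vertex labels, e.g. ['A', 'B']
        self.M = M  # M[i][j]: dict of edges between N[i] and N[j]

# A self-loop 'l1' on A and an edge 'a1' between A and B:
g = _Grafo(['A', 'B'], [[{'l1': None}, {'a1': None}], [{}, {}]])
# ha_laco scans the diagonal; g.M[0][0] is non-empty, so it would return True.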
|
0a93a391fd892a5f57868ea8484c6776e0eaeaad6aa7a81e9eade46034e059f7
|
def grau(self, V=''):
    '\n Provides the degree of the vertex passed as a parameter\n :param V: The label of the vertex to be analyzed\n :return: An integer value indicating the degree of the vertex\n :raises: VerticeInvalidoException if the vertex does not exist in the graph\n '
if self.existeVertice(V):
contador = 0
indice = self.N.index(V)
for i in self.M[indice]:
if ((type(i) == dict) and (len(i) >= 1)):
contador += len(i)
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i <= j) and (j == indice)):
contador += len(self.M[i][j])
return contador
raise VerticeInvalidoException
|
Provides the degree of the vertex passed as a parameter
:param V: The label of the vertex to be analyzed
:return: An integer value indicating the degree of the vertex
:raises: VerticeInvalidoException if the vertex does not exist in the graph
|
Roteiro 6/meu_grafo_matriz_adjacencia_nao_dir.py
|
grau
|
JhonatanGuilherme/GraphTheory
| 0 |
python
|
def grau(self, V=''):
    '\n Provides the degree of the vertex passed as a parameter\n :param V: The label of the vertex to be analyzed\n :return: An integer value indicating the degree of the vertex\n :raises: VerticeInvalidoException if the vertex does not exist in the graph\n '
if self.existeVertice(V):
contador = 0
indice = self.N.index(V)
for i in self.M[indice]:
if ((type(i) == dict) and (len(i) >= 1)):
contador += len(i)
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i <= j) and (j == indice)):
contador += len(self.M[i][j])
return contador
raise VerticeInvalidoException
|
def grau(self, V=''):
    '\n Provides the degree of the vertex passed as a parameter\n :param V: The label of the vertex to be analyzed\n :return: An integer value indicating the degree of the vertex\n :raises: VerticeInvalidoException if the vertex does not exist in the graph\n '
if self.existeVertice(V):
contador = 0
indice = self.N.index(V)
for i in self.M[indice]:
if ((type(i) == dict) and (len(i) >= 1)):
contador += len(i)
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i <= j) and (j == indice)):
contador += len(self.M[i][j])
return contador
    raise VerticeInvalidoException<|docstring|>Provides the degree of the vertex passed as a parameter
:param V: The label of the vertex to be analyzed
:return: An integer value indicating the degree of the vertex
:raises: VerticeInvalidoException if the vertex does not exist in the graph<|endoftext|>
|
b18c84535780541d8a00f7f7bedda7a39c6791606d7fc74f494445e0d4731a6a
|
def ha_paralelas(self):
    '\n Checks whether there are parallel edges in the graph\n :return: A boolean value indicating whether parallel edges exist in the graph.\n '
for i in self.M:
for j in i:
if (len(j) > 1):
return True
return False
|
Checks whether there are parallel edges in the graph
:return: A boolean value indicating whether parallel edges exist in the graph.
|
Roteiro 6/meu_grafo_matriz_adjacencia_nao_dir.py
|
ha_paralelas
|
JhonatanGuilherme/GraphTheory
| 0 |
python
|
def ha_paralelas(self):
    '\n Checks whether there are parallel edges in the graph\n :return: A boolean value indicating whether parallel edges exist in the graph.\n '
for i in self.M:
for j in i:
if (len(j) > 1):
return True
return False
|
def ha_paralelas(self):
    '\n Checks whether there are parallel edges in the graph\n :return: A boolean value indicating whether parallel edges exist in the graph.\n '
for i in self.M:
for j in i:
if (len(j) > 1):
return True
    return False<|docstring|>Checks whether there are parallel edges in the graph
:return: A boolean value indicating whether parallel edges exist in the graph.<|endoftext|>
|
3949450e323b3567b325464f8be63febe6efa730bb60f216dc961aaedaa2ed5c
|
def arestas_sobre_vertice(self, V):
    '\n Provides a list containing the labels of the edges incident on the vertex passed as a parameter\n :param V: The vertex to be analyzed\n :return: A list with the labels of the edges incident on the vertex\n :raises: VerticeInvalidoException if the vertex does not exist in the graph\n '
if self.existeVertice(V):
arestasSobreV = list()
indice = self.N.index(V)
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i <= j) and ((i == indice) or (j == indice))):
for k in self.M[i][j]:
if (k not in arestasSobreV):
arestasSobreV.append(k)
return sorted(arestasSobreV)
raise VerticeInvalidoException
|
Provides a list containing the labels of the edges incident on the vertex passed as a parameter
:param V: The vertex to be analyzed
:return: A list with the labels of the edges incident on the vertex
:raises: VerticeInvalidoException if the vertex does not exist in the graph
|
Roteiro 6/meu_grafo_matriz_adjacencia_nao_dir.py
|
arestas_sobre_vertice
|
JhonatanGuilherme/GraphTheory
| 0 |
python
|
def arestas_sobre_vertice(self, V):
    '\n Provides a list containing the labels of the edges incident on the vertex passed as a parameter\n :param V: The vertex to be analyzed\n :return: A list with the labels of the edges incident on the vertex\n :raises: VerticeInvalidoException if the vertex does not exist in the graph\n '
if self.existeVertice(V):
arestasSobreV = list()
indice = self.N.index(V)
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i <= j) and ((i == indice) or (j == indice))):
for k in self.M[i][j]:
if (k not in arestasSobreV):
arestasSobreV.append(k)
return sorted(arestasSobreV)
raise VerticeInvalidoException
|
def arestas_sobre_vertice(self, V):
    '\n Provides a list containing the labels of the edges incident on the vertex passed as a parameter\n :param V: The vertex to be analyzed\n :return: A list with the labels of the edges incident on the vertex\n :raises: VerticeInvalidoException if the vertex does not exist in the graph\n '
if self.existeVertice(V):
arestasSobreV = list()
indice = self.N.index(V)
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i <= j) and ((i == indice) or (j == indice))):
for k in self.M[i][j]:
if (k not in arestasSobreV):
arestasSobreV.append(k)
return sorted(arestasSobreV)
    raise VerticeInvalidoException<|docstring|>Provides a list containing the labels of the edges incident on the vertex passed as a parameter
:param V: The vertex to be analyzed
:return: A list with the labels of the edges incident on the vertex
:raises: VerticeInvalidoException if the vertex does not exist in the graph<|endoftext|>
|
69b4bbbfa58fa986f7d9ae10b67e49a0baceceb0ce5d8bbc6179f7b033376aaf
|
def eh_completo(self):
    '\n Checks whether the graph is complete.\n :return: A boolean value indicating whether the graph is complete\n '
if (self.ha_laco() or self.ha_paralelas()):
return False
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i < j) and (self.M[i][j] == {})):
return False
return True
|
Checks whether the graph is complete.
:return: A boolean value indicating whether the graph is complete
|
Roteiro 6/meu_grafo_matriz_adjacencia_nao_dir.py
|
eh_completo
|
JhonatanGuilherme/GraphTheory
| 0 |
python
|
def eh_completo(self):
    '\n Checks whether the graph is complete.\n :return: A boolean value indicating whether the graph is complete\n '
if (self.ha_laco() or self.ha_paralelas()):
return False
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i < j) and (self.M[i][j] == {})):
return False
return True
|
def eh_completo(self):
    '\n Checks whether the graph is complete.\n :return: A boolean value indicating whether the graph is complete\n '
if (self.ha_laco() or self.ha_paralelas()):
return False
for i in range(len(self.M)):
for j in range(len(self.M)):
if ((i < j) and (self.M[i][j] == {})):
return False
    return True<|docstring|>Checks whether the graph is complete.
:return: A boolean value indicating whether the graph is complete<|endoftext|>
|
91442c280922f87faa229439e53b1295ae17036eba405e44872d0f2cc7b80324
|
def pad_sentence(self, sen_len: int, feature: dict, article_number: int) -> tuple:
    'Returns padded sentences so that within the batch, each sentence has the same number of words.\n\n Args:\n sen_len (int): Number of words that each sentence should have.\n feature (dict): Respective training instance of the batch.\n article_number (int): Article number.\n\n Returns:\n (tuple): Sentences and attention masks of the respective document after sentence-level padding.\n '
sentences = [(sentence + ([self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)] * (sen_len - len(sentence)))) for sentence in feature[f'article_{article_number}']]
masks = [(sentence + ([0] * (sen_len - len(sentence)))) for sentence in feature[f'mask_{article_number}']]
return (sentences, masks)
|
Returns padded sentences so that within the batch, each sentence has the same number of words.
Args:
    sen_len (int): Number of words that each sentence should have.
feature (dict): Respective training instance of the batch.
article_number (int): Article number.
Returns:
(tuple): Sentences and attention masks of the respective document after sentence-level padding.
|
data_collator.py
|
pad_sentence
|
ogal93/pre-training-multilingual-document-encoders
| 0 |
python
|
def pad_sentence(self, sen_len: int, feature: dict, article_number: int) -> tuple:
    'Returns padded sentences so that within the batch, each sentence has the same number of words.\n\n Args:\n sen_len (int): Number of words that each sentence should have.\n feature (dict): Respective training instance of the batch.\n article_number (int): Article number.\n\n Returns:\n (tuple): Sentences and attention masks of the respective document after sentence-level padding.\n '
sentences = [(sentence + ([self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)] * (sen_len - len(sentence)))) for sentence in feature[f'article_{article_number}']]
masks = [(sentence + ([0] * (sen_len - len(sentence)))) for sentence in feature[f'mask_{article_number}']]
return (sentences, masks)
|
def pad_sentence(self, sen_len: int, feature: dict, article_number: int) -> tuple:
    'Returns padded sentences so that within the batch, each sentence has the same number of words.\n\n Args:\n sen_len (int): Number of words that each sentence should have.\n feature (dict): Respective training instance of the batch.\n article_number (int): Article number.\n\n Returns:\n (tuple): Sentences and attention masks of the respective document after sentence-level padding.\n '
sentences = [(sentence + ([self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)] * (sen_len - len(sentence)))) for sentence in feature[f'article_{article_number}']]
masks = [(sentence + ([0] * (sen_len - len(sentence)))) for sentence in feature[f'mask_{article_number}']]
return (sentences, masks)<|docstring|>Returns padded sentences so that within the batch, each sentence has the same number of words.
Args:
    sen_len (int): Number of words that each sentence should have.
feature (dict): Respective training instance of the batch.
article_number (int): Article number.
Returns:
(tuple): Sentences and attention masks of the respective document after sentence-level padding.<|endoftext|>
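
A toy illustration of the padding scheme (hypothetical token ids; the pad id is assumed to be 0, as self.tokenizer would supply it):

PAD_ID = 0
sentences = [[5, 6, 7], [8, 9]]
masks = [[1, 1, 1], [1, 1]]
sen_len = 4
padded = [s + [PAD_ID] * (sen_len - len(s)) for s in sentences]
padded_masks = [m + [0] * (sen_len - len(m)) for m in masks]
# padded == [[5, 6, 7, 0], [8, 9, 0, 0]]
# padded_masks == [[1, 1, 1, 0], [1, 1, 0, 0]]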
|
0ea606bad9521c6b312e3b7dfc76044dd7e24a6a8f90227caac948b6f5c49e5b
|
def pad_document(self, sentences: list, masks: list, document_mask: list, doc_len: int):
' Does document level padding so that within the batch, each document has the same\n number of sentences.\n\n Args:\n sentences (list): Sentences of the respective document.\n masks (list): Sentence level attention masks of the respective document.\n document_mask (list): Document level attention mask of the respective document\n doc_len (int): Number of sentences that each document of the batch should have.\n '
if self.consider_dcls:
doc_len -= 1
mask_padding_array = [0 for i0 in range(len(masks[0]))]
sentence_padding_array = [self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token) for i0 in range(len(sentences[0]))]
if (len(sentences) < doc_len):
sentences += [sentence_padding_array for difference in range((doc_len - len(sentences)))]
masks += [mask_padding_array for difference in range((doc_len - len(masks)))]
document_mask.extend(([0] * (doc_len - len(document_mask))))
elif (len(sentences) > doc_len):
sentences[:] = sentences[:doc_len]
masks[:] = masks[:doc_len]
document_mask[:] = document_mask[:doc_len]
|
Does document level padding so that within the batch, each document has the same
number of sentences.
Args:
sentences (list): Sentences of the respective document.
masks (list): Sentence level attention masks of the respective document.
document_mask (list): Document level attention mask of the respective document
doc_len (int): Number of sentences that each document of the batch should have.
|
data_collator.py
|
pad_document
|
ogal93/pre-training-multilingual-document-encoders
| 0 |
python
|
def pad_document(self, sentences: list, masks: list, document_mask: list, doc_len: int):
' Does document level padding so that within the batch, each document has the same\n number of sentences.\n\n Args:\n sentences (list): Sentences of the respective document.\n masks (list): Sentence level attention masks of the respective document.\n document_mask (list): Document level attention mask of the respective document\n doc_len (int): Number of sentences that each document of the batch should have.\n '
if self.consider_dcls:
doc_len -= 1
mask_padding_array = [0 for i0 in range(len(masks[0]))]
sentence_padding_array = [self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token) for i0 in range(len(sentences[0]))]
if (len(sentences) < doc_len):
sentences += [sentence_padding_array for difference in range((doc_len - len(sentences)))]
masks += [mask_padding_array for difference in range((doc_len - len(masks)))]
document_mask.extend(([0] * (doc_len - len(document_mask))))
elif (len(sentences) > doc_len):
sentences[:] = sentences[:doc_len]
masks[:] = masks[:doc_len]
document_mask[:] = document_mask[:doc_len]
|
def pad_document(self, sentences: list, masks: list, document_mask: list, doc_len: int):
' Does document level padding so that within the batch, each document has the same\n number of sentences.\n\n Args:\n sentences (list): Sentences of the respective document.\n masks (list): Sentence level attention masks of the respective document.\n document_mask (list): Document level attention mask of the respective document\n doc_len (int): Number of sentences that each document of the batch should have.\n '
if self.consider_dcls:
doc_len -= 1
mask_padding_array = [0 for i0 in range(len(masks[0]))]
sentence_padding_array = [self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token) for i0 in range(len(sentences[0]))]
if (len(sentences) < doc_len):
sentences += [sentence_padding_array for difference in range((doc_len - len(sentences)))]
masks += [mask_padding_array for difference in range((doc_len - len(masks)))]
document_mask.extend(([0] * (doc_len - len(document_mask))))
elif (len(sentences) > doc_len):
sentences[:] = sentences[:doc_len]
masks[:] = masks[:doc_len]
document_mask[:] = document_mask[:doc_len]<|docstring|>Does document level padding so that within the batch, each document has the same
number of sentences.
Args:
sentences (list): Sentences of the respective document.
masks (list): Sentence level attention masks of the respective document.
document_mask (list): Document level attention mask of the respective document
doc_len (int): Number of sentences that each document of the batch should have.<|endoftext|>
|
6561f9d41fa145a967164bd07c6c3b47e35095091191746335d4071530516ea0
|
def add_command(self, command: str, params=None, usage: str='', example=None):
    '\n Adds a command.\n '
self.COMMANDS[command] = {'command': command, 'params': params, 'usage': usage, 'example': example}
return self
|
Adds a command.
|
plugins/__init__.py
|
add_command
|
dopamusicopbot/Andencento
| 2 |
python
|
def add_command(self, command: str, params=None, usage: str='', example=None):
'\n \n '
self.COMMANDS[command] = {'command': command, 'params': params, 'usage': usage, 'example': example}
return self
|
def add_command(self, command: str, params=None, usage: str='', example=None):
    '\n \n '
    self.COMMANDS[command] = {'command': command, 'params': params, 'usage': usage, 'example': example}
    return self<|docstring|>Adds a command.<|endoftext|>
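
A hypothetical registration sketch (help_obj stands in for whatever object carries COMMANDS); since add_command returns self, calls chain:

help_obj.add_command('ping', usage='check latency') \
        .add_command('purge', params='<count>',
                     usage='delete the last <count> messages',
                     example='purge 10')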
|
decb9db1fe927ed082df19230bff6197cc52c474d125d24b1820ca0ae7bbc2e3
|
def get_result(self):
    '\n Builds and returns the help text.\n '
result = f'''**📗 File :** `{self.FILE}`
'''
if ((self.WARNING == '') and (self.INFO == '')):
result += f'''**⬇️ Official:** {('✅' if self.IS_OFFICIAL else '❌')}
'''
else:
result += f'''**⬇️ Official:** {('✅' if self.IS_OFFICIAL else '❌')}
'''
if (self.INFO == ''):
if (not (self.WARNING == '')):
result += f'''**⚠️ Warning :** {self.WARNING}
'''
else:
if (not (self.WARNING == '')):
result += f'''**⚠️ Warning :** {self.WARNING}
'''
result += f'''**ℹ️ Info:** {self.INFO}
'''
for command in self.COMMANDS:
command = self.COMMANDS[command]
if (command['params'] is None):
result += f'''**🛠 Command :** `{COMMAND_HAND_LER[:1]}{command['command']}`
'''
else:
result += f'''**🛠 Command :** `{COMMAND_HAND_LER[:1]}{command['command']} {command['params']}`
'''
if (command['example'] is None):
result += f'''**💬 Details :** `{command['usage']}`
'''
else:
result += f'''**💬 Details :** `{command['usage']}`
'''
result += f'''**⌨️ For Example :** `{COMMAND_HAND_LER[:1]}{command['example']}`
'''
return result
|
Builds and returns the help text.
|
plugins/__init__.py
|
get_result
|
dopamusicopbot/Andencento
| 2 |
python
|
def get_result(self):
    '\n \n '
    result = f'''**📗 File :** `{self.FILE}`
'''
    if ((self.WARNING == '') and (self.INFO == '')):
        result += f'''**⬇️ Official:** {('✅' if self.IS_OFFICIAL else '❌')}
'''
    else:
        result += f'''**⬇️ Official:** {('✅' if self.IS_OFFICIAL else '❌')}
'''
    if (self.INFO == ''):
        if (not (self.WARNING == '')):
            result += f'''**⚠️ Warning :** {self.WARNING}
'''
    else:
        if (not (self.WARNING == '')):
            result += f'''**⚠️ Warning :** {self.WARNING}
'''
        result += f'''**ℹ️ Info:** {self.INFO}
'''
    for command in self.COMMANDS:
        command = self.COMMANDS[command]
        if (command['params'] is None):
            result += f'''**🛠 Command :** `{COMMAND_HAND_LER[:1]}{command['command']}`
'''
        else:
            result += f'''**🛠 Command :** `{COMMAND_HAND_LER[:1]}{command['command']} {command['params']}`
'''
        if (command['example'] is None):
            result += f'''**💬 Details :** `{command['usage']}`
'''
        else:
            result += f'''**💬 Details :** `{command['usage']}`
'''
            result += f'''**⌨️ For Example :** `{COMMAND_HAND_LER[:1]}{command['example']}`
'''
    return result
|
def get_result(self):
    '\n \n '
    result = f'''**📗 File :** `{self.FILE}`
'''
    if ((self.WARNING == '') and (self.INFO == '')):
        result += f'''**⬇️ Official:** {('✅' if self.IS_OFFICIAL else '❌')}
'''
    else:
        result += f'''**⬇️ Official:** {('✅' if self.IS_OFFICIAL else '❌')}
'''
    if (self.INFO == ''):
        if (not (self.WARNING == '')):
            result += f'''**⚠️ Warning :** {self.WARNING}
'''
    else:
        if (not (self.WARNING == '')):
            result += f'''**⚠️ Warning :** {self.WARNING}
'''
        result += f'''**ℹ️ Info:** {self.INFO}
'''
    for command in self.COMMANDS:
        command = self.COMMANDS[command]
        if (command['params'] is None):
            result += f'''**🛠 Command :** `{COMMAND_HAND_LER[:1]}{command['command']}`
'''
        else:
            result += f'''**🛠 Command :** `{COMMAND_HAND_LER[:1]}{command['command']} {command['params']}`
'''
        if (command['example'] is None):
            result += f'''**💬 Details :** `{command['usage']}`
'''
        else:
            result += f'''**💬 Details :** `{command['usage']}`
'''
            result += f'''**⌨️ For Example :** `{COMMAND_HAND_LER[:1]}{command['example']}`
'''
    return result<|docstring|>Builds and returns the help text.<|endoftext|>
|
ed0adac415a80b97bb0ac79e0a4c9c44e540993d1dc85337087ec41a4c53af81
|
def add(self):
    '\n Directly adds to CMD_HELP.\n '
CMD_HELP_BOT[self.FILE] = {'info': {'official': self.IS_OFFICIAL, 'warning': self.WARNING, 'info': self.INFO}, 'commands': self.COMMANDS}
CMD_HELP[self.FILE] = self.get_result()
return True
|
Directly adds to CMD_HELP.
|
plugins/__init__.py
|
add
|
dopamusicopbot/Andencento
| 2 |
python
|
def add(self):
'\n \n '
CMD_HELP_BOT[self.FILE] = {'info': {'official': self.IS_OFFICIAL, 'warning': self.WARNING, 'info': self.INFO}, 'commands': self.COMMANDS}
CMD_HELP[self.FILE] = self.get_result()
return True
|
def add(self):
'\n \n '
CMD_HELP_BOT[self.FILE] = {'info': {'official': self.IS_OFFICIAL, 'warning': self.WARNING, 'info': self.INFO}, 'commands': self.COMMANDS}
CMD_HELP[self.FILE] = self.get_result()
    return True<|docstring|>Directly adds to CMD_HELP.<|endoftext|>
|
db8a75103fcc80b68140f8f0ce3a2daba29eaba168871b355bca7b3eb38b0d31
|
def load_jupyter_server_extension(nbapp):
    'serve the Corina_Trackerv2.ipynb notebook with the Bokeh server'
Popen(['panel', 'serve', 'Corina_Trackerv2.ipynb', '--allow-websocket-origin=*'])
|
serve the Corina_Trackerv2.ipynb notebook with the Bokeh server
|
panelserverextension.py
|
load_jupyter_server_extension
|
marcs994/20200603_Coronav2
| 0 |
python
|
def load_jupyter_server_extension(nbapp):
Popen(['panel', 'serve', 'Corina_Trackerv2.ipynb', '--allow-websocket-origin=*'])
|
def load_jupyter_server_extension(nbapp):
    Popen(['panel', 'serve', 'Corina_Trackerv2.ipynb', '--allow-websocket-origin=*'])<|docstring|>serve the Corina_Trackerv2.ipynb notebook with the Bokeh server<|endoftext|>
|
062b46547536617fb88626bc13bc6f9a218a1647ab38fc04b33b5e1a3841a107
|
def test_missing_servername(self):
'\n Some web-servers require that the "Host" be included on SSL connections when the server is hosting multiple domains on the same IP.\n\n Without the host header, the server is unable to determine which certificate to provide and thus closes the connection.\n\n http://lukemurphey.net/issues/1035\n '
url_field = URLField('test_ping', 'title', 'this is a test')
result = WebPing.ping(url_field.to_python('https://lukemurphey.net'), timeout=3)
self.assertEqual(result.response_code, 200)
|
Some web-servers require that the "Host" be included on SSL connections when the server is hosting multiple domains on the same IP.
Without the host header, the server is unable to determine which certificate to provide and thus closes the connection.
http://lukemurphey.net/issues/1035
|
tests/unit.py
|
test_missing_servername
|
sudhir-12/splunk-website-monitoring
| 0 |
python
|
def test_missing_servername(self):
'\n Some web-servers require that the "Host" be included on SSL connections when the server is hosting multiple domains on the same IP.\n\n Without the host header, the server is unable to determine which certificate to provide and thus closes the connection.\n\n http://lukemurphey.net/issues/1035\n '
url_field = URLField('test_ping', 'title', 'this is a test')
result = WebPing.ping(url_field.to_python('https://lukemurphey.net'), timeout=3)
self.assertEqual(result.response_code, 200)
|
def test_missing_servername(self):
'\n Some web-servers require that the "Host" be included on SSL connections when the server is hosting multiple domains on the same IP.\n\n Without the host header, the server is unable to determine which certificate to provide and thus closes the connection.\n\n http://lukemurphey.net/issues/1035\n '
url_field = URLField('test_ping', 'title', 'this is a test')
result = WebPing.ping(url_field.to_python('https://lukemurphey.net'), timeout=3)
self.assertEqual(result.response_code, 200)<|docstring|>Some web-servers require that the "Host" be included on SSL connections when the server is hosting multiple domains on the same IP.
Without the host header, the server is unable to determine which certificate to provide and thus closes the connection.
http://lukemurphey.net/issues/1035<|endoftext|>
|
ee97fa96f670ff419367a35df6c076d20571c64c5d90af4f7b6f1deb3250163c
|
@skipIfNoServer
def test_custom_user_agent(self):
'\n http://lukemurphey.net/issues/1341\n '
url_field = URLField('test_ping', 'title', 'this is a test')
result = WebPing.ping(url_field.to_python((('http://127.0.0.1:' + str(self.web_server_port)) + '/user_agent_check')), user_agent='USER_AGENT_CHECK_DOESNT_MATCH', timeout=3)
self.assertEqual(result.response_code, 200)
result = WebPing.ping(url_field.to_python((('http://127.0.0.1:' + str(self.web_server_port)) + '/user_agent_check')), user_agent='USER_AGENT_CHECK', timeout=3)
self.assertEqual(result.response_code, 201)
|
http://lukemurphey.net/issues/1341
|
tests/unit.py
|
test_custom_user_agent
|
sudhir-12/splunk-website-monitoring
| 0 |
python
|
@skipIfNoServer
def test_custom_user_agent(self):
'\n \n '
url_field = URLField('test_ping', 'title', 'this is a test')
result = WebPing.ping(url_field.to_python((('http://127.0.0.1:' + str(self.web_server_port)) + '/user_agent_check')), user_agent='USER_AGENT_CHECK_DOESNT_MATCH', timeout=3)
self.assertEqual(result.response_code, 200)
result = WebPing.ping(url_field.to_python((('http://127.0.0.1:' + str(self.web_server_port)) + '/user_agent_check')), user_agent='USER_AGENT_CHECK', timeout=3)
self.assertEqual(result.response_code, 201)
|
@skipIfNoServer
def test_custom_user_agent(self):
'\n \n '
url_field = URLField('test_ping', 'title', 'this is a test')
result = WebPing.ping(url_field.to_python((('http://127.0.0.1:' + str(self.web_server_port)) + '/user_agent_check')), user_agent='USER_AGENT_CHECK_DOESNT_MATCH', timeout=3)
self.assertEqual(result.response_code, 200)
result = WebPing.ping(url_field.to_python((('http://127.0.0.1:' + str(self.web_server_port)) + '/user_agent_check')), user_agent='USER_AGENT_CHECK', timeout=3)
self.assertEqual(result.response_code, 201)<|docstring|>http://lukemurphey.net/issues/1341<|endoftext|>
|
5c45afbd6d2dfe8ecbff483d226758361e6adc8476d8c6caa2b126e12cefe197
|
def __init__(self, conditions):
    '\n Args:\n conditions list<Condition>: a list of conditions\n '
self._conditions = conditions
|
Args:
conditions list<Condition>: a list of conditions
|
src/clustaar/authorize/conditions_combinations.py
|
__init__
|
Clustaar/clustaar.authorize
| 0 |
python
|
def __init__(self, conditions):
    '\n Args:\n conditions list<Condition>: a list of conditions\n '
self._conditions = conditions
|
def __init__(self, conditions):
    '\n Args:\n conditions list<Condition>: a list of conditions\n '
self._conditions = conditions<|docstring|>Args:
conditions list<Condition>: a list of conditions<|endoftext|>
|
e1ceeec8563d9d74f658047a0755bfb13f339422d47f9b98f2ca35ce977dd2df
|
def unlinkChildren(parent: Path) -> int:
'Removes all symlinks that are immediate children of parent dir.\n\n\t:param parent: The parent directory\n\t:return: Count of removed symlinks\n\t'
removedCount = 0
for child in parent.glob('*'):
if child.is_symlink():
child.unlink()
removedCount += 1
return removedCount
|
Removes all symlinks that are immediate children of parent dir.
:param parent: The parent directory
:return: Count of removed symlinks
|
depz/x50_unlink.py
|
unlinkChildren
|
rtmigo/lnkdpn
| 1 |
python
|
def unlinkChildren(parent: Path) -> int:
'Removes all symlinks that are immediate children of parent dir.\n\n\t:param parent: The parent directory\n\t:return: Count of removed symlinks\n\t'
removedCount = 0
for child in parent.glob('*'):
if child.is_symlink():
child.unlink()
removedCount += 1
return removedCount
|
def unlinkChildren(parent: Path) -> int:
'Removes all symlinks that are immediate children of parent dir.\n\n\t:param parent: The parent directory\n\t:return: Count of removed symlinks\n\t'
removedCount = 0
for child in parent.glob('*'):
if child.is_symlink():
child.unlink()
removedCount += 1
return removedCount<|docstring|>Removes all symlinks that are immediate children of parent dir.
:param parent: The parent directory
:return: Count of removed symlinks<|endoftext|>
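
A self-contained demo (standard library only; assumes unlinkChildren is in scope and the platform allows creating symlinks):

import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as d:
    parent = Path(d)
    target = parent / 'real.txt'
    target.write_text('data')
    (parent / 'link.txt').symlink_to(target)
    assert unlinkChildren(parent) == 1  # only the symlink is removed
    assert target.exists()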
|
e44c2407dbaa8a3683fefea322122682203c418a07a9330a6657ce19638bcc2b
|
def unlinkChildrenAndMaybeRemove(parent: Path) -> None:
    'Removes all the symlinks that are direct children of [parent].\n\tThen removes the directory if it contained only symlinks.\n\tIf the directory was empty before the call, it will not be removed\n\t(it did not contain any symlinks).\n\t'
if unlinkChildren(parent):
if (not list(parent.glob('*'))):
os.rmdir(str(parent))
|
Removes all the symlinks that are direct children of [parent].
Then removes the directory if it contained only symlinks.
If the directory was empty before the call, it will not be removed
(it did not contain any symlinks).
|
depz/x50_unlink.py
|
unlinkChildrenAndMaybeRemove
|
rtmigo/lnkdpn
| 1 |
python
|
def unlinkChildrenAndMaybeRemove(parent: Path) -> None:
    'Removes all the symlinks that are direct children of [parent].\n\tThen removes the directory if it contained only symlinks.\n\tIf the directory was empty before the call, it will not be removed\n\t(it did not contain any symlinks).\n\t'
if unlinkChildren(parent):
if (not list(parent.glob('*'))):
os.rmdir(str(parent))
|
def unlinkChildrenAndMaybeRemove(parent: Path) -> None:
    'Removes all the symlinks that are direct children of [parent].\n\tThen removes the directory if it contained only symlinks.\n\tIf the directory was empty before the call, it will not be removed\n\t(it did not contain any symlinks).\n\t'
if unlinkChildren(parent):
if (not list(parent.glob('*'))):
        os.rmdir(str(parent))<|docstring|>Removes all the symlinks that are direct children of [parent].
Then removes the directory if it contained only symlinks.
If the directory was empty before the call, it will not be removed
(it did not contain any symlinks).<|endoftext|>
|
3f8b55fb5ced4b0406b766f418f828d97f06105a94016762fda4fda332b29031
|
def lsp_document_changes(refactoring: Refactoring) -> List[Union[(TextDocumentEdit, RenameFile)]]:
'Get lsp text document edits from Jedi refactoring.\n\n This is the main public function that you probably want\n '
converter = RefactoringConverter(refactoring)
return [*converter.lsp_text_document_edits(), *converter.lsp_renames()]
|
Get lsp text document edits from Jedi refactoring.
This is the main public function that you probably want
|
.vscode-insiders/extensions/ms-python.python-2021.1.502429796/pythonFiles/lib/python/jedi_language_server/text_edit_utils.py
|
lsp_document_changes
|
Guitaraholic/dotfiles
| 1 |
python
|
def lsp_document_changes(refactoring: Refactoring) -> List[Union[(TextDocumentEdit, RenameFile)]]:
'Get lsp text document edits from Jedi refactoring.\n\n This is the main public function that you probably want\n '
converter = RefactoringConverter(refactoring)
return [*converter.lsp_text_document_edits(), *converter.lsp_renames()]
|
def lsp_document_changes(refactoring: Refactoring) -> List[Union[(TextDocumentEdit, RenameFile)]]:
'Get lsp text document edits from Jedi refactoring.\n\n This is the main public function that you probably want\n '
converter = RefactoringConverter(refactoring)
return [*converter.lsp_text_document_edits(), *converter.lsp_renames()]<|docstring|>Get lsp text document edits from Jedi refactoring.
This is the main public function that you probably want<|endoftext|>
|
a13eca0d3f36db7d7b6d135c95a6292fbbbb0c42a0d7152c5077687ab8af5e5f
|
def lsp_text_edits(changed_file: ChangedFile) -> List[TextEdit]:
'Take a jedi `ChangedFile` and convert to list of text edits.\n\n Handles inserts, replaces, and deletions within a text file\n '
old_code = changed_file._module_node.get_code()
new_code = changed_file.get_new_code()
opcode_position_lookup_old = get_opcode_position_lookup(old_code)
text_edits = []
for opcode in get_opcodes(old_code, new_code):
if (opcode.op in _OPCODES_CHANGE):
start = opcode_position_lookup_old[opcode.old_start]
end = opcode_position_lookup_old[opcode.old_end]
start_char = (opcode.old_start - start.range_start)
end_char = (opcode.old_end - end.range_start)
new_text = new_code[opcode.new_start:opcode.new_end]
text_edits.append(TextEdit(range=Range(start=Position(line=start.line, character=start_char), end=Position(line=end.line, character=end_char)), new_text=new_text))
return text_edits
|
Take a jedi `ChangedFile` and convert to list of text edits.
Handles inserts, replaces, and deletions within a text file
|
.vscode-insiders/extensions/ms-python.python-2021.1.502429796/pythonFiles/lib/python/jedi_language_server/text_edit_utils.py
|
lsp_text_edits
|
Guitaraholic/dotfiles
| 1 |
python
|
def lsp_text_edits(changed_file: ChangedFile) -> List[TextEdit]:
'Take a jedi `ChangedFile` and convert to list of text edits.\n\n Handles inserts, replaces, and deletions within a text file\n '
old_code = changed_file._module_node.get_code()
new_code = changed_file.get_new_code()
opcode_position_lookup_old = get_opcode_position_lookup(old_code)
text_edits = []
for opcode in get_opcodes(old_code, new_code):
if (opcode.op in _OPCODES_CHANGE):
start = opcode_position_lookup_old[opcode.old_start]
end = opcode_position_lookup_old[opcode.old_end]
start_char = (opcode.old_start - start.range_start)
end_char = (opcode.old_end - end.range_start)
new_text = new_code[opcode.new_start:opcode.new_end]
text_edits.append(TextEdit(range=Range(start=Position(line=start.line, character=start_char), end=Position(line=end.line, character=end_char)), new_text=new_text))
return text_edits
|
def lsp_text_edits(changed_file: ChangedFile) -> List[TextEdit]:
'Take a jedi `ChangedFile` and convert to list of text edits.\n\n Handles inserts, replaces, and deletions within a text file\n '
old_code = changed_file._module_node.get_code()
new_code = changed_file.get_new_code()
opcode_position_lookup_old = get_opcode_position_lookup(old_code)
text_edits = []
for opcode in get_opcodes(old_code, new_code):
if (opcode.op in _OPCODES_CHANGE):
start = opcode_position_lookup_old[opcode.old_start]
end = opcode_position_lookup_old[opcode.old_end]
start_char = (opcode.old_start - start.range_start)
end_char = (opcode.old_end - end.range_start)
new_text = new_code[opcode.new_start:opcode.new_end]
text_edits.append(TextEdit(range=Range(start=Position(line=start.line, character=start_char), end=Position(line=end.line, character=end_char)), new_text=new_text))
return text_edits<|docstring|>Take a jedi `ChangedFile` and convert to list of text edits.
Handles inserts, replaces, and deletions within a text file<|endoftext|>
|
e56df34ad278de7199f856ee6c6dd6c18adabe0a7409c94e4340814124c1eeee
|
def get_opcodes(old: str, new: str) -> List[Opcode]:
'Obtain typed opcodes from two files (old and new)'
diff = difflib.SequenceMatcher(a=old, b=new)
return [Opcode(*opcode) for opcode in diff.get_opcodes()]
|
Obtain typed opcodes from two files (old and new)
|
.vscode-insiders/extensions/ms-python.python-2021.1.502429796/pythonFiles/lib/python/jedi_language_server/text_edit_utils.py
|
get_opcodes
|
Guitaraholic/dotfiles
| 1 |
python
|
def get_opcodes(old: str, new: str) -> List[Opcode]:
diff = difflib.SequenceMatcher(a=old, b=new)
return [Opcode(*opcode) for opcode in diff.get_opcodes()]
|
def get_opcodes(old: str, new: str) -> List[Opcode]:
diff = difflib.SequenceMatcher(a=old, b=new)
return [Opcode(*opcode) for opcode in diff.get_opcodes()]<|docstring|>Obtain typed opcodes from two files (old and new)<|endoftext|>
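
The underlying difflib behavior, for reference (standard library, runs as-is):

import difflib

diff = difflib.SequenceMatcher(a='color', b='colour')
print(diff.get_opcodes())
# [('equal', 0, 4, 0, 4), ('insert', 4, 4, 4, 5), ('equal', 4, 5, 5, 6)]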
|
881c2e3b3d983079c555c473f8fe775e52cdd22c84f0f28b6b73a19eca7fe02b
|
def get_opcode_position_lookup(code: str) -> Dict[(int, LinePosition)]:
'Obtain the opcode lookup position.\n\n This function is beautiful. It takes code and creates a data\n structure within which one can look up opcode-friendly values. It\n relies on the `RangeDict` above, which lets you look up a value\n within a range of linear values\n '
original_lines = code.splitlines(keepends=True)
line_lookup = RangeDict()
start = 0
for (line, code_line) in enumerate(original_lines):
end = (start + len(code_line))
key = range(start, (end + 1))
line_lookup[key] = LinePosition(start, end, line, code_line)
start = end
return line_lookup
|
Obtain the opcode lookup position.
This function is beautiful. It takes code and creates a data
structure within which one can look up opcode-friendly values. It
relies on the `RangeDict` above, which lets you look up a value
within a range of linear values
|
.vscode-insiders/extensions/ms-python.python-2021.1.502429796/pythonFiles/lib/python/jedi_language_server/text_edit_utils.py
|
get_opcode_position_lookup
|
Guitaraholic/dotfiles
| 1 |
python
|
def get_opcode_position_lookup(code: str) -> Dict[(int, LinePosition)]:
'Obtain the opcode lookup position.\n\n This function is beautiful. It takes code and creates a data\n structure within which one can look up opcode-friendly values. It\n relies on the `RangeDict` above, which lets you look up a value\n within a range of linear values\n '
original_lines = code.splitlines(keepends=True)
line_lookup = RangeDict()
start = 0
for (line, code_line) in enumerate(original_lines):
end = (start + len(code_line))
key = range(start, (end + 1))
line_lookup[key] = LinePosition(start, end, line, code_line)
start = end
return line_lookup
|
def get_opcode_position_lookup(code: str) -> Dict[(int, LinePosition)]:
'Obtain the opcode lookup position.\n\n This function is beautiful. It takes code and creates a data\n structure within which one can look up opcode-friendly values. It\n relies on the `RangeDict` above, which lets you look up a value\n within a range of linear values\n '
original_lines = code.splitlines(keepends=True)
line_lookup = RangeDict()
start = 0
for (line, code_line) in enumerate(original_lines):
end = (start + len(code_line))
key = range(start, (end + 1))
line_lookup[key] = LinePosition(start, end, line, code_line)
start = end
return line_lookup<|docstring|>Obtain the opcode lookup position.
This function is beautiful. It takes code and creates a data
structure within which one can look up opcode-friendly values. It
relies on the `RangeDict` above, which lets you look up a value
within a range of linear values<|endoftext|>
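
A minimal sketch of the RangeDict this relies on (not shown in this file; assumed to map range objects to values and to resolve integer lookups by containment):

class RangeDict(dict):
    def __getitem__(self, item):
        # Integer lookups scan the range keys for containment.
        for key in self:
            if item in key:
                return dict.__getitem__(self, key)
        raise KeyError(item)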
|
d0a9cce50cea3d2959dc30cffe9d79bedc00941eab243160d851b713151741c2
|
def lsp_renames(self) -> Iterator[RenameFile]:
'Get all File rename operations.'
for (old_name, new_name) in self.refactoring.get_renames():
(yield RenameFile(old_uri=from_fs_path(old_name), new_uri=from_fs_path(new_name), options=RenameFileOptions(ignore_if_exists=True, overwrite=True)))
|
Get all File rename operations.
|
.vscode-insiders/extensions/ms-python.python-2021.1.502429796/pythonFiles/lib/python/jedi_language_server/text_edit_utils.py
|
lsp_renames
|
Guitaraholic/dotfiles
| 1 |
python
|
def lsp_renames(self) -> Iterator[RenameFile]:
for (old_name, new_name) in self.refactoring.get_renames():
(yield RenameFile(old_uri=from_fs_path(old_name), new_uri=from_fs_path(new_name), options=RenameFileOptions(ignore_if_exists=True, overwrite=True)))
|
def lsp_renames(self) -> Iterator[RenameFile]:
for (old_name, new_name) in self.refactoring.get_renames():
(yield RenameFile(old_uri=from_fs_path(old_name), new_uri=from_fs_path(new_name), options=RenameFileOptions(ignore_if_exists=True, overwrite=True)))<|docstring|>Get all File rename operations.<|endoftext|>
|
7579a15ea8ab76b65f2e26ccd2ca0c431c43ed011032df2161fda623dd88a652
|
def lsp_text_document_edits(self) -> Iterator[TextDocumentEdit]:
'Get all text document edits.'
changed_files = self.refactoring.get_changed_files()
for (path, changed_file) in changed_files.items():
uri = from_fs_path(path)
text_edits = lsp_text_edits(changed_file)
(yield TextDocumentEdit(text_document=VersionedTextDocumentIdentifier(uri=uri, version=None), edits=text_edits))
|
Get all text document edits.
|
.vscode-insiders/extensions/ms-python.python-2021.1.502429796/pythonFiles/lib/python/jedi_language_server/text_edit_utils.py
|
lsp_text_document_edits
|
Guitaraholic/dotfiles
| 1 |
python
|
def lsp_text_document_edits(self) -> Iterator[TextDocumentEdit]:
changed_files = self.refactoring.get_changed_files()
for (path, changed_file) in changed_files.items():
uri = from_fs_path(path)
text_edits = lsp_text_edits(changed_file)
(yield TextDocumentEdit(text_document=VersionedTextDocumentIdentifier(uri=uri, version=None), edits=text_edits))
|
def lsp_text_document_edits(self) -> Iterator[TextDocumentEdit]:
changed_files = self.refactoring.get_changed_files()
for (path, changed_file) in changed_files.items():
uri = from_fs_path(path)
text_edits = lsp_text_edits(changed_file)
(yield TextDocumentEdit(text_document=VersionedTextDocumentIdentifier(uri=uri, version=None), edits=text_edits))<|docstring|>Get all text document edits.<|endoftext|>
|
9ac7551a3e5504f166669213d44b21bac4fd12bd9a33af70eff37b0f6ca88015
|
def define_folder(loc_):
    '\n Creating folder based on the given location information. \n If the given information is not a folder, it gives an error message.\n \n Parameters\n ----------\n loc_ : str\n The location of folder\n Returns\n -------\n path_ : str\n It gives the created location.\n '
prefix = ''
if (loc_[0] == '/'):
prefix = '/'
loc_ = [x for x in loc_.split('/') if (x != '')]
loc_ = '/'.join(loc_)
loc_ = (prefix + loc_)
    if (loc_.split('/')[(- 1)].find('.') > 0):
print('PLEASE ENTER FOLDER PATH!!, given information is ', loc_)
else:
path_ = ''
count = 0
for s_ in loc_.split('/'):
path_ = ((path_ + s_) + '/')
if (os.path.exists(path_) == False):
count = (count + 1)
os.mkdir(path_)
if (count > 0):
print('PATH created!!')
print('FOLDER information, ', path_)
return path_
|
Creating folder based on the given location information.
If the given information is not a folder, it gives an error message.
Parameters
----------
loc_ : str
The location of folder
Returns
-------
path_ : str
It gives the created location.
|
scripts/path_scripts.py
|
define_folder
|
pelingundogdu/mlfpm-esr9-m1.16
| 0 |
python
|
def define_folder(loc_):
    '\n Creating folder based on the given location information. \n If the given information is not a folder, it gives an error message.\n \n Parameters\n ----------\n loc_ : str\n The location of folder\n Returns\n -------\n path_ : str\n It gives the created location.\n '
    prefix = ''
    if (loc_[0] == '/'):
        prefix = '/'
    loc_ = [x for x in loc_.split('/') if (x != '')]
    loc_ = '/'.join(loc_)
    loc_ = (prefix + loc_)
    if (loc_.split('/')[(- 1)].find('.') > 0):
        print('PLEASE ENTER FOLDER PATH!!, given information is ', loc_)
    else:
        path_ = ''
count = 0
for s_ in loc_.split('/'):
path_ = ((path_ + s_) + '/')
if (os.path.exists(path_) == False):
count = (count + 1)
os.mkdir(path_)
if (count > 0):
print('PATH created!!')
print('FOLDER information, ', path_)
return path_
|
def define_folder(loc_):
    '\n Creating folder based on the given location information. \n If the given information is not a folder, it gives an error message.\n \n Parameters\n ----------\n loc_ : str\n The location of folder\n Returns\n -------\n path_ : str\n It gives the created location.\n '
    prefix = ''
    if (loc_[0] == '/'):
        prefix = '/'
    loc_ = [x for x in loc_.split('/') if (x != '')]
    loc_ = '/'.join(loc_)
    loc_ = (prefix + loc_)
    if (loc_.split('/')[(- 1)].find('.') > 0):
        print('PLEASE ENTER FOLDER PATH!!, given information is ', loc_)
    else:
        path_ = ''
count = 0
for s_ in loc_.split('/'):
path_ = ((path_ + s_) + '/')
if (os.path.exists(path_) == False):
count = (count + 1)
os.mkdir(path_)
if (count > 0):
print('PATH created!!')
print('FOLDER information, ', path_)
    return path_<|docstring|>Creating folder based on the given location information.
If the given information is not a folder, it gives an error message.
Parameters
----------
loc_ : str
The location of folder
Returns
-------
path_ : str
It gives the created location.<|endoftext|>
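
A usage sketch (hypothetical path): the net effect is close to os.makedirs(loc_, exist_ok=True), plus progress printing and a trailing '/' on the returned value.

path = define_folder('data/processed/v1')
# Creates data/, data/processed/ and data/processed/v1/ as needed,
# then returns 'data/processed/v1/'.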
|
9ff5cceb7e4c3077ad1156df9499efedc8c6469469e734c8b1da34c16bfdb011
|
def send_temperature(bus, input_file):
'Sends a temperature over the CAN Bus.\n\n :param input_file: The file from which the temperature should be read.\n :param bus: The Bus instance.\n '
temperature = read_temperature_from_file(input_file)
try:
temperature = float(temperature)
except ValueError:
raise ValueError('Could not convert temperature to float')
else:
temperature = int(temperature)
msg = can.Message(data=[temperature])
bus.send(msg)
|
Sends a temperature over the CAN Bus.
:param input_file: The file from which the temperature should be read.
:param bus: The Bus instance.
|
sender.py
|
send_temperature
|
sh4nks/tempserver
| 3 |
python
|
def send_temperature(bus, input_file):
'Sends a temperature over the CAN Bus.\n\n :param input_file: The file from which the temperature should be read.\n :param bus: The Bus instance.\n '
temperature = read_temperature_from_file(input_file)
try:
temperature = float(temperature)
except ValueError:
raise ValueError('Could not convert temperature to float')
else:
temperature = int(temperature)
msg = can.Message(data=[temperature])
bus.send(msg)
|
def send_temperature(bus, input_file):
'Sends a temperature over the CAN Bus.\n\n :param input_file: The file from which the temperature should be read.\n :param bus: The Bus instance.\n '
temperature = read_temperature_from_file(input_file)
try:
temperature = float(temperature)
except ValueError:
raise ValueError('Could not convert temperature to float')
else:
temperature = int(temperature)
msg = can.Message(data=[temperature])
bus.send(msg)<|docstring|>Sends a temperature over the CAN Bus.
:param input_file: The file from which the temperature should be read.
:param bus: The Bus instance.<|endoftext|>
|
ced9508f08a3bcd5698458355a2782297d5c531c92ccc1965a30f5d555f67a1e
|
def read_temperature_from_file(input_file):
    'Reads a temperature from the given file and returns it.\n\n :param input_file: The full path to the file from which the temperature\n should be read.\n '
temperature = None
with open(input_file) as f:
temperature = f.read().rstrip()
return temperature
|
Reads a temperature from the given file and returns it.
:param input_file: The full path to the file from which the temperature
should be read.
|
sender.py
|
read_temperature_from_file
|
sh4nks/tempserver
| 3 |
python
|
def read_temperature_from_file(input_file):
    'Reads a temperature from the given file and returns it.\n\n :param input_file: The full path to the file from which the temperature\n should be read.\n '
temperature = None
with open(input_file) as f:
temperature = f.read().rstrip()
return temperature
|
def read_temperature_from_file(input_file):
    'Reads a temperature from the given file and returns it.\n\n :param input_file: The full path to the file from which the temperature\n should be read.\n '
temperature = None
with open(input_file) as f:
temperature = f.read().rstrip()
return temperature<|docstring|>Reads a temperature from the given file and returns it.
:param input_file: The full path to the file from which the temperature
should be read.<|endoftext|>
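
A hypothetical wiring of the two helpers with python-can (channel and file path are assumptions; the file must hold a single numeric reading that fits in one CAN data byte):

import can

bus = can.interface.Bus(channel='vcan0', bustype='socketcan')
send_temperature(bus, '/tmp/temperature.txt')  # read, convert, send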
|
27fbbcfc3a94ede93fd61a3d94325641933367db82518632384bde084bdb0f1d
|
def test_deco_sklearn_cluster_KMeans_class():
    '\n Code from "Demonstration of k-means assumptions",\n http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_assumptions.html#example-cluster-plot-kmeans-assumptions-py\n\n This test demonstrates\n * decorating an "external" class and its subclasses --\n KMeans from sklearn.cluster and its subclasses,\n amongst which is MiniBatchKMeans\n * using the `override` keyword with one of the `log_calls.decorate_*`\n functions to make a change to the settings of (all the methods of)\n an already-decorated class\n\n >>> from log_calls import log_calls\n >>> from sklearn.cluster import KMeans, MiniBatchKMeans\n >>> from sklearn.datasets import make_blobs\n >>> n_samples = 1500\n >>> random_state = 170\n >>> X, y = make_blobs(n_samples=n_samples, random_state=random_state)\n\nFirst, let\'s see the call hierarchy:\n\n >>> log_calls.decorate_hierarchy(KMeans, log_args=False, override=True)\n\n >>> kmo = KMeans(n_clusters=2, random_state=random_state,\n ... n_init=10)\n KMeans.__init__ <== called by <module>\n KMeans.__init__ ==> returning to <module>\n >>> y_pred = kmo.fit_predict(X)\n KMeans.fit_predict <== called by <module>\n KMeans.fit <== called by KMeans.fit_predict\n KMeans._check_fit_data <== called by KMeans.fit\n KMeans._check_fit_data ==> returning to KMeans.fit\n KMeans.fit ==> returning to KMeans.fit_predict\n KMeans.fit_predict ==> returning to <module>\n\n`MiniBatchKMeans` is a subclass of `KMeans`, so that class is decorated too.\n\n >>> mbk = MiniBatchKMeans(init=\'k-means++\', n_clusters=2, batch_size=45,\n ... n_init=10, max_no_improvement=10)\n MiniBatchKMeans.__init__ <== called by <module>\n KMeans.__init__ <== called by MiniBatchKMeans.__init__\n KMeans.__init__ ==> returning to MiniBatchKMeans.__init__\n MiniBatchKMeans.__init__ ==> returning to <module>\n\n >>> mbk.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n MiniBatchKMeans.fit <== called by <module>\n MiniBatchKMeans._labels_inertia_minibatch <== called by MiniBatchKMeans.fit\n MiniBatchKMeans._labels_inertia_minibatch ==> returning to MiniBatchKMeans.fit\n MiniBatchKMeans.fit ==> returning to <module>\n MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n\nNow let\'s view arguments too:\n >>> log_calls.decorate_class(KMeans, decorate_subclasses=True,\n ... log_args=True, args_sep=\'\\n\',\n ... override=True)\n >>> # Incorrect number of clusters\n >>> mbk.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n MiniBatchKMeans.fit <== called by <module>\n arguments:\n self=MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n X=array([[ -5.19811282e+00, 6.41869316e-01],\n [ -5.75229538e+00, 4.18627111e-01],\n [ -1.08448984e+01, -7.55352273e+00],\n ...,\n [ 1.36105255e+00, -9.07491863e-01],\n [ -3.54141108e-01, 7.12241630e-01],\n [ 1.88577252e+00, 1.41185693e-03]])\n defaults:\n y=None\n MiniBatchKMeans._labels_inertia_minibatch <== called by MiniBatchKMeans.fit\n arguments:\n self=MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n X=array([[ -5.19811282e+00, 6.41869316e-01],\n [ -5.75229538e+00, 4.18627111e-01],\n [ -1.08448984e+01, -7.55352273e+00],\n ...,\n [ 1.36105255e+00, -9.07491863e-01],\n [ -3.54141108e-01, 7.12241630e-01],\n [ 1.88577252e+00, 1.41185693e-03]])\n MiniBatchKMeans._labels_inertia_minibatch ==> returning to MiniBatchKMeans.fit\n MiniBatchKMeans.fit ==> returning to <module>\n MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n\n Note: the ellipses in the values of array `X` are produced by the `repr` of `numpy`.\n '
pass
|
Code from "Demonstration of k-means assumptions",
http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_assumptions.html#example-cluster-plot-kmeans-assumptions-py
This test demonstrates
* decorating an "external" class and its subclasses --
KMeans from sklearn.cluster and its subclasses,
amongst which is MiniBatchKMeans
* using the `override` keyword with one of the `log_calls.decorate_*`
functions to make a change to the settings of (all the methods of)
an already-decorated class
>>> from log_calls import log_calls
>>> from sklearn.cluster import KMeans, MiniBatchKMeans
>>> from sklearn.datasets import make_blobs
>>> n_samples = 1500
>>> random_state = 170
>>> X, y = make_blobs(n_samples=n_samples, random_state=random_state)
First, let's see the call hierarchy:
>>> log_calls.decorate_hierarchy(KMeans, log_args=False, override=True)
>>> kmo = KMeans(n_clusters=2, random_state=random_state,
... n_init=10)
KMeans.__init__ <== called by <module>
KMeans.__init__ ==> returning to <module>
>>> y_pred = kmo.fit_predict(X)
KMeans.fit_predict <== called by <module>
KMeans.fit <== called by KMeans.fit_predict
KMeans._check_fit_data <== called by KMeans.fit
KMeans._check_fit_data ==> returning to KMeans.fit
KMeans.fit ==> returning to KMeans.fit_predict
KMeans.fit_predict ==> returning to <module>
`MiniBatchKMeans` is a subclass of `KMeans`, so that class is decorated too.
>>> mbk = MiniBatchKMeans(init='k-means++', n_clusters=2, batch_size=45,
... n_init=10, max_no_improvement=10)
MiniBatchKMeans.__init__ <== called by <module>
KMeans.__init__ <== called by MiniBatchKMeans.__init__
KMeans.__init__ ==> returning to MiniBatchKMeans.__init__
MiniBatchKMeans.__init__ ==> returning to <module>
>>> mbk.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
MiniBatchKMeans.fit <== called by <module>
MiniBatchKMeans._labels_inertia_minibatch <== called by MiniBatchKMeans.fit
MiniBatchKMeans._labels_inertia_minibatch ==> returning to MiniBatchKMeans.fit
MiniBatchKMeans.fit ==> returning to <module>
MiniBatchKMeans(batch_size=45, compute_labels=True, init='k-means++',
init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,
n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,
verbose=0)
Now let's view arguments too:
>>> log_calls.decorate_class(KMeans, decorate_subclasses=True,
... log_args=True, args_sep='\n',
... override=True)
>>> # Incorrect number of clusters
>>> mbk.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
MiniBatchKMeans.fit <== called by <module>
arguments:
self=MiniBatchKMeans(batch_size=45, compute_labels=True, init='k-means++',
init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,
n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,
verbose=0)
X=array([[ -5.19811282e+00, 6.41869316e-01],
[ -5.75229538e+00, 4.18627111e-01],
[ -1.08448984e+01, -7.55352273e+00],
...,
[ 1.36105255e+00, -9.07491863e-01],
[ -3.54141108e-01, 7.12241630e-01],
[ 1.88577252e+00, 1.41185693e-03]])
defaults:
y=None
MiniBatchKMeans._labels_inertia_minibatch <== called by MiniBatchKMeans.fit
arguments:
self=MiniBatchKMeans(batch_size=45, compute_labels=True, init='k-means++',
init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,
n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,
verbose=0)
X=array([[ -5.19811282e+00, 6.41869316e-01],
[ -5.75229538e+00, 4.18627111e-01],
[ -1.08448984e+01, -7.55352273e+00],
...,
[ 1.36105255e+00, -9.07491863e-01],
[ -3.54141108e-01, 7.12241630e-01],
[ 1.88577252e+00, 1.41185693e-03]])
MiniBatchKMeans._labels_inertia_minibatch ==> returning to MiniBatchKMeans.fit
MiniBatchKMeans.fit ==> returning to <module>
MiniBatchKMeans(batch_size=45, compute_labels=True, init='k-means++',
init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,
n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,
verbose=0)
Note: the ellipses in the values of array `X` are produced by the `repr` of `numpy`.
|
tests/test_with_sklearn/test_decorate_sklearn_KMeans.py
|
test_deco_sklearn_cluster_KMeans_class
|
Twangist/log_calls
| 16 |
python
|
def test_deco_sklearn_cluster_KMeans_class():
'\n Code from "Demonstration of k-means assumptions",\n http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_assumptions.html#example-cluster-plot-kmeans-assumptions-py\n\n This test demonstrates\n * decorating an "external" class and its subclasses --\n KMeans from sklearn.cluster and its subclasses,\n amongst which is MiniBatchKMeans\n * using the `override` keyword with one of the `log_calls.decorate_*`\n functions to make a change to the settings of (all the methods of)\n an already-decorated class\n\n >>> from log_calls import log_calls\n >>> from sklearn.cluster import KMeans, MiniBatchKMeans\n >>> from sklearn.datasets import make_blobs\n >>> n_samples = 1500\n >>> random_state = 170\n >>> X, y = make_blobs(n_samples=n_samples, random_state=random_state)\n\nFirst, let\'s see the call hierarchy:\n\n >>> log_calls.decorate_hierarchy(KMeans, log_args=False, override=True)\n\n >>> kmo = KMeans(n_clusters=2, random_state=random_state,\n ... n_init=10)\n KMeans.__init__ <== called by <module>\n KMeans.__init__ ==> returning to <module>\n >>> y_pred = kmo.fit_predict(X)\n KMeans.fit_predict <== called by <module>\n KMeans.fit <== called by KMeans.fit_predict\n KMeans._check_fit_data <== called by KMeans.fit\n KMeans._check_fit_data ==> returning to KMeans.fit\n KMeans.fit ==> returning to KMeans.fit_predict\n KMeans.fit_predict ==> returning to <module>\n\n`MiniBatchKMeans` is a subclass of `KMeans`, so that class is decorated too.\n\n >>> mbk = MiniBatchKMeans(init=\'k-means++\', n_clusters=2, batch_size=45,\n ... n_init=10, max_no_improvement=10)\n MiniBatchKMeans.__init__ <== called by <module>\n KMeans.__init__ <== called by MiniBatchKMeans.__init__\n KMeans.__init__ ==> returning to MiniBatchKMeans.__init__\n MiniBatchKMeans.__init__ ==> returning to <module>\n\n >>> mbk.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n MiniBatchKMeans.fit <== called by <module>\n MiniBatchKMeans._labels_inertia_minibatch <== called by MiniBatchKMeans.fit\n MiniBatchKMeans._labels_inertia_minibatch ==> returning to MiniBatchKMeans.fit\n MiniBatchKMeans.fit ==> returning to <module>\n MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n\nNow let\'s view arguments too:\n >>> log_calls.decorate_class(KMeans, decorate_subclasses=True,\n ... log_args=True, args_sep=\'\\n\',\n ... 
override=True)\n >>> # Incorrect number of clusters\n >>> mbk.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n MiniBatchKMeans.fit <== called by <module>\n arguments:\n self=MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n X=array([[ -5.19811282e+00, 6.41869316e-01],\n [ -5.75229538e+00, 4.18627111e-01],\n [ -1.08448984e+01, -7.55352273e+00],\n ...,\n [ 1.36105255e+00, -9.07491863e-01],\n [ -3.54141108e-01, 7.12241630e-01],\n [ 1.88577252e+00, 1.41185693e-03]])\n defaults:\n y=None\n MiniBatchKMeans._labels_inertia_minibatch <== called by MiniBatchKMeans.fit\n arguments:\n self=MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n X=array([[ -5.19811282e+00, 6.41869316e-01],\n [ -5.75229538e+00, 4.18627111e-01],\n [ -1.08448984e+01, -7.55352273e+00],\n ...,\n [ 1.36105255e+00, -9.07491863e-01],\n [ -3.54141108e-01, 7.12241630e-01],\n [ 1.88577252e+00, 1.41185693e-03]])\n MiniBatchKMeans._labels_inertia_minibatch ==> returning to MiniBatchKMeans.fit\n MiniBatchKMeans.fit ==> returning to <module>\n MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n\n Note: the ellipses in the values of array `X` are produced by the `repr` of `numpy`.\n '
pass
|
def test_deco_sklearn_cluster_KMeans_class():
'\n Code from "Demonstration of k-means assumptions",\n http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_assumptions.html#example-cluster-plot-kmeans-assumptions-py\n\n This test demonstrates\n * decorating an "external" class and its subclasses --\n KMeans from sklearn.cluster and its subclasses,\n amongst which is MiniBatchKMeans\n * using the `override` keyword with one of the `log_calls.decorate_*`\n functions to make a change to the settings of (all the methods of)\n an already-decorated class\n\n >>> from log_calls import log_calls\n >>> from sklearn.cluster import KMeans, MiniBatchKMeans\n >>> from sklearn.datasets import make_blobs\n >>> n_samples = 1500\n >>> random_state = 170\n >>> X, y = make_blobs(n_samples=n_samples, random_state=random_state)\n\nFirst, let\'s see the call hierarchy:\n\n >>> log_calls.decorate_hierarchy(KMeans, log_args=False, override=True)\n\n >>> kmo = KMeans(n_clusters=2, random_state=random_state,\n ... n_init=10)\n KMeans.__init__ <== called by <module>\n KMeans.__init__ ==> returning to <module>\n >>> y_pred = kmo.fit_predict(X)\n KMeans.fit_predict <== called by <module>\n KMeans.fit <== called by KMeans.fit_predict\n KMeans._check_fit_data <== called by KMeans.fit\n KMeans._check_fit_data ==> returning to KMeans.fit\n KMeans.fit ==> returning to KMeans.fit_predict\n KMeans.fit_predict ==> returning to <module>\n\n`MiniBatchKMeans` is a subclass of `KMeans`, so that class is decorated too.\n\n >>> mbk = MiniBatchKMeans(init=\'k-means++\', n_clusters=2, batch_size=45,\n ... n_init=10, max_no_improvement=10)\n MiniBatchKMeans.__init__ <== called by <module>\n KMeans.__init__ <== called by MiniBatchKMeans.__init__\n KMeans.__init__ ==> returning to MiniBatchKMeans.__init__\n MiniBatchKMeans.__init__ ==> returning to <module>\n\n >>> mbk.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n MiniBatchKMeans.fit <== called by <module>\n MiniBatchKMeans._labels_inertia_minibatch <== called by MiniBatchKMeans.fit\n MiniBatchKMeans._labels_inertia_minibatch ==> returning to MiniBatchKMeans.fit\n MiniBatchKMeans.fit ==> returning to <module>\n MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n\nNow let\'s view arguments too:\n >>> log_calls.decorate_class(KMeans, decorate_subclasses=True,\n ... log_args=True, args_sep=\'\\n\',\n ... 
override=True)\n >>> # Incorrect number of clusters\n >>> mbk.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n MiniBatchKMeans.fit <== called by <module>\n arguments:\n self=MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n X=array([[ -5.19811282e+00, 6.41869316e-01],\n [ -5.75229538e+00, 4.18627111e-01],\n [ -1.08448984e+01, -7.55352273e+00],\n ...,\n [ 1.36105255e+00, -9.07491863e-01],\n [ -3.54141108e-01, 7.12241630e-01],\n [ 1.88577252e+00, 1.41185693e-03]])\n defaults:\n y=None\n MiniBatchKMeans._labels_inertia_minibatch <== called by MiniBatchKMeans.fit\n arguments:\n self=MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n X=array([[ -5.19811282e+00, 6.41869316e-01],\n [ -5.75229538e+00, 4.18627111e-01],\n [ -1.08448984e+01, -7.55352273e+00],\n ...,\n [ 1.36105255e+00, -9.07491863e-01],\n [ -3.54141108e-01, 7.12241630e-01],\n [ 1.88577252e+00, 1.41185693e-03]])\n MiniBatchKMeans._labels_inertia_minibatch ==> returning to MiniBatchKMeans.fit\n MiniBatchKMeans.fit ==> returning to <module>\n MiniBatchKMeans(batch_size=45, compute_labels=True, init=\'k-means++\',\n init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,\n n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,\n verbose=0)\n\n Note: the ellipses in the values of array `X` are produced by the `repr` of `numpy`.\n '
pass<|docstring|>Code from "Demonstration of k-means assumptions",
http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_assumptions.html#example-cluster-plot-kmeans-assumptions-py
This test demonstrates
* decorating an "external" class and its subclasses --
KMeans from sklearn.cluster and its subclasses,
amongst which is MiniBatchKMeans
* using the `override` keyword with one of the `log_calls.decorate_*`
functions to make a change to the settings of (all the methods of)
an already-decorated class
>>> from log_calls import log_calls
>>> from sklearn.cluster import KMeans, MiniBatchKMeans
>>> from sklearn.datasets import make_blobs
>>> n_samples = 1500
>>> random_state = 170
>>> X, y = make_blobs(n_samples=n_samples, random_state=random_state)
First, let's see the call hierarchy:
>>> log_calls.decorate_hierarchy(KMeans, log_args=False, override=True)
>>> kmo = KMeans(n_clusters=2, random_state=random_state,
... n_init=10)
KMeans.__init__ <== called by <module>
KMeans.__init__ ==> returning to <module>
>>> y_pred = kmo.fit_predict(X)
KMeans.fit_predict <== called by <module>
KMeans.fit <== called by KMeans.fit_predict
KMeans._check_fit_data <== called by KMeans.fit
KMeans._check_fit_data ==> returning to KMeans.fit
KMeans.fit ==> returning to KMeans.fit_predict
KMeans.fit_predict ==> returning to <module>
`MiniBatchKMeans` is a subclass of `KMeans`, so that class is decorated too.
>>> mbk = MiniBatchKMeans(init='k-means++', n_clusters=2, batch_size=45,
... n_init=10, max_no_improvement=10)
MiniBatchKMeans.__init__ <== called by <module>
KMeans.__init__ <== called by MiniBatchKMeans.__init__
KMeans.__init__ ==> returning to MiniBatchKMeans.__init__
MiniBatchKMeans.__init__ ==> returning to <module>
>>> mbk.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
MiniBatchKMeans.fit <== called by <module>
MiniBatchKMeans._labels_inertia_minibatch <== called by MiniBatchKMeans.fit
MiniBatchKMeans._labels_inertia_minibatch ==> returning to MiniBatchKMeans.fit
MiniBatchKMeans.fit ==> returning to <module>
MiniBatchKMeans(batch_size=45, compute_labels=True, init='k-means++',
init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,
n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,
verbose=0)
Now let's view arguments too:
>>> log_calls.decorate_class(KMeans, decorate_subclasses=True,
... log_args=True, args_sep='\n',
... override=True)
>>> # Incorrect number of clusters
>>> mbk.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
MiniBatchKMeans.fit <== called by <module>
arguments:
self=MiniBatchKMeans(batch_size=45, compute_labels=True, init='k-means++',
init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,
n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,
verbose=0)
X=array([[ -5.19811282e+00, 6.41869316e-01],
[ -5.75229538e+00, 4.18627111e-01],
[ -1.08448984e+01, -7.55352273e+00],
...,
[ 1.36105255e+00, -9.07491863e-01],
[ -3.54141108e-01, 7.12241630e-01],
[ 1.88577252e+00, 1.41185693e-03]])
defaults:
y=None
MiniBatchKMeans._labels_inertia_minibatch <== called by MiniBatchKMeans.fit
arguments:
self=MiniBatchKMeans(batch_size=45, compute_labels=True, init='k-means++',
init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,
n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,
verbose=0)
X=array([[ -5.19811282e+00, 6.41869316e-01],
[ -5.75229538e+00, 4.18627111e-01],
[ -1.08448984e+01, -7.55352273e+00],
...,
[ 1.36105255e+00, -9.07491863e-01],
[ -3.54141108e-01, 7.12241630e-01],
[ 1.88577252e+00, 1.41185693e-03]])
MiniBatchKMeans._labels_inertia_minibatch ==> returning to MiniBatchKMeans.fit
MiniBatchKMeans.fit ==> returning to <module>
MiniBatchKMeans(batch_size=45, compute_labels=True, init='k-means++',
init_size=None, max_iter=100, max_no_improvement=10, n_clusters=2,
n_init=10, random_state=None, reassignment_ratio=0.01, tol=0.0,
verbose=0)
Note: the ellipses in the values of array `X` are produced by the `repr` of `numpy`.<|endoftext|>
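Editor's note: a minimal, self-contained sketch of the same pattern on a local class hierarchy. The classes `Base` and `Child` below are invented for illustration; only `log_calls.decorate_hierarchy` and its `log_args`/`override` keywords come from the record above.

from log_calls import log_calls

class Base:
    def greet(self):
        return 'hi'

class Child(Base):
    pass

# Decorate Base and every already-defined subclass; override=True lets a
# later decorate_* call change settings on the already-decorated methods.
log_calls.decorate_hierarchy(Base, log_args=False, override=True)

Child().greet()
# Expected output, analogous to the doctest above:
#     Base.greet <== called by <module>
#     Base.greet ==> returning to <module>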
|
dbbde837535d9ef38fd3937f84d7fbad6f0a4c60030f0f52843e0bbd8aefe074
|
def build_entity(self, data, i):
'Build entity object from data.\n Go into entity collection\n\n Args:\n data (:obj:`Obj`): source object.\n i (:obj: `int`): index (row labels) of object in dataframe\n Return:\n (:obj:`Obj`), e.g.\n {\n "entity": {\n "type": "protein",\n "name": "formate--tetrahydrofolate ligase",\n "identifiers": [{}... {}]\n }\n } \n '
entity = {}
entity['type'] = 'protein'
entity['name'] = str(data.iloc[(i, 0)])[(str(data.iloc[(i, 0)]).rfind('|') + 2):]
entity['identifiers'] = []
entity['identifiers'].append({'namespace': 'Seq_ID', 'value': str(data.iloc[(i, 0)])[str(data.iloc[(i, 0)]).find('W'):str(data.iloc[(i, 0)]).rfind('|')]})
return entity
|
Build entity object from data.
Go into entity collection
Args:
data (:obj:`Obj`): source object.
i (:obj: `int`): index (row labels) of object in dataframe
Return:
(:obj:`Obj`), e.g.
{
"entity": {
"type": "protein",
"name": "formate--tetrahydrofolate ligase",
"identifiers": [{}... {}]
}
}
|
datanator/data_source/protein_localization/victoria_insert_neg_wo_outer_membrane.py
|
build_entity
|
KarrLab/Kinetic-Datanator
| 10 |
python
|
def build_entity(self, data, i):
'Build entity object from data.\n Go into entity collection\n\n Args:\n data (:obj:`Obj`): source object.\n i (:obj: `int`): index (row labels) of object in dataframe\n Return:\n (:obj:`Obj`), e.g.\n {\n "entity": {\n "type": "protein",\n "name": "formate--tetrahydrofolate ligase",\n "identifiers": [{}... {}]\n }\n } \n '
entity = {}
entity['type'] = 'protein'
entity['name'] = str(data.iloc[(i, 0)])[(str(data.iloc[(i, 0)]).rfind('|') + 2):]
entity['identifiers'] = []
entity['identifiers'].append({'namespace': 'Seq_ID', 'value': str(data.iloc[(i, 0)])[str(data.iloc[(i, 0)]).find('W'):str(data.iloc[(i, 0)]).rfind('|')]})
return entity
|
def build_entity(self, data, i):
'Build entity object from data.\n Go into entity collection\n\n Args:\n data (:obj:`Obj`): source object.\n i (:obj: `int`): index (row labels) of object in dataframe\n Return:\n (:obj:`Obj`), e.g.\n {\n "entity": {\n "type": "protein",\n "name": "formate--tetrahydrofolate ligase",\n "identifiers": [{}... {}]\n }\n } \n '
entity = {}
entity['type'] = 'protein'
entity['name'] = str(data.iloc[(i, 0)])[(str(data.iloc[(i, 0)]).rfind('|') + 2):]
entity['identifiers'] = []
entity['identifiers'].append({'namespace': 'Seq_ID', 'value': str(data.iloc[(i, 0)])[str(data.iloc[(i, 0)]).find('W'):str(data.iloc[(i, 0)]).rfind('|')]})
return entity<|docstring|>Build entity object from data.
Go into entity collection
Args:
data (:obj:`Obj`): source object.
i (:obj: `int`): index (row labels) of object in dataframe
Return:
(:obj:`Obj`), e.g.
{
"entity": {
"type": "protein",
"name": "formate--tetrahydrofolate ligase",
"identifiers": [{}... {}]
}
}<|endoftext|>
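Editor's note: a worked example of the slicing in `build_entity`, using a hypothetical Seq_ID cell (the accession below is invented; the protein name comes from the docstring's example). The parsing assumes a `W...| <name>` layout.

cell = 'WP_003436406| formate--tetrahydrofolate ligase'   # hypothetical cell value
name = cell[cell.rfind('|') + 2:]                # skip '| ' -> the protein name
seq_id = cell[cell.find('W'):cell.rfind('|')]    # first 'W' up to the last '|'
print(name)    # formate--tetrahydrofolate ligase
print(seq_id)  # WP_003436406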
|
6e448888ec60849640f0551c4877a068059bf4eef9a54a8e1af5473c962c03ed
|
def build_obs(self, data, i):
'Build observation objects from data.\n Go into observations collection.\n Args:\n data (:obj:`Obj`): source object.\n i (:obj: `int`): index (row labels) of object in dataframe\n Return:\n obj(:obj:`Obj`)\n {\n "entity": {\n "type": "protein",\n "name": "formate--tetrahydrofolate ligase",\n "identifiers": [{}... {}]\n },\n "value": [],\n "source": {}, ...\n }\n '
entity = {}
entity['type'] = 'protein'
entity['name'] = str(data.iloc[(i, 0)])[(str(data.iloc[(i, 0)]).rfind('|') + 2):]
entity['identifiers'] = []
entity['identifiers'].append({'namespace': 'Seq_ID', 'value': str(data.iloc[(i, 0)])[str(data.iloc[(i, 0)]).find('W'):str(data.iloc[(i, 0)]).rfind('|')]})
print(str(data.iloc[(i, 0)])[str(data.iloc[(i, 0)]).find('W'):])
values_p = []
if (data.iloc[(i, 27)] == None):
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 1)]), 'description': 'Cytoplasmic Membrane'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 2)]), 'description': 'Cell Wall'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 3)]), 'description': 'Extracellular'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 4)]), 'description': 'Cytoplasmic'})
values_p.append({'type': 'localization', 'value': str(data.iloc[(i, 5)]), 'description': 'predicted'})
else:
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 27)]), 'description': 'Cytoplasmic Membrane'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 28)]), 'description': 'Cell Wall'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 29)]), 'description': 'Extracellular'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 30)]), 'description': 'Cytoplasmic'})
values_p.append({'type': 'localization', 'value': str(data.iloc[(i, 31)]), 'description': 'predicted'})
genotype = {}
genotype['cellType'] = 'Gram negative without Outer Membrane'
source = [{'namespace': 'PSORTb', 'value': 'Version 3.0'}]
ob_p = {'entity': entity, 'genotype': genotype, 'values': values_p, 'source': source, 'schema_version': '2.0'}
return ob_p
|
Build observation objects from data.
Go into observations collection.
Args:
data (:obj:`Obj`): source object.
i (:obj: `int`): index (row labels) of object in dataframe
Return:
obj(:obj:`Obj`)
{
"entity": {
"type": "protein",
"name": "formate--tetrahydrofolate ligase",
"identifiers": [{}... {}]
},
"value": [],
"source": {}, ...
}
|
datanator/data_source/protein_localization/victoria_insert_neg_wo_outer_membrane.py
|
build_obs
|
KarrLab/Kinetic-Datanator
| 10 |
python
|
def build_obs(self, data, i):
'Build observation objects from data.\n Go into observations collection.\n Args:\n data (:obj:`Obj`): source object.\n i (:obj: `int`): index (row labels) of object in dataframe\n Return:\n obj(:obj:`Obj`)\n {\n "entity": {\n "type": "protein",\n "name": "formate--tetrahydrofolate ligase",\n "identifiers": [{}... {}]\n },\n "value": [],\n "source": {}, ...\n }\n '
entity = {}
entity['type'] = 'protein'
entity['name'] = str(data.iloc[(i, 0)])[(str(data.iloc[(i, 0)]).rfind('|') + 2):]
entity['identifiers'] = []
entity['identifiers'].append({'namespace': 'Seq_ID', 'value': str(data.iloc[(i, 0)])[str(data.iloc[(i, 0)]).find('W'):str(data.iloc[(i, 0)]).rfind('|')]})
print(str(data.iloc[(i, 0)])[str(data.iloc[(i, 0)]).find('W'):])
values_p = []
if (data.iloc[(i, 27)] == None):
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 1)]), 'description': 'Cytoplasmic Membrane'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 2)]), 'description': 'Cell Wall'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 3)]), 'description': 'Extracellular'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 4)]), 'description': 'Cytoplasmic'})
values_p.append({'type': 'localization', 'value': str(data.iloc[(i, 5)]), 'description': 'predicted'})
else:
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 27)]), 'description': 'Cytoplasmic Membrane'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 28)]), 'description': 'Cell Wall'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 29)]), 'description': 'Extracellular'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 30)]), 'description': 'Cytoplasmic'})
values_p.append({'type': 'localization', 'value': str(data.iloc[(i, 31)]), 'description': 'predicted'})
genotype = {}
genotype['cellType'] = 'Gram negative without Outer Membrane'
source = [{'namespace': 'PSORTb', 'value': 'Version 3.0'}]
ob_p = {'entity': entity, 'genotype': genotype, 'values': values_p, 'source': source, 'schema_version': '2.0'}
return ob_p
|
def build_obs(self, data, i):
'Build observation objects from data.\n Go into observations collection.\n Args:\n data (:obj:`Obj`): source object.\n i (:obj: `int`): index (row labels) of object in dataframe\n Return:\n obj(:obj:`Obj`)\n {\n "entity": {\n "type": "protein",\n "name": "formate--tetrahydrofolate ligase",\n "identifiers": [{}... {}]\n },\n "value": [],\n "source": {}, ...\n }\n '
entity = {}
entity['type'] = 'protein'
entity['name'] = str(data.iloc[(i, 0)])[(str(data.iloc[(i, 0)]).rfind('|') + 2):]
entity['identifiers'] = []
entity['identifiers'].append({'namespace': 'Seq_ID', 'value': str(data.iloc[(i, 0)])[str(data.iloc[(i, 0)]).find('W'):str(data.iloc[(i, 0)]).rfind('|')]})
print(str(data.iloc[(i, 0)])[str(data.iloc[(i, 0)]).find('W'):])
values_p = []
if (data.iloc[(i, 27)] == None):
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 1)]), 'description': 'Cytoplasmic Membrane'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 2)]), 'description': 'Cell Wall'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 3)]), 'description': 'Extracellular'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 4)]), 'description': 'Cytoplasmic'})
values_p.append({'type': 'localization', 'value': str(data.iloc[(i, 5)]), 'description': 'predicted'})
else:
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 27)]), 'description': 'Cytoplasmic Membrane'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 28)]), 'description': 'Cell Wall'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 29)]), 'description': 'Extracellular'})
values_p.append({'type': 'localization_score', 'value': float(data.iloc[(i, 30)]), 'description': 'Cytoplasmic'})
values_p.append({'type': 'localization', 'value': str(data.iloc[(i, 31)]), 'description': 'predicted'})
genotype = {}
genotype['cellType'] = 'Gram negative without Outer Membrane'
source = [{'namespace': 'PSORTb', 'value': 'Version 3.0'}]
ob_p = {'entity': entity, 'genotype': genotype, 'values': values_p, 'source': source, 'schema_version': '2.0'}
return ob_p<|docstring|>Build observation objects from data.
Go into observations collection.
Args:
data (:obj:`Obj`): source object.
i (:obj: `int`): index (row labels) of object in dataframe
Return:
obj(:obj:`Obj`)
{
"entity": {
"type": "protein",
"name": "formate--tetrahydrofolate ligase",
"identifiers": [{}... {}]
},
"value": [],
"source": {}, ...
}<|endoftext|>
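Editor's note: the branch on `data.iloc[(i, 27)] == None` selects columns 27-31 when a second set of scores is present and falls back to columns 1-5 otherwise. With pandas, missing numeric cells usually surface as NaN rather than None, which an `== None` comparison never matches; a more defensive variant might test with `pd.isna`. A minimal sketch under that assumption, keeping the same column layout:

import pandas as pd

def score_columns(row: pd.Series) -> range:
    # pd.isna is True for both None and NaN, unlike an `== None` comparison.
    return range(1, 6) if pd.isna(row.iloc[27]) else range(27, 32)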
|
6b7f221970ce12bfe1b01e4658081435e54c61ee243a423da79985bc713fbd5a
|
def __init__(self, out_folder: StorageFolderLocation=None, images: List[AiBcrImageStorageFile]=None, options: AiBcrOptions=None):
'\n Parse business card images from Storage request \n :param out_folder: Parse output folder location on storage \n :type out_folder: StorageFolderLocation\n :param images: Images to parse. \n :type images: List[AiBcrImageStorageFile]\n :param options: Recognition options. \n :type options: AiBcrOptions\n '
self._out_folder = None
self._images = None
self._options = None
if (out_folder is not None):
self.out_folder = out_folder
if (images is not None):
self.images = images
if (options is not None):
self.options = options
|
Parse business card images from Storage request
:param out_folder: Parse output folder location on storage
:type out_folder: StorageFolderLocation
:param images: Images to parse.
:type images: List[AiBcrImageStorageFile]
:param options: Recognition options.
:type options: AiBcrOptions
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
__init__
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
def __init__(self, out_folder: StorageFolderLocation=None, images: List[AiBcrImageStorageFile]=None, options: AiBcrOptions=None):
'\n Parse business card images from Storage request \n :param out_folder: Parse output folder location on storage \n :type out_folder: StorageFolderLocation\n :param images: Images to parse. \n :type images: List[AiBcrImageStorageFile]\n :param options: Recognition options. \n :type options: AiBcrOptions\n '
self._out_folder = None
self._images = None
self._options = None
if (out_folder is not None):
self.out_folder = out_folder
if (images is not None):
self.images = images
if (options is not None):
self.options = options
|
def __init__(self, out_folder: StorageFolderLocation=None, images: List[AiBcrImageStorageFile]=None, options: AiBcrOptions=None):
'\n Parse business card images from Storage request \n :param out_folder: Parse output folder location on storage \n :type out_folder: StorageFolderLocation\n :param images: Images to parse. \n :type images: List[AiBcrImageStorageFile]\n :param options: Recognition options. \n :type options: AiBcrOptions\n '
self._out_folder = None
self._images = None
self._options = None
if (out_folder is not None):
self.out_folder = out_folder
if (images is not None):
self.images = images
if (options is not None):
self.options = options<|docstring|>Parse business card images from Storage request
:param out_folder: Parse output folder location on storage
:type out_folder: StorageFolderLocation
:param images: Images to parse.
:type images: List[AiBcrImageStorageFile]
:param options: Recognition options.
:type options: AiBcrOptions<|endoftext|>
|
339b719eb937f97d2b683d3878e7c86042cb1efda1cd7f0aea06358f4ac608a8
|
@property
def out_folder(self) -> StorageFolderLocation:
'\n Parse output folder location on storage \n\n :return: The out_folder of this AiBcrParseStorageRequest.\n :rtype: StorageFolderLocation\n '
return self._out_folder
|
Parse output folder location on storage
:return: The out_folder of this AiBcrParseStorageRequest.
:rtype: StorageFolderLocation
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
out_folder
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
@property
def out_folder(self) -> StorageFolderLocation:
'\n Parse output folder location on storage \n\n :return: The out_folder of this AiBcrParseStorageRequest.\n :rtype: StorageFolderLocation\n '
return self._out_folder
|
@property
def out_folder(self) -> StorageFolderLocation:
'\n Parse output folder location on storage \n\n :return: The out_folder of this AiBcrParseStorageRequest.\n :rtype: StorageFolderLocation\n '
return self._out_folder<|docstring|>Parse output folder location on storage
:return: The out_folder of this AiBcrParseStorageRequest.
:rtype: StorageFolderLocation<|endoftext|>
|
5c1e204503c0bbbc48b8f28f4fde099d6d035eb6c2e7d7effa6dc0fb17352ec0
|
@out_folder.setter
def out_folder(self, out_folder: StorageFolderLocation):
'\n Parse output folder location on storage \n\n :param out_folder: The out_folder of this AiBcrParseStorageRequest.\n :type: StorageFolderLocation\n '
if (out_folder is None):
raise ValueError('Invalid value for `out_folder`, must not be `None`')
self._out_folder = out_folder
|
Parse output folder location on storage
:param out_folder: The out_folder of this AiBcrParseStorageRequest.
:type: StorageFolderLocation
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
out_folder
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
@out_folder.setter
def out_folder(self, out_folder: StorageFolderLocation):
'\n Parse output folder location on storage \n\n :param out_folder: The out_folder of this AiBcrParseStorageRequest.\n :type: StorageFolderLocation\n '
if (out_folder is None):
raise ValueError('Invalid value for `out_folder`, must not be `None`')
self._out_folder = out_folder
|
@out_folder.setter
def out_folder(self, out_folder: StorageFolderLocation):
'\n Parse output folder location on storage \n\n :param out_folder: The out_folder of this AiBcrParseStorageRequest.\n :type: StorageFolderLocation\n '
if (out_folder is None):
raise ValueError('Invalid value for `out_folder`, must not be `None`')
self._out_folder = out_folder<|docstring|>Parse output folder location on storage
:param out_folder: The out_folder of this AiBcrParseStorageRequest.
:type: StorageFolderLocation<|endoftext|>
|
510048e2f1ac5bfb270913bbf31294852d8fe090d7a3b29e500b7e6a319811a9
|
@property
def images(self) -> List[AiBcrImageStorageFile]:
'\n Images to parse. \n\n :return: The images of this AiBcrParseStorageRequest.\n :rtype: list[AiBcrImageStorageFile]\n '
return self._images
|
Images to parse.
:return: The images of this AiBcrParseStorageRequest.
:rtype: list[AiBcrImageStorageFile]
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
images
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
@property
def images(self) -> List[AiBcrImageStorageFile]:
'\n Images to parse. \n\n :return: The images of this AiBcrParseStorageRequest.\n :rtype: list[AiBcrImageStorageFile]\n '
return self._images
|
@property
def images(self) -> List[AiBcrImageStorageFile]:
'\n Images to parse. \n\n :return: The images of this AiBcrParseStorageRequest.\n :rtype: list[AiBcrImageStorageFile]\n '
return self._images<|docstring|>Images to parse.
:return: The images of this AiBcrParseStorageRequest.
:rtype: list[AiBcrImageStorageFile]<|endoftext|>
|
f85cb0b339dc26076e71d7605cb3730b742427b263f2f3a950c2a231e4c07b92
|
@images.setter
def images(self, images: List[AiBcrImageStorageFile]):
'\n Images to parse. \n\n :param images: The images of this AiBcrParseStorageRequest.\n :type: list[AiBcrImageStorageFile]\n '
if (images is None):
raise ValueError('Invalid value for `images`, must not be `None`')
self._images = images
|
Images to parse.
:param images: The images of this AiBcrParseStorageRequest.
:type: list[AiBcrImageStorageFile]
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
images
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
@images.setter
def images(self, images: List[AiBcrImageStorageFile]):
'\n Images to parse. \n\n :param images: The images of this AiBcrParseStorageRequest.\n :type: list[AiBcrImageStorageFile]\n '
if (images is None):
raise ValueError('Invalid value for `images`, must not be `None`')
self._images = images
|
@images.setter
def images(self, images: List[AiBcrImageStorageFile]):
'\n Images to parse. \n\n :param images: The images of this AiBcrParseStorageRequest.\n :type: list[AiBcrImageStorageFile]\n '
if (images is None):
raise ValueError('Invalid value for `images`, must not be `None`')
self._images = images<|docstring|>Images to parse.
:param images: The images of this AiBcrParseStorageRequest.
:type: list[AiBcrImageStorageFile]<|endoftext|>
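Editor's note: the `out_folder` and `images` setters share the same validation pattern: assigning `None` raises `ValueError`, while `__init__` simply skips the setter when an argument is omitted. A small demonstration; the import path is an assumption inferred from the `sdk/AsposeEmailCloudSdk/...` location above.

from AsposeEmailCloudSdk.models import AiBcrParseStorageRequest  # assumed import path

request = AiBcrParseStorageRequest()  # all fields default to None; no setter runs
try:
    request.images = None             # the setter rejects an explicit None
except ValueError as exc:
    print(exc)   # Invalid value for `images`, must not be `None`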
|
93f53dcf2d2378e549559c0c1917b47a056908062b4ab2dfc51ad69584b4868f
|
@property
def options(self) -> AiBcrOptions:
'\n Recognition options. \n\n :return: The options of this AiBcrParseStorageRequest.\n :rtype: AiBcrOptions\n '
return self._options
|
Recognition options.
:return: The options of this AiBcrParseStorageRequest.
:rtype: AiBcrOptions
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
options
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
@property
def options(self) -> AiBcrOptions:
'\n Recognition options. \n\n :return: The options of this AiBcrParseStorageRequest.\n :rtype: AiBcrOptions\n '
return self._options
|
@property
def options(self) -> AiBcrOptions:
'\n Recognition options. \n\n :return: The options of this AiBcrParseStorageRequest.\n :rtype: AiBcrOptions\n '
return self._options<|docstring|>Recognition options.
:return: The options of this AiBcrParseStorageRequest.
:rtype: AiBcrOptions<|endoftext|>
|
c2bd7e7954ecb260ce733f1461e7fcffd01f301cadb1a25b7a971cb53467f219
|
@options.setter
def options(self, options: AiBcrOptions):
'\n Recognition options. \n\n :param options: The options of this AiBcrParseStorageRequest.\n :type: AiBcrOptions\n '
self._options = options
|
Recognition options.
:param options: The options of this AiBcrParseStorageRequest.
:type: AiBcrOptions
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
options
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
@options.setter
def options(self, options: AiBcrOptions):
'\n Recognition options. \n\n :param options: The options of this AiBcrParseStorageRequest.\n :type: AiBcrOptions\n '
self._options = options
|
@options.setter
def options(self, options: AiBcrOptions):
'\n Recognition options. \n\n :param options: The options of this AiBcrParseStorageRequest.\n :type: AiBcrOptions\n '
self._options = options<|docstring|>Recognition options.
:param options: The options of this AiBcrParseStorageRequest.
:type: AiBcrOptions<|endoftext|>
|
137ba0f026bd6074febc2e7ebe1fec840dba70990f936f32b47eaf0fb048bd4a
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
Returns the model properties as a dict
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
to_dict
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|>
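Editor's note: `to_dict` walks `swagger_types` and recurses into lists, nested models, and dicts. A self-contained toy showing that recursion; the `Point` and `Polyline` classes are invented, and only `Polyline.to_dict` is taken verbatim from the record above.

import six

class Point:
    swagger_types = {'x': 'int', 'y': 'int'}
    def __init__(self, x, y):
        self.x, self.y = x, y
    def to_dict(self):
        return {'x': self.x, 'y': self.y}

class Polyline:
    swagger_types = {'name': 'str', 'points': 'list[Point]'}
    def __init__(self, name, points):
        self.name, self.points = name, points
    def to_dict(self):
        # same body as the record above
        result = {}
        for (attr, _) in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
            else:
                result[attr] = value
        return result

print(Polyline('p', [Point(0, 0), Point(1, 2)]).to_dict())
# {'name': 'p', 'points': [{'x': 0, 'y': 0}, {'x': 1, 'y': 2}]}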
|
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
|
Returns the string representation of the model
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
to_str
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
|
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
|
For `print` and `pprint`
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
__repr__
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
def __repr__(self):
return self.to_str()
|
def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
|
d5038990226da977db63139f3e80da64db722941cf3b61a00858a3ada30884b2
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, AiBcrParseStorageRequest)):
return False
return (self.__dict__ == other.__dict__)
|
Returns true if both objects are equal
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
__eq__
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
def __eq__(self, other):
if (not isinstance(other, AiBcrParseStorageRequest)):
return False
return (self.__dict__ == other.__dict__)
|
def __eq__(self, other):
if (not isinstance(other, AiBcrParseStorageRequest)):
return False
return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|>
|
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
|
Returns true if both objects are not equal
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_storage_request.py
|
__ne__
|
aspose-email-cloud/aspose-email-cloud-python
| 1 |
python
|
def __ne__(self, other):
return (not (self == other))
|
def __ne__(self, other):
return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|>
|
04561ed8c278b208df3c56c7fca1d58c4d2a5be7dc8267ae03072e88f79ab11c
|
def pagination_number_get(page_name: str) -> int:
' Gets the pagination number from variable in session. '
return session.pagination_number_get(page_name)
|
Gets the pagination number from variable in session.
|
modules/utilities.py
|
pagination_number_get
|
romanpindela/api-wars-master-websql-python-flask-codecool-json-ajax-
| 0 |
python
|
def pagination_number_get(page_name: str) -> int:
' '
return session.pagination_number_get(page_name)
|
def pagination_number_get(page_name: str) -> int:
' '
return session.pagination_number_get(page_name)<|docstring|>Gets the pagination number from variable in session.<|endoftext|>
|
76bc32b26ca5a73f62c153088b039d89b1f620d0153c819d2c0689d282a94148
|
def pagination_number_set(page_name: str, items_number: int):
'\n Calculates the number of pages of pagination.\n The result is rounded up from the formula: (numerator + denominator - 1) // denominator\n Next, sets the number of subpages (pagination of the page) and remembers it as variable in the sessions.\n '
pagination_number = (((items_number + swapi.PAGINATION_NUMBER) - 1) // swapi.PAGINATION_NUMBER)
session.pagination_number_set(page_name, pagination_number)
|
Calculates the number of pages of pagination.
The result is rounded up from the formula: (numerator + denominator - 1) // denominator
Next, sets the number of subpages (pagination of the page) and remembers it as variable in the sessions.
|
modules/utilities.py
|
pagination_number_set
|
romanpindela/api-wars-master-websql-python-flask-codecool-json-ajax-
| 0 |
python
|
def pagination_number_set(page_name: str, items_number: int):
'\n Calculates the number of pages of pagination.\n The result is rounded up from the formula: (numerator + denominator - 1) // denominator\n Next, sets the number of subpages (pagination of the page) and remembers it as variable in the sessions.\n '
pagination_number = (((items_number + swapi.PAGINATION_NUMBER) - 1) // swapi.PAGINATION_NUMBER)
session.pagination_number_set(page_name, pagination_number)
|
def pagination_number_set(page_name: str, items_number: int):
'\n Calculates the number of pages of pagination.\n The result is rounded up from the formula: (numerator + denominator - 1) // denominator\n Next, sets the number of subpages (pagination of the page) and remembers it as variable in the sessions.\n '
pagination_number = (((items_number + swapi.PAGINATION_NUMBER) - 1) // swapi.PAGINATION_NUMBER)
session.pagination_number_set(page_name, pagination_number)<|docstring|>Calculates the number of pages of pagination.
The result is rounded up from the formula: (numerator + denominator - 1) // denominator
Next, sets the number of subpages (pagination of the page) and remembers it as variable in the sessions.<|endoftext|>
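Editor's note: the formula `(numerator + denominator - 1) // denominator` is integer ceiling division. A quick check with an assumed page size of 10 (the real value lives in `swapi.PAGINATION_NUMBER`):

PAGINATION_NUMBER = 10  # assumed page size for illustration

def pages_needed(items_number: int) -> int:
    # Rounds up without floating point: 0 items -> 0 pages, 10 -> 1, 11 -> 2.
    return (items_number + PAGINATION_NUMBER - 1) // PAGINATION_NUMBER

assert pages_needed(0) == 0
assert pages_needed(10) == 1
assert pages_needed(11) == 2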
|
45cc5c36e64ca7fe1daeb8b5c00b9deb931fa10900da0ec08b16ec31c43f42bf
|
def change_list_value(array: list, value_old: str, value_new: str) -> list:
' Returns a given list with a changed value. '
for (index, value) in enumerate(array):
if (value == value_old):
array[index] = value_new
return array
|
Returns a given list with a changed value.
|
modules/utilities.py
|
change_list_value
|
romanpindela/api-wars-master-websql-python-flask-codecool-json-ajax-
| 0 |
python
|
def change_list_value(array: list, value_old: str, value_new: str) -> list:
' '
for (index, value) in enumerate(array):
if (value == value_old):
array[index] = value_new
return array
|
def change_list_value(array: list, value_old: str, value_new: str) -> list:
' '
for (index, value) in enumerate(array):
if (value == value_old):
array[index] = value_new
return array<|docstring|>Returns a given list with a changed value.<|endoftext|>
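Editor's note: the loop replaces every occurrence of `value_old`, not just the first; a doctest-style illustration:

>>> change_list_value(['a', 'b', 'a'], 'a', 'z')
['z', 'b', 'z']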
|
a16f51f8198b21cfeb44efce984d2d2dc20b6f85a26c71f3b4f748c9c488b9f9
|
def unpack_data(data_list: list) -> str:
' Returns a string concatenated with the list data. '
return ', '.join(data_list)
|
Returns a string concatenated with the list data.
|
modules/utilities.py
|
unpack_data
|
romanpindela/api-wars-master-websql-python-flask-codecool-json-ajax-
| 0 |
python
|
def unpack_data(data_list: list) -> str:
' '
return ', '.join(data_list)
|
def unpack_data(data_list: list) -> str:
' '
return ', '.join(data_list)<|docstring|>Returns a string concatenated with the list data.<|endoftext|>
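Editor's note: a doctest-style illustration of the join:

>>> unpack_data(['Tatooine', 'Alderaan'])
'Tatooine, Alderaan'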
|