body_hash (stringlengths 64-64) | body (stringlengths 23-109k) | docstring (stringlengths 1-57k) | path (stringlengths 4-198) | name (stringlengths 1-115) | repository_name (stringlengths 7-111) | repository_stars (float64 0-191k) | lang (stringclasses 1: python) | body_without_docstring (stringlengths 14-108k) | unified (stringlengths 45-133k) |
---|---|---|---|---|---|---|---|---|---|
920ecf2a801c8c4ca1b105b4718313308f1091c0afe403ebd49cb6f667de15f8 | def ensure_large_islands(smoothed_cluster, min_size):
'\n Function to eliminate islands below a threshold size\n Inputs:\n smoothed_cluster (np.ndarray) : Binary raster indicating\n filtered connected cluster of active pixels, output of\n smooth_object_interface()\n min_size (int) : Minimum area (specified in pixels) of islands\n to qualify for coarsening\n Outputs:\n large_islands (np.ndarray) : Binary raster similar to\n smoothed_cluster, with islands below min_size eliminated\n '
large_islands = morphology.remove_small_holes(smoothed_cluster, min_size, 2)
return large_islands | Function to eliminate islands below a threshold size
Inputs:
smoothed_cluster (np.ndarray) : Binary raster indicating
filtered connected cluster of active pixels, output of
smooth_object_interface()
min_size (int) : Minimum area (specified in pixels) of islands
to qualify for coarsening
Outputs:
large_islands (np.ndarray) : Binary raster similar to
smoothed_cluster, with islands below min_size eliminated | unstructured_mesh_refinement_tools.py | ensure_large_islands | passaH2O/meshrefinement | 1 | python | def ensure_large_islands(smoothed_cluster, min_size):
'\n Function to eliminate islands below a threshold size\n Inputs:\n smoothed_cluster (np.ndarray) : Binary raster indicating\n filtered connected cluster of active pixels, output of\n smooth_object_interface()\n min_size (int) : Minimum area (specified in pixels) of islands\n to qualify for coarsening\n Outputs:\n large_islands (np.ndarray) : Binary raster similar to\n smoothed_cluster, with islands below min_size eliminated\n '
large_islands = morphology.remove_small_holes(smoothed_cluster, min_size, 2)
return large_islands | def ensure_large_islands(smoothed_cluster, min_size):
'\n Function to eliminate islands below a threshold size\n Inputs:\n smoothed_cluster (np.ndarray) : Binary raster indicating\n filtered connected cluster of active pixels, output of\n smooth_object_interface()\n min_size (int) : Minimum area (specified in pixels) of islands\n to qualify for coarsening\n Outputs:\n large_islands (np.ndarray) : Binary raster similar to\n smoothed_cluster, with islands below min_size eliminated\n '
large_islands = morphology.remove_small_holes(smoothed_cluster, min_size, 2)
return large_islands<|docstring|>Function to eliminate islands below a threshold size
Inputs:
smoothed_cluster (np.ndarray) : Binary raster indicating
filtered connected cluster of active pixels, output of
smooth_object_interface()
min_size (int) : Minimum area (specified in pixels) of islands
to qualify for coarsening
Outputs:
large_islands (np.ndarray) : Binary raster similar to
smoothed_cluster, with islands below min_size eliminated<|endoftext|> |
05f9dd7201292159791b30565a496a397059857ed7730f950f967ddddbf734ee | def smooth_island_interface(large_islands, outside, buffer=4):
"\n Optional function to smooth channel-island interface from opposite\n orientation as smooth_object_interface. Can lead to slightly more\n locally-convex perimeter.\n Inputs:\n large_islands (np.ndarray) : Binary raster indicating\n largest islands for coarsening, output of ensure_large_islands()\n outside (np.ndarray) : Binary raster same size as the imagery\n raster with 1's representing cells outside the boundary\n and 0's inside the boundary. Output of generate_boundary()\n buffer (int) : Radius of a disk-shaped buffer with which to\n smooth island features, specified in number of pixels\n Outputs:\n large_islands (np.ndarray) : Binary raster similar to\n large_islands, with islands slightly smoothed\n "
large_islands = morphology.binary_opening(large_islands, morphology.disk(buffer))
large_islands[outside] = 1
return large_islands | Optional function to smooth channel-island interface from opposite
orientation as smooth_object_interface. Can lead to slightly more
locally-convex perimeter.
Inputs:
large_islands (np.ndarray) : Binary raster indicating
largest islands for coarsening, output of ensure_large_islands()
outside (np.ndarray) : Binary raster same size as the imagery
raster with 1's representing cells outside the boundary
and 0's inside the boundary. Output of generate_boundary()
buffer (int) : Radius of a disk-shaped buffer with which to
smooth island features, specified in number of pixels
Outputs:
large_islands (np.ndarray) : Binary raster similar to
large_islands, with islands slightly smoothed | unstructured_mesh_refinement_tools.py | smooth_island_interface | passaH2O/meshrefinement | 1 | python | def smooth_island_interface(large_islands, outside, buffer=4):
"\n Optional function to smooth channel-island interface from opposite\n orientation as smooth_object_interface. Can lead to slightly more\n locally-convex perimeter.\n Inputs:\n large_islands (np.ndarray) : Binary raster indicating\n largest islands for coarsening, output of ensure_large_islands()\n outside (np.ndarray) : Binary raster same size as the imagery\n raster with 1's representing cells outside the boundary\n and 0's inside the boundary. Output of generate_boundary()\n buffer (int) : Radius of a disk-shaped buffer with which to\n smooth island features, specified in number of pixels\n Outputs:\n large_islands (np.ndarray) : Binary raster similar to\n large_islands, with islands slightly smoothed\n "
large_islands = morphology.binary_opening(large_islands, morphology.disk(buffer))
large_islands[outside] = 1
return large_islands | def smooth_island_interface(large_islands, outside, buffer=4):
"\n Optional function to smooth channel-island interface from opposite\n orientation as smooth_object_interface. Can lead to slightly more\n locally-convex perimeter.\n Inputs:\n large_islands (np.ndarray) : Binary raster indicating\n largest islands for coarsening, output of ensure_large_islands()\n outside (np.ndarray) : Binary raster same size as the imagery\n raster with 1's representing cells outside the boundary\n and 0's inside the boundary. Output of generate_boundary()\n buffer (int) : Radius of a disk-shaped buffer with which to\n smooth island features, specified in number of pixels\n Outputs:\n large_islands (np.ndarray) : Binary raster similar to\n large_islands, with islands slightly smoothed\n "
large_islands = morphology.binary_opening(large_islands, morphology.disk(buffer))
large_islands[outside] = 1
return large_islands<|docstring|>Optional function to smooth channel-island interface from opposite
orientation as smooth_object_interface. Can lead to slightly more
locally-convex perimeter.
Inputs:
large_islands (np.ndarray) : Binary raster indicating
largest islands for coarsening, output of ensure_large_islands()
outside (np.ndarray) : Binary raster same size as the imagery
raster with 1's representing cells outside the boundary
and 0's inside the boundary. Output of generate_boundary()
buffer (int) : Radius of a disk-shaped buffer with which to
smooth island features, specified in number of pixels
Outputs:
large_islands (np.ndarray) : Binary raster similar to
large_islands, with islands slightly smoothed<|endoftext|> |
803d3f80d7a40d6f671065c461bdb28a53c343bf1e48598374a58d50d49e6516 | def raster2polygon(large_islands, img_path):
"\n Function to convert binary raster with 0's indicating islands\n into a list of vector polygons.\n Inputs:\n large_islands (np.ndarray) : Binary raster indicating\n largest islands for coarsening, output of ensure_large_islands()\n or smooth_island_interface()\n img_path (str) : Path to original input image, from which we need to\n grab the geo transform matrix\n Outputs:\n polycoords (list) : List of polygon vertices outlining the islands\n (holes) of large_islands\n "
image = np.array((large_islands == 0)).astype(np.uint8)
src = rasterio.open(img_path)
with rasterio.Env():
results = ({'properties': {'raster_val': v}, 'geometry': s} for (i, (s, v)) in enumerate(rasterio.features.shapes(image, mask=None, transform=src.transform)))
geoms = list(results)
polycoords = [geoms[c]['geometry']['coordinates'][0] for c in range(len(geoms)) if (geoms[c]['properties']['raster_val'] == 1)]
return polycoords | Function to convert binary raster with 0's indicating islands
into a list of vector polygons.
Inputs:
large_islands (np.ndarray) : Binary raster indicating
largest islands for coarsening, output of ensure_large_islands()
or smooth_island_interface()
img_path (str) : Path to original input image, from which we need to
grab the geo transform matrix
Outputs:
polycoords (list) : List of polygon vertices outlining the islands
(holes) of large_islands | unstructured_mesh_refinement_tools.py | raster2polygon | passaH2O/meshrefinement | 1 | python | def raster2polygon(large_islands, img_path):
"\n Function to convert binary raster with 0's indicating islands\n into a list of vector polygons.\n Inputs:\n large_islands (np.ndarray) : Binary raster indicating\n largest islands for coarsening, output of ensure_large_islands()\n or smooth_island_interface()\n img_path (str) : Path to original input image, from which we need to\n grab the geo transform matrix\n Outputs:\n polycoords (list) : List of polygon vertices outlining the islands\n (holes) of large_islands\n "
image = np.array((large_islands == 0)).astype(np.uint8)
src = rasterio.open(img_path)
with rasterio.Env():
results = ({'properties': {'raster_val': v}, 'geometry': s} for (i, (s, v)) in enumerate(rasterio.features.shapes(image, mask=None, transform=src.transform)))
geoms = list(results)
polycoords = [geoms[c]['geometry']['coordinates'][0] for c in range(len(geoms)) if (geoms[c]['properties']['raster_val'] == 1)]
return polycoords | def raster2polygon(large_islands, img_path):
"\n Function to convert binary raster with 0's indicating islands\n into a list of vector polygons.\n Inputs:\n large_islands (np.ndarray) : Binary raster indicating\n largest islands for coarsening, output of ensure_large_islands()\n or smooth_island_interface()\n img_path (str) : Path to original input image, from which we need to\n grab the geo transform matrix\n Outputs:\n polycoords (list) : List of polygon vertices outlining the islands\n (holes) of large_islands\n "
image = np.array((large_islands == 0)).astype(np.uint8)
src = rasterio.open(img_path)
with rasterio.Env():
results = ({'properties': {'raster_val': v}, 'geometry': s} for (i, (s, v)) in enumerate(rasterio.features.shapes(image, mask=None, transform=src.transform)))
geoms = list(results)
polycoords = [geoms[c]['geometry']['coordinates'][0] for c in range(len(geoms)) if (geoms[c]['properties']['raster_val'] == 1)]
return polycoords<|docstring|>Function to convert binary raster with 0's indicating islands
into a list of vector polygons.
Inputs:
large_islands (np.ndarray) : Binary raster indicating
largest islands for coarsening, output of ensure_large_islands()
or smooth_island_interface()
img_path (str) : Path to original input image, from which we need to
grab the geo transform matrix
Outputs:
polycoords (list) : List of polygon vertices outlining the islands
(holes) of large_islands<|endoftext|> |
bf8b57782d95deb54e915e99883cc926a2abf907f67a003f7bd94190bd7b78ba | def simplify_polygons(polycoords, epsilon):
'\n Function to decimate the vertices of a list of polygons using the\n Ramer-Douglas-Peucker algorithm.\n Inputs:\n polycoords (list) : List of polygon vertices outlining the islands\n to be simplified, output of raster2polygon()\n epsilon (float or int) : Epsilon value to use for the RDP algorithm,\n essentially a buffer lengthscale to eliminate proximal vertices\n Outputs:\n simple_polygons (list) : Simplified (decimated) form of polycoords\n '
simple_polygons = [rdp(c, epsilon=epsilon) for c in polycoords]
return simple_polygons | Function to decimate the vertices of a list of polygons using the
Ramer-Douglas-Peucker algorithm.
Inputs:
polycoords (list) : List of polygon vertices outlining the islands
to be simplified, output of raster2polygon()
epsilon (float or int) : Epsilon value to use for the RDP algorithm,
essentially a buffer lengthscale to eliminate proximal vertices
Outputs:
simple_polygons (list) : Simplified (decimated) form of polycoords | unstructured_mesh_refinement_tools.py | simplify_polygons | passaH2O/meshrefinement | 1 | python | def simplify_polygons(polycoords, epsilon):
'\n Function to decimate the vertices of a list of polygons using the\n Ramer-Douglas-Peucker algorithm.\n Inputs:\n polycoords (list) : List of polygon vertices outlining the islands\n to be simplified, output of raster2polygon()\n epsilon (float or int) : Epsilon value to use for the RDP algorithm,\n essentially a buffer lengthscale to eliminate proximal vertices\n Outputs:\n simple_polygons (list) : Simplified (decimated) form of polycoords\n '
simple_polygons = [rdp(c, epsilon=epsilon) for c in polycoords]
return simple_polygons | def simplify_polygons(polycoords, epsilon):
'\n Function to decimate the vertices of a list of polygons using the\n Ramer-Douglas-Peucker algorithm.\n Inputs:\n polycoords (list) : List of polygon vertices outlining the islands\n to be simplified, output of raster2polygon()\n epsilon (float or int) : Epsilon value to use for the RDP algorithm,\n essentially a buffer lengthscale to eliminate proximal vertices\n Outputs:\n simple_polygons (list) : Simplified (decimated) form of polycoords\n '
simple_polygons = [rdp(c, epsilon=epsilon) for c in polycoords]
return simple_polygons<|docstring|>Function to decimate the vertices of a list of polygons using the
Ramer-Douglas-Peucker algorithm.
Inputs:
polycoords (list) : List of polygon vertices outlining the islands
to be simplified, output of raster2polygon()
epsilon (float or int) : Epsilon value to use for the RDP algorithm,
essentially a buffer lengthscale to eliminate proximal vertices
Outputs:
simple_polygons (list) : Simplified (decimated) form of polycoords<|endoftext|> |
54575610d81047b7fd5c2ab3295df1a39346be0d086e91fa28dd15eda3865eb5 | def getAngle(a, b, c):
'\n Helper function for filter_poly_angles()\n Find angle between three points ABC\n '
ang = math.degrees((math.atan2((c[1] - b[1]), (c[0] - b[0])) - math.atan2((a[1] - b[1]), (a[0] - b[0]))))
return ((ang + 360) if (ang < 0) else ang) | Helper function for filter_poly_angles()
Find angle between three points ABC | unstructured_mesh_refinement_tools.py | getAngle | passaH2O/meshrefinement | 1 | python | def getAngle(a, b, c):
'\n Helper function for filter_poly_angles()\n Find angle between three points ABC\n '
ang = math.degrees((math.atan2((c[1] - b[1]), (c[0] - b[0])) - math.atan2((a[1] - b[1]), (a[0] - b[0]))))
return ((ang + 360) if (ang < 0) else ang) | def getAngle(a, b, c):
'\n Helper function for filter_poly_angles()\n Find angle between three points ABC\n '
ang = math.degrees((math.atan2((c[1] - b[1]), (c[0] - b[0])) - math.atan2((a[1] - b[1]), (a[0] - b[0]))))
return ((ang + 360) if (ang < 0) else ang)<|docstring|>Helper function for filter_poly_angles()
Find angle between three points ABC<|endoftext|> |
b0a58a7d21766723a4bda28d53dbac2df38432dfd0475110f6b9f7af498fda5a | def removeAcute(polygon):
'\n Helper function for filter_poly_angles()\n Remove any angles which are too sharp (< 28 deg)\n '
newpoly = polygon.copy()
for n in range((len(polygon) - 2), 0, (- 1)):
ang = getAngle(polygon[(n - 1)], polygon[n], polygon[(n + 1)])
if ((ang < 28) | (ang > 332)):
del newpoly[n]
return newpoly | Helper function for filter_poly_angles()
Remove any angles which are too sharp (< 28 deg) | unstructured_mesh_refinement_tools.py | removeAcute | passaH2O/meshrefinement | 1 | python | def removeAcute(polygon):
'\n Helper function for filter_poly_angles()\n Remove any angles which are too sharp (< 28 deg)\n '
newpoly = polygon.copy()
for n in range((len(polygon) - 2), 0, (- 1)):
ang = getAngle(polygon[(n - 1)], polygon[n], polygon[(n + 1)])
if ((ang < 28) | (ang > 332)):
del newpoly[n]
return newpoly | def removeAcute(polygon):
'\n Helper function for filter_poly_angles()\n Remove any angles which are too sharp (< 28 deg)\n '
newpoly = polygon.copy()
for n in range((len(polygon) - 2), 0, (- 1)):
ang = getAngle(polygon[(n - 1)], polygon[n], polygon[(n + 1)])
if ((ang < 28) | (ang > 332)):
del newpoly[n]
return newpoly<|docstring|>Helper function for filter_poly_angles()
Remove any angles which are too sharp (< 28 deg)<|endoftext|> |
8db5737cbefb709ad39fd51389fcf06f3ebebbb236fa92b913cb6dcc97fe6057 | def filter_poly_angles(simple_polygons):
'\n Function to eliminate acute angles from of a list of polygons \n Inputs:\n simple_polygons (list) : List of polygon vertices outlining islands\n to be simplified, output of simplify_polygons()\n Outputs:\n safe_simple_polygons (list) : List similar to simple_polygons with\n sharp angles removed\n '
safe_simple_polygons = [removeAcute(c) for c in simple_polygons]
return safe_simple_polygons | Function to eliminate acute angles from of a list of polygons
Inputs:
simple_polygons (list) : List of polygon vertices outlining islands
to be simplified, output of simplify_polygons()
Outputs:
safe_simple_polygons (list) : List similar to simple_polygons with
sharp angles removed | unstructured_mesh_refinement_tools.py | filter_poly_angles | passaH2O/meshrefinement | 1 | python | def filter_poly_angles(simple_polygons):
'\n Function to eliminate acute angles from of a list of polygons \n Inputs:\n simple_polygons (list) : List of polygon vertices outlining islands\n to be simplified, output of simplify_polygons()\n Outputs:\n safe_simple_polygons (list) : List similar to simple_polygons with\n sharp angles removed\n '
safe_simple_polygons = [removeAcute(c) for c in simple_polygons]
return safe_simple_polygons | def filter_poly_angles(simple_polygons):
'\n Function to eliminate acute angles from of a list of polygons \n Inputs:\n simple_polygons (list) : List of polygon vertices outlining islands\n to be simplified, output of simplify_polygons()\n Outputs:\n safe_simple_polygons (list) : List similar to simple_polygons with\n sharp angles removed\n '
safe_simple_polygons = [removeAcute(c) for c in simple_polygons]
return safe_simple_polygons<|docstring|>Function to eliminate acute angles from of a list of polygons
Inputs:
simple_polygons (list) : List of polygon vertices outlining islands
to be simplified, output of simplify_polygons()
Outputs:
safe_simple_polygons (list) : List similar to simple_polygons with
sharp angles removed<|endoftext|> |
628824a63add0c03a6f2ce2bb1cccb160eecb34e2627a64d36bd35dadbe43a6a | def save_for_anuga(safe_simple_polygons, outfolder, triangle_res):
'\n Function to save list of polygons to CSV files indicating their future\n ANUGA resolution in outfolder.\n Inputs:\n safe_simple_polygons (list) : List of filtered polygon vertices\n outlining islands to be coarsened\n outfolder (str) : String specifying folder path in which to\n save polygon files\n triangle_res (float or list of floats) : Max triangle area\n to be assigned to this polygon when loaded into ANUGA, which\n is saved into the filename for ease of use later. Can be\n specified as a single float for all polygons or as a list/array\n of floats of equal length to the number of polygons.\n Outputs:\n Saves a list of CSV files in outfolder\n '
for (n, simple_poly) in enumerate(safe_simple_polygons):
try:
res = triangle_res[n]
except TypeError:
res = triangle_res
name = os.path.join(outfolder, ('CoarseReg%s_Res%s.csv' % (n, res)))
df = pd.DataFrame(data=simple_poly[0:(- 1)])
df.to_csv(name, index=False, header=False)
return | Function to save list of polygons to CSV files indicating their future
ANUGA resolution in outfolder.
Inputs:
safe_simple_polygons (list) : List of filtered polygon vertices
outlining islands to be coarsened
outfolder (str) : String specifying folder path in which to
save polygon files
triangle_res (float or list of floats) : Max triangle area
to be assigned to this polygon when loaded into ANUGA, which
is saved into the filename for ease of use later. Can be
specified as a single float for all polygons or as a list/array
of floats of equal length to the number of polygons.
Outputs:
Saves a list of CSV files in outfolder | unstructured_mesh_refinement_tools.py | save_for_anuga | passaH2O/meshrefinement | 1 | python | def save_for_anuga(safe_simple_polygons, outfolder, triangle_res):
'\n Function to save list of polygons to CSV files indicating their future\n ANUGA resolution in outfolder.\n Inputs:\n safe_simple_polygons (list) : List of filtered polygon vertices\n outlining islands to be coarsened\n outfolder (str) : String specifying folder path in which to\n save polygon files\n triangle_res (float or list of floats) : Max triangle area\n to be assigned to this polygon when loaded into ANUGA, which\n is saved into the filename for ease of use later. Can be\n specified as a single float for all polygons or as a list/array\n of floats of equal length to the number of polygons.\n Outputs:\n Saves a list of CSV files in outfolder\n '
for (n, simple_poly) in enumerate(safe_simple_polygons):
try:
res = triangle_res[n]
except TypeError:
res = triangle_res
name = os.path.join(outfolder, ('CoarseReg%s_Res%s.csv' % (n, res)))
df = pd.DataFrame(data=simple_poly[0:(- 1)])
df.to_csv(name, index=False, header=False)
return | def save_for_anuga(safe_simple_polygons, outfolder, triangle_res):
'\n Function to save list of polygons to CSV files indicating their future\n ANUGA resolution in outfolder.\n Inputs:\n safe_simple_polygons (list) : List of filtered polygon vertices\n outlining islands to be coarsened\n outfolder (str) : String specifying folder path in which to\n save polygon files\n triangle_res (float or list of floats) : Max triangle area\n to be assigned to this polygon when loaded into ANUGA, which\n is saved into the filename for ease of use later. Can be\n specified as a single float for all polygons or as a list/array\n of floats of equal length to the number of polygons.\n Outputs:\n Saves a list of CSV files in outfolder\n '
for (n, simple_poly) in enumerate(safe_simple_polygons):
try:
res = triangle_res[n]
except TypeError:
res = triangle_res
name = os.path.join(outfolder, ('CoarseReg%s_Res%s.csv' % (n, res)))
df = pd.DataFrame(data=simple_poly[0:(- 1)])
df.to_csv(name, index=False, header=False)
return<|docstring|>Function to save list of polygons to CSV files indicating their future
ANUGA resolution in outfolder.
Inputs:
safe_simple_polygons (list) : List of filtered polygon vertices
outlining islands to be coarsened
outfolder (str) : String specifying folder path in which to
save polygon files
triangle_res (float or list of floats) : Max triangle area
to be assigned to this polygon when loaded into ANUGA, which
is saved into the filename for ease of use later. Can be
specified as a single float for all polygons or as a list/array
of floats of equal length to the number of polygons.
Outputs:
Saves a list of CSV files in outfolder<|endoftext|> |
957564cf8e1403a0be2a3aead3b1d7e414a34bc433e8800dd09df7c68d6357b7 | def plot_polygons(polygons, fill=True, outline=False, outline_color='k'):
'\n Helper function to plot the vector form of the interior polygons,\n either filled in or as outlines.\n Inputs:\n polygons (list) : List of polygon coordinates\n fill (bool) : Option to plot polygons as filled-in\n outline (bool) : Option to plot polygon outlines\n outline_color (str) : If outline is True, plot with this color\n Outputs:\n Outputs a figure showing polygon features\n '
fig = plt.figure(figsize=(8, 8), dpi=400)
for poly in polygons:
x = [c[0] for c in poly]
y = [c[1] for c in poly]
if fill:
plt.fill(x, y)
if outline:
plt.plot(x, y, c=outline_color, linewidth=0.5, alpha=0.9)
plt.axis('scaled')
return | Helper function to plot the vector form of the interior polygons,
either filled in or as outlines.
Inputs:
polygons (list) : List of polygon coordinates
fill (bool) : Option to plot polygons as filled-in
outline (bool) : Option to plot polygon outlines
outline_color (str) : If outline is True, plot with this color
Outputs:
Outputs a figure showing polygon features | unstructured_mesh_refinement_tools.py | plot_polygons | passaH2O/meshrefinement | 1 | python | def plot_polygons(polygons, fill=True, outline=False, outline_color='k'):
'\n Helper function to plot the vector form of the interior polygons,\n either filled in or as outlines.\n Inputs:\n polygons (list) : List of polygon coordinates\n fill (bool) : Option to plot polygons as filled-in\n outline (bool) : Option to plot polygon outlines\n outline_color (str) : If outline is True, plot with this color\n Outputs:\n Outputs a figure showing polygon features\n '
fig = plt.figure(figsize=(8, 8), dpi=400)
for poly in polygons:
x = [c[0] for c in poly]
y = [c[1] for c in poly]
if fill:
plt.fill(x, y)
if outline:
plt.plot(x, y, c=outline_color, linewidth=0.5, alpha=0.9)
plt.axis('scaled')
return | def plot_polygons(polygons, fill=True, outline=False, outline_color='k'):
'\n Helper function to plot the vector form of the interior polygons,\n either filled in or as outlines.\n Inputs:\n polygons (list) : List of polygon coordinates\n fill (bool) : Option to plot polygons as filled-in\n outline (bool) : Option to plot polygon outlines\n outline_color (str) : If outline is True, plot with this color\n Outputs:\n Outputs a figure showing polygon features\n '
fig = plt.figure(figsize=(8, 8), dpi=400)
for poly in polygons:
x = [c[0] for c in poly]
y = [c[1] for c in poly]
if fill:
plt.fill(x, y)
if outline:
plt.plot(x, y, c=outline_color, linewidth=0.5, alpha=0.9)
plt.axis('scaled')
return<|docstring|>Helper function to plot the vector form of the interior polygons,
either filled in or as outlines.
Inputs:
polygons (list) : List of polygon coordinates
fill (bool) : Option to plot polygons as filled-in
outline (bool) : Option to plot polygon outlines
outline_color (str) : If outline is True, plot with this color
Outputs:
Outputs a figure showing polygon features<|endoftext|> |
d2f33e6978ba0c240caf03983fc92a1a1b4a11a199d251746207687a037f2431 | def initialize_multi_client_cluster(job_name: str, dtensor_jobs: List[str], client_id: int, collective_leader: str, port: Optional[int]=None, protocol: Optional[str]='grpc+loas', enable_coordination_service: bool=False):
'Initialize GRPC servers and collectives for multi-client DTensor setup.\n\n While single clients (e.g. Forge) can use local mode of collectives, GRPC\n servers are necessary in mutli-client setup. This function can be used to\n initialize a cluster and enable collective ops.\n\n NOTE: this function must be called in an eager context.\n\n Args:\n job_name: The job name used by all clients in the DTensor cluster.\n dtensor_jobs: A list of the DTensor client jobs participating in the\n cluster. Must be strings of the form "hostname:port".\n client_id: The ID of the DTensor client this function is being called in.\n collective_leader: The job/task that will be used to run collectives.\n port: The port this client\'s GRPC server will run on.\n protocol: The protocol to be used by this server.\n enable_coordination_service: If true, enable distributed coordination\n service to make sure that workers know the devices on each other, a\n prerequisite for data transfer through cross-worker rendezvous.\n\n Raises:\n RuntimeError: If running inside a tf.function.\n '
assert context.executing_eagerly()
if (not collective_leader.startswith('/job:')):
collective_leader = ('/job:' + collective_leader)
context.context().configure_collective_ops(collective_leader=collective_leader)
if enable_coordination_service:
context.context().configure_coordination_service(service_type='standalone', service_leader=collective_leader)
config_proto = context.get_config()
config_proto.experimental.collective_group_leader = collective_leader
cluster_def = cluster_pb2.ClusterDef()
cluster_def.job.add(name=job_name, tasks=dict(enumerate(dtensor_jobs)))
server_def = tensorflow_server_pb2.ServerDef(cluster=cluster_def, default_session_config=config_proto, job_name=job_name, task_index=client_id, protocol=protocol, port=port)
server_def.default_session_config.rpc_options.num_channels_per_target = 4
server_def.default_session_config.experimental.recv_buf_max_chunk = (- 1)
logging.info('Enabling collectives with server_def: %s', server_def)
context.context().enable_collective_ops(server_def)
context.ensure_initialized() | Initialize GRPC servers and collectives for multi-client DTensor setup.
While single clients (e.g. Forge) can use local mode of collectives, GRPC
servers are necessary in mutli-client setup. This function can be used to
initialize a cluster and enable collective ops.
NOTE: this function must be called in an eager context.
Args:
job_name: The job name used by all clients in the DTensor cluster.
dtensor_jobs: A list of the DTensor client jobs participating in the
cluster. Must be strings of the form "hostname:port".
client_id: The ID of the DTensor client this function is being called in.
collective_leader: The job/task that will be used to run collectives.
port: The port this client's GRPC server will run on.
protocol: The protocol to be used by this server.
enable_coordination_service: If true, enable distributed coordination
service to make sure that workers know the devices on each other, a
prerequisite for data transfer through cross-worker rendezvous.
Raises:
RuntimeError: If running inside a tf.function. | tensorflow/dtensor/python/multi_client_util.py | initialize_multi_client_cluster | snadampal/tensorflow | 3 | python | def initialize_multi_client_cluster(job_name: str, dtensor_jobs: List[str], client_id: int, collective_leader: str, port: Optional[int]=None, protocol: Optional[str]='grpc+loas', enable_coordination_service: bool=False):
'Initialize GRPC servers and collectives for multi-client DTensor setup.\n\n While single clients (e.g. Forge) can use local mode of collectives, GRPC\n servers are necessary in mutli-client setup. This function can be used to\n initialize a cluster and enable collective ops.\n\n NOTE: this function must be called in an eager context.\n\n Args:\n job_name: The job name used by all clients in the DTensor cluster.\n dtensor_jobs: A list of the DTensor client jobs participating in the\n cluster. Must be strings of the form "hostname:port".\n client_id: The ID of the DTensor client this function is being called in.\n collective_leader: The job/task that will be used to run collectives.\n port: The port this client\'s GRPC server will run on.\n protocol: The protocol to be used by this server.\n enable_coordination_service: If true, enable distributed coordination\n service to make sure that workers know the devices on each other, a\n prerequisite for data transfer through cross-worker rendezvous.\n\n Raises:\n RuntimeError: If running inside a tf.function.\n '
assert context.executing_eagerly()
if (not collective_leader.startswith('/job:')):
collective_leader = ('/job:' + collective_leader)
context.context().configure_collective_ops(collective_leader=collective_leader)
if enable_coordination_service:
context.context().configure_coordination_service(service_type='standalone', service_leader=collective_leader)
config_proto = context.get_config()
config_proto.experimental.collective_group_leader = collective_leader
cluster_def = cluster_pb2.ClusterDef()
cluster_def.job.add(name=job_name, tasks=dict(enumerate(dtensor_jobs)))
server_def = tensorflow_server_pb2.ServerDef(cluster=cluster_def, default_session_config=config_proto, job_name=job_name, task_index=client_id, protocol=protocol, port=port)
server_def.default_session_config.rpc_options.num_channels_per_target = 4
server_def.default_session_config.experimental.recv_buf_max_chunk = (- 1)
logging.info('Enabling collectives with server_def: %s', server_def)
context.context().enable_collective_ops(server_def)
context.ensure_initialized() | def initialize_multi_client_cluster(job_name: str, dtensor_jobs: List[str], client_id: int, collective_leader: str, port: Optional[int]=None, protocol: Optional[str]='grpc+loas', enable_coordination_service: bool=False):
'Initialize GRPC servers and collectives for multi-client DTensor setup.\n\n While single clients (e.g. Forge) can use local mode of collectives, GRPC\n servers are necessary in mutli-client setup. This function can be used to\n initialize a cluster and enable collective ops.\n\n NOTE: this function must be called in an eager context.\n\n Args:\n job_name: The job name used by all clients in the DTensor cluster.\n dtensor_jobs: A list of the DTensor client jobs participating in the\n cluster. Must be strings of the form "hostname:port".\n client_id: The ID of the DTensor client this function is being called in.\n collective_leader: The job/task that will be used to run collectives.\n port: The port this client\'s GRPC server will run on.\n protocol: The protocol to be used by this server.\n enable_coordination_service: If true, enable distributed coordination\n service to make sure that workers know the devices on each other, a\n prerequisite for data transfer through cross-worker rendezvous.\n\n Raises:\n RuntimeError: If running inside a tf.function.\n '
assert context.executing_eagerly()
if (not collective_leader.startswith('/job:')):
collective_leader = ('/job:' + collective_leader)
context.context().configure_collective_ops(collective_leader=collective_leader)
if enable_coordination_service:
context.context().configure_coordination_service(service_type='standalone', service_leader=collective_leader)
config_proto = context.get_config()
config_proto.experimental.collective_group_leader = collective_leader
cluster_def = cluster_pb2.ClusterDef()
cluster_def.job.add(name=job_name, tasks=dict(enumerate(dtensor_jobs)))
server_def = tensorflow_server_pb2.ServerDef(cluster=cluster_def, default_session_config=config_proto, job_name=job_name, task_index=client_id, protocol=protocol, port=port)
server_def.default_session_config.rpc_options.num_channels_per_target = 4
server_def.default_session_config.experimental.recv_buf_max_chunk = (- 1)
logging.info('Enabling collectives with server_def: %s', server_def)
context.context().enable_collective_ops(server_def)
context.ensure_initialized()<|docstring|>Initialize GRPC servers and collectives for multi-client DTensor setup.
While single clients (e.g. Forge) can use local mode of collectives, GRPC
servers are necessary in mutli-client setup. This function can be used to
initialize a cluster and enable collective ops.
NOTE: this function must be called in an eager context.
Args:
job_name: The job name used by all clients in the DTensor cluster.
dtensor_jobs: A list of the DTensor client jobs participating in the
cluster. Must be strings of the form "hostname:port".
client_id: The ID of the DTensor client this function is being called in.
collective_leader: The job/task that will be used to run collectives.
port: The port this client's GRPC server will run on.
protocol: The protocol to be used by this server.
enable_coordination_service: If true, enable distributed coordination
service to make sure that workers know the devices on each other, a
prerequisite for data transfer through cross-worker rendezvous.
Raises:
RuntimeError: If running inside a tf.function.<|endoftext|> |
efd563fef01bad7578a1f1b0cbdebfebd22cf6328226dd6344862f722a825cf9 | def parse_input(self, input, inflv, starttime, endtime):
'Read simulations data from input file.\n\n Arguments:\n input -- prefix of file containing neutrino fluxes\n inflv -- neutrino flavor to consider\n starttime -- start time set by user via command line option (or None)\n endtime -- end time set by user via command line option (or None)\n '
(self.times_el, self.times_nb) = ([], [])
self.e_bins = [zero]
(self.N_dict, self.egroup_dict, self.dNLde_dict, self.log_spectrum) = ({}, {}, {}, {})
self._parse((input + '-early.txt'), 'early', inflv)
self._parse((input + '-late.txt'), 'late', inflv)
self._calculate_dNLde()
times = self.times_el
self.starttime = get_starttime(starttime, times[0])
self.endtime = get_endtime(endtime, times[(- 1)])
if ((inflv == 'e') and (self.starttime < 50)):
self._parse_nb((input + '-nb.txt'))
times = sorted((times + self.times_nb))
self.raw_times = get_raw_times(times, self.starttime, self.endtime)
log_group_e = [log10(e_bin) for e_bin in self.e_bins]
for time in self.raw_times:
log_dNLde = [log10(d) for d in self.dNLde_dict[time]]
self.log_spectrum[time] = InterpolatedUnivariateSpline(log_group_e, log_dNLde) | Read simulations data from input file.
Arguments:
input -- prefix of file containing neutrino fluxes
inflv -- neutrino flavor to consider
starttime -- start time set by user via command line option (or None)
endtime -- end time set by user via command line option (or None) | sntools/formats/totani.py | parse_input | svalder/sntools | 10 | python | def parse_input(self, input, inflv, starttime, endtime):
'Read simulations data from input file.\n\n Arguments:\n input -- prefix of file containing neutrino fluxes\n inflv -- neutrino flavor to consider\n starttime -- start time set by user via command line option (or None)\n endtime -- end time set by user via command line option (or None)\n '
(self.times_el, self.times_nb) = ([], [])
self.e_bins = [zero]
(self.N_dict, self.egroup_dict, self.dNLde_dict, self.log_spectrum) = ({}, {}, {}, {})
self._parse((input + '-early.txt'), 'early', inflv)
self._parse((input + '-late.txt'), 'late', inflv)
self._calculate_dNLde()
times = self.times_el
self.starttime = get_starttime(starttime, times[0])
self.endtime = get_endtime(endtime, times[(- 1)])
if ((inflv == 'e') and (self.starttime < 50)):
self._parse_nb((input + '-nb.txt'))
times = sorted((times + self.times_nb))
self.raw_times = get_raw_times(times, self.starttime, self.endtime)
log_group_e = [log10(e_bin) for e_bin in self.e_bins]
for time in self.raw_times:
log_dNLde = [log10(d) for d in self.dNLde_dict[time]]
self.log_spectrum[time] = InterpolatedUnivariateSpline(log_group_e, log_dNLde) | def parse_input(self, input, inflv, starttime, endtime):
'Read simulations data from input file.\n\n Arguments:\n input -- prefix of file containing neutrino fluxes\n inflv -- neutrino flavor to consider\n starttime -- start time set by user via command line option (or None)\n endtime -- end time set by user via command line option (or None)\n '
(self.times_el, self.times_nb) = ([], [])
self.e_bins = [zero]
(self.N_dict, self.egroup_dict, self.dNLde_dict, self.log_spectrum) = ({}, {}, {}, {})
self._parse((input + '-early.txt'), 'early', inflv)
self._parse((input + '-late.txt'), 'late', inflv)
self._calculate_dNLde()
times = self.times_el
self.starttime = get_starttime(starttime, times[0])
self.endtime = get_endtime(endtime, times[(- 1)])
if ((inflv == 'e') and (self.starttime < 50)):
self._parse_nb((input + '-nb.txt'))
times = sorted((times + self.times_nb))
self.raw_times = get_raw_times(times, self.starttime, self.endtime)
log_group_e = [log10(e_bin) for e_bin in self.e_bins]
for time in self.raw_times:
log_dNLde = [log10(d) for d in self.dNLde_dict[time]]
self.log_spectrum[time] = InterpolatedUnivariateSpline(log_group_e, log_dNLde)<|docstring|>Read simulations data from input file.
Arguments:
input -- prefix of file containing neutrino fluxes
inflv -- neutrino flavor to consider
starttime -- start time set by user via command line option (or None)
endtime -- end time set by user via command line option (or None)<|endoftext|> |
62ee4841012503a7fc5b64b9775d2da0633f5a35865d18abe025abb01cc0658b | def prepare_evt_gen(self, binned_t):
'Pre-compute values necessary for event generation.\n\n Scipy/numpy are optimized for parallel operation on large arrays, making\n it orders of magnitude faster to pre-compute all values at one time\n instead of computing them lazily when needed.\n\n Argument:\n binned_t -- list of time bins for generating events\n '
for time in binned_t:
if (time in self.log_spectrum):
continue
if ((40 <= time <= 49.99) and (self.times_nb != [])):
_times = [x for x in self.times_nb if (x in self.raw_times)]
else:
_times = [x for x in self.times_el if (x in self.raw_times)]
for t_bin in _times:
if (time <= t_bin):
t1 = t_bin
break
else:
t0 = t_bin
dNLde = []
prev_dNLde = self.dNLde_dict[t0]
next_dNLde = self.dNLde_dict[t1]
for (i, _) in enumerate(self.e_bins):
tmp = (prev_dNLde[i] + (((next_dNLde[i] - prev_dNLde[i]) * (time - t0)) / (t1 - t0)))
dNLde.append(tmp)
log_group_e = [log10(e_bin) for e_bin in self.e_bins]
log_dNLde = [log10(d) for d in dNLde]
self.log_spectrum[time] = InterpolatedUnivariateSpline(log_group_e, log_dNLde)
return None | Pre-compute values necessary for event generation.
Scipy/numpy are optimized for parallel operation on large arrays, making
it orders of magnitude faster to pre-compute all values at one time
instead of computing them lazily when needed.
Argument:
binned_t -- list of time bins for generating events | sntools/formats/totani.py | prepare_evt_gen | svalder/sntools | 10 | python | def prepare_evt_gen(self, binned_t):
'Pre-compute values necessary for event generation.\n\n Scipy/numpy are optimized for parallel operation on large arrays, making\n it orders of magnitude faster to pre-compute all values at one time\n instead of computing them lazily when needed.\n\n Argument:\n binned_t -- list of time bins for generating events\n '
for time in binned_t:
if (time in self.log_spectrum):
continue
if ((40 <= time <= 49.99) and (self.times_nb != [])):
_times = [x for x in self.times_nb if (x in self.raw_times)]
else:
_times = [x for x in self.times_el if (x in self.raw_times)]
for t_bin in _times:
if (time <= t_bin):
t1 = t_bin
break
else:
t0 = t_bin
dNLde = []
prev_dNLde = self.dNLde_dict[t0]
next_dNLde = self.dNLde_dict[t1]
for (i, _) in enumerate(self.e_bins):
tmp = (prev_dNLde[i] + (((next_dNLde[i] - prev_dNLde[i]) * (time - t0)) / (t1 - t0)))
dNLde.append(tmp)
log_group_e = [log10(e_bin) for e_bin in self.e_bins]
log_dNLde = [log10(d) for d in dNLde]
self.log_spectrum[time] = InterpolatedUnivariateSpline(log_group_e, log_dNLde)
return None | def prepare_evt_gen(self, binned_t):
'Pre-compute values necessary for event generation.\n\n Scipy/numpy are optimized for parallel operation on large arrays, making\n it orders of magnitude faster to pre-compute all values at one time\n instead of computing them lazily when needed.\n\n Argument:\n binned_t -- list of time bins for generating events\n '
for time in binned_t:
if (time in self.log_spectrum):
continue
if ((40 <= time <= 49.99) and (self.times_nb != [])):
_times = [x for x in self.times_nb if (x in self.raw_times)]
else:
_times = [x for x in self.times_el if (x in self.raw_times)]
for t_bin in _times:
if (time <= t_bin):
t1 = t_bin
break
else:
t0 = t_bin
dNLde = []
prev_dNLde = self.dNLde_dict[t0]
next_dNLde = self.dNLde_dict[t1]
for (i, _) in enumerate(self.e_bins):
tmp = (prev_dNLde[i] + (((next_dNLde[i] - prev_dNLde[i]) * (time - t0)) / (t1 - t0)))
dNLde.append(tmp)
log_group_e = [log10(e_bin) for e_bin in self.e_bins]
log_dNLde = [log10(d) for d in dNLde]
self.log_spectrum[time] = InterpolatedUnivariateSpline(log_group_e, log_dNLde)
return None<|docstring|>Pre-compute values necessary for event generation.
Scipy/numpy are optimized for parallel operation on large arrays, making
it orders of magnitude faster to pre-compute all values at one time
instead of computing them lazily when needed.
Argument:
binned_t -- list of time bins for generating events<|endoftext|> |
7f2125d3593acc2be42d4eba167f8f3f851eea699ec788ee544ae5a26db4db56 | def nu_emission(self, eNu, time):
'Number of neutrinos emitted, as a function of energy.\n\n This is not yet the flux! The geometry factor 1/(4 pi r**2) is added later.\n Arguments:\n eNu -- neutrino energy\n time -- time ;)\n '
f = self.log_spectrum[time]
return (10 ** f(log10(eNu))) | Number of neutrinos emitted, as a function of energy.
This is not yet the flux! The geometry factor 1/(4 pi r**2) is added later.
Arguments:
eNu -- neutrino energy
time -- time ;) | sntools/formats/totani.py | nu_emission | svalder/sntools | 10 | python | def nu_emission(self, eNu, time):
'Number of neutrinos emitted, as a function of energy.\n\n This is not yet the flux! The geometry factor 1/(4 pi r**2) is added later.\n Arguments:\n eNu -- neutrino energy\n time -- time ;)\n '
f = self.log_spectrum[time]
return (10 ** f(log10(eNu))) | def nu_emission(self, eNu, time):
'Number of neutrinos emitted, as a function of energy.\n\n This is not yet the flux! The geometry factor 1/(4 pi r**2) is added later.\n Arguments:\n eNu -- neutrino energy\n time -- time ;)\n '
f = self.log_spectrum[time]
return (10 ** f(log10(eNu)))<|docstring|>Number of neutrinos emitted, as a function of energy.
This is not yet the flux! The geometry factor 1/(4 pi r**2) is added later.
Arguments:
eNu -- neutrino energy
time -- time ;)<|endoftext|> |
1e0cd3889187119a0786f867d56a7173ede6240878951127c1655d5e1f8eef78 | def _parse(self, input, format, flv):
'Read data from files into dictionaries to look up by time.'
with open(input) as infile:
raw_indata = [line for line in infile]
chunks = []
if (format == 'early'):
for i in range(26):
chunks.append(raw_indata[(42 * i):(42 * (i + 1))])
line_N = 6
range_egroup = range(19, 39)
elif (format == 'late'):
for i in range(36):
chunks.append(raw_indata[(46 * i):(46 * (i + 1))])
line_N = 8
range_egroup = range(21, 41)
offset = {'e': 0, 'eb': 1, 'x': 2, 'xb': 2}[flv]
for chunk in chunks:
time = (float(chunk[0].split()[0]) * 1000)
time -= 2
self.times_el.append(time)
N = float(chunk[line_N].split()[offset])
if (offset == 2):
N /= 4
self.N_dict[time] = N
egroup = [zero]
for i in range_egroup:
line = list(map(float, chunk[i].split()))
egroup.append(line[((- 3) + offset)])
if (self.egroup_dict == {}):
self.e_bins.append((line[1] / 1000))
self.egroup_dict[time] = egroup
return None | Read data from files into dictionaries to look up by time. | sntools/formats/totani.py | _parse | svalder/sntools | 10 | python | def _parse(self, input, format, flv):
with open(input) as infile:
raw_indata = [line for line in infile]
chunks = []
if (format == 'early'):
for i in range(26):
chunks.append(raw_indata[(42 * i):(42 * (i + 1))])
line_N = 6
range_egroup = range(19, 39)
elif (format == 'late'):
for i in range(36):
chunks.append(raw_indata[(46 * i):(46 * (i + 1))])
line_N = 8
range_egroup = range(21, 41)
offset = {'e': 0, 'eb': 1, 'x': 2, 'xb': 2}[flv]
for chunk in chunks:
time = (float(chunk[0].split()[0]) * 1000)
time -= 2
self.times_el.append(time)
N = float(chunk[line_N].split()[offset])
if (offset == 2):
N /= 4
self.N_dict[time] = N
egroup = [zero]
for i in range_egroup:
line = list(map(float, chunk[i].split()))
egroup.append(line[((- 3) + offset)])
if (self.egroup_dict == {}):
self.e_bins.append((line[1] / 1000))
self.egroup_dict[time] = egroup
return None | def _parse(self, input, format, flv):
with open(input) as infile:
raw_indata = [line for line in infile]
chunks = []
if (format == 'early'):
for i in range(26):
chunks.append(raw_indata[(42 * i):(42 * (i + 1))])
line_N = 6
range_egroup = range(19, 39)
elif (format == 'late'):
for i in range(36):
chunks.append(raw_indata[(46 * i):(46 * (i + 1))])
line_N = 8
range_egroup = range(21, 41)
offset = {'e': 0, 'eb': 1, 'x': 2, 'xb': 2}[flv]
for chunk in chunks:
time = (float(chunk[0].split()[0]) * 1000)
time -= 2
self.times_el.append(time)
N = float(chunk[line_N].split()[offset])
if (offset == 2):
N /= 4
self.N_dict[time] = N
egroup = [zero]
for i in range_egroup:
line = list(map(float, chunk[i].split()))
egroup.append(line[((- 3) + offset)])
if (self.egroup_dict == {}):
self.e_bins.append((line[1] / 1000))
self.egroup_dict[time] = egroup
return None<|docstring|>Read data from files into dictionaries to look up by time.<|endoftext|> |
7779c10c6303d4a9e343ea8a7926b5ba8f55b7684f5a522053c855c3d9854a28 | def _parse_nb(self, input):
'More granular nu_e data for the neutronization burst ("nb", 40-50ms).\n\n Note: the nb file comes from a slightly different simulation, therefore we\n have to deal with a time offset and a scaling factor.\n '
with open(input) as infile:
raw_indata = [line for line in infile]
chunks = [raw_indata[(26 * i):(26 * (i + 1))] for i in range(6, 57)]
for chunk in chunks:
time = (float(chunk[0].split()[2]) * 1000)
time -= 467.5
self.times_nb.append(time)
luminosity = (float(chunk[1].split()[2]) * 624.151)
egroup = [zero]
for i in range(3, 23):
line = list(map(float, chunk[i].split()))
egroup.append(line[(- 3)])
E_integ = 0
spec = []
for (j, n) in enumerate(egroup):
if ((j == 0) or (j == (len(egroup) - 1))):
spec.append(zero)
else:
spec.append((n / (self.e_bins[(j + 1)] - self.e_bins[(j - 1)])))
E_integ += ((((spec[(j - 1)] * self.e_bins[(j - 1)]) + (spec[j] * self.e_bins[j])) * (self.e_bins[j] - self.e_bins[(j - 1)])) / 2)
spec = [((x / E_integ) * luminosity) for x in spec]
nb_scale = (1 - (((5.23 / 13.82) * (time - 40)) / 10))
self.dNLde_dict[time] = [(x * nb_scale) for x in spec]
return None | More granular nu_e data for the neutronization burst ("nb", 40-50ms).
Note: the nb file comes from a slightly different simulation, therefore we
have to deal with a time offset and a scaling factor. | sntools/formats/totani.py | _parse_nb | svalder/sntools | 10 | python | def _parse_nb(self, input):
'More granular nu_e data for the neutronization burst ("nb", 40-50ms).\n\n Note: the nb file comes from a slightly different simulation, therefore we\n have to deal with a time offset and a scaling factor.\n '
with open(input) as infile:
raw_indata = [line for line in infile]
chunks = [raw_indata[(26 * i):(26 * (i + 1))] for i in range(6, 57)]
for chunk in chunks:
time = (float(chunk[0].split()[2]) * 1000)
time -= 467.5
self.times_nb.append(time)
luminosity = (float(chunk[1].split()[2]) * 624.151)
egroup = [zero]
for i in range(3, 23):
line = list(map(float, chunk[i].split()))
egroup.append(line[(- 3)])
E_integ = 0
spec = []
for (j, n) in enumerate(egroup):
if ((j == 0) or (j == (len(egroup) - 1))):
spec.append(zero)
else:
spec.append((n / (self.e_bins[(j + 1)] - self.e_bins[(j - 1)])))
E_integ += ((((spec[(j - 1)] * self.e_bins[(j - 1)]) + (spec[j] * self.e_bins[j])) * (self.e_bins[j] - self.e_bins[(j - 1)])) / 2)
spec = [((x / E_integ) * luminosity) for x in spec]
nb_scale = (1 - (((5.23 / 13.82) * (time - 40)) / 10))
self.dNLde_dict[time] = [(x * nb_scale) for x in spec]
return None | def _parse_nb(self, input):
'More granular nu_e data for the neutronization burst ("nb", 40-50ms).\n\n Note: the nb file comes from a slightly different simulation, therefore we\n have to deal with a time offset and a scaling factor.\n '
with open(input) as infile:
raw_indata = [line for line in infile]
chunks = [raw_indata[(26 * i):(26 * (i + 1))] for i in range(6, 57)]
for chunk in chunks:
time = (float(chunk[0].split()[2]) * 1000)
time -= 467.5
self.times_nb.append(time)
luminosity = (float(chunk[1].split()[2]) * 624.151)
egroup = [zero]
for i in range(3, 23):
line = list(map(float, chunk[i].split()))
egroup.append(line[(- 3)])
E_integ = 0
spec = []
for (j, n) in enumerate(egroup):
if ((j == 0) or (j == (len(egroup) - 1))):
spec.append(zero)
else:
spec.append((n / (self.e_bins[(j + 1)] - self.e_bins[(j - 1)])))
E_integ += ((((spec[(j - 1)] * self.e_bins[(j - 1)]) + (spec[j] * self.e_bins[j])) * (self.e_bins[j] - self.e_bins[(j - 1)])) / 2)
spec = [((x / E_integ) * luminosity) for x in spec]
nb_scale = (1 - (((5.23 / 13.82) * (time - 40)) / 10))
self.dNLde_dict[time] = [(x * nb_scale) for x in spec]
return None<|docstring|>More granular nu_e data for the neutronization burst ("nb", 40-50ms).
Note: the nb file comes from a slightly different simulation, therefore we
have to deal with a time offset and a scaling factor.<|endoftext|> |
48b2c4cb86b77d1825c3307cf6ff45474a87514ce89f6393c6896ebf7c6e4723 | def _calculate_dNLde(self):
'Calculate number luminosity spectrum for each time bin.'
for (i, time) in enumerate(self.times_el):
E_integ = 0
spec = []
egroup = self.egroup_dict[time]
for (j, n) in enumerate(egroup):
if ((j == 0) or (j == (len(egroup) - 1))):
spec.append(zero)
else:
spec.append((n / (self.e_bins[(j + 1)] - self.e_bins[(j - 1)])))
E_integ += (((spec[(j - 1)] + spec[j]) * (self.e_bins[j] - self.e_bins[(j - 1)])) / 2)
spec = [(x / E_integ) for x in spec]
if (i == 0):
num_lum = zero
else:
prev_time = self.times_el[(i - 1)]
num_lum = ((self.N_dict[time] - self.N_dict[prev_time]) / (time - prev_time))
dNLde = [(num_lum * spectrum) for spectrum in spec]
self.dNLde_dict[time] = dNLde
return None | Calculate number luminosity spectrum for each time bin. | sntools/formats/totani.py | _calculate_dNLde | svalder/sntools | 10 | python | def _calculate_dNLde(self):
for (i, time) in enumerate(self.times_el):
E_integ = 0
spec = []
egroup = self.egroup_dict[time]
for (j, n) in enumerate(egroup):
if ((j == 0) or (j == (len(egroup) - 1))):
spec.append(zero)
else:
spec.append((n / (self.e_bins[(j + 1)] - self.e_bins[(j - 1)])))
E_integ += (((spec[(j - 1)] + spec[j]) * (self.e_bins[j] - self.e_bins[(j - 1)])) / 2)
spec = [(x / E_integ) for x in spec]
if (i == 0):
num_lum = zero
else:
prev_time = self.times_el[(i - 1)]
num_lum = ((self.N_dict[time] - self.N_dict[prev_time]) / (time - prev_time))
dNLde = [(num_lum * spectrum) for spectrum in spec]
self.dNLde_dict[time] = dNLde
return None | def _calculate_dNLde(self):
for (i, time) in enumerate(self.times_el):
E_integ = 0
spec = []
egroup = self.egroup_dict[time]
for (j, n) in enumerate(egroup):
if ((j == 0) or (j == (len(egroup) - 1))):
spec.append(zero)
else:
spec.append((n / (self.e_bins[(j + 1)] - self.e_bins[(j - 1)])))
E_integ += (((spec[(j - 1)] + spec[j]) * (self.e_bins[j] - self.e_bins[(j - 1)])) / 2)
spec = [(x / E_integ) for x in spec]
if (i == 0):
num_lum = zero
else:
prev_time = self.times_el[(i - 1)]
num_lum = ((self.N_dict[time] - self.N_dict[prev_time]) / (time - prev_time))
dNLde = [(num_lum * spectrum) for spectrum in spec]
self.dNLde_dict[time] = dNLde
return None<|docstring|>Calculate number luminosity spectrum for each time bin.<|endoftext|> |
c2d8baf536cf3d8eec8ee8622556bcb0ecf3683bdcffb6abdf72825ec692653a | @staticmethod
def get_diseases_for_gene_desc(gene_id):
'for a given NCBI Entrez Gene ID, returns a ``set`` of DOI disease identifiers for the gene\n\n :returns: a ``set`` containing ``str`` disease ontology identifiers\n '
handler = QueryBioLink.HANDLER_MAP['get_diseases_for_gene'].format(gene_id=gene_id)
results = QueryBioLink.__access_api(handler)
ret_data = dict()
if (results is None):
return ret_data
ret_list = results['objects']
if (len(ret_list) > 200):
print(((('Number of diseases found for gene ' + gene_id) + ' is: ') + str(len(ret_list))), file=sys.stderr)
for disease_id in ret_list:
if (('DOID:' in disease_id) or ('OMIM:' in disease_id)):
ret_data[disease_id] = QueryBioLink.get_label_for_disease(disease_id)
return ret_data | for a given NCBI Entrez Gene ID, returns a ``set`` of DOI disease identifiers for the gene
:returns: a ``set`` containing ``str`` disease ontology identifiers | code/reasoningtool/kg-construction/QueryBioLink.py | get_diseases_for_gene_desc | rtx-travis-tester/RTX | 31 | python | @staticmethod
def get_diseases_for_gene_desc(gene_id):
'for a given NCBI Entrez Gene ID, returns a ``set`` of DOI disease identifiers for the gene\n\n :returns: a ``set`` containing ``str`` disease ontology identifiers\n '
handler = QueryBioLink.HANDLER_MAP['get_diseases_for_gene'].format(gene_id=gene_id)
results = QueryBioLink.__access_api(handler)
ret_data = dict()
if (results is None):
return ret_data
ret_list = results['objects']
if (len(ret_list) > 200):
print(((('Number of diseases found for gene ' + gene_id) + ' is: ') + str(len(ret_list))), file=sys.stderr)
for disease_id in ret_list:
if (('DOID:' in disease_id) or ('OMIM:' in disease_id)):
ret_data[disease_id] = QueryBioLink.get_label_for_disease(disease_id)
return ret_data | @staticmethod
def get_diseases_for_gene_desc(gene_id):
'for a given NCBI Entrez Gene ID, returns a ``set`` of DOI disease identifiers for the gene\n\n :returns: a ``set`` containing ``str`` disease ontology identifiers\n '
handler = QueryBioLink.HANDLER_MAP['get_diseases_for_gene'].format(gene_id=gene_id)
results = QueryBioLink.__access_api(handler)
ret_data = dict()
if (results is None):
return ret_data
ret_list = results['objects']
if (len(ret_list) > 200):
print(((('Number of diseases found for gene ' + gene_id) + ' is: ') + str(len(ret_list))), file=sys.stderr)
for disease_id in ret_list:
if (('DOID:' in disease_id) or ('OMIM:' in disease_id)):
ret_data[disease_id] = QueryBioLink.get_label_for_disease(disease_id)
return ret_data<|docstring|>for a given NCBI Entrez Gene ID, returns a ``set`` of DOI disease identifiers for the gene
:returns: a ``set`` containing ``str`` disease ontology identifiers<|endoftext|> |
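A self-contained sketch of the filtering step above, using a mocked BioLink response (the real method obtains `results` through the class's handler template and `__access_api` helper, which are not shown in this record; the identifiers below are only illustrative):

```python
# Mocked payload standing in for the BioLink "diseases for gene" response.
mock_results = {'objects': ['DOID:1612', 'OMIM:114480', 'MONDO:0007254']}

ret_data = {}
for disease_id in mock_results['objects']:
    # Only DOID:/OMIM: identifiers are kept; the real code also looks up a label.
    if 'DOID:' in disease_id or 'OMIM:' in disease_id:
        ret_data[disease_id] = 'label for ' + disease_id
print(ret_data)  # the MONDO identifier is skipped
```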
45fbb7ac54796aa83a6e049cee57622b1a0816ac214e6ff643a2fb26a4d2c0c1 | @staticmethod
def get_anatomies_for_gene(gene_id):
'for a given NCBI Entrez Gene ID, returns a ``dict`` of Anatomy IDs and labels for the gene\n\n :returns: a ``dict`` of <anatomy_ID, label>\n '
handler = QueryBioLink.HANDLER_MAP['get_anatomies_for_gene'].format(gene_id=gene_id)
results = QueryBioLink.__access_api(handler)
ret_dict = dict()
if (results is None):
return ret_dict
res_dict = results['associations']
ret_dict = dict(map((lambda r: (r['object']['id'], r['object']['label'])), res_dict))
if (len(ret_dict) > 200):
print('Warning, got {} anatomies for gene {}'.format(len(ret_dict), gene_id), file=sys.stderr)
return ret_dict | for a given NCBI Entrez Gene ID, returns a ``dict`` of Anatomy IDs and labels for the gene
:returns: a ``dict`` of <anatomy_ID, label> | code/reasoningtool/kg-construction/QueryBioLink.py | get_anatomies_for_gene | rtx-travis-tester/RTX | 31 | python | @staticmethod
def get_anatomies_for_gene(gene_id):
'for a given NCBI Entrez Gene ID, returns a ``dict`` of Anatomy IDs and labels for the gene\n\n :returns: a ``dict`` of <anatomy_ID, label>\n '
handler = QueryBioLink.HANDLER_MAP['get_anatomies_for_gene'].format(gene_id=gene_id)
results = QueryBioLink.__access_api(handler)
ret_dict = dict()
if (results is None):
return ret_dict
res_dict = results['associations']
ret_dict = dict(map((lambda r: (r['object']['id'], r['object']['label'])), res_dict))
if (len(ret_dict) > 200):
print('Warning, got {} anatomies for gene {}'.format(len(ret_dict), gene_id), file=sys.stderr)
return ret_dict | @staticmethod
def get_anatomies_for_gene(gene_id):
'for a given NCBI Entrez Gene ID, returns a ``dict`` of Anatomy IDs and labels for the gene\n\n :returns: a ``dict`` of <anatomy_ID, label>\n '
handler = QueryBioLink.HANDLER_MAP['get_anatomies_for_gene'].format(gene_id=gene_id)
results = QueryBioLink.__access_api(handler)
ret_dict = dict()
if (results is None):
return ret_dict
res_dict = results['associations']
ret_dict = dict(map((lambda r: (r['object']['id'], r['object']['label'])), res_dict))
if (len(ret_dict) > 200):
print('Warning, got {} anatomies for gene {}'.format(len(ret_dict), gene_id), file=sys.stderr)
return ret_dict<|docstring|>for a given NCBI Entrez Gene ID, returns a ``dict`` of Anatomy IDs and labels for the gene
:returns: a ``dict`` of <anatomy_ID, label><|endoftext|> |
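The same association-unpacking idiom is used by several of these BioLink helpers; a self-contained illustration on a mocked response (the UBERON identifiers are shown only as examples):

```python
# Mocked 'associations' payload as returned by the BioLink gene->anatomy endpoint.
mock_results = {'associations': [
    {'object': {'id': 'UBERON:0002107', 'label': 'liver'}},
    {'object': {'id': 'UBERON:0000955', 'label': 'brain'}},
]}
ret_dict = dict(map(lambda r: (r['object']['id'], r['object']['label']),
                    mock_results['associations']))
print(ret_dict)  # {'UBERON:0002107': 'liver', 'UBERON:0000955': 'brain'}
```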
58aa0b2917265a605719f08f4d3a2d58c303ce38823247d8d5242cbd955e06a6 | @staticmethod
def get_genes_for_anatomy(anatomy_id):
'for a given Anatomy ID, returns a ``list`` of Gene ID for the anatomy\n\n :returns: a ``list`` of gene ID\n '
handler = QueryBioLink.HANDLER_MAP['get_genes_for_anatomy'].format(anatomy_id=anatomy_id)
results = QueryBioLink.__access_api(handler)
ret_list = []
if (results is None):
return ret_list
res_dict = results['associations']
ret_list = list(map((lambda r: r['subject']['id']), res_dict))
if (len(ret_list) > 200):
print('Warning, got {} genes for anatomy {}'.format(len(ret_list), anatomy_id), file=sys.stderr)
return ret_list | for a given Anatomy ID, returns a ``list`` of Gene ID for the anatomy
:returns: a ``list`` of gene ID | code/reasoningtool/kg-construction/QueryBioLink.py | get_genes_for_anatomy | rtx-travis-tester/RTX | 31 | python | @staticmethod
def get_genes_for_anatomy(anatomy_id):
'for a given Anatomy ID, returns a ``list`` of Gene ID for the anatomy\n\n :returns: a ``list`` of gene ID\n '
handler = QueryBioLink.HANDLER_MAP['get_genes_for_anatomy'].format(anatomy_id=anatomy_id)
results = QueryBioLink.__access_api(handler)
ret_list = []
if (results is None):
return ret_list
res_dict = results['associations']
ret_list = list(map((lambda r: r['subject']['id']), res_dict))
if (len(ret_list) > 200):
print('Warning, got {} genes for anatomy {}'.format(len(ret_list), anatomy_id), file=sys.stderr)
return ret_list | @staticmethod
def get_genes_for_anatomy(anatomy_id):
'for a given Anatomy ID, returns a ``list`` of Gene ID for the anatomy\n\n :returns: a ``list`` of gene ID\n '
handler = QueryBioLink.HANDLER_MAP['get_genes_for_anatomy'].format(anatomy_id=anatomy_id)
results = QueryBioLink.__access_api(handler)
ret_list = []
if (results is None):
return ret_list
res_dict = results['associations']
ret_list = list(map((lambda r: r['subject']['id']), res_dict))
if (len(ret_list) > 200):
print('Warning, got {} genes for anatomy {}'.format(len(ret_list), anatomy_id), file=sys.stderr)
return ret_list<|docstring|>for a given Anatomy ID, returns a ``list`` of Gene ID for the anatomy
:returns: a ``list`` of gene ID<|endoftext|> |
6e164784aaa8eb69bb1ef59fde74802a711507c033c8f2da06e9c7b9974afac3 | @staticmethod
def get_anatomies_for_phenotype(phenotype_id):
'for a given phenotype ID, returns a ``dict`` of Anatomy IDs and labels for the phenotype\n\n :returns: a ``dict`` of <anatomy_ID, label>\n '
handler = QueryBioLink.HANDLER_MAP['get_anatomies_for_phenotype'].format(phenotype_id=phenotype_id)
results = QueryBioLink.__access_api(handler)
ret_dict = dict()
if (results is None):
return ret_dict
ret_dict = dict(map((lambda r: (r['id'], r['label'])), results))
if (len(ret_dict) > 200):
print('Warning, got {} anatomies for phenotype {}'.format(len(ret_dict), phenotype_id), file=sys.stderr)
return ret_dict | for a given phenotype ID, returns a ``dict`` of Anatomy IDs and labels for the phenotype
:returns: a ``dict`` of <anatomy_ID, label> | code/reasoningtool/kg-construction/QueryBioLink.py | get_anatomies_for_phenotype | rtx-travis-tester/RTX | 31 | python | @staticmethod
def get_anatomies_for_phenotype(phenotype_id):
'for a given phenotype ID, returns a ``dict`` of Anatomy IDs and labels for the phenotype\n\n :returns: a ``dict`` of <anatomy_ID, label>\n '
handler = QueryBioLink.HANDLER_MAP['get_anatomies_for_phenotype'].format(phenotype_id=phenotype_id)
results = QueryBioLink.__access_api(handler)
ret_dict = dict()
if (results is None):
return ret_dict
ret_dict = dict(map((lambda r: (r['id'], r['label'])), results))
if (len(ret_dict) > 200):
print('Warning, got {} anatomies for phenotype {}'.format(len(ret_dict), phenotype_id), file=sys.stderr)
return ret_dict | @staticmethod
def get_anatomies_for_phenotype(phenotype_id):
'for a given phenotype ID, returns a ``dict`` of Anatomy IDs and labels for the phenotype\n\n :returns: a ``dict`` of <anatomy_ID, label>\n '
handler = QueryBioLink.HANDLER_MAP['get_anatomies_for_phenotype'].format(phenotype_id=phenotype_id)
results = QueryBioLink.__access_api(handler)
ret_dict = dict()
if (results is None):
return ret_dict
ret_dict = dict(map((lambda r: (r['id'], r['label'])), results))
if (len(ret_dict) > 200):
print('Warning, got {} anatomies for phenotype {}'.format(len(ret_dict), phenotype_id), file=sys.stderr)
return ret_dict<|docstring|>for a given phenotype ID, returns a ``dict`` of Anatomy IDs and labels for the phenotype
:returns: a ``dict`` of <anatomy_ID, label><|endoftext|> |
cc9130d47069ef9d9d63cb647e58ebd09bbe2e402989ed6b95c3bccd7e30aa1b | @staticmethod
def map_disease_to_phenotype(disease_id):
'\n Mapping a disease to a list of phenotypes\n :param disease_id: The DOID / OMIM ID for a disease\n :return: A list of phenotypes HP IDs, or an empty array if no HP IDs are found\n '
hp_array = []
if ((not isinstance(disease_id, str)) or ((disease_id[:5] != 'OMIM:') and (disease_id[:5] != 'DOID:'))):
return hp_array
handler = QueryBioLink.HANDLER_MAP['map_disease_to_phenotype'].format(disease_id=disease_id)
results = QueryBioLink.__access_api(handler)
if (results is not None):
if ('objects' in results.keys()):
hp_array = results['objects']
return hp_array | Mapping a disease to a list of phenotypes
:param disease_id: The DOID / OMIM ID for a disease
:return: A list of phenotypes HP IDs, or an empty array if no HP IDs are found | code/reasoningtool/kg-construction/QueryBioLink.py | map_disease_to_phenotype | rtx-travis-tester/RTX | 31 | python | @staticmethod
def map_disease_to_phenotype(disease_id):
'\n Mapping a disease to a list of phenotypes\n :param disease_id: The DOID / OMIM ID for a disease\n :return: A list of phenotypes HP IDs, or an empty array if no HP IDs are found\n '
hp_array = []
if ((not isinstance(disease_id, str)) or ((disease_id[:5] != 'OMIM:') and (disease_id[:5] != 'DOID:'))):
return hp_array
handler = QueryBioLink.HANDLER_MAP['map_disease_to_phenotype'].format(disease_id=disease_id)
results = QueryBioLink.__access_api(handler)
if (results is not None):
if ('objects' in results.keys()):
hp_array = results['objects']
return hp_array | @staticmethod
def map_disease_to_phenotype(disease_id):
'\n Mapping a disease to a list of phenotypes\n :param disease_id: The DOID / OMIM ID for a disease\n :return: A list of phenotypes HP IDs, or an empty array if no HP IDs are found\n '
hp_array = []
if ((not isinstance(disease_id, str)) or ((disease_id[:5] != 'OMIM:') and (disease_id[:5] != 'DOID:'))):
return hp_array
handler = QueryBioLink.HANDLER_MAP['map_disease_to_phenotype'].format(disease_id=disease_id)
results = QueryBioLink.__access_api(handler)
if (results is not None):
if ('objects' in results.keys()):
hp_array = results['objects']
return hp_array<|docstring|>Mapping a disease to a list of phenotypes
:param disease_id: The DOID / OMIM ID for a disease
:return: A list of phenotypes HP IDs, or an empty array if no HP IDs are found<|endoftext|> |
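A quick, self-contained restatement of the guard clause above: only strings prefixed with `DOID:` or `OMIM:` are sent to the API, everything else short-circuits to an empty list.

```python
# Mirrors the input check in map_disease_to_phenotype (sketch only).
def looks_like_disease_id(disease_id):
    return isinstance(disease_id, str) and disease_id[:5] in ('OMIM:', 'DOID:')

print(looks_like_disease_id('DOID:1612'))   # True  -> query is issued
print(looks_like_disease_id('HP:0003002'))  # False -> [] is returned without a request
```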
b94e9b4cfd19c7c0bdb5dee7d04b44d87ff982e7c00c8bb20090a79eea29abcb | def compute_homography(src, dst):
'computes the homography from src, to dst using inversion method.'
if (src.shape[1] == 2):
p1 = np.ones((len(src), 3), 'float64')
p1[:, :2] = src
elif (src.shape[1] == 3):
p1 = src
if (dst.shape[1] == 2):
p2 = np.ones((len(dst), 3), 'float64')
p2[:, :2] = dst
elif (dst.shape[1] == 3):
p2 = dst
npoints = len(src)
count = ((2 * npoints) + 1)
A = np.zeros((count, 9), 'float32')
for i in range(npoints):
p1i = p1[i]
(x2i, y2i, w2i) = p2[i]
xpi = (x2i * p1i)
ypi = (y2i * p1i)
wpi = (w2i * p1i)
A[(i * 2) + 1, 3:6] = (- wpi)
A[(i * 2) + 1, 6:9] = ypi
A[(i * 2), 0:3] = (- wpi)
A[(i * 2), 6:9] = xpi
A[(8, 8)] = 1
B = np.zeros((9, 1))
B[(8, 0)] = 1
h = (np.linalg.inv(A) @ B)
print(np.linalg.inv(A).shape)
H = h.reshape(3, 3)
return H | computes the homography from src, to dst using inversion method. | compute_homography.py | compute_homography | adi2809/SimpleScanner | 1 | python | def compute_homography(src, dst):
if (src.shape[1] == 2):
p1 = np.ones((len(src), 3), 'float64')
p1[:, :2] = src
elif (src.shape[1] == 3):
p1 = src
if (dst.shape[1] == 2):
p2 = np.ones((len(dst), 3), 'float64')
p2[:, :2] = dst
elif (dst.shape[1] == 3):
p2 = dst
npoints = len(src)
count = ((2 * npoints) + 1)
A = np.zeros((count, 9), 'float32')
for i in range(npoints):
p1i = p1[i]
(x2i, y2i, w2i) = p2[i]
xpi = (x2i * p1i)
ypi = (y2i * p1i)
wpi = (w2i * p1i)
A[(i * 2) + 1, 3:6] = (- wpi)
A[(i * 2) + 1, 6:9] = ypi
A[(i * 2), 0:3] = (- wpi)
A[(i * 2), 6:9] = xpi
A[(8, 8)] = 1
B = np.zeros((9, 1))
B[(8, 0)] = 1
h = (np.linalg.inv(A) @ B)
print(np.linalg.inv(A).shape)
H = h.reshape(3, 3)
return H | def compute_homography(src, dst):
if (src.shape[1] == 2):
p1 = np.ones((len(src), 3), 'float64')
p1[:, :2] = src
elif (src.shape[1] == 3):
p1 = src
if (dst.shape[1] == 2):
p2 = np.ones((len(dst), 3), 'float64')
p2[:, :2] = dst
elif (dst.shape[1] == 3):
p2 = dst
npoints = len(src)
count = ((2 * npoints) + 1)
A = np.zeros((count, 9), 'float32')
for i in range(npoints):
p1i = p1[i]
(x2i, y2i, w2i) = p2[i]
xpi = (x2i * p1i)
ypi = (y2i * p1i)
wpi = (w2i * p1i)
A[(i * 2) + 1, 3:6] = (- wpi)
A[(i * 2) + 1, 6:9] = ypi
A[(i * 2), 0:3] = (- wpi)
A[(i * 2), 6:9] = xpi
A[(8, 8)] = 1
B = np.zeros((9, 1))
B[(8, 0)] = 1
h = (np.linalg.inv(A) @ B)
print(np.linalg.inv(A).shape)
H = h.reshape(3, 3)
return H<|docstring|>computes the homography from src, to dst using inversion method.<|endoftext|> |
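A hypothetical usage sketch of `compute_homography` (assuming the source module imports numpy as `np`): the inversion variant needs exactly 4 correspondences so that `A` is 9x9 (`count = 2*4 + 1`) and invertible, and the extra row pins H[2,2] to 1.

```python
import numpy as np

# Unit square mapped onto a square scaled by 2 (made-up correspondences).
src = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=float)
dst = 2.0 * src
H = compute_homography(src, dst)   # expected close to diag(2, 2, 1)

p = np.array([0.5, 0.5, 1.0])      # homogeneous point
q = H @ p
print(q[:2] / q[2])                # expected close to [1.0, 1.0]
```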
1676c3db720271b2ff0807150ea7e2b0d112396a4ada70d0f03b872a59f43c02 | def find_homography(src, dst):
'computes the homography from src, to dst using singular value decomposition method.'
if (src.shape[1] == 2):
p1 = np.ones((len(src), 3), 'float64')
p1[:, :2] = src
elif (src.shape[1] == 3):
p1 = src
if (dst.shape[1] == 2):
p2 = np.ones((len(dst), 3), 'float64')
p2[:, :2] = dst
elif (dst.shape[1] == 3):
p2 = dst
npoints = len(src)
count = (3 * npoints)
A = np.zeros((count, 9), 'float32')
for i in range(npoints):
p1i = p1[i]
(x2i, y2i, w2i) = p2[i]
xpi = (x2i * p1i)
ypi = (y2i * p1i)
wpi = (w2i * p1i)
A[(i * 3), 3:6] = (- wpi)
A[(i * 3), 6:9] = ypi
A[(i * 3) + 1, 0:3] = wpi
A[(i * 3) + 1, 6:9] = (- xpi)
A[(i * 3) + 2, 0:3] = (- ypi)
A[(i * 3) + 2, 3:6] = xpi
(U, s, V) = np.linalg.svd(A)
h = V[(- 1)]
H = h.reshape(3, 3)
return H | computes the homography from src, to dst using singular value decomposition method. | compute_homography.py | find_homography | adi2809/SimpleScanner | 1 | python | def find_homography(src, dst):
if (src.shape[1] == 2):
p1 = np.ones((len(src), 3), 'float64')
p1[:, :2] = src
elif (src.shape[1] == 3):
p1 = src
if (dst.shape[1] == 2):
p2 = np.ones((len(dst), 3), 'float64')
p2[:, :2] = dst
elif (dst.shape[1] == 3):
p2 = dst
npoints = len(src)
count = (3 * npoints)
A = np.zeros((count, 9), 'float32')
for i in range(npoints):
p1i = p1[i]
(x2i, y2i, w2i) = p2[i]
xpi = (x2i * p1i)
ypi = (y2i * p1i)
wpi = (w2i * p1i)
A[(i * 3), 3:6] = (- wpi)
A[(i * 3), 6:9] = ypi
A[(i * 3) + 1, 0:3] = wpi
A[(i * 3) + 1, 6:9] = (- xpi)
A[(i * 3) + 2, 0:3] = (- ypi)
A[(i * 3) + 2, 3:6] = xpi
(U, s, V) = np.linalg.svd(A)
h = V[(- 1)]
H = h.reshape(3, 3)
return H | def find_homography(src, dst):
if (src.shape[1] == 2):
p1 = np.ones((len(src), 3), 'float64')
p1[:, :2] = src
elif (src.shape[1] == 3):
p1 = src
if (dst.shape[1] == 2):
p2 = np.ones((len(dst), 3), 'float64')
p2[:, :2] = dst
elif (dst.shape[1] == 3):
p2 = dst
npoints = len(src)
count = (3 * npoints)
A = np.zeros((count, 9), 'float32')
for i in range(npoints):
p1i = p1[i]
(x2i, y2i, w2i) = p2[i]
xpi = (x2i * p1i)
ypi = (y2i * p1i)
wpi = (w2i * p1i)
A[(i * 3), 3:6] = (- wpi)
A[(i * 3), 6:9] = ypi
A[(i * 3) + 1, 0:3] = wpi
A[(i * 3) + 1, 6:9] = (- xpi)
A[(i * 3) + 2, 0:3] = (- ypi)
A[(i * 3) + 2, 3:6] = xpi
(U, s, V) = np.linalg.svd(A)
h = V[(- 1)]
H = h.reshape(3, 3)
return H<|docstring|>computes the homography from src, to dst using singular value decomposition method.<|endoftext|> |
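A hypothetical check of the SVD (DLT) variant above (again assuming numpy as `np`): with four or more exact correspondences the last right singular vector spans the null space of `A`, so the result equals the true homography only up to scale and should be normalized before comparing.

```python
import numpy as np

src = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0.5, 0.5]], dtype=float)
dst = 2.0 * src + 1.0              # scale by 2, translate by (1, 1)
H = find_homography(src, dst)
H = H / H[2, 2]                    # remove the arbitrary scale from the SVD
print(np.round(H, 3))              # expected close to [[2, 0, 1], [0, 2, 1], [0, 0, 1]]
```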
84b32cfb38ed9b4045850c4a145d7a9f32548fd4105340cf05fb46ec8955cab9 | def find_homography_2(src, dst):
'computes the homography from src, to dst using singular value decomposition method.'
if (src.shape[1] == 2):
p1 = np.ones((len(src), 3), 'float64')
p1[:, :2] = src
elif (src.shape[1] == 3):
p1 = src
if (dst.shape[1] == 2):
p2 = np.ones((len(dst), 3), 'float64')
p2[:, :2] = dst
elif (dst.shape[1] == 3):
p2 = dst
npoints = len(src)
count = ((2 * npoints) + 1)
A = np.zeros((count, 9), 'float32')
for i in range(npoints):
p1i = p1[i]
(x2i, y2i, w2i) = p2[i]
xpi = (x2i * p1i)
ypi = (y2i * p1i)
wpi = (w2i * p1i)
A[(i * 2) + 1, 3:6] = (- wpi)
A[(i * 2) + 1, 6:9] = ypi
A[(i * 2), 0:3] = (- wpi)
A[(i * 2), 6:9] = xpi
(U, s, V) = np.linalg.svd(A)
h = V[(- 1)]
H = h.reshape(3, 3)
return H | computes the homography from src, to dst using singular value decomposition method. | compute_homography.py | find_homography_2 | adi2809/SimpleScanner | 1 | python | def find_homography_2(src, dst):
if (src.shape[1] == 2):
p1 = np.ones((len(src), 3), 'float64')
p1[:, :2] = src
elif (src.shape[1] == 3):
p1 = src
if (dst.shape[1] == 2):
p2 = np.ones((len(dst), 3), 'float64')
p2[:, :2] = dst
elif (dst.shape[1] == 3):
p2 = dst
npoints = len(src)
count = ((2 * npoints) + 1)
A = np.zeros((count, 9), 'float32')
for i in range(npoints):
p1i = p1[i]
(x2i, y2i, w2i) = p2[i]
xpi = (x2i * p1i)
ypi = (y2i * p1i)
wpi = (w2i * p1i)
A[(i * 2) + 1, 3:6] = (- wpi)
A[(i * 2) + 1, 6:9] = ypi
A[(i * 2), 0:3] = (- wpi)
A[(i * 2), 6:9] = xpi
(U, s, V) = np.linalg.svd(A)
h = V[(- 1)]
H = h.reshape(3, 3)
return H | def find_homography_2(src, dst):
if (src.shape[1] == 2):
p1 = np.ones((len(src), 3), 'float64')
p1[:, :2] = src
elif (src.shape[1] == 3):
p1 = src
if (dst.shape[1] == 2):
p2 = np.ones((len(dst), 3), 'float64')
p2[:, :2] = dst
elif (dst.shape[1] == 3):
p2 = dst
npoints = len(src)
count = ((2 * npoints) + 1)
A = np.zeros((count, 9), 'float32')
for i in range(npoints):
p1i = p1[i]
(x2i, y2i, w2i) = p2[i]
xpi = (x2i * p1i)
ypi = (y2i * p1i)
wpi = (w2i * p1i)
A[(i * 2) + 1, 3:6] = (- wpi)
A[(i * 2) + 1, 6:9] = ypi
A[(i * 2), 0:3] = (- wpi)
A[(i * 2), 6:9] = xpi
(U, s, V) = np.linalg.svd(A)
h = V[(- 1)]
H = h.reshape(3, 3)
return H<|docstring|>computes the homography from src, to dst using singular value decomposition method.<|endoftext|> |
f3c8fc1a18f0f140ca221b9d5da908fa1307c2c23f6b0e7ffb8e36b9b8f06f1f | def string_list_validator(value: str) -> str:
"\n Validate if value is a str\n\n Arguments:\n value {str} -- value to validate\n\n Raises:\n ValueError: value is not a type of str\n ValueError: Value can't be a empty string\n\n Returns:\n str -- unchanged input value\n "
if (not isinstance(value, str)):
raise ValueError('All values has to be an string! List[str]')
if (value == ''):
raise ValueError("Value can't be a empty string! List[str]")
return value | Validate if value is a str
Arguments:
value {str} -- value to validate
Raises:
ValueError: value is not a type of str
ValueError: Value can't be a empty string
Returns:
str -- unchanged input value | meetup_search/rest_api/argument_validator.py | string_list_validator | saxsys/flask-meetup-data-scraper | 1 | python | def string_list_validator(value: str) -> str:
"\n Validate if value is a str\n\n Arguments:\n value {str} -- value to validate\n\n Raises:\n ValueError: value is not a type of str\n ValueError: Value can't be a empty string\n\n Returns:\n str -- unchanged input value\n "
if (not isinstance(value, str)):
raise ValueError('All values has to be an string! List[str]')
if (value == ''):
raise ValueError("Value can't be a empty string! List[str]")
return value | def string_list_validator(value: str) -> str:
"\n Validate if value is a str\n\n Arguments:\n value {str} -- value to validate\n\n Raises:\n ValueError: value is not a type of str\n ValueError: Value can't be a empty string\n\n Returns:\n str -- unchanged input value\n "
if (not isinstance(value, str)):
raise ValueError('All values has to be an string! List[str]')
if (value == ''):
raise ValueError("Value can't be a empty string! List[str]")
return value<|docstring|>Validate if value is a str
Arguments:
value {str} -- value to validate
Raises:
ValueError: value is not a type of str
ValueError: Value can't be a empty string
Returns:
str -- unchanged input value<|endoftext|> |
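A short usage sketch of the validator above; functions like this are typically plugged in as the `type=` callable of a flask-restful `reqparse` argument (an assumption here, since the parser wiring is not part of this record):

```python
print(string_list_validator('python'))   # 'python' passes through unchanged
try:
    string_list_validator('')
except ValueError as err:
    print(err)                           # "Value can't be a empty string! List[str]"
```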
7464ea7bb387d17ba8e853fb5b7a1fcb2bb4fd090a7a54642f202d3468a78c6f | def positive_int_validator(value: int) -> int:
'\n Validate for positive int\n\n Arguments:\n value {int} -- int number\n\n Raises:\n ValueError: Value is an str that can not convert to an int\n ValueError: Value has to be an int\n ValueError: Value has to be equal or greater than 0\n\n Returns:\n int -- unchanged input value\n '
if isinstance(value, str):
try:
value = int(value)
except ValueError:
raise ValueError('Value has to be an int!')
if (not isinstance(value, int)):
raise ValueError('Value has to be an int!')
if (value < 0):
raise ValueError('Value has to be equal or greater than 0!')
return value | Validate for positive int
Arguments:
value {int} -- int number
Raises:
ValueError: Value is an str that can not convert to an int
ValueError: Value has to be an int
ValueError: Value has to be equal or greater than 0
Returns:
int -- unchanged input value | meetup_search/rest_api/argument_validator.py | positive_int_validator | saxsys/flask-meetup-data-scraper | 1 | python | def positive_int_validator(value: int) -> int:
'\n Validate for positive int\n\n Arguments:\n value {int} -- int number\n\n Raises:\n ValueError: Value is an str that can not convert to an int\n ValueError: Value has to be an int\n ValueError: Value has to be equal or greater than 0\n\n Returns:\n int -- unchanged input value\n '
if isinstance(value, str):
try:
value = int(value)
except ValueError:
raise ValueError('Value has to be an int!')
if (not isinstance(value, int)):
raise ValueError('Value has to be an int!')
if (value < 0):
raise ValueError('Value has to be equal or greater than 0!')
return value | def positive_int_validator(value: int) -> int:
'\n Validate for positive int\n\n Arguments:\n value {int} -- int number\n\n Raises:\n ValueError: Value is an str that can not convert to an int\n ValueError: Value has to be an int\n ValueError: Value has to be equal or greater than 0\n\n Returns:\n int -- unchanged input value\n '
if isinstance(value, str):
try:
value = int(value)
except ValueError:
raise ValueError('Value has to be an int!')
if (not isinstance(value, int)):
raise ValueError('Value has to be an int!')
if (value < 0):
raise ValueError('Value has to be equal or greater than 0!')
return value<|docstring|>Validate for positive int
Arguments:
value {int} -- int number
Raises:
ValueError: Value is an str that can not convert to an int
ValueError: Value has to be an int
ValueError: Value has to be equal or greater than 0
Returns:
int -- unchanged input value<|endoftext|> |
589c169bfa97d31674910afd7b711b98b8d96f307bd8bae6feffef697825c26d | def date_validator(value: str) -> str:
'\n Validate if string is a valid date\n\n Arguments:\n value {str} -- value to validate\n\n Returns:\n str -- validate date as string\n '
try:
return str(datetime.fromisoformat(value).date())
except TypeError:
raise ValueError("Can't convert value to date!") | Validate if string is a valid date
Arguments:
value {str} -- value to validate
Returns:
str -- validate date as string | meetup_search/rest_api/argument_validator.py | date_validator | saxsys/flask-meetup-data-scraper | 1 | python | def date_validator(value: str) -> str:
'\n Validate if string is a valid date\n\n Arguments:\n value {str} -- value to validate\n\n Returns:\n str -- validate date as string\n '
try:
return str(datetime.fromisoformat(value).date())
except TypeError:
raise ValueError("Can't convert value to date!") | def date_validator(value: str) -> str:
'\n Validate if string is a valid date\n\n Arguments:\n value {str} -- value to validate\n\n Returns:\n str -- validate date as string\n '
try:
return str(datetime.fromisoformat(value).date())
except TypeError:
raise ValueError("Can't convert value to date!")<|docstring|>Validate if string is a valid date
Arguments:
value {str} -- value to validate
Returns:
str -- validate date as string<|endoftext|> |
6ca657b3043c5307828cd0dda315de85aa3c84dfcd46a78030a1198cca19da89 | def __init__(self, batch_queue, min_records_in_aggregated_batches):
'\n :param batch_queue: instance of :class:`BatchQueue` or :class:`PartitionedBatchQueue` to be wrapped\n '
self._q = batch_queue
self._empty = False
self._min_records_in_aggregated_batches = min_records_in_aggregated_batches | :param batch_queue: instance of :class:`BatchQueue` or :class:`PartitionedBatchQueue` to be wrapped | shellstreaming/core/remote_queue.py | __init__ | laysakura/shellstreaming | 1 | python | def __init__(self, batch_queue, min_records_in_aggregated_batches):
'\n \n '
self._q = batch_queue
self._empty = False
self._min_records_in_aggregated_batches = min_records_in_aggregated_batches | def __init__(self, batch_queue, min_records_in_aggregated_batches):
'\n \n '
self._q = batch_queue
self._empty = False
self._min_records_in_aggregated_batches = min_records_in_aggregated_batches<|docstring|>:param batch_queue: instance of :class:`BatchQueue` or :class:`PartitionedBatchQueue` to be wrapped<|endoftext|> |
800b9d67c131e5e07d11c8ff4ccc12ec1b96eee9b420a234359ddf2646a23e01 | def findChildEndingWith(el, tagEnd):
'Finds first child of an XML element with tag ending in tagEnd (case insensitive).'
tagEnd = tagEnd.lower()
for child in el:
if child.tag.lower().endswith(tagEnd):
return child
return None | Finds first child of an XML element with tag ending in tagEnd (case insensitive). | secscan/scrape13F.py | findChildEndingWith | ikedim01/secscan | 0 | python | def findChildEndingWith(el, tagEnd):
tagEnd = tagEnd.lower()
for child in el:
if child.tag.lower().endswith(tagEnd):
return child
return None | def findChildEndingWith(el, tagEnd):
tagEnd = tagEnd.lower()
for child in el:
if child.tag.lower().endswith(tagEnd):
return child
return None<|docstring|>Finds first child of an XML element with tag ending in tagEnd (case insensitive).<|endoftext|> |
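A self-contained illustration, using the function above, of why the suffix match is useful: EDGAR XML is namespaced, so child tags arrive as '{namespace}cusip' rather than plain 'cusip'.

```python
import xml.etree.ElementTree as ET

xml_text = ('<informationTable xmlns="urn:example">'
            '<cusip>037833100</cusip><value>123</value></informationTable>')
root = ET.fromstring(xml_text)
print(root[0].tag)                              # '{urn:example}cusip'
print(findChildEndingWith(root, 'CUSIP').text)  # '037833100' (match is case-insensitive)
```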
8ee08baf12a78f0b85bea370a07df78123ec2ee8f22d2fa126920d6420bf643a | def findChildSeries(el, tagEnds):
'Finds a nested series of children by tag using findChildEndingWith'
for tagEnd in tagEnds:
el = findChildEndingWith(el, tagEnd)
return el | Finds a nested series of children by tag using findChildEndingWith | secscan/scrape13F.py | findChildSeries | ikedim01/secscan | 0 | python | def findChildSeries(el, tagEnds):
for tagEnd in tagEnds:
el = findChildEndingWith(el, tagEnd)
return el | def findChildSeries(el, tagEnds):
for tagEnd in tagEnds:
el = findChildEndingWith(el, tagEnd)
return el<|docstring|>Finds a nested series of children by tag using findChildEndingWith<|endoftext|> |
9f0ca411945b0292ff991aeebb6b0cf8c4ea422f53692cb44887fc9cb7514d8c | def getRowInfo(row):
"\n Returns information for a row in a 13F table in the form:\n (cusip, name, value, title, count, putCall)\n where the field values are as given in the table,\n except putCall is 'CALL', 'PUT', or ''.\n "
cusip = findChildEndingWith(row, 'cusip').text.upper().strip()
name = findChildEndingWith(row, 'issuer').text.strip()
value = findChildEndingWith(row, 'value').text.strip()
title = findChildEndingWith(row, 'titleOfClass').text.upper().strip()
shrsOrPrnEl = findChildEndingWith(row, 'shrsOrPrnAmt')
count = findChildEndingWith(shrsOrPrnEl, 'sshPrnamt').text.strip()
putCallEl = findChildEndingWith(row, 'putCall')
if (putCallEl is None):
putCallEl = findChildEndingWith(shrsOrPrnEl, 'putCall')
if (putCallEl is not None):
putCall = putCallEl.text.upper().strip()
elif (callOptPat.search(name) or title.startswith('CALL') or (title == 'CAL')):
putCall = 'CALL'
elif (putOptPat.search(name) or title.startswith('PUT')):
putCall = 'PUT'
else:
putCall = ''
return (cusip, name, value, title, count, putCall) | Returns information for a row in a 13F table in the form:
(cusip, name, value, title, count, putCall)
where the field values are as given in the table,
except putCall is 'CALL', 'PUT', or ''. | secscan/scrape13F.py | getRowInfo | ikedim01/secscan | 0 | python | def getRowInfo(row):
"\n Returns information for a row in a 13F table in the form:\n (cusip, name, value, title, count, putCall)\n where the field values are as given in the table,\n except putCall is 'CALL', 'PUT', or .\n "
cusip = findChildEndingWith(row, 'cusip').text.upper().strip()
name = findChildEndingWith(row, 'issuer').text.strip()
value = findChildEndingWith(row, 'value').text.strip()
title = findChildEndingWith(row, 'titleOfClass').text.upper().strip()
shrsOrPrnEl = findChildEndingWith(row, 'shrsOrPrnAmt')
count = findChildEndingWith(shrsOrPrnEl, 'sshPrnamt').text.strip()
putCallEl = findChildEndingWith(row, 'putCall')
if (putCallEl is None):
putCallEl = findChildEndingWith(shrsOrPrnEl, 'putCall')
if (putCallEl is not None):
putCall = putCallEl.text.upper().strip()
elif (callOptPat.search(name) or title.startswith('CALL') or (title == 'CAL')):
putCall = 'CALL'
elif (putOptPat.search(name) or title.startswith('PUT')):
putCall = 'PUT'
else:
putCall = ''
return (cusip, name, value, title, count, putCall) | def getRowInfo(row):
"\n Returns information for a row in a 13F table in the form:\n (cusip, name, value, title, count, putCall)\n where the field values are as given in the table,\n except putCall is 'CALL', 'PUT', or .\n "
cusip = findChildEndingWith(row, 'cusip').text.upper().strip()
name = findChildEndingWith(row, 'issuer').text.strip()
value = findChildEndingWith(row, 'value').text.strip()
title = findChildEndingWith(row, 'titleOfClass').text.upper().strip()
shrsOrPrnEl = findChildEndingWith(row, 'shrsOrPrnAmt')
count = findChildEndingWith(shrsOrPrnEl, 'sshPrnamt').text.strip()
putCallEl = findChildEndingWith(row, 'putCall')
if (putCallEl is None):
putCallEl = findChildEndingWith(shrsOrPrnEl, 'putCall')
if (putCallEl is not None):
putCall = putCallEl.text.upper().strip()
elif (callOptPat.search(name) or title.startswith('CALL') or (title == 'CAL')):
putCall = 'CALL'
elif (putOptPat.search(name) or title.startswith('PUT')):
putCall = 'PUT'
else:
putCall = ''
return (cusip, name, value, title, count, putCall)<|docstring|>Returns information for a row in a 13F table in the form:
(cusip, name, value, title, count, putCall)
where the field values are as given in the table,
except putCall is 'CALL', 'PUT', or ''.<|endoftext|> |
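A hypothetical minimal information-table row to exercise `getRowInfo` (tag names follow the EDGAR 13F schema; the namespace is omitted because matching is by tag suffix, and the module-level `callOptPat`/`putOptPat` regexes from this file are assumed to be defined):

```python
import xml.etree.ElementTree as ET

row_xml = """<infoTable>
  <nameOfIssuer>APPLE INC</nameOfIssuer>
  <titleOfClass>COM</titleOfClass>
  <cusip>037833100</cusip>
  <value>1000</value>
  <shrsOrPrnAmt><sshPrnamt>5000</sshPrnamt><sshPrnamtType>SH</sshPrnamtType></shrsOrPrnAmt>
</infoTable>"""
print(getRowInfo(ET.fromstring(row_xml)))
# expected: ('037833100', 'APPLE INC', '1000', 'COM', '5000', '')
```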
77640d80ac88d73e35a8a3212324bd08dd4d59e5eed3e81ebe3b2c2e17fc192a | def parse13FHoldings(accNo, formType=None):
"\n Parses a 13F filing, returning the result in the form:\n {\n 'period': 'YYYY-MM-DD',\n 'acceptDate': 'YYYY-MM-DD',\n 'acceptTime': 'HH:MM:SS',\n 'cik' : 'DDDDDDDDDD',\n 'holdings': [(cusip, name, value, title, count, putCall), ... ]\n }\n where the field values are as given in the table,\n except putCall is 'CALL', 'PUT', or ''.\n "
info = basicInfo.getSecFormInfo(accNo, formType)
xmlUrls = [l[(- 1)] for l in info['links'] if l[0].lower().endswith('xml')]
if (len(xmlUrls) == 1):
xmlSummTab = utils.downloadSecUrl(xmlUrls[0], toFormat='xml')
tot = int(findChildSeries(xmlSummTab, ['formdata', 'summarypage', 'tableentrytotal']).text.strip())
if (tot == 0):
print('*** zero total, table not present')
else:
print('*** nonzero total, but table not present')
holdings = []
else:
xmlTab = utils.downloadSecUrl(xmlUrls[(- 1)], toFormat='xml')
tabRows = [tabRow for tabRow in xmlTab if tabRow.tag.lower().endswith('infotable')]
if (len(xmlTab) != len(tabRows)):
print('*** #rows mismatch', len(xmlTab), 'all children', len(tabRows), 'table rows')
if (len(tabRows) == 0):
print('*** no holdings in table')
holdings = [getRowInfo(tabRow) for tabRow in tabRows]
if (len(info['ciks']) != 1):
print('*** unexpected number of CIKs!=1', info['ciks'])
return {'period': info['period'], 'acceptDate': info['acceptDate'], 'acceptTime': info['acceptTime'], 'cik': info['ciks'][0], 'holdings': holdings} | Parses a 13F filing, returning the result in the form:
{
'period': 'YYYY-MM-DD',
'acceptDate': 'YYYY-MM-DD',
'acceptTime': 'HH:MM:SS',
'cik' : 'DDDDDDDDDD',
'holdings': [(cusip, name, value, title, count, putCall), ... ]
}
where the field values are as given in the table,
except putCall is 'CALL', 'PUT', or ''. | secscan/scrape13F.py | parse13FHoldings | ikedim01/secscan | 0 | python | def parse13FHoldings(accNo, formType=None):
"\n Parses a 13F filing, returning the result in the form:\n {\n 'period': 'YYYY-MM-DD',\n 'acceptDate': 'YYYY-MM-DD',\n 'acceptTime': 'HH:MM:SS',\n 'cik' : 'DDDDDDDDDD',\n 'holdings': [(cusip, name, value, title, count, putCall), ... ]\n }\n where the field values are as given in the table,\n except putCall is 'CALL', 'PUT', or .\n "
info = basicInfo.getSecFormInfo(accNo, formType)
xmlUrls = [l[(- 1)] for l in info['links'] if l[0].lower().endswith('xml')]
if (len(xmlUrls) == 1):
xmlSummTab = utils.downloadSecUrl(xmlUrls[0], toFormat='xml')
tot = int(findChildSeries(xmlSummTab, ['formdata', 'summarypage', 'tableentrytotal']).text.strip())
if (tot == 0):
print('*** zero total, table not present')
else:
print('*** nonzero total, but table not present')
holdings = []
else:
xmlTab = utils.downloadSecUrl(xmlUrls[(- 1)], toFormat='xml')
tabRows = [tabRow for tabRow in xmlTab if tabRow.tag.lower().endswith('infotable')]
if (len(xmlTab) != len(tabRows)):
print('*** #rows mismatch', len(xmlTab), 'all children', len(tabRows), 'table rows')
if (len(tabRows) == 0):
print('*** no holdings in table')
holdings = [getRowInfo(tabRow) for tabRow in tabRows]
if (len(info['ciks']) != 1):
print('*** unexpected number of CIKs!=1', info['ciks'])
return {'period': info['period'], 'acceptDate': info['acceptDate'], 'acceptTime': info['acceptTime'], 'cik': info['ciks'][0], 'holdings': holdings} | def parse13FHoldings(accNo, formType=None):
"\n Parses a 13F filing, returning the result in the form:\n {\n 'period': 'YYYY-MM-DD',\n 'acceptDate': 'YYYY-MM-DD',\n 'acceptTime': 'HH:MM:SS',\n 'cik' : 'DDDDDDDDDD',\n 'holdings': [(cusip, name, value, title, count, putCall), ... ]\n }\n where the field values are as given in the table,\n except putCall is 'CALL', 'PUT', or .\n "
info = basicInfo.getSecFormInfo(accNo, formType)
xmlUrls = [l[(- 1)] for l in info['links'] if l[0].lower().endswith('xml')]
if (len(xmlUrls) == 1):
xmlSummTab = utils.downloadSecUrl(xmlUrls[0], toFormat='xml')
tot = int(findChildSeries(xmlSummTab, ['formdata', 'summarypage', 'tableentrytotal']).text.strip())
if (tot == 0):
print('*** zero total, table not present')
else:
print('*** nonzero total, but table not present')
holdings = []
else:
xmlTab = utils.downloadSecUrl(xmlUrls[(- 1)], toFormat='xml')
tabRows = [tabRow for tabRow in xmlTab if tabRow.tag.lower().endswith('infotable')]
if (len(xmlTab) != len(tabRows)):
print('*** #rows mismatch', len(xmlTab), 'all children', len(tabRows), 'table rows')
if (len(tabRows) == 0):
print('*** no holdings in table')
holdings = [getRowInfo(tabRow) for tabRow in tabRows]
if (len(info['ciks']) != 1):
print('*** unexpected number of CIKs!=1', info['ciks'])
return {'period': info['period'], 'acceptDate': info['acceptDate'], 'acceptTime': info['acceptTime'], 'cik': info['ciks'][0], 'holdings': holdings}<|docstring|>Parses a 13F filing, returning the result in the form:
{
'period': 'YYYY-MM-DD',
'acceptDate': 'YYYY-MM-DD',
'acceptTime': 'HH:MM:SS',
'cik' : 'DDDDDDDDDD',
'holdings': [(cusip, name, value, title, count, putCall), ... ]
}
where the field values are as given in the table,
except putCall is 'CALL', 'PUT', or ''.<|endoftext|> |
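A hedged usage sketch of `parse13FHoldings`: it needs the module's `utils`/`basicInfo` helpers and live access to EDGAR, and the accession number below is only a placeholder, not a real filing.

```python
filing = parse13FHoldings('0000000000-21-000000')   # placeholder accession number
print(filing['cik'], filing['period'], filing['acceptDate'])
for cusip, name, value, title, count, put_call in filing['holdings'][:5]:
    print(cusip, name, value, count, put_call or 'LONG')
```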
fe2fcdfd2d16a3cfd34557480f332b4e8749b345e65063a4d76a96b79f5ffc99 | def condenseHoldings(holdings, minFrac=0.0, maxFrac=1.0, pctFormat=False, includeName=False, cusipNames={}, minStocksPerInv=None, maxStocksPerInv=None, minTop10Frac=None, minAUM=None, allCusipCounter=None, all13FHoldingsMap=None, forCik=None):
'\n Converts a list of of stock and option holdings as parsed from the 13F:\n [(cusip, name, value, title, count, putCall), ... ]\n that may have multiple entries per stock into a condensed list that omits\n call/put options and only has one combined entry per stock:\n [(cusip, val, frac) ... ]\n sorted in descending order by value, and restricted to stocks with fraction\n of total portfolio in [minFrac..maxFrac]\n\n If pctFormat is True, frac is returned as a string in the format N.NN%\n If includeName is True, the cusip name is also returned:\n [(cusip, name, val, frac) ... ]\n\n If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, returns None\n for lists with too few stocks, too many stocks, too small a fraction in the\n top 10 stocks, or too small a total value.\n\n If supplied, allCusipCounter should be a Counter, and it will be updated to count\n all investors that have any position in each stock, without regard to the min/max\n options supplied to restrict the holdings list.\n\n If supplied, all13FHoldingsMap should be a dict, and a full sorted holdings list:\n [(cusip, val, frac) ... ]\n will be saved in all13FHoldingsMap[forCik], without regard to the min/max\n options supplied to restrict the holdings list.\n '
if includeName:
cusipToName = dict(((cusip, name) for (cusip, name, value, shType, nShares, putCall) in holdings))
holdings = sorted(((cusip, float(value)) for (cusip, name, value, shType, nShares, putCall) in holdings if (putCall == '')))
holdings = [(cusip, sum((val for (_, val) in it))) for (cusip, it) in itertools.groupby(holdings, key=(lambda x: x[0]))]
holdings.sort(key=(lambda x: x[1]), reverse=True)
totAum = sum((val for (_, val) in holdings))
holdings = [(cusip, val, ((val / totAum) if (totAum > 0.0) else 0.0)) for (cusip, val) in holdings]
if (all13FHoldingsMap is not None):
all13FHoldingsMap[forCik] = holdings
if (allCusipCounter is not None):
allCusipCounter.update((cusip for (cusip, _, _) in holdings))
if (((minStocksPerInv is not None) and (minStocksPerInv > len(holdings))) or ((maxStocksPerInv is not None) and (maxStocksPerInv < len(holdings))) or ((minAUM is not None) and (minAUM > (totAum * 1000.0))) or ((minTop10Frac is not None) and (minTop10Frac > sum((frac for (_, _, frac) in holdings[:10]))))):
return None
res = []
for (cusip, val, frac) in holdings:
if (frac > maxFrac):
continue
if (minFrac > frac):
break
fracOut = (f'{frac:.2%}' if pctFormat else frac)
if includeName:
res.append((cusip, cusipNames.get(cusip, cusipToName[cusip]), val, fracOut))
else:
res.append((cusip, val, fracOut))
return (res if (len(res) > 0) else None) | Converts a list of of stock and option holdings as parsed from the 13F:
[(cusip, name, value, title, count, putCall), ... ]
that may have multiple entries per stock into a condensed list that omits
call/put options and only has one combined entry per stock:
[(cusip, val, frac) ... ]
sorted in descending order by value, and restricted to stocks with fraction
of total portfolio in [minFrac..maxFrac]
If pctFormat is True, frac is returned as a string in the format N.NN%
If includeName is True, the cusip name is also returned:
[(cusip, name, val, frac) ... ]
If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, returns None
for lists with too few stocks, too many stocks, too small a fraction in the
top 10 stocks, or too small a total value.
If supplied, allCusipCounter should be a Counter, and it will be updated to count
all investors that have any position in each stock, without regard to the min/max
options supplied to restrict the holdings list.
If supplied, all13FHoldingsMap should be a dict, and a full sorted holdings list:
[(cusip, val, frac) ... ]
will be saved in all13FHoldingsMap[forCik], without regard to the min/max
options supplied to restrict the holdings list. | secscan/scrape13F.py | condenseHoldings | ikedim01/secscan | 0 | python | def condenseHoldings(holdings, minFrac=0.0, maxFrac=1.0, pctFormat=False, includeName=False, cusipNames={}, minStocksPerInv=None, maxStocksPerInv=None, minTop10Frac=None, minAUM=None, allCusipCounter=None, all13FHoldingsMap=None, forCik=None):
'\n Converts a list of of stock and option holdings as parsed from the 13F:\n [(cusip, name, value, title, count, putCall), ... ]\n that may have multiple entries per stock into a condensed list that omits\n call/put options and only has one combined entry per stock:\n [(cusip, val, frac) ... ]\n sorted in descending order by value, and restricted to stocks with fraction\n of total portfolio in [minFrac..maxFrac]\n\n If pctFormat is True, frac is returned as a string in the format N.NN%\n If includeName is True, the cusip name is also returned:\n [(cusip, name, val, frac) ... ]\n\n If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, returns None\n for lists with too few stocks, too many stocks, too small a fraction in the\n top 10 stocks, or too small a total value.\n\n If supplied, allCusipCounter should be a Counter, and it will be updated to count\n all investors that have any position in each stock, without regard to the min/max\n options supplied to restrict the holdings list.\n\n If supplied, all13FHoldingsMap should be a dict, and a full sorted holdings list:\n [(cusip, val, frac) ... ]\n will be saved in all13FHoldingsMap[forCik], without regard to the min/max\n options supplied to restrict the holdings list.\n '
if includeName:
cusipToName = dict(((cusip, name) for (cusip, name, value, shType, nShares, putCall) in holdings))
holdings = sorted(((cusip, float(value)) for (cusip, name, value, shType, nShares, putCall) in holdings if (putCall == '')))
holdings = [(cusip, sum((val for (_, val) in it))) for (cusip, it) in itertools.groupby(holdings, key=(lambda x: x[0]))]
holdings.sort(key=(lambda x: x[1]), reverse=True)
totAum = sum((val for (_, val) in holdings))
holdings = [(cusip, val, ((val / totAum) if (totAum > 0.0) else 0.0)) for (cusip, val) in holdings]
if (all13FHoldingsMap is not None):
all13FHoldingsMap[forCik] = holdings
if (allCusipCounter is not None):
allCusipCounter.update((cusip for (cusip, _, _) in holdings))
if (((minStocksPerInv is not None) and (minStocksPerInv > len(holdings))) or ((maxStocksPerInv is not None) and (maxStocksPerInv < len(holdings))) or ((minAUM is not None) and (minAUM > (totAum * 1000.0))) or ((minTop10Frac is not None) and (minTop10Frac > sum((frac for (_, _, frac) in holdings[:10]))))):
return None
res = []
for (cusip, val, frac) in holdings:
if (frac > maxFrac):
continue
if (minFrac > frac):
break
fracOut = (f'{frac:.2%}' if pctFormat else frac)
if includeName:
res.append((cusip, cusipNames.get(cusip, cusipToName[cusip]), val, fracOut))
else:
res.append((cusip, val, fracOut))
return (res if (len(res) > 0) else None) | def condenseHoldings(holdings, minFrac=0.0, maxFrac=1.0, pctFormat=False, includeName=False, cusipNames={}, minStocksPerInv=None, maxStocksPerInv=None, minTop10Frac=None, minAUM=None, allCusipCounter=None, all13FHoldingsMap=None, forCik=None):
'\n Converts a list of of stock and option holdings as parsed from the 13F:\n [(cusip, name, value, title, count, putCall), ... ]\n that may have multiple entries per stock into a condensed list that omits\n call/put options and only has one combined entry per stock:\n [(cusip, val, frac) ... ]\n sorted in descending order by value, and restricted to stocks with fraction\n of total portfolio in [minFrac..maxFrac]\n\n If pctFormat is True, frac is returned as a string in the format N.NN%\n If includeName is True, the cusip name is also returned:\n [(cusip, name, val, frac) ... ]\n\n If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, returns None\n for lists with too few stocks, too many stocks, too small a fraction in the\n top 10 stocks, or too small a total value.\n\n If supplied, allCusipCounter should be a Counter, and it will be updated to count\n all investors that have any position in each stock, without regard to the min/max\n options supplied to restrict the holdings list.\n\n If supplied, all13FHoldingsMap should be a dict, and a full sorted holdings list:\n [(cusip, val, frac) ... ]\n will be saved in all13FHoldingsMap[forCik], without regard to the min/max\n options supplied to restrict the holdings list.\n '
if includeName:
cusipToName = dict(((cusip, name) for (cusip, name, value, shType, nShares, putCall) in holdings))
holdings = sorted(((cusip, float(value)) for (cusip, name, value, shType, nShares, putCall) in holdings if (putCall == '')))
holdings = [(cusip, sum((val for (_, val) in it))) for (cusip, it) in itertools.groupby(holdings, key=(lambda x: x[0]))]
holdings.sort(key=(lambda x: x[1]), reverse=True)
totAum = sum((val for (_, val) in holdings))
holdings = [(cusip, val, ((val / totAum) if (totAum > 0.0) else 0.0)) for (cusip, val) in holdings]
if (all13FHoldingsMap is not None):
all13FHoldingsMap[forCik] = holdings
if (allCusipCounter is not None):
allCusipCounter.update((cusip for (cusip, _, _) in holdings))
if (((minStocksPerInv is not None) and (minStocksPerInv > len(holdings))) or ((maxStocksPerInv is not None) and (maxStocksPerInv < len(holdings))) or ((minAUM is not None) and (minAUM > (totAum * 1000.0))) or ((minTop10Frac is not None) and (minTop10Frac > sum((frac for (_, _, frac) in holdings[:10]))))):
return None
res = []
for (cusip, val, frac) in holdings:
if (frac > maxFrac):
continue
if (minFrac > frac):
break
fracOut = (f'{frac:.2%}' if pctFormat else frac)
if includeName:
res.append((cusip, cusipNames.get(cusip, cusipToName[cusip]), val, fracOut))
else:
res.append((cusip, val, fracOut))
return (res if (len(res) > 0) else None)<|docstring|>Converts a list of of stock and option holdings as parsed from the 13F:
[(cusip, name, value, title, count, putCall), ... ]
that may have multiple entries per stock into a condensed list that omits
call/put options and only has one combined entry per stock:
[(cusip, val, frac) ... ]
sorted in descending order by value, and restricted to stocks with fraction
of total portfolio in [minFrac..maxFrac]
If pctFormat is True, frac is returned as a string in the format N.NN%
If includeName is True, the cusip name is also returned:
[(cusip, name, val, frac) ... ]
If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, returns None
for lists with too few stocks, too many stocks, too small a fraction in the
top 10 stocks, or too small a total value.
If supplied, allCusipCounter should be a Counter, and it will be updated to count
all investors that have any position in each stock, without regard to the min/max
options supplied to restrict the holdings list.
If supplied, all13FHoldingsMap should be a dict, and a full sorted holdings list:
[(cusip, val, frac) ... ]
will be saved in all13FHoldingsMap[forCik], without regard to the min/max
options supplied to restrict the holdings list.<|endoftext|> |
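A self-contained toy run of `condenseHoldings` (assuming the module imports `itertools`): the two Apple lots are merged, the put position is dropped, and the fractions are taken over the stock-only total.

```python
toy = [
    ('037833100', 'APPLE INC', '600', 'COM', '10', ''),
    ('037833100', 'APPLE INC', '200', 'COM', '3', ''),
    ('594918104', 'MICROSOFT CORP', '200', 'COM', '2', ''),
    ('594918104', 'MICROSOFT CORP', '50', 'PUT', '1', 'PUT'),   # excluded: options are skipped
]
print(condenseHoldings(toy, pctFormat=True))
# expected: [('037833100', 800.0, '80.00%'), ('594918104', 200.0, '20.00%')]
```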
2fb6eb58f8f3c3fe7fb02f4cdee2246c670d2b5e3ba42faa0c025c1ac4c0f971 | def get13FAmendmentType(accNo, formType=None):
"\n Gets the amendment type for a 13F-HR/A filing - may be RESTATEMENT or NEW HOLDINGS.\n This turned out to be unreliable (often missing or wrong), so I don't use it to get\n the combined holdings for an investor. Instead I just look at the number of holdings\n in an amendment compared to the previous filing, and treat it as a restatement\n if the new number of holdings is more than half the old number.\n "
info = basicInfo.getSecFormInfo(accNo, formType)
xmlUrls = [l[(- 1)] for l in info['links'] if l[0].lower().endswith('xml')]
xmlSummTab = utils.downloadSecUrl(xmlUrls[0], toFormat='xml')
coverPage = findChildSeries(xmlSummTab, ['formdata', 'coverpage'])
isAmendment = findChildEndingWith(coverPage, 'isamendment')
if ((isAmendment is None) or (isAmendment.text.strip().lower() not in ['true', 'yes'])):
return None
return findChildSeries(coverPage, ['amendmentinfo', 'amendmenttype']).text.strip() | Gets the amendment type for a 13F-HR/A filing - may be RESTATEMENT or NEW HOLDINGS.
This turned out to be unreliable (often missing or wrong), so I don't use it to get
the combined holdings for an investor. Instead I just look at the number of holdings
in an amendment compared to the previous filing, and treat it as a restatement
if the new number of holdings is more than half the old number. | secscan/scrape13F.py | get13FAmendmentType | ikedim01/secscan | 0 | python | def get13FAmendmentType(accNo, formType=None):
"\n Gets the amendment type for a 13F-HR/A filing - may be RESTATEMENT or NEW HOLDINGS.\n This turned out to be unreliable (often missing or wrong), so I don't use it to get\n the combined holdings for an investor. Instead I just look at the number of holdings\n in an amendment compared to the previous filing, and treat it as a restatement\n if the new number of holdings is more than half the old number.\n "
info = basicInfo.getSecFormInfo(accNo, formType)
xmlUrls = [l[(- 1)] for l in info['links'] if l[0].lower().endswith('xml')]
xmlSummTab = utils.downloadSecUrl(xmlUrls[0], toFormat='xml')
coverPage = findChildSeries(xmlSummTab, ['formdata', 'coverpage'])
isAmendment = findChildEndingWith(coverPage, 'isamendment')
if ((isAmendment is None) or (isAmendment.text.strip().lower() not in ['true', 'yes'])):
return None
return findChildSeries(coverPage, ['amendmentinfo', 'amendmenttype']).text.strip() | def get13FAmendmentType(accNo, formType=None):
"\n Gets the amendment type for a 13F-HR/A filing - may be RESTATEMENT or NEW HOLDINGS.\n This turned out to be unreliable (often missing or wrong), so I don't use it to get\n the combined holdings for an investor. Instead I just look at the number of holdings\n in an amendment compared to the previous filing, and treat it as a restatement\n if the new number of holdings is more than half the old number.\n "
info = basicInfo.getSecFormInfo(accNo, formType)
xmlUrls = [l[(- 1)] for l in info['links'] if l[0].lower().endswith('xml')]
xmlSummTab = utils.downloadSecUrl(xmlUrls[0], toFormat='xml')
coverPage = findChildSeries(xmlSummTab, ['formdata', 'coverpage'])
isAmendment = findChildEndingWith(coverPage, 'isamendment')
if ((isAmendment is None) or (isAmendment.text.strip().lower() not in ['true', 'yes'])):
return None
return findChildSeries(coverPage, ['amendmentinfo', 'amendmenttype']).text.strip()<|docstring|>Gets the amendment type for a 13F-HR/A filing - may be RESTATEMENT or NEW HOLDINGS.
This turned out to be unreliable (often missing or wrong), so I don't use it to get
the combined holdings for an investor. Instead I just look at the number of holdings
in an amendment compared to the previous filing, and treat it as a restatement
if the new number of holdings is more than half the old number.<|endoftext|> |
03b9025a87833302dd97646589ddc4a3567271dcfdf19d01f38e1868af588a54 | def indexMap(lis):
'Converts a list to a dict mapping item -> index in the list.'
return dict(((el, i) for (i, el) in enumerate(lis))) | Converts a list to a dict mapping item -> index in the list. | secscan/scrape13F.py | indexMap | ikedim01/secscan | 0 | python | def indexMap(lis):
return dict(((el, i) for (i, el) in enumerate(lis))) | def indexMap(lis):
return dict(((el, i) for (i, el) in enumerate(lis)))<|docstring|>Converts a list to a dict mapping item -> index in the list.<|endoftext|> |
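A one-line check of `indexMap`:

```python
print(indexMap(['AAPL', 'MSFT', 'GOOG']))  # {'AAPL': 0, 'MSFT': 1, 'GOOG': 2}
```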
039b00b1e49a3be6022cb36161880fdc80303ff606509ff8fa5142475c39f926 | def getHoldingsMap(scraped13F, period, minFrac=0.0, maxFrac=1.0, minStocksPerInv=None, maxStocksPerInv=None, minTop10Frac=None, minAUM=None, allCusipCounter=None, all13FHoldingsMap=None):
'\n Consolidate holdings for each CIK based on all filings for a given period into\n a combined map of investor holdings.\n\n Returns a dict: cik -> {cusip -> pct}\n\n Restricts to stocks only (no call/put options).\n\n If minFrac and/or maxFrac is supplied, restricts to stocks with fraction of\n total portfolio >=minFrac and/or <=maxFrac.\n\n If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, omits\n investors with too few stocks, too many stocks, too small a fraction in the\n top 10 stocks, or too small a total stock value.\n\n If supplied, allCusipCounter should be a Counter, and it will be updated to count\n all investors that have any position in each stock, without regard to the min/max\n options supplied to restrict the returned holdings map.\n\n If supplied, all13FHoldingsMap should be a dict, and it will be updated with a full sorted\n holdings list for each CIK:\n all13FHoldingsMap[cik] = [(cusip, val, frac) ... ]\n without regard to the min/max options supplied to restrict the returned holdings map.\n '
for (v, msg) in [(minFrac, 'min stock fraction of portfolio'), (maxFrac, 'max stock fraction of portfolio'), (minStocksPerInv, 'min stocks per investor'), (maxStocksPerInv, 'max stocks per investor'), (minTop10Frac, 'min fraction of portfolio in top 10 positions'), (minAUM, 'min AUM (total portfolio value)')]:
if (v is not None):
print(msg, v)
cikTo13Fs = collections.defaultdict(list)
count = 0
for (dStr, accNoToInfo) in scraped13F.infoMap.items():
for (accNo, info) in accNoToInfo.items():
if (info == 'ERROR'):
print('ERR', accNo)
elif (info['period'] == period):
cikTo13Fs[info['cik'].lstrip('0')].append((dStr, accNo, info['holdings']))
count += 1
print('period', period, '- total of', len(cikTo13Fs), 'ciks,', count, '13F filings')
cikToPosList = {}
for (cik, cik13FList) in cikTo13Fs.items():
cik13FList.sort()
i = 0
j = 1
while (j < len(cik13FList)):
if (len(cik13FList[j][2]) > (len(cik13FList[i][2]) // 2)):
i = j
j += 1
if (j != 1):
print('CIK', cik, i, '-', j, [(dStr, accNo, len(holdings)) for (dStr, accNo, holdings) in cik13FList])
combHoldings = cik13FList[i][2]
while ((i + 1) < j):
i += 1
combHoldings = (combHoldings + cik13FList[i][2])
posList = condenseHoldings(combHoldings, minFrac=minFrac, maxFrac=maxFrac, minStocksPerInv=minStocksPerInv, maxStocksPerInv=maxStocksPerInv, minTop10Frac=minTop10Frac, minAUM=minAUM, allCusipCounter=allCusipCounter, all13FHoldingsMap=all13FHoldingsMap, forCik=cik)
if (posList is not None):
cikToPosList[cik] = posList
res = {}
for (cik, posList) in cikToPosList.items():
res[cik] = dict(((cusip, frac) for (cusip, _, frac) in posList))
return res | Consolidate holdings for each CIK based on all filings for a given period into
a combined map of investor holdings.
Returns a dict: cik -> {cusip -> pct}
Restricts to stocks only (no call/put options).
If minFrac and/or maxFrac is supplied, restricts to stocks with fraction of
total portfolio >=minFrac and/or <=maxFrac.
If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, omits
investors with too few stocks, too many stocks, too small a fraction in the
top 10 stocks, or too small a total stock value.
If supplied, allCusipCounter should be a Counter, and it will be updated to count
all investors that have any position in each stock, without regard to the min/max
options supplied to restrict the returned holdings map.
If supplied, all13FHoldingsMap should be a dict, and it will be updated with a full sorted
holdings list for each CIK:
all13FHoldingsMap[cik] = [(cusip, val, frac) ... ]
without regard to the min/max options supplied to restrict the returned holdings map. | secscan/scrape13F.py | getHoldingsMap | ikedim01/secscan | 0 | python | def getHoldingsMap(scraped13F, period, minFrac=0.0, maxFrac=1.0, minStocksPerInv=None, maxStocksPerInv=None, minTop10Frac=None, minAUM=None, allCusipCounter=None, all13FHoldingsMap=None):
'\n Consolidate holdings for each CIK based on all filings for a given period into\n a combined map of investor holdings.\n\n Returns a dict: cik -> {cusip -> pct}\n\n Restricts to stocks only (no call/put options).\n\n If minFrac and/or maxFrac is supplied, restricts to stocks with fraction of\n total portfolio >=minFrac and/or <=maxFrac.\n\n If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, omits\n investors with too few stocks, too many stocks, too small a fraction in the\n top 10 stocks, or too small a total stock value.\n\n If supplied, allCusipCounter should be a Counter, and it will be updated to count\n all investors that have any position in each stock, without regard to the min/max\n options supplied to restrict the returned holdings map.\n\n If supplied, all13FHoldingsMap should be a dict, and it will be updated with a full sorted\n holdings list for each CIK:\n all13FHoldingsMap[cik] = [(cusip, val, frac) ... ]\n without regard to the min/max options supplied to restrict the returned holdings map.\n '
for (v, msg) in [(minFrac, 'min stock fraction of portfolio'), (maxFrac, 'max stock fraction of portfolio'), (minStocksPerInv, 'min stocks per investor'), (maxStocksPerInv, 'max stocks per investor'), (minTop10Frac, 'min fraction of portfolio in top 10 positions'), (minAUM, 'min AUM (total portfolio value)')]:
if (v is not None):
print(msg, v)
cikTo13Fs = collections.defaultdict(list)
count = 0
for (dStr, accNoToInfo) in scraped13F.infoMap.items():
for (accNo, info) in accNoToInfo.items():
if (info == 'ERROR'):
print('ERR', accNo)
elif (info['period'] == period):
cikTo13Fs[info['cik'].lstrip('0')].append((dStr, accNo, info['holdings']))
count += 1
print('period', period, '- total of', len(cikTo13Fs), 'ciks,', count, '13F filings')
cikToPosList = {}
for (cik, cik13FList) in cikTo13Fs.items():
cik13FList.sort()
i = 0
j = 1
while (j < len(cik13FList)):
if (len(cik13FList[j][2]) > (len(cik13FList[i][2]) // 2)):
i = j
j += 1
if (j != 1):
print('CIK', cik, i, '-', j, [(dStr, accNo, len(holdings)) for (dStr, accNo, holdings) in cik13FList])
combHoldings = cik13FList[i][2]
while ((i + 1) < j):
i += 1
combHoldings = (combHoldings + cik13FList[i][2])
posList = condenseHoldings(combHoldings, minFrac=minFrac, maxFrac=maxFrac, minStocksPerInv=minStocksPerInv, maxStocksPerInv=maxStocksPerInv, minTop10Frac=minTop10Frac, minAUM=minAUM, allCusipCounter=allCusipCounter, all13FHoldingsMap=all13FHoldingsMap, forCik=cik)
if (posList is not None):
cikToPosList[cik] = posList
res = {}
for (cik, posList) in cikToPosList.items():
res[cik] = dict(((cusip, frac) for (cusip, _, frac) in posList))
return res | def getHoldingsMap(scraped13F, period, minFrac=0.0, maxFrac=1.0, minStocksPerInv=None, maxStocksPerInv=None, minTop10Frac=None, minAUM=None, allCusipCounter=None, all13FHoldingsMap=None):
'\n Consolidate holdings for each CIK based on all filings for a given period into\n a combined map of investor holdings.\n\n Returns a dict: cik -> {cusip -> pct}\n\n Restricts to stocks only (no call/put options).\n\n If minFrac and/or maxFrac is supplied, restricts to stocks with fraction of\n total portfolio >=minFrac and/or <=maxFrac.\n\n If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, omits\n investors with too few stocks, too many stocks, too small a fraction in the\n top 10 stocks, or too small a total stock value.\n\n If supplied, allCusipCounter should be a Counter, and it will be updated to count\n all investors that have any position in each stock, without regard to the min/max\n options supplied to restrict the returned holdings map.\n\n If supplied, all13FHoldingsMap should be a dict, and it will be updated with a full sorted\n holdings list for each CIK:\n all13FHoldingsMap[cik] = [(cusip, val, frac) ... ]\n without regard to the min/max options supplied to restrict the returned holdings map.\n '
for (v, msg) in [(minFrac, 'min stock fraction of portfolio'), (maxFrac, 'max stock fraction of portfolio'), (minStocksPerInv, 'min stocks per investor'), (maxStocksPerInv, 'max stocks per investor'), (minTop10Frac, 'min fraction of portfolio in top 10 positions'), (minAUM, 'min AUM (total portfolio value)')]:
if (v is not None):
print(msg, v)
cikTo13Fs = collections.defaultdict(list)
count = 0
for (dStr, accNoToInfo) in scraped13F.infoMap.items():
for (accNo, info) in accNoToInfo.items():
if (info == 'ERROR'):
print('ERR', accNo)
elif (info['period'] == period):
cikTo13Fs[info['cik'].lstrip('0')].append((dStr, accNo, info['holdings']))
count += 1
print('period', period, '- total of', len(cikTo13Fs), 'ciks,', count, '13F filings')
cikToPosList = {}
for (cik, cik13FList) in cikTo13Fs.items():
cik13FList.sort()
i = 0
j = 1
while (j < len(cik13FList)):
if (len(cik13FList[j][2]) > (len(cik13FList[i][2]) // 2)):
i = j
j += 1
if (j != 1):
print('CIK', cik, i, '-', j, [(dStr, accNo, len(holdings)) for (dStr, accNo, holdings) in cik13FList])
combHoldings = cik13FList[i][2]
while ((i + 1) < j):
i += 1
combHoldings = (combHoldings + cik13FList[i][2])
posList = condenseHoldings(combHoldings, minFrac=minFrac, maxFrac=maxFrac, minStocksPerInv=minStocksPerInv, maxStocksPerInv=maxStocksPerInv, minTop10Frac=minTop10Frac, minAUM=minAUM, allCusipCounter=allCusipCounter, all13FHoldingsMap=all13FHoldingsMap, forCik=cik)
if (posList is not None):
cikToPosList[cik] = posList
res = {}
for (cik, posList) in cikToPosList.items():
res[cik] = dict(((cusip, frac) for (cusip, _, frac) in posList))
return res<|docstring|>Consolidate holdings for each CIK based on all filings for a given period into
a combined map of investor holdings.
Returns a dict: cik -> {cusip -> pct}
Restricts to stocks only (no call/put options).
If minFrac and/or maxFrac is supplied, restricts to stocks with fraction of
total portfolio >=minFrac and/or <=maxFrac.
If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, omits
investors with too few stocks, too many stocks, too small a fraction in the
top 10 stocks, or too small a total stock value.
If supplied, allCusipCounter should be a Counter, and it will be updated to count
all investors that have any position in each stock, without regard to the min/max
options supplied to restrict the returned holdings map.
If supplied, all13FHoldingsMap should be a dict, and it will be updated with a full sorted
holdings list for each CIK:
all13FHoldingsMap[cik] = [(cusip, val, frac) ... ]
without regard to the min/max options supplied to restrict the returned holdings map.<|endoftext|> |
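Added usage sketch (not part of the scraped source): the usual call pattern for getHoldingsMap mirrors what getNSSForQ does further down — pick a quarter, scrape the 13Fs filed during the following quarter, and collapse them into {cik -> {cusip -> fraction}}. scraper13F and getPeriodAndNextQStartEnd are this module's own names; the thresholds are assumptions borrowed from defaults used elsewhere in the file, and running it needs the scraped EDGAR index data to be available.
import collections

period, nextQ = getPeriodAndNextQStartEnd(2020, 4)        # 13F period + filing window
allCusipCounter = collections.Counter()                   # investor counts per CUSIP
holdingsMap = getHoldingsMap(scraper13F(**nextQ), period,
                             minFrac=0.01, minTop10Frac=0.4, minAUM=75000000.0,
                             allCusipCounter=allCusipCounter)
someCik = next(iter(holdingsMap))
print(someCik, sorted(holdingsMap[someCik].items(), key=lambda kv: -kv[1])[:5])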
ff9f679a222dc9854d2922210fde5d7554609c73d23416db62be4bfb06ec422f | def addHoldingsMap(holdingsMap, extraHoldingsMap):
'\n Adds positions in extraHoldingsMap to holdingsMap.\n Each argument is a dict: cik -> {cusip -> pct}\n but extraHoldingsMap may contain ciks and cusips not in holdingsMap.\n '
for (cik, extraPosMap) in extraHoldingsMap.items():
if (cik not in holdingsMap):
holdingsMap[cik] = {}
posMap = holdingsMap[cik]
for (cusip, frac) in extraPosMap.items():
posMap[cusip] = (posMap.get(cusip, 0.0) + frac) | Adds positions in extraHoldingsMap to holdingsMap.
Each argument is a dict: cik -> {cusip -> pct}
but extraHoldingsMap may contain ciks and cusips not in holdingsMap. | secscan/scrape13F.py | addHoldingsMap | ikedim01/secscan | 0 | python | def addHoldingsMap(holdingsMap, extraHoldingsMap):
'\n Adds positions in extraHoldingsMap to holdingsMap.\n Each argument is a dict: cik -> {cusip -> pct}\n but extraHoldingsMap may contain ciks and cusips not in holdingsMap.\n '
for (cik, extraPosMap) in extraHoldingsMap.items():
if (cik not in holdingsMap):
holdingsMap[cik] = {}
posMap = holdingsMap[cik]
for (cusip, frac) in extraPosMap.items():
posMap[cusip] = (posMap.get(cusip, 0.0) + frac) | def addHoldingsMap(holdingsMap, extraHoldingsMap):
'\n Adds positions in extraHoldingsMap to holdingsMap.\n Each argument is a dict: cik -> {cusip -> pct}\n but extraHoldingsMap may contain ciks and cusips not in holdingsMap.\n '
for (cik, extraPosMap) in extraHoldingsMap.items():
if (cik not in holdingsMap):
holdingsMap[cik] = {}
posMap = holdingsMap[cik]
for (cusip, frac) in extraPosMap.items():
posMap[cusip] = (posMap.get(cusip, 0.0) + frac)<|docstring|>Adds positions in extraHoldingsMap to holdingsMap.
Each argument is a dict: cik -> {cusip -> pct}
but extraHoldingsMap may contain ciks and cusips not in holdingsMap.<|endoftext|> |
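Added for clarity — a minimal, self-contained illustration of the in-place merge addHoldingsMap performs (the CUSIPs and fractions are invented):
base = {'1234567': {'037833100': 0.6}}
extra = {'1234567': {'037833100': 0.1, '594918104': 0.3},
         '7654321': {'88160R101': 1.0}}
addHoldingsMap(base, extra)
# base is now (up to float rounding):
# {'1234567': {'037833100': 0.7, '594918104': 0.3},
#  '7654321': {'88160R101': 1.0}}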
b184531800e3079981ad435c621b9d635aa379db25310a6db2e44de047a73c27 | def holdingsMapToMatrix(holdingsMap, minStocksPerInvestor=None, maxStocksPerInvestor=None, minInvestorsPerStock=None, maxInvestorsPerStock=None, minAllInvestorsPerStock=None, maxAllInvestorsPerStock=None, allCusipCounter=None, cusipFilter=None, dtype=np.float64):
'\n Converts a holdings map: cik -> {cusip -> frac} into a matrix.\n\n Returns mat, ciks, cusips where mat is a matrix of shape (len(ciks), len(cusips))\n in which each row has the fractions held by the corresponding cik in each cusip.\n\n If minStocksPerInvestor is specified, restricts to investors with at least that many stocks\n in the returned matrix; likewise, maxStocksPerInvestor can be used to give an upper bound.\n\n If minInvestorsPerStock is specified, restricts to stocks with at least that many investors\n in the returned matrix; likewise, maxInvestorsPerStock can be used to give an upper bound.\n\n If minAllInvestorsPerStock or maxAllInvestorsPerStock is specified, then allCusipCounter\n should be a Counter counting all investors that have any position in each stock,\n and the result will be restricted based on this count.\n\n If cusipFilter is specified, this should be a function that returns True for cusips to keep.\n '
invCount = len(holdingsMap)
print('starting investor count:', invCount)
if ((minStocksPerInvestor is None) and (maxStocksPerInvestor is None)):
print('not limiting number of stocks per investor')
else:
if (minStocksPerInvestor is not None):
print('requiring at least', minStocksPerInvestor, 'stocks per investor')
holdingsMap = dict(((cik, posMap) for (cik, posMap) in holdingsMap.items() if (len(posMap) >= minStocksPerInvestor)))
print('- removed', (invCount - len(holdingsMap)), 'investors,', len(holdingsMap), 'remaining')
invCount = len(holdingsMap)
if (maxStocksPerInvestor is not None):
print('requiring at most', maxStocksPerInvestor, 'stocks per investor')
holdingsMap = dict(((cik, posMap) for (cik, posMap) in holdingsMap.items() if (len(posMap) <= maxStocksPerInvestor)))
print('- removed', (invCount - len(holdingsMap)), 'investors,', len(holdingsMap), 'remaining')
invCount = len(holdingsMap)
cusipCounter = collections.Counter()
for posMap in holdingsMap.values():
cusipCounter.update(posMap.keys())
print('starting stock count:', len(cusipCounter))
cusipsToRemove = set()
delCount = 0
if ((minInvestorsPerStock is None) and (maxInvestorsPerStock is None) and (minAllInvestorsPerStock is None) and (maxAllInvestorsPerStock is None)):
print('not limiting number of investors per stock')
else:
if (minAllInvestorsPerStock is not None):
cusipsToRemove.update((cusip for cusip in cusipCounter if (allCusipCounter[cusip] < minAllInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at least {minAllInvestorsPerStock} ALL investors per stock')
if (maxAllInvestorsPerStock is not None):
cusipsToRemove.update((cusip for cusip in cusipCounter if (allCusipCounter[cusip] > maxAllInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at most {maxAllInvestorsPerStock} ALL investors per stock')
if (minInvestorsPerStock is not None):
cusipsToRemove.update((cusip for (cusip, count) in cusipCounter.items() if (count < minInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at least {minInvestorsPerStock} investors per stock')
if (maxInvestorsPerStock is not None):
cusipsToRemove.update((cusip for (cusip, count) in cusipCounter.items() if (count > maxInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at most {maxInvestorsPerStock} investors per stock')
if (cusipFilter is not None):
cusipsToRemove.update((cusip for cusip in cusipCounter if (not cusipFilter(cusip))))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, 'applying CUSIP filter')
cusips = sorted((set(cusipCounter.keys()) - cusipsToRemove))
if (delCount > 0):
print('removed a total of', delCount, 'stocks,', len(cusips), 'remaining')
ciks = sorted((cik.zfill(10) for (cik, posMap) in holdingsMap.items() if (1 <= len((set(posMap.keys()) - cusipsToRemove)))))
print('removed', (invCount - len(ciks)), 'investors with no remaining positions')
print(f'final counts: {len(ciks):,} investors; {len(cusips):,} stocks;', end=' ')
cikToRow = indexMap(ciks)
cusipToCol = indexMap(cusips)
mat = np.zeros((len(ciks), len(cusips)), dtype=dtype)
count = 0
for (cik, posMap) in holdingsMap.items():
cikRow = cikToRow.get(cik.zfill(10))
if (cikRow is None):
continue
for (cusip, frac) in posMap.items():
if (cusip not in cusipsToRemove):
mat[(cikRow, cusipToCol[cusip])] = frac
count += 1
print(f'{count:,} positions')
return (mat, ciks, cusips) | Converts a holdings map: cik -> {cusip -> frac} into a matrix.
Returns mat, ciks, cusips where mat is a matrix of shape (len(ciks), len(cusips))
in which each row has the fractions held by the corresponding cik in each cusip.
If minStocksPerInvestor is specified, restricts to investors with at least that many stocks
in the returned matrix; likewise, maxStocksPerInvestor can be used to give an upper bound.
If minInvestorsPerStock is specified, restricts to stocks with at least that many investors
in the returned matrix; likewise, maxInvestorsPerStock can be used to give an upper bound.
If minAllInvestorsPerStock or maxAllInvestorsPerStock is specified, then allCusipCounter
should be a Counter counting all investors that have any position in each stock,
and the result will be restricted based on this count.
If cusipFilter is specified, this should be a function that returns True for cusips to keep. | secscan/scrape13F.py | holdingsMapToMatrix | ikedim01/secscan | 0 | python | def holdingsMapToMatrix(holdingsMap, minStocksPerInvestor=None, maxStocksPerInvestor=None, minInvestorsPerStock=None, maxInvestorsPerStock=None, minAllInvestorsPerStock=None, maxAllInvestorsPerStock=None, allCusipCounter=None, cusipFilter=None, dtype=np.float64):
'\n Converts a holdings map: cik -> {cusip -> frac} into a matrix.\n\n Returns mat, ciks, cusips where mat is a matrix of shape (len(ciks), len(cusips))\n in which each row has the fractions held by the corresponding cik in each cusip.\n\n If minStocksPerInvestor is specified, restricts to investors with at least that many stocks\n in the returned matrix; likewise, maxStocksPerInvestor can be used to give an upper bound.\n\n If minInvestorsPerStock is specified, restricts to stocks with at least that many investors\n in the returned matrix; likewise, maxInvestorsPerStock can be used to give an upper bound.\n\n If minAllInvestorsPerStock or maxAllInvestorsPerStock is specified, then allCusipCounter\n should be a Counter counting all investors that have any position in each stock,\n and the result will be restricted based on this count.\n\n If cusipFilter is specified, this should be a function that returns True for cusips to keep.\n '
invCount = len(holdingsMap)
print('starting investor count:', invCount)
if ((minStocksPerInvestor is None) and (maxStocksPerInvestor is None)):
print('not limiting number of stocks per investor')
else:
if (minStocksPerInvestor is not None):
print('requiring at least', minStocksPerInvestor, 'stocks per investor')
holdingsMap = dict(((cik, posMap) for (cik, posMap) in holdingsMap.items() if (len(posMap) >= minStocksPerInvestor)))
print('- removed', (invCount - len(holdingsMap)), 'investors,', len(holdingsMap), 'remaining')
invCount = len(holdingsMap)
if (maxStocksPerInvestor is not None):
print('requiring at most', maxStocksPerInvestor, 'stocks per investor')
holdingsMap = dict(((cik, posMap) for (cik, posMap) in holdingsMap.items() if (len(posMap) <= maxStocksPerInvestor)))
print('- removed', (invCount - len(holdingsMap)), 'investors,', len(holdingsMap), 'remaining')
invCount = len(holdingsMap)
cusipCounter = collections.Counter()
for posMap in holdingsMap.values():
cusipCounter.update(posMap.keys())
print('starting stock count:', len(cusipCounter))
cusipsToRemove = set()
delCount = 0
if ((minInvestorsPerStock is None) and (maxInvestorsPerStock is None) and (minAllInvestorsPerStock is None) and (maxAllInvestorsPerStock is None)):
print('not limiting number of investors per stock')
else:
if (minAllInvestorsPerStock is not None):
cusipsToRemove.update((cusip for cusip in cusipCounter if (allCusipCounter[cusip] < minAllInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at least {minAllInvestorsPerStock} ALL investors per stock')
if (maxAllInvestorsPerStock is not None):
cusipsToRemove.update((cusip for cusip in cusipCounter if (allCusipCounter[cusip] > maxAllInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at most {maxAllInvestorsPerStock} ALL investors per stock')
if (minInvestorsPerStock is not None):
cusipsToRemove.update((cusip for (cusip, count) in cusipCounter.items() if (count < minInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at least {minInvestorsPerStock} investors per stock')
if (maxInvestorsPerStock is not None):
cusipsToRemove.update((cusip for (cusip, count) in cusipCounter.items() if (count > maxInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at most {maxInvestorsPerStock} investors per stock')
if (cusipFilter is not None):
cusipsToRemove.update((cusip for cusip in cusipCounter if (not cusipFilter(cusip))))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, 'applying CUSIP filter')
cusips = sorted((set(cusipCounter.keys()) - cusipsToRemove))
if (delCount > 0):
print('removed a total of', delCount, 'stocks,', len(cusips), 'remaining')
ciks = sorted((cik.zfill(10) for (cik, posMap) in holdingsMap.items() if (1 <= len((set(posMap.keys()) - cusipsToRemove)))))
print('removed', (invCount - len(ciks)), 'investors with no remaining positions')
print(f'final counts: {len(ciks):,} investors; {len(cusips):,} stocks;', end=' ')
cikToRow = indexMap(ciks)
cusipToCol = indexMap(cusips)
mat = np.zeros((len(ciks), len(cusips)), dtype=dtype)
count = 0
for (cik, posMap) in holdingsMap.items():
cikRow = cikToRow.get(cik.zfill(10))
if (cikRow is None):
continue
for (cusip, frac) in posMap.items():
if (cusip not in cusipsToRemove):
mat[(cikRow, cusipToCol[cusip])] = frac
count += 1
print(f'{count:,} positions')
return (mat, ciks, cusips) | def holdingsMapToMatrix(holdingsMap, minStocksPerInvestor=None, maxStocksPerInvestor=None, minInvestorsPerStock=None, maxInvestorsPerStock=None, minAllInvestorsPerStock=None, maxAllInvestorsPerStock=None, allCusipCounter=None, cusipFilter=None, dtype=np.float64):
'\n Converts a holdings map: cik -> {cusip -> frac} into a matrix.\n\n Returns mat, ciks, cusips where mat is a matrix of shape (len(ciks), len(cusips))\n in which each row has the fractions held by the corresponding cik in each cusip.\n\n If minStocksPerInvestor is specified, restricts to investors with at least that many stocks\n in the returned matrix; likewise, maxStocksPerInvestor can be used to give an upper bound.\n\n If minInvestorsPerStock is specified, restricts to stocks with at least that many investors\n in the returned matrix; likewise, maxInvestorsPerStock can be used to give an upper bound.\n\n If minAllInvestorsPerStock or maxAllInvestorsPerStock is specified, then allCusipCounter\n should be a Counter counting all investors that have any position in each stock,\n and the result will be restricted based on this count.\n\n If cusipFilter is specified, this should be a function that returns True for cusips to keep.\n '
invCount = len(holdingsMap)
print('starting investor count:', invCount)
if ((minStocksPerInvestor is None) and (maxStocksPerInvestor is None)):
print('not limiting number of stocks per investor')
else:
if (minStocksPerInvestor is not None):
print('requiring at least', minStocksPerInvestor, 'stocks per investor')
holdingsMap = dict(((cik, posMap) for (cik, posMap) in holdingsMap.items() if (len(posMap) >= minStocksPerInvestor)))
print('- removed', (invCount - len(holdingsMap)), 'investors,', len(holdingsMap), 'remaining')
invCount = len(holdingsMap)
if (maxStocksPerInvestor is not None):
print('requiring at most', maxStocksPerInvestor, 'stocks per investor')
holdingsMap = dict(((cik, posMap) for (cik, posMap) in holdingsMap.items() if (len(posMap) <= maxStocksPerInvestor)))
print('- removed', (invCount - len(holdingsMap)), 'investors,', len(holdingsMap), 'remaining')
invCount = len(holdingsMap)
cusipCounter = collections.Counter()
for posMap in holdingsMap.values():
cusipCounter.update(posMap.keys())
print('starting stock count:', len(cusipCounter))
cusipsToRemove = set()
delCount = 0
if ((minInvestorsPerStock is None) and (maxInvestorsPerStock is None) and (minAllInvestorsPerStock is None) and (maxAllInvestorsPerStock is None)):
print('not limiting number of investors per stock')
else:
if (minAllInvestorsPerStock is not None):
cusipsToRemove.update((cusip for cusip in cusipCounter if (allCusipCounter[cusip] < minAllInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at least {minAllInvestorsPerStock} ALL investors per stock')
if (maxAllInvestorsPerStock is not None):
cusipsToRemove.update((cusip for cusip in cusipCounter if (allCusipCounter[cusip] > maxAllInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at most {maxAllInvestorsPerStock} ALL investors per stock')
if (minInvestorsPerStock is not None):
cusipsToRemove.update((cusip for (cusip, count) in cusipCounter.items() if (count < minInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at least {minInvestorsPerStock} investors per stock')
if (maxInvestorsPerStock is not None):
cusipsToRemove.update((cusip for (cusip, count) in cusipCounter.items() if (count > maxInvestorsPerStock)))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, f'requiring at most {maxInvestorsPerStock} investors per stock')
if (cusipFilter is not None):
cusipsToRemove.update((cusip for cusip in cusipCounter if (not cusipFilter(cusip))))
delCount = printRemoveStocksMessage(cusipsToRemove, delCount, 'applying CUSIP filter')
cusips = sorted((set(cusipCounter.keys()) - cusipsToRemove))
if (delCount > 0):
print('removed a total of', delCount, 'stocks,', len(cusips), 'remaining')
ciks = sorted((cik.zfill(10) for (cik, posMap) in holdingsMap.items() if (1 <= len((set(posMap.keys()) - cusipsToRemove)))))
print('removed', (invCount - len(ciks)), 'investors with no remaining positions')
print(f'final counts: {len(ciks):,} investors; {len(cusips):,} stocks;', end=' ')
cikToRow = indexMap(ciks)
cusipToCol = indexMap(cusips)
mat = np.zeros((len(ciks), len(cusips)), dtype=dtype)
count = 0
for (cik, posMap) in holdingsMap.items():
cikRow = cikToRow.get(cik.zfill(10))
if (cikRow is None):
continue
for (cusip, frac) in posMap.items():
if (cusip not in cusipsToRemove):
mat[(cikRow, cusipToCol[cusip])] = frac
count += 1
print(f'{count:,} positions')
return (mat, ciks, cusips)<|docstring|>Converts a holdings map: cik -> {cusip -> frac} into a matrix.
Returns mat, ciks, cusips where mat is a matrix of shape (len(ciks), len(cusips))
in which each row has the fractions held by the corresponding cik in each cusip.
If minStocksPerInvestor is specified, restricts to investors with at least that many stocks
in the returned matrix; likewise, maxStocksPerInvestor can be used to give an upper bound.
If minInvestorsPerStock is specified, restricts to stocks with at least that many investors
in the returned matrix; likewise, maxInvestorsPerStock can be used to give an upper bound.
If minAllInvestorsPerStock or maxAllInvestorsPerStock is specified, then allCusipCounter
should be a Counter counting all investors that have any position in each stock,
and the result will be restricted based on this count.
If cusipFilter is specified, this should be a function that returns True for cusips to keep.<|endoftext|> |
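Added sketch of the map-to-matrix conversion (assumes the module's numpy import and its indexMap helper; the identifiers are invented):
holdings = {'1111111': {'AAA000000': 0.30, 'BBB000000': 0.70},
            '2222222': {'BBB000000': 1.00}}
mat, ciks, cusips = holdingsMapToMatrix(holdings)
# ciks   -> ['0001111111', '0002222222']   (zero-padded to 10 digits, sorted)
# cusips -> ['AAA000000', 'BBB000000']     (sorted)
# mat    -> [[0.3, 0.7],
#            [0.0, 1.0]]                   (one row per cik, one column per cusip)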
847fc5accffdbbadc045fcde526e86b9db836c8fd670e3c26855fbc2f1f79b9b | def getPeriodAndNextQStartEnd(y, qNo):
'\n Returns the 13F period date for a given year and quarter number (this is the\n last day in the quarter), along with the start and end dateStrs for the next\n quarter (this is the date range when the 13Fs for this year should be filed).\n Quarters are numbered 1-4.\n '
nextY = ((y + 1) if (qNo == 4) else y)
nextQNo = (1 if (qNo == 4) else (qNo + 1))
return ((str(y) + qPeriods[(qNo - 1)]), {'startD': (str(nextY) + qStartEnds[(nextQNo - 1)]), 'endD': (str(((nextY + 1) if (nextQNo == 4) else nextY)) + qStartEnds[nextQNo])}) | Returns the 13F period date for a given year and quarter number (this is the
last day in the quarter), along with the start and end dateStrs for the next
quarter (this is the date range when the 13Fs for this year should be filed).
Quarters are numbered 1-4. | secscan/scrape13F.py | getPeriodAndNextQStartEnd | ikedim01/secscan | 0 | python | def getPeriodAndNextQStartEnd(y, qNo):
'\n Returns the 13F period date for a given year and quarter number (this is the\n last day in the quarter), along with the start and end dateStrs for the next\n quarter (this is the date range when the 13Fs for this year should be filed).\n Quarters are numbered 1-4.\n '
nextY = ((y + 1) if (qNo == 4) else y)
nextQNo = (1 if (qNo == 4) else (qNo + 1))
return ((str(y) + qPeriods[(qNo - 1)]), {'startD': (str(nextY) + qStartEnds[(nextQNo - 1)]), 'endD': (str(((nextY + 1) if (nextQNo == 4) else nextY)) + qStartEnds[nextQNo])}) | def getPeriodAndNextQStartEnd(y, qNo):
'\n Returns the 13F period date for a given year and quarter number (this is the\n last day in the quarter), along with the start and end dateStrs for the next\n quarter (this is the date range when the 13Fs for this year should be filed).\n Quarters are numbered 1-4.\n '
nextY = ((y + 1) if (qNo == 4) else y)
nextQNo = (1 if (qNo == 4) else (qNo + 1))
return ((str(y) + qPeriods[(qNo - 1)]), {'startD': (str(nextY) + qStartEnds[(nextQNo - 1)]), 'endD': (str(((nextY + 1) if (nextQNo == 4) else nextY)) + qStartEnds[nextQNo])})<|docstring|>Returns the 13F period date for a given year and quarter number (this is the
last day in the quarter), along with the start and end dateStrs for the next
quarter (this is the date range when the 13Fs for this year should be filed).
Quarters are numbered 1-4.<|endoftext|> |
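Added note on the return shape (the exact date-string formats come from the module-level qPeriods/qStartEnds constants, which are not shown here):
period, window = getPeriodAndNextQStartEnd(2020, 4)
# period -> the last day of 2020 Q4 as a string (the 13F period of report)
# window -> {'startD': first day of 2021 Q1, 'endD': first day of 2021 Q2},
#           i.e. the window in which those 13Fs get filed, ready for scraper13F(**window)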
c335d2ee5f5e85c47a2067dcf31ec7eea75ce07f51b42637a3af650826473e26 | def getNSSForQ(y, qNo, minFrac=0.01, maxFrac=1.0, minStocksPerInv=3, maxStocksPerInv=100, minTop10Frac=0.4, minAUM=None, dtype=np.float64, minInvestorsPerStock=2, maxInvestorsPerStock=None, minAllInvestorsPerStock=None, maxAllInvestorsPerStock=None, allCusipCounter=None, cusipFilter=None, extraHoldingsMaps=[], include13F=True, all13FHoldingsMap=None):
'\n Calculates a matrix of investor holdings for a quarter, based on all 13F filings filed\n during the succeeding quarter.\n\n Returns mat, ciks, cusips where mat is a matrix of shape (len(ciks), len(cusips))\n in which each row has the fractions held by the corresponding cik in each cusip.\n\n If minFrac and/or maxFrac is supplied, restricts to stocks with fraction of\n total portfolio >=minFrac and/or <=maxFrac.\n\n If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, omits\n investors with too few stocks, too many stocks, too small a fraction in the\n top 10 holdings, or too small a total stock value.\n If minInvestorsPerStock is specified, restricts to stocks with at least that many investors\n in the returned matrix; likewise, maxInvestorsPerStock can be used to give an upper bound.\n If minAllInvestorsPerStock or maxAllInvestorsPerStock is specified, then allCusipCounter\n should be a Counter counting all investors that have any position in each stock,\n and the result will be restricted based on this count.\n If cusipFilter is specified, this should be a function that returns True for cusips to keep.\n\n If supplied, all13FHoldingsMap should be a dict, and it will be updated with a full sorted\n holdings list for each CIK:\n all13FHoldingsMap[cik] = [(cusip, val, frac) ... ]\n without regard to the min/max options supplied to restrict the returned holdings map.\n\n Optionally adds holdings from a list of extraHoldingsMaps (used for 13G/13D filings).\n '
if (((minAllInvestorsPerStock is not None) or (maxAllInvestorsPerStock is not None)) and (allCusipCounter is None)):
allCusipCounter = collections.Counter()
if include13F:
(period, nextQStartEnd) = getPeriodAndNextQStartEnd(y, qNo)
holdingsMap = getHoldingsMap(scraper13F(**nextQStartEnd), period, minFrac=minFrac, maxFrac=maxFrac, minStocksPerInv=None, maxStocksPerInv=None, minTop10Frac=minTop10Frac, minAUM=minAUM, allCusipCounter=allCusipCounter, all13FHoldingsMap=all13FHoldingsMap)
else:
holdingsMap = {}
for extraHoldingsMap in extraHoldingsMaps:
addHoldingsMap(holdingsMap, extraHoldingsMap)
return holdingsMapToMatrix(holdingsMap, minStocksPerInvestor=minStocksPerInv, maxStocksPerInvestor=maxStocksPerInv, minInvestorsPerStock=minInvestorsPerStock, maxInvestorsPerStock=maxInvestorsPerStock, minAllInvestorsPerStock=minAllInvestorsPerStock, maxAllInvestorsPerStock=maxAllInvestorsPerStock, allCusipCounter=allCusipCounter, cusipFilter=cusipFilter, dtype=dtype) | Calculates a matrix of investor holdings for a quarter, based on all 13F filings filed
during the succeeding quarter.
Returns mat, ciks, cusips where mat is a matrix of shape (len(ciks), len(cusips))
in which each row has the fractions held by the corresponding cik in each cusip.
If minFrac and/or maxFrac is supplied, restricts to stocks with fraction of
total portfolio >=minFrac and/or <=maxFrac.
If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, omits
investors with too few stocks, too many stocks, too small a fraction in the
top 10 holdings, or too small a total stock value.
If minInvestorsPerStock is specified, restricts to stocks with at least that many investors
in the returned matrix; likewise, maxInvestorsPerStock can be used to give an upper bound.
If minAllInvestorsPerStock or maxAllInvestorsPerStock is specified, then allCusipCounter
should be a Counter counting all investors that have any position in each stock,
and the result will be restricted based on this count.
If cusipFilter is specified, this should be a function that returns True for cusips to keep.
If supplied, all13FHoldingsMap should be a dict, and it will be updated with a full sorted
holdings list for each CIK:
all13FHoldingsMap[cik] = [(cusip, val, frac) ... ]
without regard to the min/max options supplied to restrict the returned holdings map.
Optionally adds holdings from a list of extraHoldingsMaps (used for 13G/13D filings). | secscan/scrape13F.py | getNSSForQ | ikedim01/secscan | 0 | python | def getNSSForQ(y, qNo, minFrac=0.01, maxFrac=1.0, minStocksPerInv=3, maxStocksPerInv=100, minTop10Frac=0.4, minAUM=None, dtype=np.float64, minInvestorsPerStock=2, maxInvestorsPerStock=None, minAllInvestorsPerStock=None, maxAllInvestorsPerStock=None, allCusipCounter=None, cusipFilter=None, extraHoldingsMaps=[], include13F=True, all13FHoldingsMap=None):
'\n Calculates a matrix of investor holdings for a quarter, based on all 13F filings filed\n during the succeeding quarter.\n\n Returns mat, ciks, cusips where mat is a matrix of shape (len(ciks), len(cusips))\n in which each row has the fractions held by the corresponding cik in each cusip.\n\n If minFrac and/or maxFrac is supplied, restricts to stocks with fraction of\n total portfolio >=minFrac and/or <=maxFrac.\n\n If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, omits\n investors with too few stocks, too many stocks, too small a fraction in the\n top 10 holdings, or too small a total stock value.\n If minInvestorsPerStock is specified, restricts to stocks with at least that many investors\n in the returned matrix; likewise, maxInvestorsPerStock can be used to give an upper bound.\n If minAllInvestorsPerStock or maxAllInvestorsPerStock is specified, then allCusipCounter\n should be a Counter counting all investors that have any position in each stock,\n and the result will be restricted based on this count.\n If cusipFilter is specified, this should be a function that returns True for cusips to keep.\n\n If supplied, all13FHoldingsMap should be a dict, and it will be updated with a full sorted\n holdings list for each CIK:\n all13FHoldingsMap[cik] = [(cusip, val, frac) ... ]\n without regard to the min/max options supplied to restrict the returned holdings map.\n\n Optionally adds holdings from a list of extraHoldingsMaps (used for 13G/13D filings).\n '
if (((minAllInvestorsPerStock is not None) or (maxAllInvestorsPerStock is not None)) and (allCusipCounter is None)):
allCusipCounter = collections.Counter()
if include13F:
(period, nextQStartEnd) = getPeriodAndNextQStartEnd(y, qNo)
holdingsMap = getHoldingsMap(scraper13F(**nextQStartEnd), period, minFrac=minFrac, maxFrac=maxFrac, minStocksPerInv=None, maxStocksPerInv=None, minTop10Frac=minTop10Frac, minAUM=minAUM, allCusipCounter=allCusipCounter, all13FHoldingsMap=all13FHoldingsMap)
else:
holdingsMap = {}
for extraHoldingsMap in extraHoldingsMaps:
addHoldingsMap(holdingsMap, extraHoldingsMap)
return holdingsMapToMatrix(holdingsMap, minStocksPerInvestor=minStocksPerInv, maxStocksPerInvestor=maxStocksPerInv, minInvestorsPerStock=minInvestorsPerStock, maxInvestorsPerStock=maxInvestorsPerStock, minAllInvestorsPerStock=minAllInvestorsPerStock, maxAllInvestorsPerStock=maxAllInvestorsPerStock, allCusipCounter=allCusipCounter, cusipFilter=cusipFilter, dtype=dtype) | def getNSSForQ(y, qNo, minFrac=0.01, maxFrac=1.0, minStocksPerInv=3, maxStocksPerInv=100, minTop10Frac=0.4, minAUM=None, dtype=np.float64, minInvestorsPerStock=2, maxInvestorsPerStock=None, minAllInvestorsPerStock=None, maxAllInvestorsPerStock=None, allCusipCounter=None, cusipFilter=None, extraHoldingsMaps=[], include13F=True, all13FHoldingsMap=None):
'\n Calculates a matrix of investor holdings for a quarter, based on all 13F filings filed\n during the succeeding quarter.\n\n Returns mat, ciks, cusips where mat is a matrix of shape (len(ciks), len(cusips))\n in which each row has the fractions held by the corresponding cik in each cusip.\n\n If minFrac and/or maxFrac is supplied, restricts to stocks with fraction of\n total portfolio >=minFrac and/or <=maxFrac.\n\n If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, omits\n investors with too few stocks, too many stocks, too small a fraction in the\n top 10 holdings, or too small a total stock value.\n If minInvestorsPerStock is specified, restricts to stocks with at least that many investors\n in the returned matrix; likewise, maxInvestorsPerStock can be used to give an upper bound.\n If minAllInvestorsPerStock or maxAllInvestorsPerStock is specified, then allCusipCounter\n should be a Counter counting all investors that have any position in each stock,\n and the result will be restricted based on this count.\n If cusipFilter is specified, this should be a function that returns True for cusips to keep.\n\n If supplied, all13FHoldingsMap should be a dict, and it will be updated with a full sorted\n holdings list for each CIK:\n all13FHoldingsMap[cik] = [(cusip, val, frac) ... ]\n without regard to the min/max options supplied to restrict the returned holdings map.\n\n Optionally adds holdings from a list of extraHoldingsMaps (used for 13G/13D filings).\n '
if (((minAllInvestorsPerStock is not None) or (maxAllInvestorsPerStock is not None)) and (allCusipCounter is None)):
allCusipCounter = collections.Counter()
if include13F:
(period, nextQStartEnd) = getPeriodAndNextQStartEnd(y, qNo)
holdingsMap = getHoldingsMap(scraper13F(**nextQStartEnd), period, minFrac=minFrac, maxFrac=maxFrac, minStocksPerInv=None, maxStocksPerInv=None, minTop10Frac=minTop10Frac, minAUM=minAUM, allCusipCounter=allCusipCounter, all13FHoldingsMap=all13FHoldingsMap)
else:
holdingsMap = {}
for extraHoldingsMap in extraHoldingsMaps:
addHoldingsMap(holdingsMap, extraHoldingsMap)
return holdingsMapToMatrix(holdingsMap, minStocksPerInvestor=minStocksPerInv, maxStocksPerInvestor=maxStocksPerInv, minInvestorsPerStock=minInvestorsPerStock, maxInvestorsPerStock=maxInvestorsPerStock, minAllInvestorsPerStock=minAllInvestorsPerStock, maxAllInvestorsPerStock=maxAllInvestorsPerStock, allCusipCounter=allCusipCounter, cusipFilter=cusipFilter, dtype=dtype)<|docstring|>Calculates a matrix of investor holdings for a quarter, based on all 13F filings filed
during the succeeding quarter.
Returns mat, ciks, cusips where mat is a matrix of shape (len(ciks), len(cusips))
in which each row has the fractions held by the corresponding cik in each cusip.
If minFrac and/or maxFrac is supplied, restricts to stocks with fraction of
total portfolio >=minFrac and/or <=maxFrac.
If minStocksPerInv, maxStocksPerInv, minTop10Frac or minAUM are specified, omits
investors with too few stocks, too many stocks, too small a fraction in the
top 10 holdings, or too small a total stock value.
If minInvestorsPerStock is specified, restricts to stocks with at least that many investors
in the returned matrix; likewise, maxInvestorsPerStock can be used to give an upper bound.
If minAllInvestorsPerStock or maxAllInvestorsPerStock is specified, then allCusipCounter
should be a Counter counting all investors that have any position in each stock,
and the result will be restricted based on this count.
If cusipFilter is specified, this should be a function that returns True for cusips to keep.
If supplied, all13FHoldingsMap should be a dict, and it will be updated with a full sorted
holdings list for each CIK:
all13FHoldingsMap[cik] = [(cusip, val, frac) ... ]
without regard to the min/max options supplied to restrict the returned holdings map.
Optionally adds holdings from a list of extraHoldingsMaps (used for 13G/13D filings).<|endoftext|> |
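Added end-to-end sketch: build the investor-by-stock fraction matrix for one quarter. The thresholds echo defaults that appear elsewhere in this file, and running it assumes the scraped 13F index data is present locally.
mat, ciks, cusips = getNSSForQ(2020, 4,
                               minFrac=0.01, maxFrac=0.4,
                               minStocksPerInv=3, maxStocksPerInv=100,
                               minTop10Frac=0.4,
                               minInvestorsPerStock=2)
print(mat.shape)                       # (len(ciks), len(cusips))
firstRow = dict(zip(cusips, mat[0]))   # first investor's fractional positions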
1461a9bbbb054b152d3566257e60e394272e7185f0eefad350d2a915f47a5fdc | def saveConvMatrixPy2(y, qNo, minFrac=0.13, maxFrac=0.4, minStocksPerInv=3, maxStocksPerInv=500, minTop10Frac=None, minAUM=75000000.0, dtype=np.float64, minInvestorsPerStock=2, maxInvestorsPerStock=None):
'\n Save a matrix of 13F conviction positions only for the given quarter,\n in a format readable by the BW old Python2 version.\n '
(mat, ciks, cusips) = getNSSForQ(y, qNo, minFrac=minFrac, maxFrac=maxFrac, minStocksPerInv=minStocksPerInv, maxStocksPerInv=maxStocksPerInv, minTop10Frac=minTop10Frac, minAUM=minAUM, dtype=dtype, minInvestorsPerStock=minInvestorsPerStock, maxInvestorsPerStock=maxInvestorsPerStock)
ciks = [cik.encode(encoding='ascii', errors='ignore') for cik in ciks]
cusips = [cusip.encode(encoding='ascii', errors='ignore') for cusip in cusips]
m = ([[('0' if (el == 0.0) else str(el)).encode(encoding='ascii') for el in row] for row in mat], ciks, indexMap(ciks), cusips, indexMap(cusips))
fPath = os.path.join(utils.stockDataRoot, f'Conv{y}Q{qNo}.pkl')
print('saving to', fPath)
utils.pickSave(fPath, m, fix_imports=True, protocol=2) | Save a matrix of 13F conviction positions only for the given quarter,
in a format readable by the BW old Python2 version. | secscan/scrape13F.py | saveConvMatrixPy2 | ikedim01/secscan | 0 | python | def saveConvMatrixPy2(y, qNo, minFrac=0.13, maxFrac=0.4, minStocksPerInv=3, maxStocksPerInv=500, minTop10Frac=None, minAUM=75000000.0, dtype=np.float64, minInvestorsPerStock=2, maxInvestorsPerStock=None):
'\n Save a matrix of 13F conviction positions only for the given quarter,\n in a format readable by the BW old Python2 version.\n '
(mat, ciks, cusips) = getNSSForQ(y, qNo, minFrac=minFrac, maxFrac=maxFrac, minStocksPerInv=minStocksPerInv, maxStocksPerInv=maxStocksPerInv, minTop10Frac=minTop10Frac, minAUM=minAUM, dtype=dtype, minInvestorsPerStock=minInvestorsPerStock, maxInvestorsPerStock=maxInvestorsPerStock)
ciks = [cik.encode(encoding='ascii', errors='ignore') for cik in ciks]
cusips = [cusip.encode(encoding='ascii', errors='ignore') for cusip in cusips]
m = ([[('0' if (el == 0.0) else str(el)).encode(encoding='ascii') for el in row] for row in mat], ciks, indexMap(ciks), cusips, indexMap(cusips))
fPath = os.path.join(utils.stockDataRoot, f'Conv{y}Q{qNo}.pkl')
print('saving to', fPath)
utils.pickSave(fPath, m, fix_imports=True, protocol=2) | def saveConvMatrixPy2(y, qNo, minFrac=0.13, maxFrac=0.4, minStocksPerInv=3, maxStocksPerInv=500, minTop10Frac=None, minAUM=75000000.0, dtype=np.float64, minInvestorsPerStock=2, maxInvestorsPerStock=None):
'\n Save a matrix of 13F conviction positions only for the given quarter,\n in a format readable by the BW old Python2 version.\n '
(mat, ciks, cusips) = getNSSForQ(y, qNo, minFrac=minFrac, maxFrac=maxFrac, minStocksPerInv=minStocksPerInv, maxStocksPerInv=maxStocksPerInv, minTop10Frac=minTop10Frac, minAUM=minAUM, dtype=dtype, minInvestorsPerStock=minInvestorsPerStock, maxInvestorsPerStock=maxInvestorsPerStock)
ciks = [cik.encode(encoding='ascii', errors='ignore') for cik in ciks]
cusips = [cusip.encode(encoding='ascii', errors='ignore') for cusip in cusips]
m = ([[('0' if (el == 0.0) else str(el)).encode(encoding='ascii') for el in row] for row in mat], ciks, indexMap(ciks), cusips, indexMap(cusips))
fPath = os.path.join(utils.stockDataRoot, f'Conv{y}Q{qNo}.pkl')
print('saving to', fPath)
utils.pickSave(fPath, m, fix_imports=True, protocol=2)<|docstring|>Save a matrix of 13F conviction positions only for the given quarter,
in a format readable by the BW old Python2 version.<|endoftext|> |
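Added usage note — a single call writes the quarter-stamped pickle under utils.stockDataRoot:
saveConvMatrixPy2(2020, 4)   # writes <stockDataRoot>/Conv2020Q4.pkl with pickle protocol 2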
911670c04c3a7273ecdf163091b5c1288854b7856308bb9c1253bb02da2be09b | def test_no_mysterious_extra_vertical_lines():
'\n This test is to make sure that issue #2 is fixed.\n '
width = 60
height = 17
pixels = render(xs=np.array([1, 1]), ys=np.array([0, 1]), x_min=3, y_min=0, x_max=6, y_max=1.1, width=width, height=height, lines=True)
desired_pixels = np.zeros((height, width), dtype=int)
np.testing.assert_array_equal(pixels, desired_pixels) | This test is to make sure that issue #2 is fixed. | tests/unit/test_pixel_matrix.py | test_no_mysterious_extra_vertical_lines | olavolav/textplot | 156 | python | def test_no_mysterious_extra_vertical_lines():
'\n \n '
width = 60
height = 17
pixels = render(xs=np.array([1, 1]), ys=np.array([0, 1]), x_min=3, y_min=0, x_max=6, y_max=1.1, width=width, height=height, lines=True)
desired_pixels = np.zeros((height, width), dtype=int)
np.testing.assert_array_equal(pixels, desired_pixels) | def test_no_mysterious_extra_vertical_lines():
'\n \n '
width = 60
height = 17
pixels = render(xs=np.array([1, 1]), ys=np.array([0, 1]), x_min=3, y_min=0, x_max=6, y_max=1.1, width=width, height=height, lines=True)
desired_pixels = np.zeros((height, width), dtype=int)
np.testing.assert_array_equal(pixels, desired_pixels)<|docstring|>This test is to make sure that issue #2 is fixed.<|endoftext|> |
3b3ab4355801b0aa0963082edf2bbb511dfd3aae2f9c1f0f6fac513339ea83e8 | def __init__(self, future: BaseFuture, value: int) -> None:
'ValueAtMostConstraint constructor.\n\n :param future: the variable that should be at most the given value\n :param value: the maximum value that the given future may have\n '
self._future = future
self._value = value | ValueAtMostConstraint constructor.
:param future: the variable that should be at most the given value
:param value: the maximum value that the given future may have | netqasm/sdk/constraint.py | __init__ | QuTech-Delft/netqasm | 6 | python | def __init__(self, future: BaseFuture, value: int) -> None:
'ValueAtMostConstraint constructor.\n\n :param future: the variable that should be at most the given value\n :param value: the maximum value that the given future may have\n '
self._future = future
self._value = value | def __init__(self, future: BaseFuture, value: int) -> None:
'ValueAtMostConstraint constructor.\n\n :param future: the variable that should be at most the given value\n :param value: the maximum value that the given future may have\n '
self._future = future
self._value = value<|docstring|>ValueAtMostConstraint constructor.
:param future: the variable that should be at most the given value
:param value: the maximum value that the given future may have<|endoftext|> |
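Added sketch of constructing the constraint; the future passed in would come from the netqasm SDK (for instance a measurement outcome), which is assumed rather than shown:
constraint = ValueAtMostConstraint(measurement_outcome, value=1)   # measurement_outcome: assumed BaseFuture; outcome must be <= 1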
5f041fe7b24475ead9fd87935b6064d5adf4d9c0f0e739e649574cf28e36cb12 | def dump_content(filename, offset, count, strucc):
'\n Dump the content of the file "filename" starting from offset and using the\n BStruct subclass pointed by strucc\n '
try:
fp = open(filename, 'rb')
except OSError as e:
print(("[ERROR] '%s' raised when tried to read the file '%s'" % (e.strerror, filename)))
sys.exit(1)
fp.seek(offset)
i = 0
while ((i < count) or (count == 0)):
buf = fp.read(strucc._size)
if (len(buf) != strucc._size):
break
obj = strucc(buf)
i += 1
print(obj) | Dump the content of the file "filename" starting from offset and using the
BStruct subclass pointed by strucc | scripts/py/mt_read.py | dump_content | ulises2k/EA-Tester | 58 | python | def dump_content(filename, offset, count, strucc):
'\n Dump the content of the file "filename" starting from offset and using the\n BStruct subclass pointed by strucc\n '
try:
fp = open(filename, 'rb')
except OSError as e:
print(("[ERROR] '%s' raised when tried to read the file '%s'" % (e.strerror, filename)))
sys.exit(1)
fp.seek(offset)
i = 0
while ((i < count) or (count == 0)):
buf = fp.read(strucc._size)
if (len(buf) != strucc._size):
break
obj = strucc(buf)
i += 1
print(obj) | def dump_content(filename, offset, count, strucc):
'\n Dump the content of the file "filename" starting from offset and using the\n BStruct subclass pointed by strucc\n '
try:
fp = open(filename, 'rb')
except OSError as e:
print(("[ERROR] '%s' raised when tried to read the file '%s'" % (e.strerror, filename)))
sys.exit(1)
fp.seek(offset)
i = 0
while ((i < count) or (count == 0)):
buf = fp.read(strucc._size)
if (len(buf) != strucc._size):
break
obj = strucc(buf)
i += 1
print(obj)<|docstring|>Dump the content of the file "filename" starting from offset and using the
BStruct subclass pointed by strucc<|endoftext|> |
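Added usage sketch — SymbolsRaw stands in for any BStruct subclass defined by this project (something exposing _size and parsing one fixed-width record); the file name and offset are invented:
dump_content('history/symbols.raw', offset=0, count=10, strucc=SymbolsRaw)  # SymbolsRaw: assumed BStruct subclass
# prints the first 10 records; count=0 keeps reading until EOF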
f01d556e6074e02e89effa273a6d0afd84a46841613d318b2e34e55b56ca3937 | def create_app(*, config_module_class: str) -> Flask:
'\n Creates app in function so that flask with flask extensions can be\n initialized with specific config. Here it defines the route of APIs\n so that it can be seen in one place where implementation is separated.\n\n Config is being fetched via module.class name where module.class name\n can be passed through environment variable.\n This is to make config fetched through runtime PYTHON_PATH so that\n Config class can be easily injected.\n More on: http://flask.pocoo.org/docs/1.0/config/\n\n :param config_module_class: name of the config (TODO: Implement config.py)\n :return: Flask\n '
if (FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME):
print('Using requested Flask module {module_name} and class {class_name}'.format(module_name=FLASK_APP_MODULE_NAME, class_name=FLASK_APP_CLASS_NAME), file=sys.stderr)
class_obj = getattr(importlib.import_module(FLASK_APP_MODULE_NAME), FLASK_APP_CLASS_NAME)
flask_kwargs_dict = {}
if FLASK_APP_KWARGS_DICT_STR:
print('Using kwargs {kwargs} to instantiate Flask'.format(kwargs=FLASK_APP_KWARGS_DICT_STR), file=sys.stderr)
flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
app = class_obj(__name__, **flask_kwargs_dict)
else:
app = Flask(__name__)
config_module_class = (os.getenv('METADATA_SVC_CONFIG_MODULE_CLASS') or config_module_class)
app.config.from_object(config_module_class)
logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
logging.info('Created app with config name {}'.format(config_module_class))
api_bp = Blueprint('api', __name__)
api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
api = Api(api_bp)
api.add_resource(PopularTablesAPI, '/popular_tables/')
api.add_resource(TableDetailAPI, '/table/<path:table_uri>')
api.add_resource(TableDescriptionAPI, '/table/<path:table_uri>/description', '/table/<path:table_uri>/description/<path:description_val>')
api.add_resource(TableTagAPI, '/table/<path:table_uri>/tag', '/table/<path:table_uri>/tag/<tag>')
api.add_resource(TableOwnerAPI, '/table/<path:table_uri>/owner/<owner>')
api.add_resource(ColumnDescriptionAPI, '/table/<path:table_uri>/column/<column_name>/description', '/table/<path:table_uri>/column/<column_name>/description/<path:description_val>')
api.add_resource(Neo4jDetailAPI, '/latest_updated_ts')
api.add_resource(TagAPI, '/tags/')
api.add_resource(UserDetailAPI, '/user/<path:user_id>')
api.add_resource(UserFollowAPI, '/user/<path:user_id>/follow/', '/user/<path:user_id>/follow/<resource_type>/<path:table_uri>')
api.add_resource(UserOwnAPI, '/user/<path:user_id>/own/', '/user/<path:user_id>/own/<resource_type>/<path:table_uri>')
api.add_resource(UserReadAPI, '/user/<path:user_id>/read/', '/user/<path:user_id>/read/<resource_type>/<path:table_uri>')
app.register_blueprint(api_bp)
return app | Creates app in function so that flask with flask extensions can be
initialized with specific config. Here it defines the route of APIs
so that it can be seen in one place where implementation is separated.
Config is being fetched via module.class name where module.class name
can be passed through environment variable.
This is to make config fetched through runtime PYTHON_PATH so that
Config class can be easily injected.
More on: http://flask.pocoo.org/docs/1.0/config/
:param config_module_class: name of the config (TODO: Implement config.py)
:return: Flask | metadata_service/__init__.py | create_app | feng-tao/amundsenmetadatalibrary | 1 | python | def create_app(*, config_module_class: str) -> Flask:
'\n Creates app in function so that flask with flask extensions can be\n initialized with specific config. Here it defines the route of APIs\n so that it can be seen in one place where implementation is separated.\n\n Config is being fetched via module.class name where module.class name\n can be passed through environment variable.\n This is to make config fetched through runtime PYTHON_PATH so that\n Config class can be easily injected.\n More on: http://flask.pocoo.org/docs/1.0/config/\n\n :param config_module_class: name of the config (TODO: Implement config.py)\n :return: Flask\n '
if (FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME):
print('Using requested Flask module {module_name} and class {class_name}'.format(module_name=FLASK_APP_MODULE_NAME, class_name=FLASK_APP_CLASS_NAME), file=sys.stderr)
class_obj = getattr(importlib.import_module(FLASK_APP_MODULE_NAME), FLASK_APP_CLASS_NAME)
flask_kwargs_dict = {}
if FLASK_APP_KWARGS_DICT_STR:
print('Using kwargs {kwargs} to instantiate Flask'.format(kwargs=FLASK_APP_KWARGS_DICT_STR), file=sys.stderr)
flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
app = class_obj(__name__, **flask_kwargs_dict)
else:
app = Flask(__name__)
config_module_class = (os.getenv('METADATA_SVC_CONFIG_MODULE_CLASS') or config_module_class)
app.config.from_object(config_module_class)
logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
logging.info('Created app with config name {}'.format(config_module_class))
api_bp = Blueprint('api', __name__)
api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
api = Api(api_bp)
api.add_resource(PopularTablesAPI, '/popular_tables/')
api.add_resource(TableDetailAPI, '/table/<path:table_uri>')
api.add_resource(TableDescriptionAPI, '/table/<path:table_uri>/description', '/table/<path:table_uri>/description/<path:description_val>')
api.add_resource(TableTagAPI, '/table/<path:table_uri>/tag', '/table/<path:table_uri>/tag/<tag>')
api.add_resource(TableOwnerAPI, '/table/<path:table_uri>/owner/<owner>')
api.add_resource(ColumnDescriptionAPI, '/table/<path:table_uri>/column/<column_name>/description', '/table/<path:table_uri>/column/<column_name>/description/<path:description_val>')
api.add_resource(Neo4jDetailAPI, '/latest_updated_ts')
api.add_resource(TagAPI, '/tags/')
api.add_resource(UserDetailAPI, '/user/<path:user_id>')
api.add_resource(UserFollowAPI, '/user/<path:user_id>/follow/', '/user/<path:user_id>/follow/<resource_type>/<path:table_uri>')
api.add_resource(UserOwnAPI, '/user/<path:user_id>/own/', '/user/<path:user_id>/own/<resource_type>/<path:table_uri>')
api.add_resource(UserReadAPI, '/user/<path:user_id>/read/', '/user/<path:user_id>/read/<resource_type>/<path:table_uri>')
app.register_blueprint(api_bp)
return app | def create_app(*, config_module_class: str) -> Flask:
'\n Creates app in function so that flask with flask extensions can be\n initialized with specific config. Here it defines the route of APIs\n so that it can be seen in one place where implementation is separated.\n\n Config is being fetched via module.class name where module.class name\n can be passed through environment variable.\n This is to make config fetched through runtime PYTHON_PATH so that\n Config class can be easily injected.\n More on: http://flask.pocoo.org/docs/1.0/config/\n\n :param config_module_class: name of the config (TODO: Implement config.py)\n :return: Flask\n '
if (FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME):
print('Using requested Flask module {module_name} and class {class_name}'.format(module_name=FLASK_APP_MODULE_NAME, class_name=FLASK_APP_CLASS_NAME), file=sys.stderr)
class_obj = getattr(importlib.import_module(FLASK_APP_MODULE_NAME), FLASK_APP_CLASS_NAME)
flask_kwargs_dict = {}
if FLASK_APP_KWARGS_DICT_STR:
print('Using kwargs {kwargs} to instantiate Flask'.format(kwargs=FLASK_APP_KWARGS_DICT_STR), file=sys.stderr)
flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
app = class_obj(__name__, **flask_kwargs_dict)
else:
app = Flask(__name__)
config_module_class = (os.getenv('METADATA_SVC_CONFIG_MODULE_CLASS') or config_module_class)
app.config.from_object(config_module_class)
logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
logging.info('Created app with config name {}'.format(config_module_class))
api_bp = Blueprint('api', __name__)
api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
api = Api(api_bp)
api.add_resource(PopularTablesAPI, '/popular_tables/')
api.add_resource(TableDetailAPI, '/table/<path:table_uri>')
api.add_resource(TableDescriptionAPI, '/table/<path:table_uri>/description', '/table/<path:table_uri>/description/<path:description_val>')
api.add_resource(TableTagAPI, '/table/<path:table_uri>/tag', '/table/<path:table_uri>/tag/<tag>')
api.add_resource(TableOwnerAPI, '/table/<path:table_uri>/owner/<owner>')
api.add_resource(ColumnDescriptionAPI, '/table/<path:table_uri>/column/<column_name>/description', '/table/<path:table_uri>/column/<column_name>/description/<path:description_val>')
api.add_resource(Neo4jDetailAPI, '/latest_updated_ts')
api.add_resource(TagAPI, '/tags/')
api.add_resource(UserDetailAPI, '/user/<path:user_id>')
api.add_resource(UserFollowAPI, '/user/<path:user_id>/follow/', '/user/<path:user_id>/follow/<resource_type>/<path:table_uri>')
api.add_resource(UserOwnAPI, '/user/<path:user_id>/own/', '/user/<path:user_id>/own/<resource_type>/<path:table_uri>')
api.add_resource(UserReadAPI, '/user/<path:user_id>/read/', '/user/<path:user_id>/read/<resource_type>/<path:table_uri>')
app.register_blueprint(api_bp)
return app<|docstring|>Creates app in function so that flask with flask extensions can be
initialized with specific config. Here it defines the route of APIs
so that it can be seen in one place where implementation is separated.
Config is being fetched via module.class name where module.class name
can be passed through environment variable.
This is to make config fetched through runtime PYTHON_PATH so that
Config class can be easily injected.
More on: http://flask.pocoo.org/docs/1.0/config/
:param config_module_class: name of the config (TODO: Implement config.py)
:return: Flask<|endoftext|> |
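Added boot sketch; the config class path is an assumption, not necessarily one that exists in this repository:
application = create_app(config_module_class='metadata_service.config.LocalConfig')
application.run(host='0.0.0.0', port=5002)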
e541a61daf36fedf357af8f6ac6fa71e6edc0dc04876dcaac5fdfa364dffd835 | def test_evaluate(self):
'\n Test if values are computed correctly.\n '
for struct in [rosen_for_sensi(2, False, [0, 1]), poly_for_sensi(2, True, 0.5), convreact_for_funmode(2, [(- 0.3), (- 0.7)])]:
self._test_evaluate_funmode(struct)
self._test_evaluate_resmode(convreact_for_resmode(1, [(- 0.3), (- 0.7)])) | Test if values are computed correctly. | test/test_aggregated.py | test_evaluate | LukasSp/pyPESTO | 0 | python | def test_evaluate(self):
'\n \n '
for struct in [rosen_for_sensi(2, False, [0, 1]), poly_for_sensi(2, True, 0.5), convreact_for_funmode(2, [(- 0.3), (- 0.7)])]:
self._test_evaluate_funmode(struct)
self._test_evaluate_resmode(convreact_for_resmode(1, [(- 0.3), (- 0.7)])) | def test_evaluate(self):
'\n \n '
for struct in [rosen_for_sensi(2, False, [0, 1]), poly_for_sensi(2, True, 0.5), convreact_for_funmode(2, [(- 0.3), (- 0.7)])]:
self._test_evaluate_funmode(struct)
self._test_evaluate_resmode(convreact_for_resmode(1, [(- 0.3), (- 0.7)]))<|docstring|>Test if values are computed correctly.<|endoftext|> |
e5f1bf0c82a4f7f47ab82b191e8c8c4abd6d40f057df72260d3e9fe2ae33b902 | @abstractmethod
def getConfigurationController(self) -> 'XConfigurationController_557c15c4':
'\n Return the XConfigurationController object.\n ' | Return the XConfigurationController object. | ooobuild/lo/drawing/framework/x_controller_manager.py | getConfigurationController | Amourspirit/ooo_uno_tmpl | 0 | python | @abstractmethod
def getConfigurationController(self) -> 'XConfigurationController_557c15c4':
'\n \n ' | @abstractmethod
def getConfigurationController(self) -> 'XConfigurationController_557c15c4':
'\n \n '<|docstring|>Return the XConfigurationController object.<|endoftext|> |
16a4dc46b2601062574717b439518bad8164353e3bd8f19cdcc33c779d12a7b4 | @abstractmethod
def getModuleController(self) -> 'XModuleController_c5d112d2':
'\n Return the XModuleController object.\n ' | Return the XModuleController object. | ooobuild/lo/drawing/framework/x_controller_manager.py | getModuleController | Amourspirit/ooo_uno_tmpl | 0 | python | @abstractmethod
def getModuleController(self) -> 'XModuleController_c5d112d2':
'\n \n ' | @abstractmethod
def getModuleController(self) -> 'XModuleController_c5d112d2':
'\n \n '<|docstring|>Return the XModuleController object.<|endoftext|> |
9063bdcaac7d0cf10dd22c4ce5a2dd5b55b32e401b54ae2d45a3503c5c84cb5d | def testIndividualDataConsentDocument(self):
'Test IndividualDataConsentDocument'
pass | Test IndividualDataConsentDocument | test/test_individual_data_consent_document.py | testIndividualDataConsentDocument | My-Data-My-Consent/python-sdk | 0 | python | def testIndividualDataConsentDocument(self):
pass | def testIndividualDataConsentDocument(self):
pass<|docstring|>Test IndividualDataConsentDocument<|endoftext|> |
39ce838414f9a26f411e0ad8db3e34c1f8d373a93575b134e19f800c98b72210 | def on_start(self):
"Run the task pool.\n\n Will pre-fork all workers so they're ready to accept tasks.\n\n "
self._pool = self.Pool(processes=self.limit, **self.options)
self.on_apply = self._pool.apply_async | Run the task pool.
Will pre-fork all workers so they're ready to accept tasks. | celery/concurrency/processes/__init__.py | on_start | aleszoulek/celery | 2 | python | def on_start(self):
"Run the task pool.\n\n Will pre-fork all workers so they're ready to accept tasks.\n\n "
self._pool = self.Pool(processes=self.limit, **self.options)
self.on_apply = self._pool.apply_async | def on_start(self):
"Run the task pool.\n\n Will pre-fork all workers so they're ready to accept tasks.\n\n "
self._pool = self.Pool(processes=self.limit, **self.options)
self.on_apply = self._pool.apply_async<|docstring|>Run the task pool.
Will pre-fork all workers so they're ready to accept tasks.<|endoftext|> |
2f5cb79463e4d4d21b9a8a87beac01a5f9f6f59a7f1e3fcf52f6001e8be90d4b | def on_stop(self):
'Gracefully stop the pool.'
if ((self._pool is not None) and (self._pool._state == RUN)):
self._pool.close()
self._pool.join()
self._pool = None | Gracefully stop the pool. | celery/concurrency/processes/__init__.py | on_stop | aleszoulek/celery | 2 | python | def on_stop(self):
if ((self._pool is not None) and (self._pool._state == RUN)):
self._pool.close()
self._pool.join()
self._pool = None | def on_stop(self):
if ((self._pool is not None) and (self._pool._state == RUN)):
self._pool.close()
self._pool.join()
self._pool = None<|docstring|>Gracefully stop the pool.<|endoftext|> |
eb8cc733aff7634e56d32c94b107fcbd919faa8efc7b8cc4d6a2d2830a166133 | def on_terminate(self):
'Force terminate the pool.'
if (self._pool is not None):
self._pool.terminate()
self._pool = None | Force terminate the pool. | celery/concurrency/processes/__init__.py | on_terminate | aleszoulek/celery | 2 | python | def on_terminate(self):
if (self._pool is not None):
self._pool.terminate()
self._pool = None | def on_terminate(self):
if (self._pool is not None):
self._pool.terminate()
self._pool = None<|docstring|>Force terminate the pool.<|endoftext|> |
3fca7905be83fbacc6956a61366b9dd92582a19321cdca5e372befbe2ef4cd9a | def test_patch_druid_get_columns(mocker: MockerFixture) -> None:
'\n Test ``patch_druid_get_columns``.\n '
pytest.importorskip('pydruid')
DruidDialect = mocker.patch('datajunction.fixes.DruidDialect')
connection = mocker.MagicMock()
mocker.patch('datajunction.fixes.PYDRUID_INSTALLED', new=False)
patch_druid_get_columns()
DruidDialect.assert_not_called()
mocker.patch('datajunction.fixes.PYDRUID_INSTALLED', new=True)
patch_druid_get_columns()
DruidDialect.get_columns(None, connection, 'table_name', 'schema')
assert (str(connection.execute.mock_calls[0].args[0]) == "\nSELECT COLUMN_NAME,\n DATA_TYPE,\n IS_NULLABLE,\n COLUMN_DEFAULT\n FROM INFORMATION_SCHEMA.COLUMNS\n WHERE TABLE_NAME = 'table_name'\n AND TABLE_SCHEMA = 'schema'")
connection.execute.reset_mock()
DruidDialect.get_columns(None, connection, 'table_name')
assert (str(connection.execute.mock_calls[0].args[0]) == "\nSELECT COLUMN_NAME,\n DATA_TYPE,\n IS_NULLABLE,\n COLUMN_DEFAULT\n FROM INFORMATION_SCHEMA.COLUMNS\n WHERE TABLE_NAME = 'table_name'\n") | Test ``patch_druid_get_columns``. | tests/fixes_test.py | test_patch_druid_get_columns | DataJunction/datajunction | 0 | python | def test_patch_druid_get_columns(mocker: MockerFixture) -> None:
'\n \n '
pytest.importorskip('pydruid')
DruidDialect = mocker.patch('datajunction.fixes.DruidDialect')
connection = mocker.MagicMock()
mocker.patch('datajunction.fixes.PYDRUID_INSTALLED', new=False)
patch_druid_get_columns()
DruidDialect.assert_not_called()
mocker.patch('datajunction.fixes.PYDRUID_INSTALLED', new=True)
patch_druid_get_columns()
DruidDialect.get_columns(None, connection, 'table_name', 'schema')
assert (str(connection.execute.mock_calls[0].args[0]) == "\nSELECT COLUMN_NAME,\n DATA_TYPE,\n IS_NULLABLE,\n COLUMN_DEFAULT\n FROM INFORMATION_SCHEMA.COLUMNS\n WHERE TABLE_NAME = 'table_name'\n AND TABLE_SCHEMA = 'schema'")
connection.execute.reset_mock()
DruidDialect.get_columns(None, connection, 'table_name')
assert (str(connection.execute.mock_calls[0].args[0]) == "\nSELECT COLUMN_NAME,\n DATA_TYPE,\n IS_NULLABLE,\n COLUMN_DEFAULT\n FROM INFORMATION_SCHEMA.COLUMNS\n WHERE TABLE_NAME = 'table_name'\n") | def test_patch_druid_get_columns(mocker: MockerFixture) -> None:
'\n \n '
pytest.importorskip('pydruid')
DruidDialect = mocker.patch('datajunction.fixes.DruidDialect')
connection = mocker.MagicMock()
mocker.patch('datajunction.fixes.PYDRUID_INSTALLED', new=False)
patch_druid_get_columns()
DruidDialect.assert_not_called()
mocker.patch('datajunction.fixes.PYDRUID_INSTALLED', new=True)
patch_druid_get_columns()
DruidDialect.get_columns(None, connection, 'table_name', 'schema')
assert (str(connection.execute.mock_calls[0].args[0]) == "\nSELECT COLUMN_NAME,\n DATA_TYPE,\n IS_NULLABLE,\n COLUMN_DEFAULT\n FROM INFORMATION_SCHEMA.COLUMNS\n WHERE TABLE_NAME = 'table_name'\n AND TABLE_SCHEMA = 'schema'")
connection.execute.reset_mock()
DruidDialect.get_columns(None, connection, 'table_name')
assert (str(connection.execute.mock_calls[0].args[0]) == "\nSELECT COLUMN_NAME,\n DATA_TYPE,\n IS_NULLABLE,\n COLUMN_DEFAULT\n FROM INFORMATION_SCHEMA.COLUMNS\n WHERE TABLE_NAME = 'table_name'\n")<|docstring|>Test ``patch_druid_get_columns``.<|endoftext|> |
a6b73a99925ce8171b093b0a763ccd367c6bbf6d534a23c2532127be1e1837a5 | def forward(self, logits):
'\n Input: logits -> T x K # Where K is the number of classes and T is the batch size\n Output: L = MEL, BEL\n '
sum1 = torch.zeros([logits.shape[0], 1])
for t in range(logits.shape[0]):
sum1[t] = self.entropy(logits[t, :])
L1 = torch.mean(sum1)
mean_output = torch.mean(logits, dim=0)
L2 = ((- 1.0) * self.entropy(mean_output))
return (L1.cuda(), L2.cuda()) | Input: logits -> T x K # Where K is the number of classes and T is the batch size
Output: L = MEL, BEL | CIFAR10/losses.py | forward | ankanbansal/semi-supervised-learning | 0 | python | def forward(self, logits):
'\n Input: logits -> T x K # Where K is the number of classes and T is the batch size\n Output: L = MEL, BEL\n '
sum1 = torch.zeros([logits.shape[0], 1])
for t in range(logits.shape[0]):
sum1[t] = self.entropy(logits[t, :])
L1 = torch.mean(sum1)
mean_output = torch.mean(logits, dim=0)
L2 = ((- 1.0) * self.entropy(mean_output))
return (L1.cuda(), L2.cuda()) | def forward(self, logits):
'\n Input: logits -> T x K # Where K is the number of classes and T is the batch size\n Output: L = MEL, BEL\n '
sum1 = torch.zeros([logits.shape[0], 1])
for t in range(logits.shape[0]):
sum1[t] = self.entropy(logits[t, :])
L1 = torch.mean(sum1)
mean_output = torch.mean(logits, dim=0)
L2 = ((- 1.0) * self.entropy(mean_output))
return (L1.cuda(), L2.cuda())<|docstring|>Input: logits -> T x K # Where K is the number of classes and T is the batch size
Output: L = MEL, BEL<|endoftext|> |
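A minimal CPU sketch of the two terms above (MEL is the mean per-sample entropy; BEL is minus the entropy of the batch-mean logits). Only torch is assumed; the .cuda() calls in the original additionally require a GPU.

import torch
import torch.nn.functional as F

logits = torch.randn(16, 10)                                                  # T x K logits
per_sample = -(F.softmax(logits, dim=-1) * F.log_softmax(logits, dim=-1)).sum(-1)
mel = per_sample.mean()                                                       # L1 above
mean_logits = logits.mean(dim=0)                                              # average logits over the batch
bel = (F.softmax(mean_logits, dim=-1) * F.log_softmax(mean_logits, dim=-1)).sum()   # L2 above (minus the entropy of the mean prediction)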
f302c6535dfc985bdcc89c65f46b6f8ac3a15870d6dced70a6a74a25d61d679d | def entropy(self, logits):
'\n Input: logits -> N x 1 x D # Where D is the feature dimension\n Output: entropy -> N x 1\n '
return ((- 1.0) * (F.softmax(logits, dim=(- 1)) * F.log_softmax(logits, dim=(- 1))).sum((- 1))) | Input: logits -> N x 1 x D # Where D is the feature dimension
Output: entropy -> N x 1 | CIFAR10/losses.py | entropy | ankanbansal/semi-supervised-learning | 0 | python | def entropy(self, logits):
'\n Input: logits -> N x 1 x D # Where D is the feature dimension\n Output: entropy -> N x 1\n '
return ((- 1.0) * (F.softmax(logits, dim=(- 1)) * F.log_softmax(logits, dim=(- 1))).sum((- 1))) | def entropy(self, logits):
'\n Input: logits -> N x 1 x D # Where D is the feature dimension\n Output: entropy -> N x 1\n '
return ((- 1.0) * (F.softmax(logits, dim=(- 1)) * F.log_softmax(logits, dim=(- 1))).sum((- 1)))<|docstring|>Input: logits -> N x 1 x D # Where D is the feature dimension
Output: entropy -> N x 1<|endoftext|> |
27a0017511d15369bf40ebe9951409f442eab57ac95b0cf8b67b8d1a6bc42518 | def cross_entropy(self, logits1, logits2):
'\n Input: logits1 -> N x 1 x D # Where D is the feature dimension\n logits2 -> 1 x N x D # Where D is the feature dimension\n Output: Pairwise Cross-entropy -> N x N\n '
return ((- 1.0) * (F.softmax(logits1, dim=(- 1)) * F.log_softmax(logits2, dim=(- 1))).sum((- 1))) | Input: logits1 -> N x 1 x D # Where D is the feature dimension
logits2 -> 1 x N x D # Where D is the feature dimension
Output: Pairwise Cross-entropy -> N x N | CIFAR10/losses.py | cross_entropy | ankanbansal/semi-supervised-learning | 0 | python | def cross_entropy(self, logits1, logits2):
'\n Input: logits1 -> N x 1 x D # Where D is the feature dimension\n logits2 -> 1 x N x D # Where D is the feature dimension\n Output: Pairwise Cross-entropy -> N x N\n '
return ((- 1.0) * (F.softmax(logits1, dim=(- 1)) * F.log_softmax(logits2, dim=(- 1))).sum((- 1))) | def cross_entropy(self, logits1, logits2):
'\n Input: logits1 -> N x 1 x D # Where D is the feature dimension\n logits2 -> 1 x N x D # Where D is the feature dimension\n Output: Pairwise Cross-entropy -> N x N\n '
return ((- 1.0) * (F.softmax(logits1, dim=(- 1)) * F.log_softmax(logits2, dim=(- 1))).sum((- 1)))<|docstring|>Input: logits1 -> N x 1 x D # Where D is the feature dimension
logits2 -> 1 x N x D # Where D is the feature dimension
Output: Pairwise Cross-entropy -> N x N<|endoftext|> |
3e16aef19354ff2968657ce14efd8436a52718704497bf5c8d1df16e58b0f1e6 | def distances(self, A, distance_type='Euclidean', eps=1e-06):
"\n Input: A -> num_transformations x D # Where D is the feature dimension\n distance_type -> 'Euclidean'/'cosine'/'KL'\n Output: distances -> num_transformations x num_transformations pair wise distances\n "
assert (A.dim() == 2)
if (distance_type == 'Euclidean'):
B = A.unsqueeze(1)
C = A.unsqueeze(0)
differences = (B - C)
distances = torch.sum((differences * differences), (- 1))
elif (distance_type == 'cosine'):
B = F.normalize(A, p=2, dim=1)
distances = (1.0 - torch.matmul(B, B.t()))
elif (distance_type == 'KL'):
B = A.unsqueeze(1)
C = A.unsqueeze(0)
distances = (((- 1.0) * self.entropy(B)) + self.cross_entropy(B, C))
return distances | Input: A -> num_transformations x D # Where D is the feature dimension
distance_type -> 'Euclidean'/'cosine'/'KL'
Output: distances -> num_transformations x num_transformations pair wise distances | CIFAR10/losses.py | distances | ankanbansal/semi-supervised-learning | 0 | python | def distances(self, A, distance_type='Euclidean', eps=1e-06):
"\n Input: A -> num_transformations x D # Where D is the feature dimension\n distance_type -> 'Euclidean'/'cosine'/'KL'\n Output: distances -> num_transformations x num_transformations pair wise distances\n "
assert (A.dim() == 2)
if (distance_type == 'Euclidean'):
B = A.unsqueeze(1)
C = A.unsqueeze(0)
differences = (B - C)
distances = torch.sum((differences * differences), (- 1))
elif (distance_type == 'cosine'):
B = F.normalize(A, p=2, dim=1)
distances = (1.0 - torch.matmul(B, B.t()))
elif (distance_type == 'KL'):
B = A.unsqueeze(1)
C = A.unsqueeze(0)
distances = (((- 1.0) * self.entropy(B)) + self.cross_entropy(B, C))
return distances | def distances(self, A, distance_type='Euclidean', eps=1e-06):
"\n Input: A -> num_transformations x D # Where D is the feature dimension\n distance_type -> 'Euclidean'/'cosine'/'KL'\n Output: distances -> num_transformations x num_transformations pair wise distances\n "
assert (A.dim() == 2)
if (distance_type == 'Euclidean'):
B = A.unsqueeze(1)
C = A.unsqueeze(0)
differences = (B - C)
distances = torch.sum((differences * differences), (- 1))
elif (distance_type == 'cosine'):
B = F.normalize(A, p=2, dim=1)
distances = (1.0 - torch.matmul(B, B.t()))
elif (distance_type == 'KL'):
B = A.unsqueeze(1)
C = A.unsqueeze(0)
distances = (((- 1.0) * self.entropy(B)) + self.cross_entropy(B, C))
return distances<|docstring|>Input: A -> num_transformations x D # Where D is the feature dimension
distance_type -> 'Euclidean'/'cosine'/'KL'
Output: distances -> num_transformations x num_transformations pair wise distances<|endoftext|> |
7dce90b81a2c2ae052c479a64782c126f1b417c863337a850725e4fff84401c1 | def forward(self, features, num_transformations, distance_type='Euclidean'):
'\n Input: features -> T x D # Where D is the feature dimension and T is the batch size\n num_transformations -> Number of transformations applied to the data\n (Make sure that T is a multiple of num_transformations)\n Output: ST Loss\n '
batch_size = features.shape[0]
all_index_groups = [[((i * num_transformations) + j) for j in range(num_transformations)] for i in range((batch_size // num_transformations))]
total_loss = 0.0
for i in range(len(all_index_groups)):
split_features = torch.index_select(features, 0, torch.cuda.LongTensor(all_index_groups[i]))
distances = self.distances(split_features, distance_type=distance_type)
total_loss += (0.5 * torch.sum(distances))
total_loss = (total_loss / (1.0 * batch_size))
return total_loss | Input: features -> T x D # Where D is the feature dimension and T is the batch size
num_transformations -> Number of transformations applied to the data
(Make sure that T is a multiple of num_transformations)
Output: ST Loss | CIFAR10/losses.py | forward | ankanbansal/semi-supervised-learning | 0 | python | def forward(self, features, num_transformations, distance_type='Euclidean'):
'\n Input: features -> T x D # Where D is the feature dimension and T is the batch size\n num_transformations -> Number of transformations applied to the data\n (Make sure that T is a multiple of num_transformations)\n Output: ST Loss\n '
batch_size = features.shape[0]
all_index_groups = [[((i * num_transformations) + j) for j in range(num_transformations)] for i in range((batch_size // num_transformations))]
total_loss = 0.0
for i in range(len(all_index_groups)):
split_features = torch.index_select(features, 0, torch.cuda.LongTensor(all_index_groups[i]))
distances = self.distances(split_features, distance_type=distance_type)
total_loss += (0.5 * torch.sum(distances))
total_loss = (total_loss / (1.0 * batch_size))
return total_loss | def forward(self, features, num_transformations, distance_type='Euclidean'):
'\n Input: features -> T x D # Where D is the feature dimension and T is the batch size\n num_transformations -> Number of transformations applied to the data\n (Make sure that T is a multiple of num_transformations)\n Output: ST Loss\n '
batch_size = features.shape[0]
all_index_groups = [[((i * num_transformations) + j) for j in range(num_transformations)] for i in range((batch_size // num_transformations))]
total_loss = 0.0
for i in range(len(all_index_groups)):
split_features = torch.index_select(features, 0, torch.cuda.LongTensor(all_index_groups[i]))
distances = self.distances(split_features, distance_type=distance_type)
total_loss += (0.5 * torch.sum(distances))
total_loss = (total_loss / (1.0 * batch_size))
return total_loss<|docstring|>Input: features -> T x D # Where D is the feature dimension and T is the batch size
num_transformations -> Number of transformations applied to the data
(Make sure that T is a multiple of num_transformations)
Output: ST Loss<|endoftext|> |
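The unsqueeze-based broadcasting in cross_entropy() and distances() above, and the index grouping in forward(), can be checked in isolation with plain torch (no CUDA assumed):

import torch

A = torch.randn(4, 3)                       # num_transformations x D
B, C = A.unsqueeze(1), A.unsqueeze(0)       # 4 x 1 x 3 and 1 x 4 x 3
sq_euclid = ((B - C) ** 2).sum(-1)          # 4 x 4 pairwise squared Euclidean distances
assert torch.allclose(sq_euclid.diagonal(), torch.zeros(4))
groups = [list(range(i * 2, i * 2 + 2)) for i in range(A.shape[0] // 2)]   # consecutive transformed copies, hence the integer division in forward()
print(groups)                               # [[0, 1], [2, 3]]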
a63499ca3a8931efa15f333017befaf1cdcc43e898f3d5875c5ad92c3a43eaf0 | def syscall(*args):
' Helper method to make a syscall, check for errors, and return output as a string.'
return subprocess.run(args, capture_output=True, check=True, text=True).stdout | Helper method to make a syscall, check for errors, and return output as a string. | switch.py | syscall | CydeWeys/static-window-switcher | 1 | python | def syscall(*args):
' '
return subprocess.run(args, capture_output=True, check=True, text=True).stdout | def syscall(*args):
' '
return subprocess.run(args, capture_output=True, check=True, text=True).stdout<|docstring|>Helper method to make a syscall, check for errors, and return output as a string.<|endoftext|> |
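A hedged usage sketch for the helper above (assumes a Unix-like system where echo and false are on PATH; check=True turns non-zero exits into exceptions):

import subprocess   # needed by syscall()

print(syscall('echo', 'hello').strip())           # -> hello
try:
    syscall('false')                               # non-zero exit status
except subprocess.CalledProcessError as err:
    print('command failed with code', err.returncode)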
9cbf987a648bf6357bece8ae25d6e28f3f6df4833dd67e277adbf655d1065fe7 | @staticmethod
def _boot_psus_replicates(number_psus: int, number_reps: int, samp_rate: Number=0, size_gap: int=1) -> np.ndarray:
'Creates the bootstrap replicates structure'
if (number_psus <= size_gap):
raise AssertionError('size_gap should be smaller than the number of units')
sample_size = (number_psus - size_gap)
psu = np.arange(0, number_psus)
psu_boot = np.random.choice(psu, size=(number_reps, sample_size))
psu_replicates = np.zeros(shape=(number_psus, number_reps))
for rep in np.arange(0, number_reps):
(psu_ids, psus_counts) = np.unique(psu_boot[rep, :], return_counts=True)
psu_replicates[:, rep][psu_ids] = psus_counts
ratio_sqrt = np.sqrt((((1 - samp_rate) * sample_size) / (number_psus - 1)))
return np.asarray(((1 - ratio_sqrt) + ((ratio_sqrt * (number_psus / sample_size)) * psu_replicates))) | Creates the bootstrap replicates structure | src/samplics/weighting/replicates.py | _boot_psus_replicates | samplics-org/samplics | 14 | python | @staticmethod
def _boot_psus_replicates(number_psus: int, number_reps: int, samp_rate: Number=0, size_gap: int=1) -> np.ndarray:
if (number_psus <= size_gap):
raise AssertionError('size_gap should be smaller than the number of units')
sample_size = (number_psus - size_gap)
psu = np.arange(0, number_psus)
psu_boot = np.random.choice(psu, size=(number_reps, sample_size))
psu_replicates = np.zeros(shape=(number_psus, number_reps))
for rep in np.arange(0, number_reps):
(psu_ids, psus_counts) = np.unique(psu_boot[rep, :], return_counts=True)
psu_replicates[:, rep][psu_ids] = psus_counts
ratio_sqrt = np.sqrt((((1 - samp_rate) * sample_size) / (number_psus - 1)))
return np.asarray(((1 - ratio_sqrt) + ((ratio_sqrt * (number_psus / sample_size)) * psu_replicates))) | @staticmethod
def _boot_psus_replicates(number_psus: int, number_reps: int, samp_rate: Number=0, size_gap: int=1) -> np.ndarray:
if (number_psus <= size_gap):
raise AssertionError('size_gap should be smaller than the number of units')
sample_size = (number_psus - size_gap)
psu = np.arange(0, number_psus)
psu_boot = np.random.choice(psu, size=(number_reps, sample_size))
psu_replicates = np.zeros(shape=(number_psus, number_reps))
for rep in np.arange(0, number_reps):
(psu_ids, psus_counts) = np.unique(psu_boot[rep, :], return_counts=True)
psu_replicates[:, rep][psu_ids] = psus_counts
ratio_sqrt = np.sqrt((((1 - samp_rate) * sample_size) / (number_psus - 1)))
return np.asarray(((1 - ratio_sqrt) + ((ratio_sqrt * (number_psus / sample_size)) * psu_replicates)))<|docstring|>Creates the bootstrap replicates structure<|endoftext|> |
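A toy numpy rerun of the same rescaling (a Rao-Wu style rescaled bootstrap: draw m = n - size_gap PSUs with replacement, then turn selection counts into weight multipliers); all names and numbers below are illustrative only:

import numpy as np

n, reps, gap = 5, 3, 1
m = n - gap                                    # bootstrap sample size per replicate
counts = np.zeros((n, reps))
for r in range(reps):
    drawn, c = np.unique(np.random.choice(n, size=m), return_counts=True)
    counts[drawn, r] = c
ratio = np.sqrt(m / (n - 1))                   # samp_rate = 0, as in the default above
multipliers = (1 - ratio) + ratio * (n / m) * counts
print(multipliers.round(3))                    # one column of weight multipliers per replicate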
e2caa38086c04b4ee88086eac6e8483d9d892152b9ff29f36a1e3a3e9e1c99e5 | def _brr_replicates(self, psu: np.ndarray, stratum: Optional[np.ndarray]) -> np.ndarray:
'Creates the brr replicate structure'
if (not (0 <= self.fay_coef < 1)):
raise ValueError('The Fay coefficient must be greater or equal to 0 and lower than 1.')
self._brr_number_reps(psu, stratum)
self.rep_coefs = list(((1 / (self.number_reps * pow((1 - self.fay_coef), 2))) * np.ones(self.number_reps)))
brr_coefs = hdd.hadamard(self.number_reps).astype(float)
brr_coefs = brr_coefs[:, 1:(self.number_strata + 1)]
brr_coefs = np.repeat(brr_coefs, 2, axis=1)
for r in np.arange(self.number_reps):
for h in np.arange(self.number_strata):
start = (2 * h)
end = (start + 2)
if (brr_coefs[(r, start)] == 1.0):
brr_coefs[r, start:end] = [self.fay_coef, (2 - self.fay_coef)]
else:
brr_coefs[r, start:end] = [(2 - self.fay_coef), self.fay_coef]
return brr_coefs.T | Creates the brr replicate structure | src/samplics/weighting/replicates.py | _brr_replicates | samplics-org/samplics | 14 | python | def _brr_replicates(self, psu: np.ndarray, stratum: Optional[np.ndarray]) -> np.ndarray:
if (not (0 <= self.fay_coef < 1)):
raise ValueError('The Fay coefficient must be greater or equal to 0 and lower than 1.')
self._brr_number_reps(psu, stratum)
self.rep_coefs = list(((1 / (self.number_reps * pow((1 - self.fay_coef), 2))) * np.ones(self.number_reps)))
brr_coefs = hdd.hadamard(self.number_reps).astype(float)
brr_coefs = brr_coefs[:, 1:(self.number_strata + 1)]
brr_coefs = np.repeat(brr_coefs, 2, axis=1)
for r in np.arange(self.number_reps):
for h in np.arange(self.number_strata):
start = (2 * h)
end = (start + 2)
if (brr_coefs[(r, start)] == 1.0):
brr_coefs[r, start:end] = [self.fay_coef, (2 - self.fay_coef)]
else:
brr_coefs[r, start:end] = [(2 - self.fay_coef), self.fay_coef]
return brr_coefs.T | def _brr_replicates(self, psu: np.ndarray, stratum: Optional[np.ndarray]) -> np.ndarray:
if (not (0 <= self.fay_coef < 1)):
raise ValueError('The Fay coefficient must be greater or equal to 0 and lower than 1.')
self._brr_number_reps(psu, stratum)
self.rep_coefs = list(((1 / (self.number_reps * pow((1 - self.fay_coef), 2))) * np.ones(self.number_reps)))
brr_coefs = hdd.hadamard(self.number_reps).astype(float)
brr_coefs = brr_coefs[:, 1:(self.number_strata + 1)]
brr_coefs = np.repeat(brr_coefs, 2, axis=1)
for r in np.arange(self.number_reps):
for h in np.arange(self.number_strata):
start = (2 * h)
end = (start + 2)
if (brr_coefs[(r, start)] == 1.0):
brr_coefs[r, start:end] = [self.fay_coef, (2 - self.fay_coef)]
else:
brr_coefs[r, start:end] = [(2 - self.fay_coef), self.fay_coef]
return brr_coefs.T<|docstring|>Creates the brr replicate structure<|endoftext|> |
66e718eff1a27156be1ffbb93926908c9234a35acec8c8e6a488cdeeadec8837 | @staticmethod
def _jkn_psus_replicates(number_psus: int) -> np.ndarray:
'Creates the jackknife delete-1 replicate structure '
jk_coefs = ((number_psus / (number_psus - 1)) * (np.ones((number_psus, number_psus)) - np.identity(number_psus)))
return np.asarray(jk_coefs) | Creates the jackknife delete-1 replicate structure | src/samplics/weighting/replicates.py | _jkn_psus_replicates | samplics-org/samplics | 14 | python | @staticmethod
def _jkn_psus_replicates(number_psus: int) -> np.ndarray:
' '
jk_coefs = ((number_psus / (number_psus - 1)) * (np.ones((number_psus, number_psus)) - np.identity(number_psus)))
return np.asarray(jk_coefs) | @staticmethod
def _jkn_psus_replicates(number_psus: int) -> np.ndarray:
' '
jk_coefs = ((number_psus / (number_psus - 1)) * (np.ones((number_psus, number_psus)) - np.identity(number_psus)))
return np.asarray(jk_coefs)<|docstring|>Creates the jackknife delete-1 replicate structure<|endoftext|> |
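For number_psus = 3 the delete-1 coefficients above are simply 0 on the diagonal and n/(n-1) everywhere else; a quick check:

import numpy as np

n = 3
jk = (n / (n - 1)) * (np.ones((n, n)) - np.identity(n))
print(jk)    # [[0.  1.5 1.5]
             #  [1.5 0.  1.5]
             #  [1.5 1.5 0. ]]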
34904ee489a9001ab5c72a2fbc27238791334907254ddc95806527f9e922a928 | def replicate(self, samp_weight: Array, psu: Array, stratum: Optional[Array]=None, rep_coefs: Union[(Array, Number)]=False, rep_prefix: Optional[str]=None, psu_varname: str='_psu', str_varname: str='_stratum') -> pd.DataFrame:
'Computes replicate sample weights.\n\n Args:\n samp_weight (Array): array of sample weights. To incorporate the weights adjustment\n in the replicate weights, first replicate the design sample weights then apply\n the adjustments to the replicates.\n psu (Array):\n stratum (Array, optional): array of the strata. Defaults to None.\n rep_coefs (Union[Array, Number], optional): coefficients associated to the replicates.\n Defaults to False.\n rep_prefix (str, optional): prefix to apply to the replicate weights names.\n Defaults to None.\n psu_varname (str, optional): name of the psu variable in the output dataframe.\n Defaults to "_psu".\n str_varname (str, optional): name of the stratum variable in the output dataframe.\n Defaults to "_stratum".\n\n Raises:\n AssertionError: raises an assertion error when stratum is None for a stratified design.\n AssertionError: raises an assertion error when the replication method is not valid.\n\n Returns:\n pd.DataFrame: a dataframe of the replicates sample weights.\n '
samp_weight = formats.numpy_array(samp_weight)
psu = formats.numpy_array(psu)
if (not self.stratification):
stratum = None
else:
stratum = formats.numpy_array(stratum)
self._degree_of_freedom(samp_weight, stratum, psu)
if (self.stratification and (stratum is None)):
raise AssertionError('For a stratified design, stratum must be specified.')
elif (stratum is not None):
stratum_psu = pd.DataFrame({str_varname: stratum, psu_varname: psu})
stratum_psu.sort_values(by=str_varname, inplace=True)
key = [str_varname, psu_varname]
elif (self.method == 'brr'):
(_, str_index) = np.unique(psu, return_index=True)
checks.assert_brr_number_psus(str_index)
psus = psu[np.sort(str_index)]
strata = np.repeat(range(1, ((psus.size // 2) + 1)), 2)
stratum_psu = pd.DataFrame({str_varname: strata, psu_varname: psus})
psu_pd = pd.DataFrame({psu_varname: psu})
stratum_psu = pd.merge(psu_pd, stratum_psu, on=psu_varname, how='left', sort=False)
stratum_psu = stratum_psu[[str_varname, psu_varname]]
key = [str_varname, psu_varname]
else:
stratum_psu = pd.DataFrame({psu_varname: psu})
key = [psu_varname]
psus_ids = stratum_psu.drop_duplicates()
if (self.method == 'jackknife'):
self.number_reps = psus_ids.shape[0]
_rep_data = self._jkn_replicates(psu, stratum)
elif (self.method == 'bootstrap'):
_rep_data = self._boot_replicates(psu, stratum)
elif (self.method == 'brr'):
_rep_data = self._brr_replicates(psu, stratum)
self.rep_coefs = list((((1 / self.number_reps) * pow((1 - self.fay_coef), 2)) * np.ones(self.number_reps)))
else:
raise AssertionError("Replication method not recognized. Possible options are: 'bootstrap', 'brr', and 'jackknife'")
rep_prefix = self._rep_prefix(rep_prefix)
_rep_data = self._reps_to_dataframe(psus_ids, _rep_data, rep_prefix)
samp_weight = pd.DataFrame({'_samp_weight': samp_weight})
samp_weight.reset_index(drop=True, inplace=True)
full_sample = pd.concat([stratum_psu, samp_weight], axis=1)
full_sample = pd.merge(full_sample, _rep_data, on=key, how='left', sort=False)
if (not rep_coefs):
rep_cols = [col for col in full_sample if col.startswith(rep_prefix)]
full_sample[rep_cols] = full_sample[rep_cols].mul(samp_weight.values, axis=0)
return full_sample | Computes replicate sample weights.
Args:
samp_weight (Array): array of sample weights. To incorporate the weights adjustment
in the replicate weights, first replicate the design sample weights then apply
the adjustments to the replicates.
psu (Array): array of the primary sampling unit (psu) identifiers.
stratum (Array, optional): array of the strata. Defaults to None.
rep_coefs (Union[Array, Number], optional): coefficients associated with the replicates.
Defaults to False.
rep_prefix (str, optional): prefix to apply to the replicate weights names.
Defaults to None.
psu_varname (str, optional): name of the psu variable in the output dataframe.
Defaults to "_psu".
str_varname (str, optional): name of the stratum variable in the output dataframe.
Defaults to "_stratum".
Raises:
AssertionError: raises an assertion error when stratum is None for a stratified design.
AssertionError: raises an assertion error when the replication method is not valid.
Returns:
pd.DataFrame: a dataframe of the replicates sample weights. | src/samplics/weighting/replicates.py | replicate | samplics-org/samplics | 14 | python | def replicate(self, samp_weight: Array, psu: Array, stratum: Optional[Array]=None, rep_coefs: Union[(Array, Number)]=False, rep_prefix: Optional[str]=None, psu_varname: str='_psu', str_varname: str='_stratum') -> pd.DataFrame:
'Computes replicate sample weights.\n\n Args:\n samp_weight (Array): array of sample weights. To incorporate the weights adjustment\n in the replicate weights, first replicate the design sample weights then apply\n the adjustments to the replicates.\n psu (Array):\n stratum (Array, optional): array of the strata. Defaults to None.\n rep_coefs (Union[Array, Number], optional): coefficients associated to the replicates.\n Defaults to False.\n rep_prefix (str, optional): prefix to apply to the replicate weights names.\n Defaults to None.\n psu_varname (str, optional): name of the psu variable in the output dataframe.\n Defaults to "_psu".\n str_varname (str, optional): name of the stratum variable in the output dataframe.\n Defaults to "_stratum".\n\n Raises:\n AssertionError: raises an assertion error when stratum is None for a stratified design.\n AssertionError: raises an assertion error when the replication method is not valid.\n\n Returns:\n pd.DataFrame: a dataframe of the replicates sample weights.\n '
samp_weight = formats.numpy_array(samp_weight)
psu = formats.numpy_array(psu)
if (not self.stratification):
stratum = None
else:
stratum = formats.numpy_array(stratum)
self._degree_of_freedom(samp_weight, stratum, psu)
if (self.stratification and (stratum is None)):
raise AssertionError('For a stratified design, stratum must be specified.')
elif (stratum is not None):
stratum_psu = pd.DataFrame({str_varname: stratum, psu_varname: psu})
stratum_psu.sort_values(by=str_varname, inplace=True)
key = [str_varname, psu_varname]
elif (self.method == 'brr'):
(_, str_index) = np.unique(psu, return_index=True)
checks.assert_brr_number_psus(str_index)
psus = psu[np.sort(str_index)]
strata = np.repeat(range(1, ((psus.size // 2) + 1)), 2)
stratum_psu = pd.DataFrame({str_varname: strata, psu_varname: psus})
psu_pd = pd.DataFrame({psu_varname: psu})
stratum_psu = pd.merge(psu_pd, stratum_psu, on=psu_varname, how='left', sort=False)
stratum_psu = stratum_psu[[str_varname, psu_varname]]
key = [str_varname, psu_varname]
else:
stratum_psu = pd.DataFrame({psu_varname: psu})
key = [psu_varname]
psus_ids = stratum_psu.drop_duplicates()
if (self.method == 'jackknife'):
self.number_reps = psus_ids.shape[0]
_rep_data = self._jkn_replicates(psu, stratum)
elif (self.method == 'bootstrap'):
_rep_data = self._boot_replicates(psu, stratum)
elif (self.method == 'brr'):
_rep_data = self._brr_replicates(psu, stratum)
self.rep_coefs = list((((1 / self.number_reps) * pow((1 - self.fay_coef), 2)) * np.ones(self.number_reps)))
else:
raise AssertionError("Replication method not recognized. Possible options are: 'bootstrap', 'brr', and 'jackknife'")
rep_prefix = self._rep_prefix(rep_prefix)
_rep_data = self._reps_to_dataframe(psus_ids, _rep_data, rep_prefix)
samp_weight = pd.DataFrame({'_samp_weight': samp_weight})
samp_weight.reset_index(drop=True, inplace=True)
full_sample = pd.concat([stratum_psu, samp_weight], axis=1)
full_sample = pd.merge(full_sample, _rep_data, on=key, how='left', sort=False)
if (not rep_coefs):
rep_cols = [col for col in full_sample if col.startswith(rep_prefix)]
full_sample[rep_cols] = full_sample[rep_cols].mul(samp_weight.values, axis=0)
return full_sample | def replicate(self, samp_weight: Array, psu: Array, stratum: Optional[Array]=None, rep_coefs: Union[(Array, Number)]=False, rep_prefix: Optional[str]=None, psu_varname: str='_psu', str_varname: str='_stratum') -> pd.DataFrame:
'Computes replicate sample weights.\n\n Args:\n samp_weight (Array): array of sample weights. To incorporate the weights adjustment\n in the replicate weights, first replicate the design sample weights then apply\n the adjustments to the replicates.\n psu (Array):\n stratum (Array, optional): array of the strata. Defaults to None.\n rep_coefs (Union[Array, Number], optional): coefficients associated to the replicates.\n Defaults to False.\n rep_prefix (str, optional): prefix to apply to the replicate weights names.\n Defaults to None.\n psu_varname (str, optional): name of the psu variable in the output dataframe.\n Defaults to "_psu".\n str_varname (str, optional): name of the stratum variable in the output dataframe.\n Defaults to "_stratum".\n\n Raises:\n AssertionError: raises an assertion error when stratum is None for a stratified design.\n AssertionError: raises an assertion error when the replication method is not valid.\n\n Returns:\n pd.DataFrame: a dataframe of the replicates sample weights.\n '
samp_weight = formats.numpy_array(samp_weight)
psu = formats.numpy_array(psu)
if (not self.stratification):
stratum = None
else:
stratum = formats.numpy_array(stratum)
self._degree_of_freedom(samp_weight, stratum, psu)
if (self.stratification and (stratum is None)):
raise AssertionError('For a stratified design, stratum must be specified.')
elif (stratum is not None):
stratum_psu = pd.DataFrame({str_varname: stratum, psu_varname: psu})
stratum_psu.sort_values(by=str_varname, inplace=True)
key = [str_varname, psu_varname]
elif (self.method == 'brr'):
(_, str_index) = np.unique(psu, return_index=True)
checks.assert_brr_number_psus(str_index)
psus = psu[np.sort(str_index)]
strata = np.repeat(range(1, ((psus.size // 2) + 1)), 2)
stratum_psu = pd.DataFrame({str_varname: strata, psu_varname: psus})
psu_pd = pd.DataFrame({psu_varname: psu})
stratum_psu = pd.merge(psu_pd, stratum_psu, on=psu_varname, how='left', sort=False)
stratum_psu = stratum_psu[[str_varname, psu_varname]]
key = [str_varname, psu_varname]
else:
stratum_psu = pd.DataFrame({psu_varname: psu})
key = [psu_varname]
psus_ids = stratum_psu.drop_duplicates()
if (self.method == 'jackknife'):
self.number_reps = psus_ids.shape[0]
_rep_data = self._jkn_replicates(psu, stratum)
elif (self.method == 'bootstrap'):
_rep_data = self._boot_replicates(psu, stratum)
elif (self.method == 'brr'):
_rep_data = self._brr_replicates(psu, stratum)
self.rep_coefs = list((((1 / self.number_reps) * pow((1 - self.fay_coef), 2)) * np.ones(self.number_reps)))
else:
raise AssertionError("Replication method not recognized. Possible options are: 'bootstrap', 'brr', and 'jackknife'")
rep_prefix = self._rep_prefix(rep_prefix)
_rep_data = self._reps_to_dataframe(psus_ids, _rep_data, rep_prefix)
samp_weight = pd.DataFrame({'_samp_weight': samp_weight})
samp_weight.reset_index(drop=True, inplace=True)
full_sample = pd.concat([stratum_psu, samp_weight], axis=1)
full_sample = pd.merge(full_sample, _rep_data, on=key, how='left', sort=False)
if (not rep_coefs):
rep_cols = [col for col in full_sample if col.startswith(rep_prefix)]
full_sample[rep_cols] = full_sample[rep_cols].mul(samp_weight.values, axis=0)
return full_sample<|docstring|>Computes replicate sample weights.
Args:
samp_weight (Array): array of sample weights. To incorporate the weights adjustment
in the replicate weights, first replicate the design sample weights then apply
the adjustments to the replicates.
psu (Array): array of the primary sampling unit (psu) identifiers.
stratum (Array, optional): array of the strata. Defaults to None.
rep_coefs (Union[Array, Number], optional): coefficients associated with the replicates.
Defaults to False.
rep_prefix (str, optional): prefix to apply to the replicate weights names.
Defaults to None.
psu_varname (str, optional): name of the psu variable in the output dataframe.
Defaults to "_psu".
str_varname (str, optional): name of the stratum variable in the output dataframe.
Defaults to "_stratum".
Raises:
AssertionError: raises an assertion error when stratum is None for a stratified design.
AssertionError: raises an assertion error when the replication method is not valid.
Returns:
pd.DataFrame: a dataframe of the replicates sample weights.<|endoftext|> |
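A hedged usage sketch of the method above. The wrapper class name ReplicateWeight and its constructor arguments are assumptions inferred from the attributes used in the body (self.method, self.stratification), not a verified samplics API, so those lines are left commented:

import numpy as np

samp_weight = np.repeat(10.0, 8)                   # design weights
stratum = np.repeat([1, 2, 3, 4], 2)               # two PSUs per stratum, as BRR requires
psu = np.arange(8)

# from samplics.weighting import ReplicateWeight               # assumed import path
# brr = ReplicateWeight(method="brr", stratification=True)     # assumed constructor
# rep_wgt = brr.replicate(samp_weight, psu, stratum)           # -> _stratum, _psu, _samp_weight plus one column per replicate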
2ba8ce127003c1c2c4c454c4f55309fa003b78fcd4afd9e4e6e41f630cbf2e0d | def filter_anchor(points, rotate, properties, error):
'This function will add extreme weighting to the boundary points'
max_weight = 10000
points[0]['weight'] = max_weight
points[(- 1)]['weight'] = max_weight
points[0]['residual weight'] = 1
points[(- 1)]['residual weight'] = 1
return points | This function will add extreme weighting to the boundary points | mesh_viewport_vertex_alignment.py | filter_anchor | hdunderscore/mesh_viewport_vertex_align | 2 | python | def filter_anchor(points, rotate, properties, error):
max_weight = 10000
points[0]['weight'] = max_weight
points[(- 1)]['weight'] = max_weight
points[0]['residual weight'] = 1
points[(- 1)]['residual weight'] = 1
return points | def filter_anchor(points, rotate, properties, error):
max_weight = 10000
points[0]['weight'] = max_weight
points[(- 1)]['weight'] = max_weight
points[0]['residual weight'] = 1
points[(- 1)]['residual weight'] = 1
return points<|docstring|>This function will add extreme weighting to the boundary points<|endoftext|> |
03092600cdcf14ac7f20e3168dff036991a96e708bde96f0ca079fba59f944af | def fit1(properties, points):
'This function applies the fitting function several times, finding the axis rotation that causes the smallest error and returns the points.\n This expects a 1D fit where x is the domain, y is the range (and therefore y is being affected in fit).'
fit_function = properties['function']
iterations = properties['iterations']
max_error = 9999999999999999999999999
error = []
smallest_error = max_error
min_error = 0
min_theta = 0
theta = 0
theta_step_initial = 45
theta_step = theta_step_initial
theta_forward = True
for i in range(iterations):
anchor = properties['anchor']
points = filter_reset_weights(points)
try:
error.append({'failed': True, 'error sum': max_error, 'stdev': 0, 'mean': max_error, 'residuals': [max_error], 'devs': [0]})
while True:
error[i] = {'failed': True, 'error sum': max_error, 'stdev': 0, 'mean': max_error, 'residuals': [max_error], 'devs': [0]}
points = fit_function(points, theta, properties)
error[i] = {'failed': False, 'error sum': 0, 'stdev': 0, 'mean': 0, 'residuals': [], 'devs': []}
SrN = 0
for p in points:
error[i]['residuals'].append((math.pow(math.sqrt((((math.pow(p['delta'].x, 2) + math.pow(p['delta'].y, 2)) + math.pow(p['delta'].z, 2)) + math.pow(p['delta'].w, 2))), 2) * p['residual weight']))
error[i]['error sum'] += error[i]['residuals'][(- 1)]
SrN += p['residual weight']
N = SrN
error[i]['mean'] = (error[i]['error sum'] / N)
for e in error[i]['residuals']:
error[i]['devs'].append(math.pow((e - error[i]['mean']), 2))
error[i]['stdev'] += error[i]['devs'][(- 1)]
error[i]['stdev'] = math.sqrt((error[i]['stdev'] / N))
if (not anchor):
break
if anchor:
points = filter_anchor(points, theta, properties, error)
anchor = False
if (error[i]['error sum'] < smallest_error):
smallest_error = error[i]['error sum']
min_error = i
min_theta = theta
except ValueError as e:
print(e)
except ZeroDivisionError as e:
print(e)
if (i > (360 / theta_step_initial)):
if theta_forward:
if (error[i]['error sum'] == smallest_error):
theta += theta_step
elif (error[i]['error sum'] > smallest_error):
theta_step /= 2.0
theta -= theta_step
theta_forward = False
elif (error[i]['error sum'] == smallest_error):
theta -= theta_step
elif (error[i]['error sum'] > smallest_error):
theta_step /= 2.0
theta += theta_step
theta_forward = True
elif (i == (360 / theta_step_initial)):
theta = min_theta
theta_step /= 2.0
else:
theta += theta_step
if (theta_step <= 1e-09):
break
anchor = properties['anchor']
points = filter_reset_weights(points)
points = fit_function(points, min_theta, properties)
if anchor:
points = filter_anchor(points, min_theta, properties, error)
anchor = False
points = fit_function(points, min_theta, properties)
return points | This function applies the fitting function several times, finding the axis rotation that causes the smallest error and returns the points.
This expects a 1D fit where x is the domain, y is the range (and therefore y is being affected in fit). | mesh_viewport_vertex_alignment.py | fit1 | hdunderscore/mesh_viewport_vertex_align | 2 | python | def fit1(properties, points):
'This function applies the fitting function several times, finding the axis rotation that causes the smallest error and returns the points.\n This expects a 1D fit where x is the domain, y is the range (and therefore y is being affected in fit).'
fit_function = properties['function']
iterations = properties['iterations']
max_error = 9999999999999999999999999
error = []
smallest_error = max_error
min_error = 0
min_theta = 0
theta = 0
theta_step_initial = 45
theta_step = theta_step_initial
theta_forward = True
for i in range(iterations):
anchor = properties['anchor']
points = filter_reset_weights(points)
try:
error.append({'failed': True, 'error sum': max_error, 'stdev': 0, 'mean': max_error, 'residuals': [max_error], 'devs': [0]})
while True:
error[i] = {'failed': True, 'error sum': max_error, 'stdev': 0, 'mean': max_error, 'residuals': [max_error], 'devs': [0]}
points = fit_function(points, theta, properties)
error[i] = {'failed': False, 'error sum': 0, 'stdev': 0, 'mean': 0, 'residuals': [], 'devs': []}
SrN = 0
for p in points:
error[i]['residuals'].append((math.pow(math.sqrt((((math.pow(p['delta'].x, 2) + math.pow(p['delta'].y, 2)) + math.pow(p['delta'].z, 2)) + math.pow(p['delta'].w, 2))), 2) * p['residual weight']))
error[i]['error sum'] += error[i]['residuals'][(- 1)]
SrN += p['residual weight']
N = SrN
error[i]['mean'] = (error[i]['error sum'] / N)
for e in error[i]['residuals']:
error[i]['devs'].append(math.pow((e - error[i]['mean']), 2))
error[i]['stdev'] += error[i]['devs'][(- 1)]
error[i]['stdev'] = math.sqrt((error[i]['stdev'] / N))
if (not anchor):
break
if anchor:
points = filter_anchor(points, theta, properties, error)
anchor = False
if (error[i]['error sum'] < smallest_error):
smallest_error = error[i]['error sum']
min_error = i
min_theta = theta
except ValueError as e:
print(e)
except ZeroDivisionError as e:
print(e)
if (i > (360 / theta_step_initial)):
if theta_forward:
if (error[i]['error sum'] == smallest_error):
theta += theta_step
elif (error[i]['error sum'] > smallest_error):
theta_step /= 2.0
theta -= theta_step
theta_forward = False
elif (error[i]['error sum'] == smallest_error):
theta -= theta_step
elif (error[i]['error sum'] > smallest_error):
theta_step /= 2.0
theta += theta_step
theta_forward = True
elif (i == (360 / theta_step_initial)):
theta = min_theta
theta_step /= 2.0
else:
theta += theta_step
if (theta_step <= 1e-09):
break
anchor = properties['anchor']
points = filter_reset_weights(points)
points = fit_function(points, min_theta, properties)
if anchor:
points = filter_anchor(points, min_theta, properties, error)
anchor = False
points = fit_function(points, min_theta, properties)
return points | def fit1(properties, points):
'This function applies the fitting function several times, finding the axis rotation that causes the smallest error and returns the points.\n This expects a 1D fit where x is the domain, y is the range (and therefore y is being affected in fit).'
fit_function = properties['function']
iterations = properties['iterations']
max_error = 9999999999999999999999999
error = []
smallest_error = max_error
min_error = 0
min_theta = 0
theta = 0
theta_step_initial = 45
theta_step = theta_step_initial
theta_forward = True
for i in range(iterations):
anchor = properties['anchor']
points = filter_reset_weights(points)
try:
error.append({'failed': True, 'error sum': max_error, 'stdev': 0, 'mean': max_error, 'residuals': [max_error], 'devs': [0]})
while True:
error[i] = {'failed': True, 'error sum': max_error, 'stdev': 0, 'mean': max_error, 'residuals': [max_error], 'devs': [0]}
points = fit_function(points, theta, properties)
error[i] = {'failed': False, 'error sum': 0, 'stdev': 0, 'mean': 0, 'residuals': [], 'devs': []}
SrN = 0
for p in points:
error[i]['residuals'].append((math.pow(math.sqrt((((math.pow(p['delta'].x, 2) + math.pow(p['delta'].y, 2)) + math.pow(p['delta'].z, 2)) + math.pow(p['delta'].w, 2))), 2) * p['residual weight']))
error[i]['error sum'] += error[i]['residuals'][(- 1)]
SrN += p['residual weight']
N = SrN
error[i]['mean'] = (error[i]['error sum'] / N)
for e in error[i]['residuals']:
error[i]['devs'].append(math.pow((e - error[i]['mean']), 2))
error[i]['stdev'] += error[i]['devs'][(- 1)]
error[i]['stdev'] = math.sqrt((error[i]['stdev'] / N))
if (not anchor):
break
if anchor:
points = filter_anchor(points, theta, properties, error)
anchor = False
if (error[i]['error sum'] < smallest_error):
smallest_error = error[i]['error sum']
min_error = i
min_theta = theta
except ValueError as e:
print(e)
except ZeroDivisionError as e:
print(e)
if (i > (360 / theta_step_initial)):
if theta_forward:
if (error[i]['error sum'] == smallest_error):
theta += theta_step
elif (error[i]['error sum'] > smallest_error):
theta_step /= 2.0
theta -= theta_step
theta_forward = False
elif (error[i]['error sum'] == smallest_error):
theta -= theta_step
elif (error[i]['error sum'] > smallest_error):
theta_step /= 2.0
theta += theta_step
theta_forward = True
elif (i == (360 / theta_step_initial)):
theta = min_theta
theta_step /= 2.0
else:
theta += theta_step
if (theta_step <= 1e-09):
break
anchor = properties['anchor']
points = filter_reset_weights(points)
points = fit_function(points, min_theta, properties)
if anchor:
points = filter_anchor(points, min_theta, properties, error)
anchor = False
points = fit_function(points, min_theta, properties)
return points<|docstring|>This function applies the fitting function several times, finding the axis rotation that causes the smallest error and returns the points.
This expects a 1D fit where x is the domain, y is the range (and therefore y is being affected in fit).<|endoftext|> |
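The angle search in fit1 above is a coarse-to-fine scan: step the rotation by 45 degrees around the circle, then repeatedly halve the step around the best angle found. A toy version of the same idea on a one-dimensional objective:

import math

def objective(theta):                                # stand-in for the fitting error at a rotation
    return (math.sin(math.radians(theta)) - 0.3) ** 2

best_theta = min(range(0, 360, 45), key=objective)   # coarse scan
best, step = objective(best_theta), 45.0
while step > 1e-6:                                   # refine by halving the step
    step /= 2.0
    for cand in (best_theta - step, best_theta + step):
        if objective(cand) < best:
            best, best_theta = objective(cand), cand
print(round(best_theta, 3))                          # close to math.degrees(math.asin(0.3)) ~ 17.458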
bc2e4a8567cd881c5e39401cd7e954d139c2305c4d3e4d834665a751be7796e6 | def error_residual1(points, r, rr, properties, line_func, line_parameters):
'This function is used in the fitting functions to determine the deltas '
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
yy = line_func(x, line_parameters)
p['delta'] = (mathutils.Vector((0, (y - yy), 0, 0)) * rr)
return points | This function is used in the fitting functions to determine the deltas | mesh_viewport_vertex_alignment.py | error_residual1 | hdunderscore/mesh_viewport_vertex_align | 2 | python | def error_residual1(points, r, rr, properties, line_func, line_parameters):
' '
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
yy = line_func(x, line_parameters)
p['delta'] = (mathutils.Vector((0, (y - yy), 0, 0)) * rr)
return points | def error_residual1(points, r, rr, properties, line_func, line_parameters):
' '
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
yy = line_func(x, line_parameters)
p['delta'] = (mathutils.Vector((0, (y - yy), 0, 0)) * rr)
return points<|docstring|>This function is used in the fitting functions to determine the deltas<|endoftext|> |
01d93957589bca76e807b58408f07308288a5ec1c0341c580038c1b2f7e6dbd5 | def sort_index1(points, r):
'This function sorts points based on their domain (assumed as x axis when rotated) '
points = sorted(points, key=(lambda xx: (xx['point'] * r).x))
return points | This function sorts points based on their domain (assumed as x axis when rotated) | mesh_viewport_vertex_alignment.py | sort_index1 | hdunderscore/mesh_viewport_vertex_align | 2 | python | def sort_index1(points, r):
' '
points = sorted(points, key=(lambda xx: (xx['point'] * r).x))
return points | def sort_index1(points, r):
' '
points = sorted(points, key=(lambda xx: (xx['point'] * r).x))
return points<|docstring|>This function sorts points based on their domain (assumed as x axis when rotated)<|endoftext|> |
1ae0d8fdf631491fa17d610e476e96d372295bfb18cd6d081ee758bb99126f5b | def fit_linear1(points, rotate, properties=None):
'This function attempts to fit a given set of points to a linear line: y = a1*x + a0'
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sxy += ((x * y) * p['weight'])
Sx += (x * p['weight'])
Sy += (y * p['weight'])
Sx2 += (math.pow(x, 2) * p['weight'])
Sw += p['weight']
N = Sw
a1 = (((N * Sxy) - (Sx * Sy)) / ((N * Sx2) - math.pow(Sx, 2)))
a0 = (((1 / N) * Sy) - (((a1 * 1) / N) * Sx))
def line_func(x, a):
return (a[0] + (a[1] * x))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1]) | This function attempts to fit a given set of points to a linear line: y = a1*x + a0 | mesh_viewport_vertex_alignment.py | fit_linear1 | hdunderscore/mesh_viewport_vertex_align | 2 | python | def fit_linear1(points, rotate, properties=None):
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sxy += ((x * y) * p['weight'])
Sx += (x * p['weight'])
Sy += (y * p['weight'])
Sx2 += (math.pow(x, 2) * p['weight'])
Sw += p['weight']
N = Sw
a1 = (((N * Sxy) - (Sx * Sy)) / ((N * Sx2) - math.pow(Sx, 2)))
a0 = (((1 / N) * Sy) - (((a1 * 1) / N) * Sx))
def line_func(x, a):
return (a[0] + (a[1] * x))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1]) | def fit_linear1(points, rotate, properties=None):
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sxy += ((x * y) * p['weight'])
Sx += (x * p['weight'])
Sy += (y * p['weight'])
Sx2 += (math.pow(x, 2) * p['weight'])
Sw += p['weight']
N = Sw
a1 = (((N * Sxy) - (Sx * Sy)) / ((N * Sx2) - math.pow(Sx, 2)))
a0 = (((1 / N) * Sy) - (((a1 * 1) / N) * Sx))
def line_func(x, a):
return (a[0] + (a[1] * x))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1])<|docstring|>This function attempts to fit a given set of points to a linear line: y = a1*x + a0<|endoftext|> |
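The closed-form a0, a1 above solve the weighted normal equations; the same numbers come out of numpy by scaling each row by sqrt(weight), which needs no Blender and makes an easy cross-check (data below is made up):

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.1, 0.9, 2.1, 2.9])
w = np.array([1.0, 1.0, 4.0, 1.0])             # heavier weight pulls the fit toward that point

X = np.column_stack([np.ones_like(x), x])       # columns for a0 and a1
sw = np.sqrt(w)
(a0, a1), *_ = np.linalg.lstsq(X * sw[:, None], y * sw, rcond=None)
print(round(a0, 4), round(a1, 4))               # minimizes sum(w * (y - a0 - a1*x)**2)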
801b7cbde1b10e65abeb4bb89eb87ea9ebf05f8ad0cc7f1410c39b22572bd3ce | def fit_quadratic1(points, rotate, properties=None):
'This function attempts to fit a given set of points to a quadratic polynomial line: y = a2*x^2 + a1*x + a0'
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sx2y = 0
Sx3 = 0
Sx4 = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sxy = (Sxy + ((x * y) * p['weight']))
Sx = (Sx + (x * p['weight']))
Sy = (Sy + (y * p['weight']))
Sx2 = (Sx2 + (math.pow(x, 2) * p['weight']))
Sx2y = (Sx2y + ((math.pow(x, 2) * y) * p['weight']))
Sx3 = (Sx3 + (math.pow(x, 3) * p['weight']))
Sx4 = (Sx4 + (math.pow(x, 4) * p['weight']))
Sw += p['weight']
N = Sw
A = [[N, Sx, Sx2, Sy], [Sx, Sx2, Sx3, Sxy], [Sx2, Sx3, Sx4, Sx2y]]
xM = like_a_gauss(A)
a0 = xM[0][3]
a1 = xM[1][3]
a2 = xM[2][3]
def line_func(x, a):
return ((a[0] + (a[1] * x)) + (a[2] * math.pow(x, 2)))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1, a2]) | This function attempts to fit a given set of points to a quadratic polynomial line: y = a2*x^2 + a1*x + a0 | mesh_viewport_vertex_alignment.py | fit_quadratic1 | hdunderscore/mesh_viewport_vertex_align | 2 | python | def fit_quadratic1(points, rotate, properties=None):
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sx2y = 0
Sx3 = 0
Sx4 = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sxy = (Sxy + ((x * y) * p['weight']))
Sx = (Sx + (x * p['weight']))
Sy = (Sy + (y * p['weight']))
Sx2 = (Sx2 + (math.pow(x, 2) * p['weight']))
Sx2y = (Sx2y + ((math.pow(x, 2) * y) * p['weight']))
Sx3 = (Sx3 + (math.pow(x, 3) * p['weight']))
Sx4 = (Sx4 + (math.pow(x, 4) * p['weight']))
Sw += p['weight']
N = Sw
A = [[N, Sx, Sx2, Sy], [Sx, Sx2, Sx3, Sxy], [Sx2, Sx3, Sx4, Sx2y]]
xM = like_a_gauss(A)
a0 = xM[0][3]
a1 = xM[1][3]
a2 = xM[2][3]
def line_func(x, a):
return ((a[0] + (a[1] * x)) + (a[2] * math.pow(x, 2)))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1, a2]) | def fit_quadratic1(points, rotate, properties=None):
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sx2y = 0
Sx3 = 0
Sx4 = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sxy = (Sxy + ((x * y) * p['weight']))
Sx = (Sx + (x * p['weight']))
Sy = (Sy + (y * p['weight']))
Sx2 = (Sx2 + (math.pow(x, 2) * p['weight']))
Sx2y = (Sx2y + ((math.pow(x, 2) * y) * p['weight']))
Sx3 = (Sx3 + (math.pow(x, 3) * p['weight']))
Sx4 = (Sx4 + (math.pow(x, 4) * p['weight']))
Sw += p['weight']
N = Sw
A = [[N, Sx, Sx2, Sy], [Sx, Sx2, Sx3, Sxy], [Sx2, Sx3, Sx4, Sx2y]]
xM = like_a_gauss(A)
a0 = xM[0][3]
a1 = xM[1][3]
a2 = xM[2][3]
def line_func(x, a):
return ((a[0] + (a[1] * x)) + (a[2] * math.pow(x, 2)))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1, a2])<|docstring|>This function attempts to fit a given set of points to a quadratic polynomial line: y = a2*x^2 + a1*x + a0<|endoftext|> |
dd90d1d7756f8a46f12ee0a681f29b5d35f006c78a1b55b6ad115e833f1ff83f | def fit_cubic1(points, rotate, properties=None):
'This function attempts to fit a given set of points to a cubic polynomial line: y = a3*x^3 + a2*x^2 + a1*x + a0'
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sx2y = 0
Sx3y = 0
Sx3 = 0
Sx4 = 0
Sx5 = 0
Sx6 = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sxy = (Sxy + ((x * y) * p['weight']))
Sx = (Sx + (x * p['weight']))
Sy = (Sy + (y * p['weight']))
Sx2 = (Sx2 + (math.pow(x, 2) * p['weight']))
Sx2y = (Sx2y + ((math.pow(x, 2) * y) * p['weight']))
Sx3y = (Sx3y + ((math.pow(x, 3) * y) * p['weight']))
Sx3 = (Sx3 + (math.pow(x, 3) * p['weight']))
Sx4 = (Sx4 + (math.pow(x, 4) * p['weight']))
Sx5 = (Sx5 + (math.pow(x, 5) * p['weight']))
Sx6 = (Sx6 + (math.pow(x, 6) * p['weight']))
Sw += p['weight']
N = Sw
A = [[N, Sx, Sx2, Sx3, Sy], [Sx, Sx2, Sx3, Sx4, Sxy], [Sx2, Sx3, Sx4, Sx5, Sx2y], [Sx3, Sx4, Sx5, Sx6, Sx3y]]
xM = like_a_gauss(A)
a0 = xM[0][4]
a1 = xM[1][4]
a2 = xM[2][4]
a3 = xM[3][4]
def line_func(x, a):
return (((a[0] + (a[1] * x)) + (a[2] * math.pow(x, 2))) + (a[3] * math.pow(x, 3)))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1, a2, a3]) | This function attempts to fit a given set of points to a cubic polynomial line: y = a3*x^3 + a2*x^2 + a1*x + a0 | mesh_viewport_vertex_alignment.py | fit_cubic1 | hdunderscore/mesh_viewport_vertex_align | 2 | python | def fit_cubic1(points, rotate, properties=None):
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sx2y = 0
Sx3y = 0
Sx3 = 0
Sx4 = 0
Sx5 = 0
Sx6 = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sxy = (Sxy + ((x * y) * p['weight']))
Sx = (Sx + (x * p['weight']))
Sy = (Sy + (y * p['weight']))
Sx2 = (Sx2 + (math.pow(x, 2) * p['weight']))
Sx2y = (Sx2y + ((math.pow(x, 2) * y) * p['weight']))
Sx3y = (Sx3y + ((math.pow(x, 3) * y) * p['weight']))
Sx3 = (Sx3 + (math.pow(x, 3) * p['weight']))
Sx4 = (Sx4 + (math.pow(x, 4) * p['weight']))
Sx5 = (Sx5 + (math.pow(x, 5) * p['weight']))
Sx6 = (Sx6 + (math.pow(x, 6) * p['weight']))
Sw += p['weight']
N = Sw
A = [[N, Sx, Sx2, Sx3, Sy], [Sx, Sx2, Sx3, Sx4, Sxy], [Sx2, Sx3, Sx4, Sx5, Sx2y], [Sx3, Sx4, Sx5, Sx6, Sx3y]]
xM = like_a_gauss(A)
a0 = xM[0][4]
a1 = xM[1][4]
a2 = xM[2][4]
a3 = xM[3][4]
def line_func(x, a):
return (((a[0] + (a[1] * x)) + (a[2] * math.pow(x, 2))) + (a[3] * math.pow(x, 3)))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1, a2, a3]) | def fit_cubic1(points, rotate, properties=None):
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sx2y = 0
Sx3y = 0
Sx3 = 0
Sx4 = 0
Sx5 = 0
Sx6 = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sxy = (Sxy + ((x * y) * p['weight']))
Sx = (Sx + (x * p['weight']))
Sy = (Sy + (y * p['weight']))
Sx2 = (Sx2 + (math.pow(x, 2) * p['weight']))
Sx2y = (Sx2y + ((math.pow(x, 2) * y) * p['weight']))
Sx3y = (Sx3y + ((math.pow(x, 3) * y) * p['weight']))
Sx3 = (Sx3 + (math.pow(x, 3) * p['weight']))
Sx4 = (Sx4 + (math.pow(x, 4) * p['weight']))
Sx5 = (Sx5 + (math.pow(x, 5) * p['weight']))
Sx6 = (Sx6 + (math.pow(x, 6) * p['weight']))
Sw += p['weight']
N = Sw
A = [[N, Sx, Sx2, Sx3, Sy], [Sx, Sx2, Sx3, Sx4, Sxy], [Sx2, Sx3, Sx4, Sx5, Sx2y], [Sx3, Sx4, Sx5, Sx6, Sx3y]]
xM = like_a_gauss(A)
a0 = xM[0][4]
a1 = xM[1][4]
a2 = xM[2][4]
a3 = xM[3][4]
def line_func(x, a):
return (((a[0] + (a[1] * x)) + (a[2] * math.pow(x, 2))) + (a[3] * math.pow(x, 3)))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1, a2, a3])<|docstring|>This function attempts to fit a given set of points to a cubic polynomial line: y = a3*x^3 + a2*x^2 + a1*x + a0<|endoftext|> |
66dabc3919176fd60e6d60af3fdc14dbf65749efa7c0ee82c63ae4242fef5e7b | def fit_cosine1(points, rotate, properties):
'This function attempts to fit a given set of points to a cosine curve: y = a0 + a1*cos(w*x) + a2*sin(w*x) '
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
omega = properties['cosine_omega']
Sycos = 0
Sysin = 0
Scos = 0
Scos2 = 0
Ssin = 0
Ssin2 = 0
Sy = 0
Scossin = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sy = (Sy + (y * p['weight']))
Sycos = (Sycos + ((y * math.cos((omega * x))) * p['weight']))
Sysin = (Sysin + ((y * math.sin((omega * x))) * p['weight']))
Scos = (Scos + (math.cos((omega * x)) * p['weight']))
Ssin = (Ssin + (math.sin((omega * x)) * p['weight']))
Scos2 = (Scos2 + (math.pow(math.cos((omega * x)), 2) * p['weight']))
Ssin2 = (Ssin2 + (math.pow(math.sin((omega * x)), 2) * p['weight']))
Scossin = (Scossin + ((math.cos((omega * x)) * math.sin((omega * x))) * p['weight']))
Sw += p['weight']
N = Sw
A = [[N, Scos, Ssin, Sy], [Scos, Scos2, Scossin, Sycos], [Ssin, Scossin, Ssin2, Sysin]]
xM = like_a_gauss(A)
a0 = xM[0][3]
a1 = xM[1][3]
a2 = xM[2][3]
def line_func(x, a):
return ((a[0] + (a[1] * math.cos((a[3] * x)))) + (a[2] * math.sin((a[3] * x))))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1, a2, omega]) | This function attempts to fit a given set of points to a cosine curve: y = a0 + a1*cos(w*x) + a2*sin(w*x) | mesh_viewport_vertex_alignment.py | fit_cosine1 | hdunderscore/mesh_viewport_vertex_align | 2 | python | def fit_cosine1(points, rotate, properties):
' '
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
omega = properties['cosine_omega']
Sycos = 0
Sysin = 0
Scos = 0
Scos2 = 0
Ssin = 0
Ssin2 = 0
Sy = 0
Scossin = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sy = (Sy + (y * p['weight']))
Sycos = (Sycos + ((y * math.cos((omega * x))) * p['weight']))
Sysin = (Sysin + ((y * math.sin((omega * x))) * p['weight']))
Scos = (Scos + (math.cos((omega * x)) * p['weight']))
Ssin = (Ssin + (math.sin((omega * x)) * p['weight']))
Scos2 = (Scos2 + (math.pow(math.cos((omega * x)), 2) * p['weight']))
Ssin2 = (Ssin2 + (math.pow(math.sin((omega * x)), 2) * p['weight']))
Scossin = (Scossin + ((math.cos((omega * x)) * math.sin((omega * x))) * p['weight']))
Sw += p['weight']
N = Sw
A = [[N, Scos, Ssin, Sy], [Scos, Scos2, Scossin, Sycos], [Ssin, Scossin, Ssin2, Sysin]]
xM = like_a_gauss(A)
a0 = xM[0][3]
a1 = xM[1][3]
a2 = xM[2][3]
def line_func(x, a):
return ((a[0] + (a[1] * math.cos((a[3] * x)))) + (a[2] * math.sin((a[3] * x))))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1, a2, omega]) | def fit_cosine1(points, rotate, properties):
' '
r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
rr = mathutils.Matrix.Rotation(math.radians((- rotate)), 4, 'Z')
omega = properties['cosine_omega']
Sycos = 0
Sysin = 0
Scos = 0
Scos2 = 0
Ssin = 0
Ssin2 = 0
Sy = 0
Scossin = 0
Sw = 0
for p in points:
pr = (p['point'] * r)
x = pr.x
y = pr.y
Sy = (Sy + (y * p['weight']))
Sycos = (Sycos + ((y * math.cos((omega * x))) * p['weight']))
Sysin = (Sysin + ((y * math.sin((omega * x))) * p['weight']))
Scos = (Scos + (math.cos((omega * x)) * p['weight']))
Ssin = (Ssin + (math.sin((omega * x)) * p['weight']))
Scos2 = (Scos2 + (math.pow(math.cos((omega * x)), 2) * p['weight']))
Ssin2 = (Ssin2 + (math.pow(math.sin((omega * x)), 2) * p['weight']))
Scossin = (Scossin + ((math.cos((omega * x)) * math.sin((omega * x))) * p['weight']))
Sw += p['weight']
N = Sw
A = [[N, Scos, Ssin, Sy], [Scos, Scos2, Scossin, Sycos], [Ssin, Scossin, Ssin2, Sysin]]
xM = like_a_gauss(A)
a0 = xM[0][3]
a1 = xM[1][3]
a2 = xM[2][3]
def line_func(x, a):
return ((a[0] + (a[1] * math.cos((a[3] * x)))) + (a[2] * math.sin((a[3] * x))))
points = sort_index1(points, r)
return error_residual1(points, r, rr, properties, line_func, [a0, a1, a2, omega])<|docstring|>This function attempts to fit a given set of points to a cosine curve: y = a0 + a1*cos(w*x) + a2*sin(w*x)<|endoftext|>
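Note that in the code the a2 coefficient multiplies sin(omega*x), so the fitted model is y = a0 + a1*cos(w*x) + a2*sin(w*x). A compact NumPy cross-check of the same weighted fit, again only an illustration that assumes plain float inputs:

import numpy as np

def fit_cosine_lstsq(x, y, w, omega):
    x = np.asarray(x, float)
    sw = np.sqrt(np.asarray(w, float))
    A = np.column_stack([np.ones_like(x), np.cos(omega * x), np.sin(omega * x)]) * sw[:, None]
    sol, *_ = np.linalg.lstsq(A, np.asarray(y, float) * sw, rcond=None)
    return sol  # [a0, a1, a2]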
eb6996e0da4033b131cc148d07c44aba93e7acf09c6515cffc51863e35e37d79 | def get_vertices(mesh):
'Returns the active list of selected vertices.'
verts = []
for v in mesh.verts:
if v.select:
verts.append(v)
return verts | Returns the active list of selected vertices. | mesh_viewport_vertex_alignment.py | get_vertices | hdunderscore/mesh_viewport_vertex_align | 2 | python | def get_vertices(mesh):
verts = []
for v in mesh.verts:
if v.select:
verts.append(v)
return verts | def get_vertices(mesh):
verts = []
for v in mesh.verts:
if v.select:
verts.append(v)
return verts<|docstring|>Returns the active list of selected vertices.<|endoftext|> |
2550d3763631dc500823d267d6c2c1166ab3ca189f453208555f27f5820751a1 | def get_axis(type):
'Gets the axis we will be performing the rotation on. Returns a projection matrix'
if (type == 'perspective'):
region = bpy.context.region
rv3d = bpy.context.region_data
else:
return None
return {'region': region, 'rv3d': rv3d} | Gets the axis we will be performing the rotation on. Returns a projection matrix | mesh_viewport_vertex_alignment.py | get_axis | hdunderscore/mesh_viewport_vertex_align | 2 | python | def get_axis(type):
if (type == 'perspective'):
region = bpy.context.region
rv3d = bpy.context.region_data
else:
return None
return {'region': region, 'rv3d': rv3d} | def get_axis(type):
if (type == 'perspective'):
region = bpy.context.region
rv3d = bpy.context.region_data
else:
return None
return {'region': region, 'rv3d': rv3d}<|docstring|>Gets the axis we will be performing the rotation on. Returns a projection matrix<|endoftext|> |
99bf769309f44f801407c7bff8912bc3a56f356eca70887a964a8229806cd54c | def project(vertices, axis):
'Project the vertices onto a plane of the given axis.'
points = []
for v in vertices:
vec = mathutils.Vector(v.co)
p = bpy_extras.view3d_utils.location_3d_to_region_2d(axis['region'], axis['rv3d'], vec).to_4d()
depth = vec
points.append({'id': v, 'point': p, 'delta': None, "v'": None, 'depth': depth, 'weight': 1.0, 'residual weight': 1.0, 'index': None})
return points | Project the vertices onto a plane of the given axis. | mesh_viewport_vertex_alignment.py | project | hdunderscore/mesh_viewport_vertex_align | 2 | python | def project(vertices, axis):
points = []
for v in vertices:
vec = mathutils.Vector(v.co)
p = bpy_extras.view3d_utils.location_3d_to_region_2d(axis['region'], axis['rv3d'], vec).to_4d()
depth = vec
points.append({'id': v, 'point': p, 'delta': None, "v'": None, 'depth': depth, 'weight': 1.0, 'residual weight': 1.0, 'index': None})
return points | def project(vertices, axis):
points = []
for v in vertices:
vec = mathutils.Vector(v.co)
p = bpy_extras.view3d_utils.location_3d_to_region_2d(axis['region'], axis['rv3d'], vec).to_4d()
depth = vec
points.append({'id': v, 'point': p, 'delta': None, "v'": None, 'depth': depth, 'weight': 1.0, 'residual weight': 1.0, 'index': None})
return points<|docstring|>Project the vertices onto a plane of the given axis.<|endoftext|> |
6c4c3466dae071fc31405638d886aba67c5b4853be6e7d006321c29d7689b467 | def unproject(points, axis, properties):
'Unproject points on a plane to vertices in 3d space.'
for p in points:
new_p = (p['point'] - (p['delta'] * properties['influence']))
old_v = p['id'].co
new_v = bpy_extras.view3d_utils.region_2d_to_location_3d(axis['region'], axis['rv3d'], new_p.to_2d(), p['depth'])
p["v'"] = new_v
return points | Unproject points on a plane to vertices in 3d space. | mesh_viewport_vertex_alignment.py | unproject | hdunderscore/mesh_viewport_vertex_align | 2 | python | def unproject(points, axis, properties):
for p in points:
new_p = (p['point'] - (p['delta'] * properties['influence']))
old_v = p['id'].co
new_v = bpy_extras.view3d_utils.region_2d_to_location_3d(axis['region'], axis['rv3d'], new_p.to_2d(), p['depth'])
p["v'"] = new_v
return points | def unproject(points, axis, properties):
for p in points:
new_p = (p['point'] - (p['delta'] * properties['influence']))
old_v = p['id'].co
new_v = bpy_extras.view3d_utils.region_2d_to_location_3d(axis['region'], axis['rv3d'], new_p.to_2d(), p['depth'])
p["v'"] = new_v
return points<|docstring|>Unproject points on a plane to vertices in 3d space.<|endoftext|> |
4aa2ffe5acddabc24a79933b44c12d0f0181dc26585db685f0993f39f61c7448 | def update_vertices(mesh, points):
'Update the active set of selected vertices with their fitted positions.'
for p in points:
p['id'].co = p["v'"].to_3d().to_tuple()
bmesh.update_edit_mesh(mesh) | Update the active set of selected vertices with their fitted positions. | mesh_viewport_vertex_alignment.py | update_vertices | hdunderscore/mesh_viewport_vertex_align | 2 | python | def update_vertices(mesh, points):
for p in points:
p['id'].co = p["v'"].to_3d().to_tuple()
bmesh.update_edit_mesh(mesh) | def update_vertices(mesh, points):
for p in points:
p['id'].co = p["v'"].to_3d().to_tuple()
bmesh.update_edit_mesh(mesh)<|docstring|>Update the active set of selected vertices with their fitted positions.<|endoftext|> |
df8dbf2e61345fb064c43f3a3ff84a51b46b493d2c437f6a6806f707a0c70fd2 | def like_a_gauss(mat):
"\n Implementation of the Gaussian Elimination Algorithm for finding the row-reduced echelon form of a given matrix.\n No pivoting is done.\n Requires Python 3 due to the different behaviour of the division operation in earlier versions of Python.\n Released under the Public Domain (if you want it - you probably don't)\n https://gist.github.com/zhuowei/7149445\n Changes mat into Reduced Row-Echelon Form.\n "
for i in range(min(len(mat), len(mat[0]))):
for r in range(i, len(mat)):
zero_row = (mat[r][i] == 0)
if zero_row:
continue
(mat[i], mat[r]) = (mat[r], mat[i])
first_row_first_col = mat[i][i]
for rr in range((i + 1), len(mat)):
this_row_first = mat[rr][i]
scalarMultiple = (((- 1) * this_row_first) / first_row_first_col)
for cc in range(i, len(mat[0])):
mat[rr][cc] += (mat[i][cc] * scalarMultiple)
break
for i in range((min(len(mat), len(mat[0])) - 1), (- 1), (- 1)):
first_elem_col = (- 1)
first_elem = (- 1)
for c in range(len(mat[0])):
if (mat[i][c] == 0):
continue
if (first_elem_col == (- 1)):
first_elem_col = c
first_elem = mat[i][c]
mat[i][c] /= first_elem
for r in range(i):
this_row_above = mat[r][first_elem_col]
scalarMultiple = ((- 1) * this_row_above)
for cc in range(len(mat[0])):
mat[r][cc] += (mat[i][cc] * scalarMultiple)
return mat | Implementation of the Gaussian Elimination Algorithm for finding the row-reduced echelon form of a given matrix.
No pivoting is done.
Requires Python 3 due to the different behaviour of the division operation in earlier versions of Python.
Released under the Public Domain (if you want it - you probably don't)
https://gist.github.com/zhuowei/7149445
Changes mat into Reduced Row-Echelon Form. | mesh_viewport_vertex_alignment.py | like_a_gauss | hdunderscore/mesh_viewport_vertex_align | 2 | python | def like_a_gauss(mat):
"\n Implementation of the Gaussian Elimination Algorithm for finding the row-reduced echelon form of a given matrix.\n No pivoting is done.\n Requires Python 3 due to the different behaviour of the division operation in earlier versions of Python.\n Released under the Public Domain (if you want it - you probably don't)\n https://gist.github.com/zhuowei/7149445\n Changes mat into Reduced Row-Echelon Form.\n "
for i in range(min(len(mat), len(mat[0]))):
for r in range(i, len(mat)):
zero_row = (mat[r][i] == 0)
if zero_row:
continue
(mat[i], mat[r]) = (mat[r], mat[i])
first_row_first_col = mat[i][i]
for rr in range((i + 1), len(mat)):
this_row_first = mat[rr][i]
scalarMultiple = (((- 1) * this_row_first) / first_row_first_col)
for cc in range(i, len(mat[0])):
mat[rr][cc] += (mat[i][cc] * scalarMultiple)
break
for i in range((min(len(mat), len(mat[0])) - 1), (- 1), (- 1)):
first_elem_col = (- 1)
first_elem = (- 1)
for c in range(len(mat[0])):
if (mat[i][c] == 0):
continue
if (first_elem_col == (- 1)):
first_elem_col = c
first_elem = mat[i][c]
mat[i][c] /= first_elem
for r in range(i):
this_row_above = mat[r][first_elem_col]
scalarMultiple = ((- 1) * this_row_above)
for cc in range(len(mat[0])):
mat[r][cc] += (mat[i][cc] * scalarMultiple)
return mat | def like_a_gauss(mat):
"\n Implementation of the Gaussian Elimination Algorithm for finding the row-reduced echelon form of a given matrix.\n No pivoting is done.\n Requires Python 3 due to the different behaviour of the division operation in earlier versions of Python.\n Released under the Public Domain (if you want it - you probably don't)\n https://gist.github.com/zhuowei/7149445\n Changes mat into Reduced Row-Echelon Form.\n "
for i in range(min(len(mat), len(mat[0]))):
for r in range(i, len(mat)):
zero_row = (mat[r][i] == 0)
if zero_row:
continue
(mat[i], mat[r]) = (mat[r], mat[i])
first_row_first_col = mat[i][i]
for rr in range((i + 1), len(mat)):
this_row_first = mat[rr][i]
scalarMultiple = (((- 1) * this_row_first) / first_row_first_col)
for cc in range(i, len(mat[0])):
mat[rr][cc] += (mat[i][cc] * scalarMultiple)
break
for i in range((min(len(mat), len(mat[0])) - 1), (- 1), (- 1)):
first_elem_col = (- 1)
first_elem = (- 1)
for c in range(len(mat[0])):
if (mat[i][c] == 0):
continue
if (first_elem_col == (- 1)):
first_elem_col = c
first_elem = mat[i][c]
mat[i][c] /= first_elem
for r in range(i):
this_row_above = mat[r][first_elem_col]
scalarMultiple = ((- 1) * this_row_above)
for cc in range(len(mat[0])):
mat[r][cc] += (mat[i][cc] * scalarMultiple)
return mat<|docstring|>Implementation of the Gaussian Elimination Algorithm for finding the row-reduced echelon form of a given matrix.
No pivoting is done.
Requires Python 3 due to the different behaviour of the division operation in earlier versions of Python.
Released under the Public Domain (if you want it - you probably don't)
https://gist.github.com/zhuowei/7149445
Changes mat into Reduced Row-Echelon Form.<|endoftext|> |
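Since the docstring only says the matrix is changed in place, a small worked example may help: reducing the augmented matrix of 2x + y = 5 and x + 3y = 10 leaves the solution in the last column. This is an illustration of the function above, not another dataset row.

m = [[2.0, 1.0, 5.0],
     [1.0, 3.0, 10.0]]
like_a_gauss(m)
# m is now [[1.0, 0.0, 1.0], [0.0, 1.0, 3.0]], i.e. x = 1, y = 3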
17498c96b3f8df099a37a41c19504885ace2cbd822726ae508d8d01b2af9de34 | def get_ols(force_download: bool=False):
'Get the OLS registry.'
if (PROCESSED_PATH.exists() and (not force_download)):
with PROCESSED_PATH.open() as file:
return json.load(file)
download(url=URL, path=RAW_PATH, force=True)
with RAW_PATH.open() as file:
data = json.load(file)
if ('next' in data['_links']):
raise NotImplementedError('Need to implement paging since there are more entries than fit into one page')
processed = {}
for ontology in data['_embedded']['ontologies']:
ols_id = ontology['ontologyId']
config = _PROCESSING.get(ols_id)
if (config is None):
logger.warning('need to curate processing file for OLS prefix %s', ols_id)
continue
processed[ols_id] = _process(ontology, config)
with PROCESSED_PATH.open('w') as file:
json.dump(processed, file, indent=2, sort_keys=True)
return processed | Get the OLS registry. | src/bioregistry/external/ols.py | get_ols | cthoyt/bioregistry | 2 | python | def get_ols(force_download: bool=False):
if (PROCESSED_PATH.exists() and (not force_download)):
with PROCESSED_PATH.open() as file:
return json.load(file)
download(url=URL, path=RAW_PATH, force=True)
with RAW_PATH.open() as file:
data = json.load(file)
if ('next' in data['_links']):
raise NotImplementedError('Need to implement paging since there are more entries than fit into one page')
processed = {}
for ontology in data['_embedded']['ontologies']:
ols_id = ontology['ontologyId']
config = _PROCESSING.get(ols_id)
if (config is None):
logger.warning('need to curate processing file for OLS prefix %s', ols_id)
continue
processed[ols_id] = _process(ontology, config)
with PROCESSED_PATH.open('w') as file:
json.dump(processed, file, indent=2, sort_keys=True)
return processed | def get_ols(force_download: bool=False):
if (PROCESSED_PATH.exists() and (not force_download)):
with PROCESSED_PATH.open() as file:
return json.load(file)
download(url=URL, path=RAW_PATH, force=True)
with RAW_PATH.open() as file:
data = json.load(file)
if ('next' in data['_links']):
raise NotImplementedError('Need to implement paging since there are more entries than fit into one page')
processed = {}
for ontology in data['_embedded']['ontologies']:
ols_id = ontology['ontologyId']
config = _PROCESSING.get(ols_id)
if (config is None):
logger.warning('need to curate processing file for OLS prefix %s', ols_id)
continue
processed[ols_id] = _process(ontology, config)
with PROCESSED_PATH.open('w') as file:
json.dump(processed, file, indent=2, sort_keys=True)
return processed<|docstring|>Get the OLS registry.<|endoftext|> |
e24278d0c74797a5e9c2c22f90eca30ffe82ae3c7c120be16e6152a74a7a982a | @click.command()
def main():
'Reload the OLS data.'
get_ols(force_download=True) | Reload the OLS data. | src/bioregistry/external/ols.py | main | cthoyt/bioregistry | 2 | python | @click.command()
def main():
get_ols(force_download=True) | @click.command()
def main():
get_ols(force_download=True)<|docstring|>Reload the OLS data.<|endoftext|> |
1878d0f3280149992723ccb245d5358ba74379175d00cbc492a2761e72b6a524 | def test():
'\n 异常处理\n '
try:
a = (10 / 0)
print(('a is %s' % a))
except Exception as e:
print(('exception is %s' % e)) | 异常处理 | bookcode/pythonproject/pythonlearning/pythonlearning/base/except.py | test | zhangymPerson/Think-in-java-note | 0 | python | def test():
'\n \n '
try:
a = (10 / 0)
print(('a is %s' % a))
except Exception as e:
print(('exception is %s' % e)) | def test():
'\n \n '
try:
a = (10 / 0)
print(('a is %s' % a))
except Exception as e:
print(('exception is %s' % e))<|docstring|>Exception handling<|endoftext|>
c6a5457920b6907aa5c46f6cd8b50eebcaf9eeb3854d90a945f045a7fbd5a5d4 | def OpenEditor(self, col, row):
'\n Opens an editor at the current position.\n Modified to allow a generic getter to set editor text.\n '
evt = wx.ListEvent(wx.wxEVT_COMMAND_LIST_BEGIN_LABEL_EDIT, self.GetId())
evt.m_itemIndex = row
evt.m_col = col
item = self.GetItem(row, col)
evt.m_item.SetId(item.GetId())
evt.m_item.SetColumn(item.GetColumn())
evt.m_item.SetData(item.GetData())
evt.m_item.SetText(item.GetText())
ret = self.GetEventHandler().ProcessEvent(evt)
if (ret and (not evt.IsAllowed())):
return
if (self.GetColumn(col).m_format != self.col_style):
self.make_editor(self.GetColumn(col).m_format)
x0 = self.col_locs[col]
x1 = (self.col_locs[(col + 1)] - x0)
scrolloffset = self.GetScrollPos(wx.HORIZONTAL)
if (((x0 + x1) - scrolloffset) > self.GetSize()[0]):
if (wx.Platform == '__WXMSW__'):
offset = (((x0 + x1) - self.GetSize()[0]) - scrolloffset)
addoffset = (self.GetSize()[0] / 4)
if ((addoffset + scrolloffset) < self.GetSize()[0]):
offset += addoffset
self.ScrollList(offset, 0)
scrolloffset = self.GetScrollPos(wx.HORIZONTAL)
else:
self.editor.SetValue(self.GetItem(row, col).GetText())
self.curRow = row
self.curCol = col
self.CloseEditor()
return
y0 = self.GetItemRect(row)[1]
editor = self.editor
editor.SetDimensions((x0 - scrolloffset), y0, x1, (- 1))
editor.SetValue(self.GetEditValue(row, col))
editor.Show()
editor.Raise()
editor.SetSelection((- 1), (- 1))
editor.SetFocus()
self.curRow = row
self.curCol = col | Opens an editor at the current position.
Modified to allow a generic getter to set editor text. | ListEditorCtrl.py | OpenEditor | fprimex/lad | 0 | python | def OpenEditor(self, col, row):
'\n Opens an editor at the current position.\n Modified to allow a generic getter to set editor text.\n '
evt = wx.ListEvent(wx.wxEVT_COMMAND_LIST_BEGIN_LABEL_EDIT, self.GetId())
evt.m_itemIndex = row
evt.m_col = col
item = self.GetItem(row, col)
evt.m_item.SetId(item.GetId())
evt.m_item.SetColumn(item.GetColumn())
evt.m_item.SetData(item.GetData())
evt.m_item.SetText(item.GetText())
ret = self.GetEventHandler().ProcessEvent(evt)
if (ret and (not evt.IsAllowed())):
return
if (self.GetColumn(col).m_format != self.col_style):
self.make_editor(self.GetColumn(col).m_format)
x0 = self.col_locs[col]
x1 = (self.col_locs[(col + 1)] - x0)
scrolloffset = self.GetScrollPos(wx.HORIZONTAL)
if (((x0 + x1) - scrolloffset) > self.GetSize()[0]):
if (wx.Platform == '__WXMSW__'):
offset = (((x0 + x1) - self.GetSize()[0]) - scrolloffset)
addoffset = (self.GetSize()[0] / 4)
if ((addoffset + scrolloffset) < self.GetSize()[0]):
offset += addoffset
self.ScrollList(offset, 0)
scrolloffset = self.GetScrollPos(wx.HORIZONTAL)
else:
self.editor.SetValue(self.GetItem(row, col).GetText())
self.curRow = row
self.curCol = col
self.CloseEditor()
return
y0 = self.GetItemRect(row)[1]
editor = self.editor
editor.SetDimensions((x0 - scrolloffset), y0, x1, (- 1))
editor.SetValue(self.GetEditValue(row, col))
editor.Show()
editor.Raise()
editor.SetSelection((- 1), (- 1))
editor.SetFocus()
self.curRow = row
self.curCol = col | def OpenEditor(self, col, row):
'\n Opens an editor at the current position.\n Modified to allow a generic getter to set editor text.\n '
evt = wx.ListEvent(wx.wxEVT_COMMAND_LIST_BEGIN_LABEL_EDIT, self.GetId())
evt.m_itemIndex = row
evt.m_col = col
item = self.GetItem(row, col)
evt.m_item.SetId(item.GetId())
evt.m_item.SetColumn(item.GetColumn())
evt.m_item.SetData(item.GetData())
evt.m_item.SetText(item.GetText())
ret = self.GetEventHandler().ProcessEvent(evt)
if (ret and (not evt.IsAllowed())):
return
if (self.GetColumn(col).m_format != self.col_style):
self.make_editor(self.GetColumn(col).m_format)
x0 = self.col_locs[col]
x1 = (self.col_locs[(col + 1)] - x0)
scrolloffset = self.GetScrollPos(wx.HORIZONTAL)
if (((x0 + x1) - scrolloffset) > self.GetSize()[0]):
if (wx.Platform == '__WXMSW__'):
offset = (((x0 + x1) - self.GetSize()[0]) - scrolloffset)
addoffset = (self.GetSize()[0] / 4)
if ((addoffset + scrolloffset) < self.GetSize()[0]):
offset += addoffset
self.ScrollList(offset, 0)
scrolloffset = self.GetScrollPos(wx.HORIZONTAL)
else:
self.editor.SetValue(self.GetItem(row, col).GetText())
self.curRow = row
self.curCol = col
self.CloseEditor()
return
y0 = self.GetItemRect(row)[1]
editor = self.editor
editor.SetDimensions((x0 - scrolloffset), y0, x1, (- 1))
editor.SetValue(self.GetEditValue(row, col))
editor.Show()
editor.Raise()
editor.SetSelection((- 1), (- 1))
editor.SetFocus()
self.curRow = row
self.curCol = col<|docstring|>Opens an editor at the current position.
Modified to allow a generic getter to set editor text.<|endoftext|> |
86a23d488cd1031fca30b125a9373c1f3aff1e0771beee3aac253240e936f541 | def CloseEditor(self, evt=None):
'\n Close the editor and save the new value to the ListCtrl.\n Modified to allow a generic setter to save edited data.\n '
if (not self.editor.IsShown()):
return
text = self.editor.GetValue()
self.editor.Hide()
self.SetFocus()
self.SetValue(self.curRow, self.curCol, text)
evt = wx.ListEvent(wx.wxEVT_COMMAND_LIST_END_LABEL_EDIT, self.GetId())
evt.m_itemIndex = self.curRow
evt.m_col = self.curCol
item = self.GetItem(self.curRow, self.curCol)
evt.m_item.SetId(item.GetId())
evt.m_item.SetColumn(item.GetColumn())
evt.m_item.SetData(item.GetData())
evt.m_item.SetText(text)
ret = self.GetEventHandler().ProcessEvent(evt)
if ((not ret) or evt.IsAllowed()):
if self.IsVirtual():
self.SetVirtualData(self.curRow, self.curCol, text)
else:
self.SetStringItem(self.curRow, self.curCol, text)
self.RefreshItem(self.curRow)
self.RefreshList() | Close the editor and save the new value to the ListCtrl.
Modified to allow a generic setter to save edited data. | ListEditorCtrl.py | CloseEditor | fprimex/lad | 0 | python | def CloseEditor(self, evt=None):
'\n Close the editor and save the new value to the ListCtrl.\n Modified to allow a generic setter to save edited data.\n '
if (not self.editor.IsShown()):
return
text = self.editor.GetValue()
self.editor.Hide()
self.SetFocus()
self.SetValue(self.curRow, self.curCol, text)
evt = wx.ListEvent(wx.wxEVT_COMMAND_LIST_END_LABEL_EDIT, self.GetId())
evt.m_itemIndex = self.curRow
evt.m_col = self.curCol
item = self.GetItem(self.curRow, self.curCol)
evt.m_item.SetId(item.GetId())
evt.m_item.SetColumn(item.GetColumn())
evt.m_item.SetData(item.GetData())
evt.m_item.SetText(text)
ret = self.GetEventHandler().ProcessEvent(evt)
if ((not ret) or evt.IsAllowed()):
if self.IsVirtual():
self.SetVirtualData(self.curRow, self.curCol, text)
else:
self.SetStringItem(self.curRow, self.curCol, text)
self.RefreshItem(self.curRow)
self.RefreshList() | def CloseEditor(self, evt=None):
'\n Close the editor and save the new value to the ListCtrl.\n Modified to allow a generic setter to save edited data.\n '
if (not self.editor.IsShown()):
return
text = self.editor.GetValue()
self.editor.Hide()
self.SetFocus()
self.SetValue(self.curRow, self.curCol, text)
evt = wx.ListEvent(wx.wxEVT_COMMAND_LIST_END_LABEL_EDIT, self.GetId())
evt.m_itemIndex = self.curRow
evt.m_col = self.curCol
item = self.GetItem(self.curRow, self.curCol)
evt.m_item.SetId(item.GetId())
evt.m_item.SetColumn(item.GetColumn())
evt.m_item.SetData(item.GetData())
evt.m_item.SetText(text)
ret = self.GetEventHandler().ProcessEvent(evt)
if ((not ret) or evt.IsAllowed()):
if self.IsVirtual():
self.SetVirtualData(self.curRow, self.curCol, text)
else:
self.SetStringItem(self.curRow, self.curCol, text)
self.RefreshItem(self.curRow)
self.RefreshList()<|docstring|>Close the editor and save the new value to the ListCtrl.
Modified to allow a generic setter to save edited data.<|endoftext|> |
e6b90a400d0fdcbb2eecc3ae97cb796f296636dde0f4854daae20a3e62d48cdc | def OnChar(self, event):
'Catch ESC and cancel gracefully, preserving data'
if (event.GetKeyCode() == wx.WXK_ESCAPE):
if (not self.editor.IsShown()):
return
self.editor.Hide()
self.SetFocus()
else:
listmix.TextEditMixin.OnChar(self, event) | Catch ESC and cancel gracefully, preserving data | ListEditorCtrl.py | OnChar | fprimex/lad | 0 | python | def OnChar(self, event):
if (event.GetKeyCode() == wx.WXK_ESCAPE):
if (not self.editor.IsShown()):
return
self.editor.Hide()
self.SetFocus()
else:
listmix.TextEditMixin.OnChar(self, event) | def OnChar(self, event):
if (event.GetKeyCode() == wx.WXK_ESCAPE):
if (not self.editor.IsShown()):
return
self.editor.Hide()
self.SetFocus()
else:
listmix.TextEditMixin.OnChar(self, event)<|docstring|>Catch ESC and cancel gracefully, preserving data<|endoftext|> |
95444b0eb4873b02cc254511153d2bdbafae23b713169d73591f9857c122000d | def OnLeftDouble(self, evt=None):
'Open the editor on double clicks'
if self.editor.IsShown():
self.CloseEditor()
(x, y) = evt.GetPosition()
(row, flags) = self.HitTest((x, y))
self.col_locs = [0]
loc = 0
for n in range(self.GetColumnCount()):
loc = (loc + self.GetColumnWidth(n))
self.col_locs.append(loc)
col = (bisect(self.col_locs, (x + self.GetScrollPos(wx.HORIZONTAL))) - 1)
self.OpenEditor(col, row) | Open the editor on double clicks | ListEditorCtrl.py | OnLeftDouble | fprimex/lad | 0 | python | def OnLeftDouble(self, evt=None):
if self.editor.IsShown():
self.CloseEditor()
(x, y) = evt.GetPosition()
(row, flags) = self.HitTest((x, y))
self.col_locs = [0]
loc = 0
for n in range(self.GetColumnCount()):
loc = (loc + self.GetColumnWidth(n))
self.col_locs.append(loc)
col = (bisect(self.col_locs, (x + self.GetScrollPos(wx.HORIZONTAL))) - 1)
self.OpenEditor(col, row) | def OnLeftDouble(self, evt=None):
if self.editor.IsShown():
self.CloseEditor()
(x, y) = evt.GetPosition()
(row, flags) = self.HitTest((x, y))
self.col_locs = [0]
loc = 0
for n in range(self.GetColumnCount()):
loc = (loc + self.GetColumnWidth(n))
self.col_locs.append(loc)
col = (bisect(self.col_locs, (x + self.GetScrollPos(wx.HORIZONTAL))) - 1)
self.OpenEditor(col, row)<|docstring|>Open the editor on double clicks<|endoftext|> |
f90aa81ddaab14dd17a128f5a0e20b33820347ccf1c1e411ef4351aa26c50129 | @staticmethod
def memlets_intersect(graph_a: SDFGState, group_a: List[nodes.AccessNode], inputs_a: bool, graph_b: SDFGState, group_b: List[nodes.AccessNode], inputs_b: bool) -> bool:
'\n Performs an all-pairs check for subset intersection on two\n groups of nodes. If group intersects or result is indeterminate,\n returns True as a precaution.\n :param graph_a: The graph in which the first set of nodes reside.\n :param group_a: The first set of nodes to check.\n :param inputs_a: If True, checks inputs of the first group.\n :param graph_b: The graph in which the second set of nodes reside.\n :param group_b: The second set of nodes to check.\n :param inputs_b: If True, checks inputs of the second group.\n :returns True if subsets intersect or result is indeterminate.\n '
src_subset = (lambda e: (e.data.src_subset if (e.data.src_subset is not None) else e.data.dst_subset))
dst_subset = (lambda e: (e.data.dst_subset if (e.data.dst_subset is not None) else e.data.src_subset))
if inputs_a:
edges_a = [e for n in group_a for e in graph_a.out_edges(n)]
subset_a = src_subset
else:
edges_a = [e for n in group_a for e in graph_a.in_edges(n)]
subset_a = dst_subset
if inputs_b:
edges_b = [e for n in group_b for e in graph_b.out_edges(n)]
subset_b = src_subset
else:
edges_b = [e for n in group_b for e in graph_b.in_edges(n)]
subset_b = dst_subset
for ea in edges_a:
for eb in edges_b:
result = subsets.intersects(subset_a(ea), subset_b(eb))
if ((result is True) or (result is None)):
return True
return False | Performs an all-pairs check for subset intersection on two
groups of nodes. If group intersects or result is indeterminate,
returns True as a precaution.
:param graph_a: The graph in which the first set of nodes reside.
:param group_a: The first set of nodes to check.
:param inputs_a: If True, checks inputs of the first group.
:param graph_b: The graph in which the second set of nodes reside.
:param group_b: The second set of nodes to check.
:param inputs_b: If True, checks inputs of the second group.
:returns True if subsets intersect or result is indeterminate. | dace/transformation/interstate/state_fusion.py | memlets_intersect | jnice-81/dace | 227 | python | @staticmethod
def memlets_intersect(graph_a: SDFGState, group_a: List[nodes.AccessNode], inputs_a: bool, graph_b: SDFGState, group_b: List[nodes.AccessNode], inputs_b: bool) -> bool:
'\n Performs an all-pairs check for subset intersection on two\n groups of nodes. If group intersects or result is indeterminate,\n returns True as a precaution.\n :param graph_a: The graph in which the first set of nodes reside.\n :param group_a: The first set of nodes to check.\n :param inputs_a: If True, checks inputs of the first group.\n :param graph_b: The graph in which the second set of nodes reside.\n :param group_b: The second set of nodes to check.\n :param inputs_b: If True, checks inputs of the second group.\n :returns True if subsets intersect or result is indeterminate.\n '
src_subset = (lambda e: (e.data.src_subset if (e.data.src_subset is not None) else e.data.dst_subset))
dst_subset = (lambda e: (e.data.dst_subset if (e.data.dst_subset is not None) else e.data.src_subset))
if inputs_a:
edges_a = [e for n in group_a for e in graph_a.out_edges(n)]
subset_a = src_subset
else:
edges_a = [e for n in group_a for e in graph_a.in_edges(n)]
subset_a = dst_subset
if inputs_b:
edges_b = [e for n in group_b for e in graph_b.out_edges(n)]
subset_b = src_subset
else:
edges_b = [e for n in group_b for e in graph_b.in_edges(n)]
subset_b = dst_subset
for ea in edges_a:
for eb in edges_b:
result = subsets.intersects(subset_a(ea), subset_b(eb))
if ((result is True) or (result is None)):
return True
return False | @staticmethod
def memlets_intersect(graph_a: SDFGState, group_a: List[nodes.AccessNode], inputs_a: bool, graph_b: SDFGState, group_b: List[nodes.AccessNode], inputs_b: bool) -> bool:
'\n Performs an all-pairs check for subset intersection on two\n groups of nodes. If group intersects or result is indeterminate,\n returns True as a precaution.\n :param graph_a: The graph in which the first set of nodes reside.\n :param group_a: The first set of nodes to check.\n :param inputs_a: If True, checks inputs of the first group.\n :param graph_b: The graph in which the second set of nodes reside.\n :param group_b: The second set of nodes to check.\n :param inputs_b: If True, checks inputs of the second group.\n :returns True if subsets intersect or result is indeterminate.\n '
src_subset = (lambda e: (e.data.src_subset if (e.data.src_subset is not None) else e.data.dst_subset))
dst_subset = (lambda e: (e.data.dst_subset if (e.data.dst_subset is not None) else e.data.src_subset))
if inputs_a:
edges_a = [e for n in group_a for e in graph_a.out_edges(n)]
subset_a = src_subset
else:
edges_a = [e for n in group_a for e in graph_a.in_edges(n)]
subset_a = dst_subset
if inputs_b:
edges_b = [e for n in group_b for e in graph_b.out_edges(n)]
subset_b = src_subset
else:
edges_b = [e for n in group_b for e in graph_b.in_edges(n)]
subset_b = dst_subset
for ea in edges_a:
for eb in edges_b:
result = subsets.intersects(subset_a(ea), subset_b(eb))
if ((result is True) or (result is None)):
return True
return False<|docstring|>Performs an all-pairs check for subset intersection on two
groups of nodes. If group intersects or result is indeterminate,
returns True as a precaution.
:param graph_a: The graph in which the first set of nodes reside.
:param group_a: The first set of nodes to check.
:param inputs_a: If True, checks inputs of the first group.
:param graph_b: The graph in which the second set of nodes reside.
:param group_b: The second set of nodes to check.
:param inputs_b: If True, checks inputs of the second group.
:returns True if subsets intersect or result is indeterminate.<|endoftext|> |
d90d02ca1725d1848b40e6a3afc4b2b855e257945c729832059e2fe29048468c | def populate_movie_queue(self):
'populates a new MovieQueue'
self.queue.insert(0, 'Donatello')
self.queue.insert(1, 'Raphael')
self.queue.insert(2, 'Michelangelo')
self.queue.insert(3, 'Leonardo')
return | populates a new MovieQueue | exercises/structures/src/movie_queue.py | populate_movie_queue | bmazey/summer2020 | 0 | python | def populate_movie_queue(self):
self.queue.insert(0, 'Donatello')
self.queue.insert(1, 'Raphael')
self.queue.insert(2, 'Michelangelo')
self.queue.insert(3, 'Leonardo')
return | def populate_movie_queue(self):
self.queue.insert(0, 'Donatello')
self.queue.insert(1, 'Raphael')
self.queue.insert(2, 'Michelangelo')
self.queue.insert(3, 'Leonardo')
return<|docstring|>populates a new MovieQueue<|endoftext|> |
3f1afca9ecdaa9765dd1e246bf45369de084deb5871a055205f602a222eeb472 | @abstractmethod
def train(self, mini_batch, discount):
' Trains the current neural network on a batch of experiences.\n \n inputs:\n mini_batch - an iterable object containing experiences observed\n from a Markov decision process, of the form \n (state, action, reward, next_state, done)\n discount - the discount factor in [0, 1]\n '
pass | Trains the current neural network on a batch of experiences.
inputs:
mini_batch - an iterable object containing experiences observed
from a Markov decision process, of the form
(state, action, reward, next_state, done)
discount - the discount factor in [0, 1] | agents/Neural.py | train | mike-gimelfarb/mfpy | 1 | python | @abstractmethod
def train(self, mini_batch, discount):
' Trains the current neural network on a batch of experiences.\n \n inputs:\n mini_batch - an iterable object containing experiences observed\n from a Markov decision process, of the form \n (state, action, reward, next_state, done)\n discount - the discount factor in [0, 1]\n '
pass | @abstractmethod
def train(self, mini_batch, discount):
' Trains the current neural network on a batch of experiences.\n \n inputs:\n mini_batch - an iterable object containing experiences observed\n from a Markov decision process, of the form \n (state, action, reward, next_state, done)\n discount - the discount factor in [0, 1]\n '
pass<|docstring|>Trains the current neural network on a batch of experiences.
inputs:
mini_batch - an iterable object containing experiences observed
from a Markov decision process, of the form
(state, action, reward, next_state, done)
discount - the discount factor in [0, 1]<|endoftext|> |
742385b441afb60c2b2e816ae5e8ad535c7107a490416041e509cc3e58354e41 | @staticmethod
def clear_model(model):
' A recursive method to re-initialize all layers in a Keras model.\n \n This method will recursively check all layers in the current model. For each\n layer, if a weight initializer exists, it calls the weight initializer to initialize\n all weights in the layer to their default values. \n \n inputs:\n model - a Keras model whose weights to re-initialize\n '
session = K.get_session()
for layer in model.layers:
if isinstance(layer, Network):
Neural.clear_model(layer)
continue
for v in layer.__dict__:
v_arg = getattr(layer, v)
if hasattr(v_arg, 'initializer'):
initializer_method = getattr(v_arg, 'initializer')
initializer_method.run(session=session) | A recursive method to re-initialize all layers in a Keras model.
This method will recursively check all layers in the current model. For each
layer, if a weight initializer exists, it calls the weight initializer to initialize
all weights in the layer to their default values.
inputs:
model - a Keras model whose weights to re-initialize | agents/Neural.py | clear_model | mike-gimelfarb/mfpy | 1 | python | @staticmethod
def clear_model(model):
' A recursive method to re-initialize all layers in a Keras model.\n \n This method will recursively check all layers in the current model. For each\n layer, if a weight initializer exists, it calls the weight initializer to initialize\n all weights in the layer to their default values. \n \n inputs:\n model - a Keras model whose weights to re-initialize\n '
session = K.get_session()
for layer in model.layers:
if isinstance(layer, Network):
Neural.clear_model(layer)
continue
for v in layer.__dict__:
v_arg = getattr(layer, v)
if hasattr(v_arg, 'initializer'):
initializer_method = getattr(v_arg, 'initializer')
initializer_method.run(session=session) | @staticmethod
def clear_model(model):
' A recursive method to re-initialize all layers in a Keras model.\n \n This method will recursively check all layers in the current model. For each\n layer, if a weight initializer exists, it calls the weight initializer to initialize\n all weights in the layer to their default values. \n \n inputs:\n model - a Keras model whose weights to re-initialize\n '
session = K.get_session()
for layer in model.layers:
if isinstance(layer, Network):
Neural.clear_model(layer)
continue
for v in layer.__dict__:
v_arg = getattr(layer, v)
if hasattr(v_arg, 'initializer'):
initializer_method = getattr(v_arg, 'initializer')
initializer_method.run(session=session)<|docstring|>A recursive method to re-initialize all layers in a Keras model.
This method will recursively check all layers in the current model. For each
layer, if a weight initializer exists, it calls the weight initializer to initialize
all weights in the layer to their default values.
inputs:
model - a Keras model whose weights to re-initialize<|endoftext|> |
c95875569dcc5ffc8950d2c4ece5616d06ab78164eb865331804a7f284b936a2 | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) | 3x3 convolution with padding | models/resnet_imagenet.py | conv3x3 | winycg/HSAKD | 36 | python | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)<|docstring|>3x3 convolution with padding<|endoftext|> |
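As a shape check for the helper above (assumes PyTorch is installed): with padding=dilation a 3x3 convolution preserves the spatial size at stride 1, which is what the ResNet blocks built from it rely on.

import torch
x = torch.randn(1, 16, 56, 56)
print(conv3x3(16, 32)(x).shape)            # torch.Size([1, 32, 56, 56])
print(conv3x3(16, 32, stride=2)(x).shape)  # torch.Size([1, 32, 28, 28])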
7447c07b06cc8d16674f31fc29f40a376c8d7a0321f9f661635b233109ed88c5 | def conv1x1(in_planes, out_planes, stride=1):
'1x1 convolution'
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) | 1x1 convolution | models/resnet_imagenet.py | conv1x1 | winycg/HSAKD | 36 | python | def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) | def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)<|docstring|>1x1 convolution<|endoftext|> |
bf293f1f83f0c096a558de1db7ffb4360fab645bf2da38f987abf78c98a57395 | @coroutine
def recv_packet(self):
'Parse the packet header and read entire packet payload into buffer.'
buff = b''
while True:
packet_header = (yield self.connection.stream.read_bytes(4))
if DEBUG:
dump_packet(packet_header)
packet_length_bin = packet_header[:3]
self._packet_number = byte2int(packet_header[3])
bin_length = (packet_length_bin + b'\x00')
bytes_to_read = struct.unpack('<I', bin_length)[0]
recv_data = (yield self.connection.stream.read_bytes(bytes_to_read))
if DEBUG:
dump_packet(recv_data)
buff += recv_data
if (bytes_to_read < MAX_PACKET_LEN):
break
self._data = buff | Parse the packet header and read entire packet payload into buffer. | asynctorndb/connection.py | recv_packet | mayflaver/AsyncTorndb | 103 | python | @coroutine
def recv_packet(self):
buff = b''
while True:
packet_header = (yield self.connection.stream.read_bytes(4))
if DEBUG:
dump_packet(packet_header)
packet_length_bin = packet_header[:3]
self._packet_number = byte2int(packet_header[3])
bin_length = (packet_length_bin + b'\x00')
bytes_to_read = struct.unpack('<I', bin_length)[0]
recv_data = (yield self.connection.stream.read_bytes(bytes_to_read))
if DEBUG:
dump_packet(recv_data)
buff += recv_data
if (bytes_to_read < MAX_PACKET_LEN):
break
self._data = buff | @coroutine
def recv_packet(self):
buff = b''
while True:
packet_header = (yield self.connection.stream.read_bytes(4))
if DEBUG:
dump_packet(packet_header)
packet_length_bin = packet_header[:3]
self._packet_number = byte2int(packet_header[3])
bin_length = (packet_length_bin + b'\x00')
bytes_to_read = struct.unpack('<I', bin_length)[0]
recv_data = (yield self.connection.stream.read_bytes(bytes_to_read))
if DEBUG:
dump_packet(recv_data)
buff += recv_data
if (bytes_to_read < MAX_PACKET_LEN):
break
self._data = buff<|docstring|>Parse the packet header and read entire packet payload into buffer.<|endoftext|> |
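A worked example of the 4-byte header handling in recv_packet above, using an assumed header value: the first three bytes are the little-endian payload length and the fourth is the sequence number.

import struct
header = b'\x2d\x00\x00\x01'                                 # example header bytes
payload_len = struct.unpack('<I', header[:3] + b'\x00')[0]   # 45-byte payload
sequence_id = header[3]                                      # packet number 1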
908ee200050a41c690b14364c77c5cbe47de20dabd0a07ad7c9b669324b6a958 | def read(self, size):
"Read the first 'size' bytes in packet and advance cursor past them."
result = self._data[self._position:(self._position + size)]
if (len(result) != size):
error = ('Result length not requested length:\nExpected=%s. Actual=%s. Position: %s. Data Length: %s' % (size, len(result), self._position, len(self._data)))
if DEBUG:
print(error)
self.dump()
raise AssertionError(error)
self._position += size
return result | Read the first 'size' bytes in packet and advance cursor past them. | asynctorndb/connection.py | read | mayflaver/AsyncTorndb | 103 | python | def read(self, size):
result = self._data[self._position:(self._position + size)]
if (len(result) != size):
error = ('Result length not requested length:\nExpected=%s. Actual=%s. Position: %s. Data Length: %s' % (size, len(result), self._position, len(self._data)))
if DEBUG:
print(error)
self.dump()
raise AssertionError(error)
self._position += size
return result | def read(self, size):
result = self._data[self._position:(self._position + size)]
if (len(result) != size):
error = ('Result length not requested length:\nExpected=%s. Actual=%s. Position: %s. Data Length: %s' % (size, len(result), self._position, len(self._data)))
if DEBUG:
print(error)
self.dump()
raise AssertionError(error)
self._position += size
return result<|docstring|>Read the first 'size' bytes in packet and advance cursor past them.<|endoftext|> |
c3e079808f9fb2ca29e7a92a36b0dc24a225c28005651fef74ec084701fb2150 | def read_all(self):
'Read all remaining data in the packet.\n\n (Subsequent read() will return errors.)\n '
result = self._data[self._position:]
self._position = None
return result | Read all remaining data in the packet.
(Subsequent read() will return errors.) | asynctorndb/connection.py | read_all | mayflaver/AsyncTorndb | 103 | python | def read_all(self):
'Read all remaining data in the packet.\n\n (Subsequent read() will return errors.)\n '
result = self._data[self._position:]
self._position = None
return result | def read_all(self):
'Read all remaining data in the packet.\n\n (Subsequent read() will return errors.)\n '
result = self._data[self._position:]
self._position = None
return result<|docstring|>Read all remaining data in the packet.
(Subsequent read() will return errors.)<|endoftext|> |
b5bf74d9936cfe196fef2615c15bc75b36c70a158e3a4d3e4b9d34ea88e91753 | def advance(self, length):
"Advance the cursor in data buffer 'length' bytes."
new_position = (self._position + length)
if ((new_position < 0) or (new_position > len(self._data))):
raise Exception(('Invalid advance amount (%s) for cursor. Position=%s' % (length, new_position)))
self._position = new_position | Advance the cursor in data buffer 'length' bytes. | asynctorndb/connection.py | advance | mayflaver/AsyncTorndb | 103 | python | def advance(self, length):
new_position = (self._position + length)
if ((new_position < 0) or (new_position > len(self._data))):
raise Exception(('Invalid advance amount (%s) for cursor. Position=%s' % (length, new_position)))
self._position = new_position | def advance(self, length):
new_position = (self._position + length)
if ((new_position < 0) or (new_position > len(self._data))):
raise Exception(('Invalid advance amount (%s) for cursor. Position=%s' % (length, new_position)))
self._position = new_position<|docstring|>Advance the cursor in data buffer 'length' bytes.<|endoftext|> |
6d9bd32a5189335699dd9614a7a57c10147c7474f818e2fe322a17e608101584 | def rewind(self, position=0):
"Set the position of the data buffer cursor to 'position'."
if ((position < 0) or (position > len(self._data))):
raise Exception(('Invalid position to rewind cursor to: %s.' % position))
self._position = position | Set the position of the data buffer cursor to 'position'. | asynctorndb/connection.py | rewind | mayflaver/AsyncTorndb | 103 | python | def rewind(self, position=0):
if ((position < 0) or (position > len(self._data))):
raise Exception(('Invalid position to rewind cursor to: %s.' % position))
self._position = position | def rewind(self, position=0):
if ((position < 0) or (position > len(self._data))):
raise Exception(('Invalid position to rewind cursor to: %s.' % position))
self._position = position<|docstring|>Set the position of the data buffer cursor to 'position'.<|endoftext|> |
361d3bd82bc5d23fdfe59ce0a99ec705b267a4094d5a303ba79273e903b33788 | def get_bytes(self, position, length=1):
"Get 'length' bytes starting at 'position'.\n\n Position is start of payload (first four packet header bytes are not\n included) starting at index '0'.\n\n No error checking is done. If requesting outside end of buffer\n an empty string (or string shorter than 'length') may be returned!\n "
return self._data[position:(position + length)] | Get 'length' bytes starting at 'position'.
Position is start of payload (first four packet header bytes are not
included) starting at index '0'.
No error checking is done. If requesting outside end of buffer
an empty string (or string shorter than 'length') may be returned! | asynctorndb/connection.py | get_bytes | mayflaver/AsyncTorndb | 103 | python | def get_bytes(self, position, length=1):
"Get 'length' bytes starting at 'position'.\n\n Position is start of payload (first four packet header bytes are not\n included) starting at index '0'.\n\n No error checking is done. If requesting outside end of buffer\n an empty string (or string shorter than 'length') may be returned!\n "
return self._data[position:(position + length)] | def get_bytes(self, position, length=1):
"Get 'length' bytes starting at 'position'.\n\n Position is start of payload (first four packet header bytes are not\n included) starting at index '0'.\n\n No error checking is done. If requesting outside end of buffer\n an empty string (or string shorter than 'length') may be returned!\n "
return self._data[position:(position + length)]<|docstring|>Get 'length' bytes starting at 'position'.
Position is start of payload (first four packet header bytes are not
included) starting at index '0'.
No error checking is done. If requesting outside end of buffer
an empty string (or string shorter than 'length') may be returned!<|endoftext|> |
2f2aa1f24bc0c43e9f29827e820d3a29b12fec396a7ab994f84efaa65ae5284a | def read_length_encoded_integer(self):
"Read a 'Length Coded Binary' number from the data buffer.\n\n Length coded numbers can be anywhere from 1 to 9 bytes depending\n on the value of the first byte.\n "
c = ord(self.read(1))
if (c == NULL_COLUMN):
return None
if (c < UNSIGNED_CHAR_COLUMN):
return c
elif (c == UNSIGNED_SHORT_COLUMN):
return unpack_uint16(self.read(UNSIGNED_SHORT_LENGTH))
elif (c == UNSIGNED_INT24_COLUMN):
return unpack_int24(self.read(UNSIGNED_INT24_LENGTH))
elif (c == UNSIGNED_INT64_COLUMN):
return unpack_int64(self.read(UNSIGNED_INT64_LENGTH)) | Read a 'Length Coded Binary' number from the data buffer.
Length coded numbers can be anywhere from 1 to 9 bytes depending
on the value of the first byte. | asynctorndb/connection.py | read_length_encoded_integer | mayflaver/AsyncTorndb | 103 | python | def read_length_encoded_integer(self):
"Read a 'Length Coded Binary' number from the data buffer.\n\n Length coded numbers can be anywhere from 1 to 9 bytes depending\n on the value of the first byte.\n "
c = ord(self.read(1))
if (c == NULL_COLUMN):
return None
if (c < UNSIGNED_CHAR_COLUMN):
return c
elif (c == UNSIGNED_SHORT_COLUMN):
return unpack_uint16(self.read(UNSIGNED_SHORT_LENGTH))
elif (c == UNSIGNED_INT24_COLUMN):
return unpack_int24(self.read(UNSIGNED_INT24_LENGTH))
elif (c == UNSIGNED_INT64_COLUMN):
return unpack_int64(self.read(UNSIGNED_INT64_LENGTH)) | def read_length_encoded_integer(self):
"Read a 'Length Coded Binary' number from the data buffer.\n\n Length coded numbers can be anywhere from 1 to 9 bytes depending\n on the value of the first byte.\n "
c = ord(self.read(1))
if (c == NULL_COLUMN):
return None
if (c < UNSIGNED_CHAR_COLUMN):
return c
elif (c == UNSIGNED_SHORT_COLUMN):
return unpack_uint16(self.read(UNSIGNED_SHORT_LENGTH))
elif (c == UNSIGNED_INT24_COLUMN):
return unpack_int24(self.read(UNSIGNED_INT24_LENGTH))
elif (c == UNSIGNED_INT64_COLUMN):
return unpack_int64(self.read(UNSIGNED_INT64_LENGTH))<|docstring|>Read a 'Length Coded Binary' number from the data buffer.
Length coded numbers can be anywhere from 1 to 9 bytes depending
on the value of the first byte.<|endoftext|> |
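The rules described above can be made concrete with a small stand-alone decoder (a hypothetical helper, shown only to illustrate the encoding; it returns the value plus the number of bytes consumed):

def decode_lenenc(data):
    c = data[0]
    if c < 0xfb:
        return c, 1                                    # 1-byte integer, value is the byte itself
    if c == 0xfc:
        return int.from_bytes(data[1:3], 'little'), 3  # 2-byte integer
    if c == 0xfd:
        return int.from_bytes(data[1:4], 'little'), 4  # 3-byte integer
    if c == 0xfe:
        return int.from_bytes(data[1:9], 'little'), 9  # 8-byte integer
    return None, 1                                     # 0xfb encodes SQL NULL

decode_lenenc(b'\xfc\xe8\x03')   # -> (1000, 3)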
eee51d0ed24e371397264aa158f517c001eeec7b8b1bc641380ab646cfedcef9 | def read_length_coded_string(self):
'Read a \'Length Coded String\' from the data buffer.\n\n A \'Length Coded String\' consists first of a length coded\n (unsigned, positive) integer represented in 1-9 bytes followed by\n that many bytes of binary data. (For example "cat" would be "3cat".)\n '
length = self.read_length_encoded_integer()
if (length is None):
return None
return self.read(length) | Read a 'Length Coded String' from the data buffer.
A 'Length Coded String' consists first of a length coded
(unsigned, positive) integer represented in 1-9 bytes followed by
that many bytes of binary data. (For example "cat" would be "3cat".) | asynctorndb/connection.py | read_length_coded_string | mayflaver/AsyncTorndb | 103 | python | def read_length_coded_string(self):
'Read a \'Length Coded String\' from the data buffer.\n\n A \'Length Coded String\' consists first of a length coded\n (unsigned, positive) integer represented in 1-9 bytes followed by\n that many bytes of binary data. (For example "cat" would be "3cat".)\n '
length = self.read_length_encoded_integer()
if (length is None):
return None
return self.read(length) | def read_length_coded_string(self):
'Read a \'Length Coded String\' from the data buffer.\n\n A \'Length Coded String\' consists first of a length coded\n (unsigned, positive) integer represented in 1-9 bytes followed by\n that many bytes of binary data. (For example "cat" would be "3cat".)\n '
length = self.read_length_encoded_integer()
if (length is None):
return None
return self.read(length)<|docstring|>Read a 'Length Coded String' from the data buffer.
A 'Length Coded String' consists first of a length coded
(unsigned, positive) integer represented in 1-9 bytes followed by
that many bytes of binary data. (For example "cat" would be "3cat".)<|endoftext|> |
ec1f56ae6fa8320c131cf5e8af6f5addc2cecc64f8a841fb08317ea866f55eb7 | def __parse_field_descriptor(self, encoding):
"Parse the 'Field Descriptor' (Metadata) packet.\n\n This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0).\n "
self.catalog = self.read_length_coded_string()
self.db = self.read_length_coded_string()
self.table_name = self.read_length_coded_string().decode(encoding)
self.org_table = self.read_length_coded_string().decode(encoding)
self.name = self.read_length_coded_string().decode(encoding)
self.org_name = self.read_length_coded_string().decode(encoding)
self.advance(1)
self.charsetnr = struct.unpack('<H', self.read(2))[0]
self.length = struct.unpack('<I', self.read(4))[0]
self.type_code = byte2int(self.read(1))
self.flags = struct.unpack('<H', self.read(2))[0]
self.scale = byte2int(self.read(1))
self.advance(2) | Parse the 'Field Descriptor' (Metadata) packet.
This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0). | asynctorndb/connection.py | __parse_field_descriptor | mayflaver/AsyncTorndb | 103 | python | def __parse_field_descriptor(self, encoding):
"Parse the 'Field Descriptor' (Metadata) packet.\n\n This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0).\n "
self.catalog = self.read_length_coded_string()
self.db = self.read_length_coded_string()
self.table_name = self.read_length_coded_string().decode(encoding)
self.org_table = self.read_length_coded_string().decode(encoding)
self.name = self.read_length_coded_string().decode(encoding)
self.org_name = self.read_length_coded_string().decode(encoding)
self.advance(1)
self.charsetnr = struct.unpack('<H', self.read(2))[0]
self.length = struct.unpack('<I', self.read(4))[0]
self.type_code = byte2int(self.read(1))
self.flags = struct.unpack('<H', self.read(2))[0]
self.scale = byte2int(self.read(1))
self.advance(2) | def __parse_field_descriptor(self, encoding):
"Parse the 'Field Descriptor' (Metadata) packet.\n\n This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0).\n "
self.catalog = self.read_length_coded_string()
self.db = self.read_length_coded_string()
self.table_name = self.read_length_coded_string().decode(encoding)
self.org_table = self.read_length_coded_string().decode(encoding)
self.name = self.read_length_coded_string().decode(encoding)
self.org_name = self.read_length_coded_string().decode(encoding)
self.advance(1)
self.charsetnr = struct.unpack('<H', self.read(2))[0]
self.length = struct.unpack('<I', self.read(4))[0]
self.type_code = byte2int(self.read(1))
self.flags = struct.unpack('<H', self.read(2))[0]
self.scale = byte2int(self.read(1))
self.advance(2)<|docstring|>Parse the 'Field Descriptor' (Metadata) packet.
This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0).<|endoftext|> |
daf1cc2483838de93573d588ee4997789473d8ec3950b21ef65429115b30394a | def description(self):
'Provides a 7-item tuple compatible with the Python PEP249 DB Spec.'
desc = []
desc.append(self.name)
desc.append(self.type_code)
desc.append(None)
desc.append(self.get_column_length())
desc.append(self.get_column_length())
desc.append(self.scale)
if ((self.flags % 2) == 0):
desc.append(1)
else:
desc.append(0)
return tuple(desc) | Provides a 7-item tuple compatible with the Python PEP249 DB Spec. | asynctorndb/connection.py | description | mayflaver/AsyncTorndb | 103 | python | def description(self):
desc = []
desc.append(self.name)
desc.append(self.type_code)
desc.append(None)
desc.append(self.get_column_length())
desc.append(self.get_column_length())
desc.append(self.scale)
if ((self.flags % 2) == 0):
desc.append(1)
else:
desc.append(0)
return tuple(desc) | def description(self):
desc = []
desc.append(self.name)
desc.append(self.type_code)
desc.append(None)
desc.append(self.get_column_length())
desc.append(self.get_column_length())
desc.append(self.scale)
if ((self.flags % 2) == 0):
desc.append(1)
else:
desc.append(0)
return tuple(desc)<|docstring|>Provides a 7-item tuple compatible with the Python PEP249 DB Spec.<|endoftext|> |