body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified |
---|---|---|---|---|---|---|---|---|---|
f9154c99a4b0e7de78de902ef45e8de5e743c6243d8f0f113f3bb9432e9f2e4d
|
def defringeflatAll(data_folder_path, wbin=10, start_col=10, end_col=980, diagnostic=True, movefiles=False):
'\n\tPerform the defringe flat function and save the \n\tdefringed flat files under the data folder and \n\tmove the raw flat files under another folder \n\tcalled "defringeflat" with optional \n\tdiagnostic plots.\n\n\tParameters\n\t----------\n\tdata_folder_path: \tstr\n\t\t\t\t\t\tdata folder for processing defringe flat\n\n\tOptional Parameters\n\t-------------------\n\twbin \t\t\t:\tint\n\t\t\t\t\t\tthe bin width used to compute each \n\t\t\t\t\t\tenhanced row\n\t\t\t\t\t\tDefault is 10\n\n\tstart_col \t\t: \tint\n\t\t\t\t\t\tstarting column number for the\n\t\t\t\t\t\twavelet analysis\n\t\t\t\t\t\tDefault is 10\n\n\tend_col \t\t: \tint\n\t\t\t\t\t\tending column number for the\n\t\t\t\t\t\twavelet analysis\n\t\t\t\t\t\tDefault is 980\n\n\tdiagnostic \t\t: \tboolean\n\t\t\t\t\t\toutput the diagnostic plots\n\t\t\t\t\t\tThis option can significantly increase \n\t\t\t\t\t\tcomputation time and may cause issues\n\t\t\t\t\t\tin plotting.\n\t\t\t\t\t\tDefault is True\n\n\tReturns\n\t-------\n\tdefringe file \t: \tfits\n\t\t\t\t\t\tdefringed flat file\n\n\tExamples\n\t--------\n\t>>> import nirspec_fmp as nsp\n\t>>> nsp.defringeflatAll(data_folder_path, diagnostic=False)\n\n\t'
originalpath = os.getcwd()
save_to_path = (data_folder_path + '/defringeflat/')
if (not os.path.exists(save_to_path)):
os.makedirs(save_to_path)
files = glob.glob1(data_folder_path, '*.fits')
for filename in files:
file_path = (data_folder_path + filename)
data = fits.open(file_path, ignore_missing_end=True)
date = Time(data[0].header['DATE-OBS'], scale='utc')
jd = date.jd
if (jd >= 2458401.5):
image_type = 'IMTYPE'
if (end_col is None):
end_col = 2000
else:
image_type = 'IMAGETYP'
if ('flat' in str(data[0].header['COMMENT']).lower()) or ('flatlamp' in str(data[0].header[image_type]).lower()):
if 'flatlampoff' in str(data[0].header[image_type]).lower():
continue
if 'flat lamp off' in str(data[0].header['COMMENT']).lower():
continue
if 'dark for flat' in str(data[0].header['COMMENT']).lower():
continue
defringeflat_file = defringeflat(file_path, wbin=wbin, start_col=start_col, end_col=end_col, diagnostic=diagnostic, save_to_path=save_to_path, filename=filename)
save_name = ((save_to_path + filename.split('.')[0]) + '_defringe.fits')
if movefiles:
save_name = (((data_folder_path + '/') + filename.split('.')[0]) + '_defringe.fits')
shutil.move(((data_folder_path + '/') + filename), (save_to_path + filename))
defringeflat_file.writeto(save_name, overwrite=True, output_verify='ignore')
return None
|
Perform the defringe flat function and save the
defringed flat files under the data folder and
move the raw flat files under another folder
called "defringeflat" with optional
diagnostic plots.
Parameters
----------
data_folder_path: str
data folder for processing defringe flat
Optional Parameters
-------------------
wbin : int
the bin width used to compute each
enhanced row
Default is 10
start_col : int
starting column number for the
wavelet analysis
Default is 10
end_col : int
ending column number for the
wavelet analysis
Default is 980
diagnostic : boolean
output the diagnostic plots
This option can significantly increase
computation time and may cause issues
in plotting.
Default is True
Returns
-------
defringe file : fits
defringed flat file
Examples
--------
>>> import nirspec_fmp as nsp
>>> nsp.defringeflatAll(data_folder_path, diagnostic=False)
|
smart/utils/defringeflat.py
|
defringeflatAll
|
chihchunhsu/smart
| 10 |
python
|
def defringeflatAll(data_folder_path, wbin=10, start_col=10, end_col=980, diagnostic=True, movefiles=False):
'\n\tPerform the defringe flat function and save the \n\tdefringed flat files under the data folder and \n\tmove the raw flat files under another folder \n\tcalled "defringeflat" with optional \n\tdiagnostic plots.\n\n\tParameters\n\t----------\n\tdata_folder_path: \tstr\n\t\t\t\t\t\tdata folder for processing defringe flat\n\n\tOptional Parameters\n\t-------------------\n\twbin \t\t\t:\tint\n\t\t\t\t\t\tthe bin width used to compute each \n\t\t\t\t\t\tenhanced row\n\t\t\t\t\t\tDefault is 10\n\n\tstart_col \t\t: \tint\n\t\t\t\t\t\tstarting column number for the\n\t\t\t\t\t\twavelet analysis\n\t\t\t\t\t\tDefault is 10\n\n\tend_col \t\t: \tint\n\t\t\t\t\t\tending column number for the\n\t\t\t\t\t\twavelet analysis\n\t\t\t\t\t\tDefault is 980\n\n\tdiagnostic \t\t: \tboolean\n\t\t\t\t\t\toutput the diagnostic plots\n\t\t\t\t\t\tThis option can significantly increase \n\t\t\t\t\t\tcomputation time and may cause issues\n\t\t\t\t\t\tin plotting.\n\t\t\t\t\t\tDefault is True\n\n\tReturns\n\t-------\n\tdefringe file \t: \tfits\n\t\t\t\t\t\tdefringed flat file\n\n\tExamples\n\t--------\n\t>>> import nirspec_fmp as nsp\n\t>>> nsp.defringeflatAll(data_folder_path, diagnostic=False)\n\n\t'
originalpath = os.getcwd()
save_to_path = (data_folder_path + '/defringeflat/')
if (not os.path.exists(save_to_path)):
os.makedirs(save_to_path)
files = glob.glob1(data_folder_path, '*.fits')
for filename in files:
file_path = (data_folder_path + filename)
data = fits.open(file_path, ignore_missing_end=True)
date = Time(data[0].header['DATE-OBS'], scale='utc')
jd = date.jd
if (jd >= 2458401.5):
image_type = 'IMTYPE'
if (end_col is None):
end_col = 2000
else:
image_type = 'IMAGETYP'
if ('flat' in str(data[0].header['COMMENT']).lower()) or ('flatlamp' in str(data[0].header[image_type]).lower()):
if 'flatlampoff' in str(data[0].header[image_type]).lower():
continue
if 'flat lamp off' in str(data[0].header['COMMENT']).lower():
continue
if 'dark for flat' in str(data[0].header['COMMENT']).lower():
continue
defringeflat_file = defringeflat(file_path, wbin=wbin, start_col=start_col, end_col=end_col, diagnostic=diagnostic, save_to_path=save_to_path, filename=filename)
save_name = ((save_to_path + filename.split('.')[0]) + '_defringe.fits')
if movefiles:
save_name = (((data_folder_path + '/') + filename.split('.')[0]) + '_defringe.fits')
shutil.move(((data_folder_path + '/') + filename), (save_to_path + filename))
defringeflat_file.writeto(save_name, overwrite=True, output_verify='ignore')
return None
|
def defringeflatAll(data_folder_path, wbin=10, start_col=10, end_col=980, diagnostic=True, movefiles=False):
'\n\tPerform the defringe flat function and save the \n\tdefringed flat files under the data folder and \n\tmove the raw flat files under another folder \n\tcalled "defringeflat" with optional \n\tdiagnostic plots.\n\n\tParameters\n\t----------\n\tdata_folder_path: \tstr\n\t\t\t\t\t\tdata folder for processing defringe flat\n\n\tOptional Parameters\n\t-------------------\n\twbin \t\t\t:\tint\n\t\t\t\t\t\tthe bin width used to compute each \n\t\t\t\t\t\tenhanced row\n\t\t\t\t\t\tDefault is 10\n\n\tstart_col \t\t: \tint\n\t\t\t\t\t\tstarting column number for the\n\t\t\t\t\t\twavelet analysis\n\t\t\t\t\t\tDefault is 10\n\n\tend_col \t\t: \tint\n\t\t\t\t\t\tending column number for the\n\t\t\t\t\t\twavelet analysis\n\t\t\t\t\t\tDefault is 980\n\n\tdiagnostic \t\t: \tboolean\n\t\t\t\t\t\toutput the diagnostic plots\n\t\t\t\t\t\tThis option can significantly increase \n\t\t\t\t\t\tcomputation time and may cause issues\n\t\t\t\t\t\tin plotting.\n\t\t\t\t\t\tDefault is True\n\n\tReturns\n\t-------\n\tdefringe file \t: \tfits\n\t\t\t\t\t\tdefringed flat file\n\n\tExamples\n\t--------\n\t>>> import nirspec_fmp as nsp\n\t>>> nsp.defringeflatAll(data_folder_path, diagnostic=False)\n\n\t'
originalpath = os.getcwd()
save_to_path = (data_folder_path + '/defringeflat/')
if (not os.path.exists(save_to_path)):
os.makedirs(save_to_path)
files = glob.glob1(data_folder_path, '*.fits')
for filename in files:
file_path = (data_folder_path + filename)
data = fits.open(file_path, ignore_missing_end=True)
date = Time(data[0].header['DATE-OBS'], scale='utc')
jd = date.jd
if (jd >= 2458401.5):
image_type = 'IMTYPE'
if (end_col is None):
end_col = 2000
else:
image_type = 'IMAGETYP'
if ('flat' in str(data[0].header['COMMENT']).lower()) or ('flatlamp' in str(data[0].header[image_type]).lower()):
if 'flatlampoff' in str(data[0].header[image_type]).lower():
continue
if 'flat lamp off' in str(data[0].header['COMMENT']).lower():
continue
if 'dark for flat' in str(data[0].header['COMMENT']).lower():
continue
defringeflat_file = defringeflat(file_path, wbin=wbin, start_col=start_col, end_col=end_col, diagnostic=diagnostic, save_to_path=save_to_path, filename=filename)
save_name = ((save_to_path + filename.split('.')[0]) + '_defringe.fits')
if movefiles:
save_name = (((data_folder_path + '/') + filename.split('.')[0]) + '_defringe.fits')
shutil.move(((data_folder_path + '/') + filename), (save_to_path + filename))
defringeflat_file.writeto(save_name, overwrite=True, output_verify='ignore')
return None<|docstring|>Perform the defringe flat function and save the
defringed flat files under the data folder and
move the raw flat files under another folder
called "defringeflat" with optional
diagnostic plots.
Parameters
----------
data_folder_path: str
data folder for processing defringe flat
Optional Parameters
-------------------
wbin : int
the bin width used to compute each
enhanced row
Default is 10
start_col : int
starting column number for the
wavelet analysis
Default is 10
end_col : int
ending column number for the
wavelet analysis
Default is 980
diagnostic : boolean
output the diagnostic plots
This option can significantly increase
computation time and may cause issues
in plotting.
Default is True
Returns
-------
defringe file : fits
defringed flat file
Examples
--------
>>> import nirspec_fmp as nsp
>>> nsp.defringeflatAll(data_folder_path, diagnostic=False)<|endoftext|>
|
c3afcfc12edbd9b05a73174104b777f9e0a993a2fa9a0d342e6217a3091dc7e0
|
def evaluate_faults(faults):
'\n Enables faults to be given as a single fault, or a list of faults,\n or a function to generate a fault or list of faults,\n to the instantiation of the fallible object.\n\n :param faults:\n :return:\n '
from noisify.faults import Fault
if isinstance(faults, Fault):
return [faults]
try:
return evaluate_faults(faults())
except TypeError:
return [i for i in faults if isinstance(i, Fault)]
|
Enables faults to be given as a single fault, or a list of faults,
or a function to generate a fault or list of faults,
to the instantiation of the fallible object.
:param faults:
:return:
|
noisify/helpers/fallible.py
|
evaluate_faults
|
dstl/Noisify
| 11 |
python
|
def evaluate_faults(faults):
'\n Enables faults to be given as a single fault, or a list of faults,\n or a function to generate a fault or list of faults,\n to the instantiation of the fallible object.\n\n :param faults:\n :return:\n '
from noisify.faults import Fault
if isinstance(faults, Fault):
return [faults]
try:
return evaluate_faults(faults())
except TypeError:
return [i for i in faults if isinstance(i, Fault)]
|
def evaluate_faults(faults):
'\n Enables faults to be given as a single fault, or a list of faults,\n or a function to generate a fault or list of faults,\n to the instantiation of the fallible object.\n\n :param faults:\n :return:\n '
from noisify.faults import Fault
if isinstance(faults, Fault):
return [faults]
try:
return evaluate_faults(faults())
except TypeError:
return [i for i in faults if isinstance(i, Fault)]<|docstring|>Enables faults to be given as a single fault, or a list of faults,
or a function to generate a fault or list of faults,
to the instantiation of the fallible object.
:param faults:
:return:<|endoftext|>
|
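A short, self-contained sketch of the three input shapes `evaluate_faults` normalizes; `DummyFault` is a hypothetical stand-in for `noisify.faults.Fault`, and the function is re-declared here only so the snippet runs on its own.

```python
# Hypothetical stand-in for noisify.faults.Fault, used only for this sketch.
class DummyFault:
    pass

def evaluate_faults(faults, fault_type=DummyFault):
    # Same normalization logic as above, parameterized for the sketch.
    if isinstance(faults, fault_type):
        return [faults]
    try:
        return evaluate_faults(faults(), fault_type)
    except TypeError:
        # Not callable: treat as an iterable and keep only real faults.
        return [i for i in faults if isinstance(i, fault_type)]

print(evaluate_faults(DummyFault()))              # single fault -> [fault]
print(evaluate_faults([DummyFault(), object()]))  # list -> non-faults dropped
print(evaluate_faults(lambda: DummyFault()))      # factory -> called, then wrapped
```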
9786b1ebce7ec6ee9ccf3c06b69b166c77faae2e90b0fa62b95ac2d96eed67e1
|
def add_fault(self, fault):
'\n Add a fault to the fallible object\n\n :param fault:\n :return:\n '
self.faults.append(fault)
return self
|
Add a fault to the fallible object
:param fault:
:return:
|
noisify/helpers/fallible.py
|
add_fault
|
dstl/Noisify
| 11 |
python
|
def add_fault(self, fault):
'\n Add a fault to the fallible object\n\n :param fault:\n :return:\n '
self.faults.append(fault)
return self
|
def add_fault(self, fault):
'\n Add a fault to the fallible object\n\n :param fault:\n :return:\n '
self.faults.append(fault)
return self<|docstring|>Add a fault to the fallible object
:param fault:
:return:<|endoftext|>
|
e64363ecc4f877824cdfef8d368aabdbbbd31ab09dab8292a3e375a0dd4b6d24
|
def apply_all_faults(self, incompletely_flawed_object):
"\n    Runs through the fallible object's faults and applies them to an object, returns\n    activated faults as well as the finished object\n\n    :param incompletely_flawed_object:\n    :return:\n    "
applied_faults = []
for fault in self.faults:
(applied_fault, result) = fault.apply(incompletely_flawed_object)
if applied_fault:
incompletely_flawed_object = result
applied_faults.append(applied_fault)
return (applied_faults, incompletely_flawed_object)
|
Runs through the fallible object's faults and applies them to an object, returns
activated faults as well as the finished object
:param incompletely_flawed_object:
:return:
|
noisify/helpers/fallible.py
|
apply_all_faults
|
dstl/Noisify
| 11 |
python
|
def apply_all_faults(self, incompletely_flawed_object):
"\n    Runs through the fallible object's faults and applies them to an object, returns\n    activated faults as well as the finished object\n\n    :param incompletely_flawed_object:\n    :return:\n    "
applied_faults = []
for fault in self.faults:
(applied_fault, result) = fault.apply(incompletely_flawed_object)
if applied_fault:
incompletely_flawed_object = result
applied_faults.append(applied_fault)
return (applied_faults, incompletely_flawed_object)
|
def apply_all_faults(self, incompletely_flawed_object):
"\n    Runs through the fallible object's faults and applies them to an object, returns\n    activated faults as well as the finished object\n\n    :param incompletely_flawed_object:\n    :return:\n    "
applied_faults = []
for fault in self.faults:
(applied_fault, result) = fault.apply(incompletely_flawed_object)
if applied_fault:
incompletely_flawed_object = result
applied_faults.append(applied_fault)
return (applied_faults, incompletely_flawed_object)<|docstring|>Runs through the fallible object's faults and applies them to an object, returns
activated faults as well as the finished object
:param incompletely_flawed_object:
:return:<|endoftext|>
|
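The apply/accumulate contract is easiest to see with toy faults. The classes below are illustrative stand-ins, not part of noisify; each `apply()` returns the `(applied_fault, result)` pair the loop above expects.

```python
class UppercaseFault:
    def apply(self, obj):
        # Fired: report itself and the modified object.
        return self, obj.upper()

class NeverFires:
    def apply(self, obj):
        # Not fired: report None and leave the object alone.
        return None, obj

class Fallible:
    def __init__(self, faults):
        self.faults = faults

    def apply_all_faults(self, obj):
        applied = []
        for fault in self.faults:
            applied_fault, result = fault.apply(obj)
            if applied_fault:
                obj = result
                applied.append(applied_fault)
        return applied, obj

applied, out = Fallible([UppercaseFault(), NeverFires()]).apply_all_faults("abc")
print(applied, out)  # -> [<UppercaseFault ...>] ABC
```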
928034029296f92d667be97ec6dc0f2fcd9c3c6958d8fb0c6a90c1e1d4ee57c3
|
def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
'\n _bitmap: single map with shape (1, H, W),\n whose values are binarized as {0, 1}\n '
bitmap = _bitmap
(height, width) = bitmap.shape
outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
if (len(outs) == 3):
(img, contours, _) = (outs[0], outs[1], outs[2])
elif (len(outs) == 2):
(contours, _) = (outs[0], outs[1])
num_contours = min(len(contours), self.max_candidates)
boxes = []
scores = []
for index in range(num_contours):
contour = contours[index]
(points, sside) = self.get_mini_boxes(contour)
if (sside < self.min_size):
continue
points = np.array(points)
if (self.score_mode == 'fast'):
score = self.box_score_fast(pred, points.reshape((- 1), 2))
else:
score = self.box_score_slow(pred, contour)
if (self.box_thresh > score):
continue
box = self.unclip(points).reshape((- 1), 1, 2)
(box, sside) = self.get_mini_boxes(box)
if (sside < (self.min_size + 2)):
continue
box = np.array(box)
box[:, 0] = np.clip(np.round((box[:, 0] / width) * dest_width), 0, dest_width)
box[:, 1] = np.clip(np.round((box[:, 1] / height) * dest_height), 0, dest_height)
boxes.append(box.astype(np.int16))
scores.append(score)
return (np.array(boxes, dtype=np.int16), scores)
|
_bitmap: single map with shape (1, H, W),
whose values are binarized as {0, 1}
|
ppocr/postprocess/db_postprocess.py
|
boxes_from_bitmap
|
ChenYu-K/PaddleOCR
| 20,401 |
python
|
def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
'\n _bitmap: single map with shape (1, H, W),\n whose values are binarized as {0, 1}\n '
bitmap = _bitmap
(height, width) = bitmap.shape
outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
if (len(outs) == 3):
(img, contours, _) = (outs[0], outs[1], outs[2])
elif (len(outs) == 2):
(contours, _) = (outs[0], outs[1])
num_contours = min(len(contours), self.max_candidates)
boxes = []
scores = []
for index in range(num_contours):
contour = contours[index]
(points, sside) = self.get_mini_boxes(contour)
if (sside < self.min_size):
continue
points = np.array(points)
if (self.score_mode == 'fast'):
score = self.box_score_fast(pred, points.reshape((- 1), 2))
else:
score = self.box_score_slow(pred, contour)
if (self.box_thresh > score):
continue
box = self.unclip(points).reshape((- 1), 1, 2)
(box, sside) = self.get_mini_boxes(box)
if (sside < (self.min_size + 2)):
continue
box = np.array(box)
box[:, 0] = np.clip(np.round((box[:, 0] / width) * dest_width), 0, dest_width)
box[:, 1] = np.clip(np.round((box[:, 1] / height) * dest_height), 0, dest_height)
boxes.append(box.astype(np.int16))
scores.append(score)
return (np.array(boxes, dtype=np.int16), scores)
|
def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
'\n _bitmap: single map with shape (1, H, W),\n whose values are binarized as {0, 1}\n '
bitmap = _bitmap
(height, width) = bitmap.shape
outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
if (len(outs) == 3):
(img, contours, _) = (outs[0], outs[1], outs[2])
elif (len(outs) == 2):
(contours, _) = (outs[0], outs[1])
num_contours = min(len(contours), self.max_candidates)
boxes = []
scores = []
for index in range(num_contours):
contour = contours[index]
(points, sside) = self.get_mini_boxes(contour)
if (sside < self.min_size):
continue
points = np.array(points)
if (self.score_mode == 'fast'):
score = self.box_score_fast(pred, points.reshape((- 1), 2))
else:
score = self.box_score_slow(pred, contour)
if (self.box_thresh > score):
continue
box = self.unclip(points).reshape((- 1), 1, 2)
(box, sside) = self.get_mini_boxes(box)
if (sside < (self.min_size + 2)):
continue
box = np.array(box)
box[:, 0] = np.clip(np.round((box[:, 0] / width) * dest_width), 0, dest_width)
box[:, 1] = np.clip(np.round((box[:, 1] / height) * dest_height), 0, dest_height)
boxes.append(box.astype(np.int16))
scores.append(score)
return (np.array(boxes, dtype=np.int16), scores)<|docstring|>_bitmap: single map with shape (1, H, W),
whose values are binarized as {0, 1}<|endoftext|>
|
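One detail worth calling out is the `len(outs)` branch: `cv2.findContours` returned `(image, contours, hierarchy)` in OpenCV 3.x but `(contours, hierarchy)` in 4.x. A minimal runnable sketch of that shim on a toy binarized map:

```python
import cv2
import numpy as np

# Toy binarized probability map with one square region.
bitmap = np.zeros((32, 32), dtype=np.float32)
bitmap[8:24, 8:24] = 1.0

outs = cv2.findContours((bitmap * 255).astype(np.uint8),
                        cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# OpenCV 3.x returns 3 values, OpenCV 4.x returns 2.
contours = outs[1] if len(outs) == 3 else outs[0]
print(len(contours))  # -> 1
```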
9c5a838319783136c5ec752e8cbb4baf7bbccfa03876d4144aa1d732d1bbdb6a
|
def box_score_fast(self, bitmap, _box):
'\n box_score_fast: use bbox mean score as the mean score\n '
(h, w) = bitmap.shape[:2]
box = _box.copy()
xmin = np.clip(np.floor(box[:, 0].min()).astype(int), 0, w - 1)
xmax = np.clip(np.ceil(box[:, 0].max()).astype(int), 0, w - 1)
ymin = np.clip(np.floor(box[:, 1].min()).astype(int), 0, h - 1)
ymax = np.clip(np.ceil(box[:, 1].max()).astype(int), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
box[:, 0] = box[:, 0] - xmin
box[:, 1] = box[:, 1] - ymin
cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
|
box_score_fast: use bbox mean score as the mean score
|
ppocr/postprocess/db_postprocess.py
|
box_score_fast
|
ChenYu-K/PaddleOCR
| 20,401 |
python
|
def box_score_fast(self, bitmap, _box):
'\n \n '
(h, w) = bitmap.shape[:2]
box = _box.copy()
xmin = np.clip(np.floor(box[:, 0].min()).astype(int), 0, w - 1)
xmax = np.clip(np.ceil(box[:, 0].max()).astype(int), 0, w - 1)
ymin = np.clip(np.floor(box[:, 1].min()).astype(int), 0, h - 1)
ymax = np.clip(np.ceil(box[:, 1].max()).astype(int), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
box[:, 0] = box[:, 0] - xmin
box[:, 1] = box[:, 1] - ymin
cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
|
def box_score_fast(self, bitmap, _box):
'\n \n '
(h, w) = bitmap.shape[:2]
box = _box.copy()
xmin = np.clip(np.floor(box[:, 0].min()).astype(int), 0, w - 1)
xmax = np.clip(np.ceil(box[:, 0].max()).astype(int), 0, w - 1)
ymin = np.clip(np.floor(box[:, 1].min()).astype(int), 0, h - 1)
ymax = np.clip(np.ceil(box[:, 1].max()).astype(int), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
box[:, 0] = box[:, 0] - xmin
box[:, 1] = box[:, 1] - ymin
cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]<|docstring|>box_score_fast: use bbox mean score as the mean score<|endoftext|>
|
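A standalone, runnable version of the "fast" scoring idea (mean of the probability map inside the box polygon, restricted to its bounding rectangle), with toy inputs; this mirrors the method above but drops `self` so it can be executed directly:

```python
import cv2
import numpy as np

def box_score_fast(bitmap, box):
    h, w = bitmap.shape[:2]
    box = box.copy()
    xmin = np.clip(np.floor(box[:, 0].min()).astype(int), 0, w - 1)
    xmax = np.clip(np.ceil(box[:, 0].max()).astype(int), 0, w - 1)
    ymin = np.clip(np.floor(box[:, 1].min()).astype(int), 0, h - 1)
    ymax = np.clip(np.ceil(box[:, 1].max()).astype(int), 0, h - 1)
    # Rasterize the polygon into a mask over its bounding rectangle only.
    mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
    box[:, 0] -= xmin
    box[:, 1] -= ymin
    cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
    # Mean of the probability map under the mask.
    return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

pred = np.full((10, 10), 0.9, dtype=np.float32)
square = np.array([[2, 2], [7, 2], [7, 7], [2, 7]], dtype=np.float32)
print(box_score_fast(pred, square))  # ~0.9
```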
bf512c8971ecf908c84fc1ff8d00e454dc6464937d503e46cdca7f5ef6dd8ebf
|
def box_score_slow(self, bitmap, contour):
'\n        box_score_slow: use polygon mean score as the mean score\n        '
(h, w) = bitmap.shape[:2]
contour = contour.copy()
contour = np.reshape(contour, (-1, 2))
xmin = np.clip(np.min(contour[:, 0]), 0, w - 1)
xmax = np.clip(np.max(contour[:, 0]), 0, w - 1)
ymin = np.clip(np.min(contour[:, 1]), 0, h - 1)
ymax = np.clip(np.max(contour[:, 1]), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
contour[:, 0] = contour[:, 0] - xmin
contour[:, 1] = contour[:, 1] - ymin
cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
|
box_score_slow: use polygon mean score as the mean score
|
ppocr/postprocess/db_postprocess.py
|
box_score_slow
|
ChenYu-K/PaddleOCR
| 20,401 |
python
|
def box_score_slow(self, bitmap, contour):
'\n \n '
(h, w) = bitmap.shape[:2]
contour = contour.copy()
contour = np.reshape(contour, (-1, 2))
xmin = np.clip(np.min(contour[:, 0]), 0, w - 1)
xmax = np.clip(np.max(contour[:, 0]), 0, w - 1)
ymin = np.clip(np.min(contour[:, 1]), 0, h - 1)
ymax = np.clip(np.max(contour[:, 1]), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
contour[:, 0] = contour[:, 0] - xmin
contour[:, 1] = contour[:, 1] - ymin
cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
|
def box_score_slow(self, bitmap, contour):
'\n \n '
(h, w) = bitmap.shape[:2]
contour = contour.copy()
contour = np.reshape(contour, (-1, 2))
xmin = np.clip(np.min(contour[:, 0]), 0, w - 1)
xmax = np.clip(np.max(contour[:, 0]), 0, w - 1)
ymin = np.clip(np.min(contour[:, 1]), 0, h - 1)
ymax = np.clip(np.max(contour[:, 1]), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
contour[:, 0] = contour[:, 0] - xmin
contour[:, 1] = contour[:, 1] - ymin
cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]<|docstring|>box_score_slow: use polygon mean score as the mean score<|endoftext|>
|
af608dcff506bfd6e08cb26f8c33455ebe334325d2f75d81c67496d2e5adbb90
|
def constraint(self, const):
'Add a new constraint into the model.'
assert (const.optype is not None), 'You must provide the RHS of constraint'
const.fill_buffers(self.colbuff, self.rowbuff)
ret = lib.add_constraintex(self.lp, len(const.vars), cast(self.rowbuff, c_double_p), cast(self.colbuff, c_int_p), const.optype, const.rhs)
assert (ret == 1), "Can't add constraint into model"
|
Add a new constraint into the model.
|
home/scripts/memory/lpsolve.py
|
constraint
|
ParksProjets/Mips-Applications
| 1 |
python
|
def constraint(self, const):
assert (const.optype is not None), 'You must provide the RHS of constraint'
const.fill_buffers(self.colbuff, self.rowbuff)
ret = lib.add_constraintex(self.lp, len(const.vars), cast(self.rowbuff, c_double_p), cast(self.colbuff, c_int_p), const.optype, const.rhs)
assert (ret == 1), "Can't add constraint into model"
|
def constraint(self, const):
assert (const.optype is not None), 'You must provide the RHS of constraint'
const.fill_buffers(self.colbuff, self.rowbuff)
ret = lib.add_constraintex(self.lp, len(const.vars), cast(self.rowbuff, c_double_p), cast(self.colbuff, c_int_p), const.optype, const.rhs)
assert (ret == 1), "Can't add constraint into model"<|docstring|>Add a new constraint into the model.<|endoftext|>
|
d88d9ace97d9a909b61ecafbba3e5de8707474bb8d710f3b6868df1ad9c55f0c
|
def objective(self, const):
'Set the objective function.'
lib.set_add_rowmode(self.lp, 0)
const.fill_buffers(self.colbuff, self.rowbuff)
ret = lib.set_obj_fnex(self.lp, len(const.vars), cast(self.rowbuff, c_double_p), cast(self.colbuff, c_int_p))
assert (ret == 1), "Can't set objective function of model"
|
Set the objective function.
|
home/scripts/memory/lpsolve.py
|
objective
|
ParksProjets/Mips-Applications
| 1 |
python
|
def objective(self, const):
lib.set_add_rowmode(self.lp, 0)
const.fill_buffers(self.colbuff, self.rowbuff)
ret = lib.set_obj_fnex(self.lp, len(const.vars), cast(self.rowbuff, c_double_p), cast(self.colbuff, c_int_p))
assert (ret == 1), "Can't set objective function of model"
|
def objective(self, const):
lib.set_add_rowmode(self.lp, 0)
const.fill_buffers(self.colbuff, self.rowbuff)
ret = lib.set_obj_fnex(self.lp, len(const.vars), cast(self.rowbuff, c_double_p), cast(self.colbuff, c_int_p))
assert (ret == 1), "Can't set objective function of model"<|docstring|>Set the objective function.<|endoftext|>
|
061b33586f353d42fae2440de9cf1ed635fa11f69a273a9a14e5216660068c4f
|
def update_variables(self):
'Update the variable values.'
ret = lib.get_variables(self.lp, cast(self.rowbuff, c_double_p))
assert (ret == 1), "Can't get variable values"
for (i, var) in enumerate(self.vars):
var.value = self.rowbuff[i]
|
Update the variable values.
|
home/scripts/memory/lpsolve.py
|
update_variables
|
ParksProjets/Mips-Applications
| 1 |
python
|
def update_variables(self):
ret = lib.get_variables(self.lp, cast(self.rowbuff, c_double_p))
assert (ret == 1), "Can't get variable values"
for (i, var) in enumerate(self.vars):
var.value = self.rowbuff[i]
|
def update_variables(self):
ret = lib.get_variables(self.lp, cast(self.rowbuff, c_double_p))
assert (ret == 1), "Can't get variable values"
for (i, var) in enumerate(self.vars):
var.value = self.rowbuff[i]<|docstring|>Update the variable values.<|endoftext|>
|
d026092e795efbd2e63b6395ca9cdcf9905c70b53de1d662ce6273f4f339f69e
|
def solve(self):
'Solve the model.'
lib.set_maxim(self.lp)
if self.debug:
lib.write_lp(self.lp, b'debug-model.lp')
else:
lib.set_verbose(self.lp, 3)
ret = lib.solve(self.lp)
if ((ret == 0) or (ret == 1)):
self.update_variables()
return ret
|
Solve the model.
|
home/scripts/memory/lpsolve.py
|
solve
|
ParksProjets/Mips-Applications
| 1 |
python
|
def solve(self):
lib.set_maxim(self.lp)
if self.debug:
lib.write_lp(self.lp, b'debug-model.lp')
else:
lib.set_verbose(self.lp, 3)
ret = lib.solve(self.lp)
if ((ret == 0) or (ret == 1)):
self.update_variables()
return ret
|
def solve(self):
lib.set_maxim(self.lp)
if self.debug:
lib.write_lp(self.lp, b'debug-model.lp')
else:
lib.set_verbose(self.lp, 3)
ret = lib.solve(self.lp)
if ((ret == 0) or (ret == 1)):
self.update_variables()
return ret<|docstring|>Solve the model.<|endoftext|>
|
eb8cfe2dbf204e9f16a24e0f0dc78a66efb5ba0dde4ed2239a8c2d327cd84235
|
def retype(self, vtype):
'Change the type of the variable'
if ('bin' in (self.type, vtype)):
lib.set_binary(self.lp.lp, self.index, (vtype == 'bin'))
elif ('int' in (self.type, vtype)):
lib.set_int(self.lp.lp, self.index, (vtype == 'int'))
|
Change the type of the variable
|
home/scripts/memory/lpsolve.py
|
retype
|
ParksProjets/Mips-Applications
| 1 |
python
|
def retype(self, vtype):
if ('bin' in (self.type, vtype)):
lib.set_binary(self.lp.lp, self.index, (vtype == 'bin'))
elif ('int' in (self.type, vtype)):
lib.set_int(self.lp.lp, self.index, (vtype == 'int'))
|
def retype(self, vtype):
if ('bin' in (self.type, vtype)):
lib.set_binary(self.lp.lp, self.index, (vtype == 'bin'))
elif ('int' in (self.type, vtype)):
lib.set_int(self.lp.lp, self.index, (vtype == 'int'))<|docstring|>Change the type of the variable<|endoftext|>
|
c64945829c7d1fd65bbbbf2de3d6234118b9a4cdc708690aefde2517fb415902
|
def fill_buffers(self, colno, row):
'Fill colno and row buffers for calling LpSolve.'
for (i, (num, var)) in enumerate(zip(self.numbers, self.vars)):
colno[i] = var.index
row[i] = num
|
Fill colno and row buffers for calling LpSolve.
|
home/scripts/memory/lpsolve.py
|
fill_buffers
|
ParksProjets/Mips-Applications
| 1 |
python
|
def fill_buffers(self, colno, row):
for (i, (num, var)) in enumerate(zip(self.numbers, self.vars)):
colno[i] = var.index
row[i] = num
|
def fill_buffers(self, colno, row):
for (i, (num, var)) in enumerate(zip(self.numbers, self.vars)):
colno[i] = var.index
row[i] = num<|docstring|>Fill colno and row buffers for calling LpSolve.<|endoftext|>
|
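These methods are thin ctypes wrappers over the lp_solve 5.5 C API; `add_constraintex`, `set_obj_fnex`, `set_add_rowmode`, `set_maxim`, `solve`, and `get_variables` are the same entry points the wrapper calls. A hedged sketch of that call sequence against the raw library, assuming `liblpsolve55.so` is installed under that (platform-dependent) name:

```python
from ctypes import CDLL, c_double, c_int, c_void_p

lib = CDLL('liblpsolve55.so')          # assumed library name/path
lib.make_lp.restype = c_void_p         # keep the lprec* pointer intact on 64-bit

lp = c_void_p(lib.make_lp(0, 2))       # 0 rows, 2 columns (variables)
lib.set_add_rowmode(lp, 1)             # faster row-wise model building

colno = (c_int * 2)(1, 2)              # lp_solve uses 1-based column indices
row = (c_double * 2)(1.0, 2.0)         # coefficients for x1 + 2*x2
lib.add_constraintex(lp, 2, row, colno, 1, c_double(10.0))  # constr_type 1 == LE

lib.set_add_rowmode(lp, 0)             # leave row mode before setting the objective
obj = (c_double * 2)(3.0, 1.0)
lib.set_obj_fnex(lp, 2, obj, colno)    # maximize 3*x1 + x2
lib.set_maxim(lp)

if lib.solve(lp) in (0, 1):            # 0 OPTIMAL, 1 SUBOPTIMAL, as in solve() above
    out = (c_double * 2)()
    lib.get_variables(lp, out)         # mirrors update_variables above
    print(list(out))                   # -> [10.0, 0.0]
```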
55ed4b85ad2cccec9651c39500d5a0e33d197e65f7ce593aad942a1365b1f40b
|
def __init__(self, code=500, msg='', *args, **kwargs):
'\n create the error\n\n code -- integer -- http status code\n msg -- string -- the message you want to accompany your status code\n '
self.code = code
self.headers = kwargs.pop('headers', {})
super(CallError, self).__init__(msg, *args, **kwargs)
|
create the error
code -- integer -- http status code
msg -- string -- the message you want to accompany your status code
|
endpoints/exception.py
|
__init__
|
Jaymon/endpoints
| 18 |
python
|
def __init__(self, code=500, msg='', *args, **kwargs):
'\n create the error\n\n code -- integer -- http status code\n msg -- string -- the message you want to accompany your status code\n '
self.code = code
self.headers = kwargs.pop('headers', {})
super(CallError, self).__init__(msg, *args, **kwargs)
|
def __init__(self, code=500, msg='', *args, **kwargs):
'\n create the error\n\n code -- integer -- http status code\n msg -- string -- the message you want to accompany your status code\n '
self.code = code
self.headers = kwargs.pop('headers', {})
super(CallError, self).__init__(msg, *args, **kwargs)<|docstring|>create the error
code -- integer -- http status code
msg -- string -- the message you want to accompany your status code<|endoftext|>
|
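A self-contained sketch of the `CallError` contract shown above: the `headers` kwarg is popped off before the message reaches `Exception`, so handlers can read both `code` and `headers` from the raised error. (`Retry-After` is just an illustrative header.)

```python
class CallError(Exception):
    def __init__(self, code=500, msg='', *args, **kwargs):
        self.code = code
        self.headers = kwargs.pop('headers', {})
        super().__init__(msg, *args, **kwargs)

try:
    raise CallError(429, 'slow down', headers={'Retry-After': '30'})
except CallError as e:
    print(e.code, e.headers)  # -> 429 {'Retry-After': '30'}
```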
585607a2365917069c870f2033135108f4e41402b792a7bb2a90a9e40ca9c5b8
|
def __init__(self, msg='', scheme='', realm='', **kwargs):
"create an access denied error (401)\n\n This error adds the needed WWW-Authenticate header using the passed in\n scheme and realm. Rfc 7235 also includes type and title params but I didn't\n think they were worth adding right now so they are ignored\n\n Realm is no longer required, see https://tools.ietf.org/html/rfc7235#appendix-A\n\n :param msg: string, the message you want to accompany your status code\n :param scheme: usually one of the SCHEME_* constants but can really be anything\n :param realm: this is the namespace for the authentication scheme\n :param **kwargs: headers if you want to add custom headers\n "
self.scheme = (scheme.title() if scheme else self.SCHEME_DEFAULT)
self.realm = realm
kwargs.setdefault('headers', {})
v = ('{} realm="{}"'.format(self.scheme, self.realm) if self.realm else self.scheme)
kwargs['headers'].setdefault('WWW-Authenticate', v)
super(AccessDenied, self).__init__(401, msg, **kwargs)
|
create an access denied error (401)
This error adds the needed WWW-Authenticate header using the passed in
scheme and realm. Rfc 7235 also includes type and title params but I didn't
think they were worth adding right now so they are ignored
Realm is no longer required, see https://tools.ietf.org/html/rfc7235#appendix-A
:param msg: string, the message you want to accompany your status code
:param scheme: usually one of the SCHEME_* constants but can really be anything
:param realm: this is the namespace for the authentication scheme
:param **kwargs: headers if you want to add custom headers
|
endpoints/exception.py
|
__init__
|
Jaymon/endpoints
| 18 |
python
|
def __init__(self, msg='', scheme='', realm='', **kwargs):
"create an access denied error (401)\n\n This error adds the needed WWW-Authenticate header using the passed in\n scheme and realm. Rfc 7235 also includes type and title params but I didn't\n think they were worth adding right now so they are ignored\n\n Realm is no longer required, see https://tools.ietf.org/html/rfc7235#appendix-A\n\n :param msg: string, the message you want to accompany your status code\n :param scheme: usually one of the SCHEME_* constants but can really be anything\n :param realm: this is the namespace for the authentication scheme\n :param **kwargs: headers if you want to add custom headers\n "
self.scheme = (scheme.title() if scheme else self.SCHEME_DEFAULT)
self.realm = realm
kwargs.setdefault('headers', {})
v = ('{} realm="{}"'.format(self.scheme, self.realm) if self.realm else self.scheme)
kwargs['headers'].setdefault('WWW-Authenticate', v)
super(AccessDenied, self).__init__(401, msg, **kwargs)
|
def __init__(self, msg='', scheme='', realm='', **kwargs):
"create an access denied error (401)\n\n This error adds the needed WWW-Authenticate header using the passed in\n scheme and realm. Rfc 7235 also includes type and title params but I didn't\n think they were worth adding right now so they are ignored\n\n Realm is no longer required, see https://tools.ietf.org/html/rfc7235#appendix-A\n\n :param msg: string, the message you want to accompany your status code\n :param scheme: usually one of the SCHEME_* constants but can really be anything\n :param realm: this is the namespace for the authentication scheme\n :param **kwargs: headers if you want to add custom headers\n "
self.scheme = (scheme.title() if scheme else self.SCHEME_DEFAULT)
self.realm = realm
kwargs.setdefault('headers', {})
v = ('{} realm="{}"'.format(self.scheme, self.realm) if self.realm else self.scheme)
kwargs['headers'].setdefault('WWW-Authenticate', v)
super(AccessDenied, self).__init__(401, msg, **kwargs)<|docstring|>create an access denied error (401)
This error adds the needed WWW-Authenticate header using the passed in
scheme and realm. Rfc 7235 also includes type and title params but I didn't
think they were worth adding right now so they are ignored
Realm is no longer required, see https://tools.ietf.org/html/rfc7235#appendix-A
:param msg: string, the message you want to accompany your status code
:param scheme: usually one of the SCHEME_* constants but can really be anything
:param realm: this is the namespace for the authentication scheme
:param **kwargs: headers if you want to add custom headers<|endoftext|>
|
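The header value `AccessDenied` assembles is easy to preview in isolation; `'basic'` stands in for `SCHEME_DEFAULT`, whose actual value is not shown in this row:

```python
scheme, realm = 'basic', 'api'
# Mirrors the conditional above: scheme alone when realm is empty.
value = '{} realm="{}"'.format(scheme.title(), realm) if realm else scheme.title()
print(value)  # -> Basic realm="api"
```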
34cd05532156c8d5383a93d5303cd55134a489b6a5952e2362b74de56c85257a
|
def __init__(self, code, body=None, msg='', **kwargs):
'\n create the stop object\n\n code -- integer -- http status code\n body -- mixed -- the body of the response\n '
self.body = body
super(CallStop, self).__init__(code, msg, **kwargs)
|
create the stop object
code -- integer -- http status code
body -- mixed -- the body of the response
|
endpoints/exception.py
|
__init__
|
Jaymon/endpoints
| 18 |
python
|
def __init__(self, code, body=None, msg='', **kwargs):
'\n create the stop object\n\n code -- integer -- http status code\n body -- mixed -- the body of the response\n '
self.body = body
super(CallStop, self).__init__(code, msg, **kwargs)
|
def __init__(self, code, body=None, msg='', **kwargs):
'\n create the stop object\n\n code -- integer -- http status code\n body -- mixed -- the body of the response\n '
self.body = body
super(CallStop, self).__init__(code, msg, **kwargs)<|docstring|>create the stop object
code -- integer -- http status code
body -- mixed -- the body of the response<|endoftext|>
|
d4fe86305b088a63d7884926b57b229019ad6b45281ef9387af11ff7f3ba3b5a
|
def __init__(self, msg='', *args, **kwargs):
'\n Close a connection, so in a websocket request this will cause the server\n to close the websocket connection.\n\n You have to be careful with this since it might have unexpected effects\n if the connection is not a websocket connection\n\n :param msg: string, the message you want to accompany your close\n '
super(CloseConnection, self).__init__(0, msg, *args, **kwargs)
|
Close a connection, so in a websocket request this will cause the server
to close the websocket connection.
You have to be careful with this since it might have unexpected effects
if the connection is not a websocket connection
:param msg: string, the message you want to accompany your close
|
endpoints/exception.py
|
__init__
|
Jaymon/endpoints
| 18 |
python
|
def __init__(self, msg='', *args, **kwargs):
'\n Close a connection, so in a websocket request this will cause the server\n to close the websocket connection.\n\n You have to be careful with this since it might have unexpected effects\n if the connection is not a websocket connection\n\n :param msg: string, the message you want to accompany your close\n '
super(CloseConnection, self).__init__(0, msg, *args, **kwargs)
|
def __init__(self, msg='', *args, **kwargs):
'\n Close a connection, so in a websocket request this will cause the server\n to close the websocket connection.\n\n You have to be careful with this since it might have unexpected effects\n if the connection is not a websocket connection\n\n :param msg: string, the message you want to accompany your close\n '
super(CloseConnection, self).__init__(0, msg, *args, **kwargs)<|docstring|>Close a connection, so in a websocket request this will cause the server
to close the websocket connection.
You have to be careful with this since it might have unexpected effects
if the connection is not a websocket connection
:param msg: string, the message you want to accompany your close<|endoftext|>
|
8338089166a8dd20b6bdb894e281118020170d2449f29fe587e56b835f0ef25a
|
def create_host(app):
'\n configuration point of bm_cat server\n you can do everything with app at this point to build host\n\n :param app: flask server instance\n :return: app\n '
register_handler(app, bm_cat_handler)
return app
|
configuration point of bm_cat server
you can do everything with app at this point to build host
:param app: flask server instance
:return: app
|
src/hostbuilder.py
|
create_host
|
Evalle/bm-cat
| 5 |
python
|
def create_host(app):
'\n configuration point of bm_cat server\n you can do everything with app at this point to build host\n\n :param app: flask server instance\n :return: app\n '
register_handler(app, bm_cat_handler)
return app
|
def create_host(app):
'\n configuration point of bm_cat server\n you can do everything with app at this point to build host\n\n :param app: flask server instance\n :return: app\n '
register_handler(app, bm_cat_handler)
return app<|docstring|>configuration point of bm_cat server
you can do everything with app at this point to build host
:param app: flask server instance
:return: app<|endoftext|>
|
b5be26adab9bf3715210c6c43358236aa860309e4a9ec46e54f328f52109b463
|
def register_handler(app, handler):
'\n    registration of handler for bm_cat bot server\n    only POST requests will be processed\n\n    :param app: instance of flask server\n    :param handler: web request handler\n    :return: void\n    '
app.add_url_rule(('/' + BMCAT_APIKEY), 'handler', handler, methods=['POST'])
|
registration of handler for bm_cat bot server
only POST requests will be processed
:param app: instance of flask server
:param handler: web request handler
:return: void
|
src/hostbuilder.py
|
register_handler
|
Evalle/bm-cat
| 5 |
python
|
def register_handler(app, handler):
'\n    registration of handler for bm_cat bot server\n    only POST requests will be processed\n\n    :param app: instance of flask server\n    :param handler: web request handler\n    :return: void\n    '
app.add_url_rule(('/' + BMCAT_APIKEY), 'handler', handler, methods=['POST'])
|
def register_handler(app, handler):
'\n    registration of handler for bm_cat bot server\n    only POST requests will be processed\n\n    :param app: instance of flask server\n    :param handler: web request handler\n    :return: void\n    '
app.add_url_rule(('/' + BMCAT_APIKEY), 'handler', handler, methods=['POST'])<|docstring|>registration of handler for bm_cat bot server
only POST requests will be processed
:param app: instance of flask server
:param handler: web request handler
:return: void<|endoftext|>
|
a363231696c688acc8614086ec7d25ea2e189dd6964a262cb97dbda52cb479eb
|
def bm_cat_handler():
'\n bm_cat_handler() - root handler of bm_cat webhook\n\n :return: OK - 200\n '
if (request.method == 'POST'):
update = telegram.Update.de_json(request.get_json(force=True), bot=bmcat_bot.get_bot())
bmcat_bot.send_random_video(update)
return 'ok'
|
bm_cat_handler() - root handler of bm_cat webhook
:return: OK - 200
|
src/hostbuilder.py
|
bm_cat_handler
|
Evalle/bm-cat
| 5 |
python
|
def bm_cat_handler():
'\n bm_cat_handler() - root handler of bm_cat webhook\n\n :return: OK - 200\n '
if (request.method == 'POST'):
update = telegram.Update.de_json(request.get_json(force=True), bot=bmcat_bot.get_bot())
bmcat_bot.send_random_video(update)
return 'ok'
|
def bm_cat_handler():
'\n bm_cat_handler() - root handler of bm_cat webhook\n\n :return: OK - 200\n '
if (request.method == 'POST'):
update = telegram.Update.de_json(request.get_json(force=True), bot=bmcat_bot.get_bot())
bmcat_bot.send_random_video(update)
return 'ok'<|docstring|>bm_cat_handler() - root handler of bm_cat webhook
:return: OK - 200<|endoftext|>
|
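A hypothetical wiring of the three pieces above, assuming they live in an importable `hostbuilder` module with `BMCAT_APIKEY` configured; the host and port are placeholders:

```python
from flask import Flask
from hostbuilder import create_host  # hypothetical import path

app = create_host(Flask(__name__))
# Telegram POSTs updates to https://<host>/<BMCAT_APIKEY>,
# which routes to bm_cat_handler via register_handler.
app.run(host='0.0.0.0', port=8443)
```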
4d14edfb6bcc488f7a6dc1259ff5c95e2280299aa30a2eb71cb9ec481bf92aaa
|
def get_form(self, request, obj=None, change=False, **kwargs):
' Get the admin page form '
form = super().get_form(request, obj=obj, change=change, **kwargs)
form.base_fields['password'].help_text = '管理员可以直接修密码,会自动进行加密操作'
return form
|
Get the admin page form
|
user/admin.py
|
get_form
|
enjoy-binbin/Django-blog
| 111 |
python
|
def get_form(self, request, obj=None, change=False, **kwargs):
' '
form = super().get_form(request, obj=obj, change=change, **kwargs)
form.base_fields['password'].help_text = '管理员可以直接修密码,会自动进行加密操作'
return form
|
def get_form(self, request, obj=None, change=False, **kwargs):
' '
form = super().get_form(request, obj=obj, change=change, **kwargs)
form.base_fields['password'].help_text = '管理员可以直接修密码,会自动进行加密操作'
return form<|docstring|>Get the admin page form<|endoftext|>
|
90beb230c495bb7b32293acfbac9a8f6a11fc1885b75a818f15534a8ffe2dca3
|
def save_model(self, request, obj, form, change):
' Add custom logic before the model is saved '
if change:
user = User.objects.get(id=obj.id)
if (obj.password != user.password):
obj.set_password(obj.password)
else:
obj.set_password(obj.password)
return super().save_model(request, obj, form, change)
|
Add custom logic before the model is saved
|
user/admin.py
|
save_model
|
enjoy-binbin/Django-blog
| 111 |
python
|
def save_model(self, request, obj, form, change):
' '
if change:
user = User.objects.get(id=obj.id)
if (obj.password != user.password):
obj.set_password(obj.password)
else:
obj.set_password(obj.password)
return super().save_model(request, obj, form, change)
|
def save_model(self, request, obj, form, change):
' '
if change:
user = User.objects.get(id=obj.id)
if (obj.password != user.password):
obj.set_password(obj.password)
else:
obj.set_password(obj.password)
return super().save_model(request, obj, form, change)<|docstring|>Add custom logic before the model is saved<|endoftext|>
|
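A framework-free sketch of the branch `save_model` takes on an existing object: re-hash only when the submitted value differs from the stored hash, i.e. the admin typed a new plain-text password. `hash_fn` is a toy stand-in for Django's `set_password` machinery:

```python
def resolve_password(stored_hash, submitted, hash_fn):
    # Unchanged field means the form round-tripped the stored hash.
    return hash_fn(submitted) if submitted != stored_hash else submitted

hash_fn = lambda p: 'pbkdf2$' + p[::-1]  # toy "hash" for the sketch
print(resolve_password('pbkdf2$x', 'hunter2', hash_fn))   # re-hashed
print(resolve_password('pbkdf2$x', 'pbkdf2$x', hash_fn))  # kept as-is
```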
b209b4ec8e4e3178470830d174e70fb43560ec98351bee3eef5855f1dfed7cc5
|
def has_add_permission(self, request):
' Adding is not allowed '
return False
|
Adding is not allowed
|
user/admin.py
|
has_add_permission
|
enjoy-binbin/Django-blog
| 111 |
python
|
def has_add_permission(self, request):
' '
return False
|
def has_add_permission(self, request):
' '
return False<|docstring|>Adding is not allowed<|endoftext|>
|
9d374779e40fd4f58a61673926b45158ff362094574255f64195c3cc579febb2
|
def user_link(self, obj):
' Link to the user page; obj is the current object '
content_type = ContentType.objects.get_for_model(obj.user)
app_label = content_type.app_label
model_name = content_type.model
link = reverse(('admin:%s_%s_change' % (app_label, model_name)), kwargs={'object_id': obj.user_id})
return format_html(('<a href="%s">%s</a>' % (link, obj.user)))
|
Link to the user page; obj is the current object
|
user/admin.py
|
user_link
|
enjoy-binbin/Django-blog
| 111 |
python
|
def user_link(self, obj):
' '
content_type = ContentType.objects.get_for_model(obj.user)
app_label = content_type.app_label
model_name = content_type.model
link = reverse(('admin:%s_%s_change' % (app_label, model_name)), kwargs={'object_id': obj.user_id})
return format_html(('<a href="%s">%s</a>' % (link, obj.user)))
|
def user_link(self, obj):
' '
content_type = ContentType.objects.get_for_model(obj.user)
app_label = content_type.app_label
model_name = content_type.model
link = reverse(('admin:%s_%s_change' % (app_label, model_name)), kwargs={'object_id': obj.user_id})
return format_html(('<a href="%s">%s</a>' % (link, obj.user)))<|docstring|>Link to the user page; obj is the current object<|endoftext|>
|
43429cf2a20a4df98ce99f6aaa75676b2e5e83c4ce9f6597acccba4c6e00ac28
|
def get_change_message(self, obj):
'\n        Translates the JSON-format message into a readable string\n        [{"changed": {"fields": ["password", "last_login"]}}] --> Changed password and last_login.\n        '
return obj.get_change_message()
|
Translates the JSON-format message into a readable string
[{"changed": {"fields": ["password", "last_login"]}}] --> Changed password and last_login.
|
user/admin.py
|
get_change_message
|
enjoy-binbin/Django-blog
| 111 |
python
|
def get_change_message(self, obj):
'\n        Translates the JSON-format message into a readable string\n        [{"changed": {"fields": ["password", "last_login"]}}] --> Changed password and last_login.\n        '
return obj.get_change_message()
|
def get_change_message(self, obj):
'\n        Translates the JSON-format message into a readable string\n        [{"changed": {"fields": ["password", "last_login"]}}] --> Changed password and last_login.\n        '
return obj.get_change_message()<|docstring|>Translates the JSON-format message into a readable string
[{"changed": {"fields": ["password", "last_login"]}}] --> Changed password and last_login.<|endoftext|>
|
cfdada10141d7b6f6260513b3b1a76f31a6ec88aec1e3cac81b406e51aa60b24
|
def worker(num):
'The `num` passed should be picklable'
print('hello there', num)
return
|
The `num` passed should be picklable
|
python/multiprocessing/simple_print_worker.py
|
worker
|
prodicus/TIL
| 0 |
python
|
def worker(num):
print('hello there', num)
return
|
def worker(num):
print('hello there', num)
return<|docstring|>The `num` passed should be picklable<|endoftext|>
|
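The picklability note matters because `num` crosses a process boundary when handed to the child. A minimal runnable driver for this worker:

```python
import multiprocessing

def worker(num):
    """The `num` passed should be picklable."""
    print('hello there', num)

if __name__ == '__main__':
    # Each argument is pickled and sent to a separate process.
    jobs = [multiprocessing.Process(target=worker, args=(i,)) for i in range(3)]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
```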
da8f9567b6fc9c7c5d4c81b65a3e06d6ddb299b13536161e124cbadd12c68721
|
def record_bounty_state(self, event_date):
'Makes sure no duplicates are created'
return BountyState.objects.get_or_create(bounty=self, bounty_stage=self.bounty_stage, change_date=event_date)
|
Makes sure no duplicates are created
|
bounties_api/std_bounties/models.py
|
record_bounty_state
|
tenthirtyone/BountiesAPI
| 45 |
python
|
def record_bounty_state(self, event_date):
return BountyState.objects.get_or_create(bounty=self, bounty_stage=self.bounty_stage, change_date=event_date)
|
def record_bounty_state(self, event_date):
return BountyState.objects.get_or_create(bounty=self, bounty_stage=self.bounty_stage, change_date=event_date)<|docstring|>Makes sure no duplicates are created<|endoftext|>
|
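The deduplication relies on `get_or_create` semantics: an identical later call returns the existing row with `created=False`. A sketch, assuming a migrated Django environment with this `BountyState` model; `bounty` and `event_date` are placeholders:

```python
state, created = BountyState.objects.get_or_create(
    bounty=bounty,
    bounty_stage=bounty.bounty_stage,
    change_date=event_date)
# First call: created is True. A repeat call with the same
# (bounty, bounty_stage, change_date) returns the same row, created=False.
```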
8c57a2d7baf8b7f03199c8ba105e5dc71127850522e76a27d5dcecc9332b3ef9
|
def low_level_handler(nCode, wParam, lParam):
'\n    Processes a low level Windows keyboard event.\n    '
if ((wParam == win32con.WM_KEYUP) or (wParam == 261)):
event = str(translate[(lParam[0] >> 32)])
for handler in def_list:
if handler(event):
break
return windll.user32.CallNextHookEx(hook_id, nCode, wParam, lParam)
|
Processes a low level Windows keyboard event.
|
pkghito/Hook3v2.py
|
low_level_handler
|
pastahito/PyStrokes
| 0 |
python
|
def low_level_handler(nCode, wParam, lParam):
'\n \n '
if ((wParam == win32con.WM_KEYUP) or (wParam == 261)):
event = str(translate[(lParam[0] >> 32)])
for handler in def_list:
if handler(event):
break
return windll.user32.CallNextHookEx(hook_id, nCode, wParam, lParam)
|
def low_level_handler(nCode, wParam, lParam):
'\n \n '
if ((wParam == win32con.WM_KEYUP) or (wParam == 261)):
event = str(translate[(lParam[0] >> 32)])
for handler in def_list:
if handler(event):
break
return windll.user32.CallNextHookEx(hook_id, nCode, wParam, lParam)<|docstring|>Processes a low level Windows keyboard event.<|endoftext|>
|
8dfa40568c254014489262a19ce8e9599736760216c6a39ab452779640beb2cf
|
def ogr_sde_1():
'Test basic opening of a database'
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base)
if (ds is None):
print(('Could not open %s' % base))
gdaltest.sde_dr = None
return 'skip'
ds.Destroy()
ds = ogr.Open(base, update=1)
ds.Destroy()
return 'success'
|
Test basic opening of a database
|
autotest/ogr/ogr_sde.py
|
ogr_sde_1
|
praiskup/gdal
| 9 |
python
|
def ogr_sde_1():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base)
if (ds is None):
print(('Could not open %s' % base))
gdaltest.sde_dr = None
return 'skip'
ds.Destroy()
ds = ogr.Open(base, update=1)
ds.Destroy()
return 'success'
|
def ogr_sde_1():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base)
if (ds is None):
print(('Could not open %s' % base))
gdaltest.sde_dr = None
return 'skip'
ds.Destroy()
ds = ogr.Open(base, update=1)
ds.Destroy()
return 'success'<|docstring|>Test basic opening of a database<|endoftext|>
|
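The connection-string layout these tests build up, inferred from the format strings above (the layer and version segments are optional); all credentials here are placeholders:

```python
sde_server, sde_port, sde_db = 'sdehost', '5151', 'sde'
sde_user, sde_password = 'user', 'secret'

base = 'SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password)
versioned = base + ',SDE.TPOLY,SDE.DEFAULT'  # plus layer and parent version
child = versioned + ',TESTING'               # plus a child version name
print(child)  # SDE:sdehost,5151,sde,user,secret,SDE.TPOLY,SDE.DEFAULT,TESTING
```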
93a85a274aa738d8e4ea3c3e6d1627f438eca420ecd25b8a252e1d21b51849c8
|
def ogr_sde_2():
'Test creation of a layer'
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
shp_ds = ogr.Open('data/poly.shp')
gdaltest.shp_ds = shp_ds
shp_lyr = shp_ds.GetLayer(0)
ds = ogr.Open(base, update=1)
lyr = ds.CreateLayer('SDE.TPOLY', geom_type=ogr.wkbPolygon, srs=shp_lyr.GetSpatialRef(), options=['OVERWRITE=YES'])
ogrtest.quick_create_layer_def(lyr, [('AREA', ogr.OFTReal), ('EAS_ID', ogr.OFTInteger), ('PRFEDEA', ogr.OFTString), ('WHEN', ogr.OFTDateTime)])
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
feat = shp_lyr.GetNextFeature()
gdaltest.poly_feat = []
while (feat is not None):
gdaltest.poly_feat.append(feat)
dst_feat.SetFrom(feat)
lyr.CreateFeature(dst_feat)
feat = shp_lyr.GetNextFeature()
dst_feat.Destroy()
return 'success'
|
Test creation of a layer
|
autotest/ogr/ogr_sde.py
|
ogr_sde_2
|
praiskup/gdal
| 9 |
python
|
def ogr_sde_2():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
shp_ds = ogr.Open('data/poly.shp')
gdaltest.shp_ds = shp_ds
shp_lyr = shp_ds.GetLayer(0)
ds = ogr.Open(base, update=1)
lyr = ds.CreateLayer('SDE.TPOLY', geom_type=ogr.wkbPolygon, srs=shp_lyr.GetSpatialRef(), options=['OVERWRITE=YES'])
ogrtest.quick_create_layer_def(lyr, [('AREA', ogr.OFTReal), ('EAS_ID', ogr.OFTInteger), ('PRFEDEA', ogr.OFTString), ('WHEN', ogr.OFTDateTime)])
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
feat = shp_lyr.GetNextFeature()
gdaltest.poly_feat = []
while (feat is not None):
gdaltest.poly_feat.append(feat)
dst_feat.SetFrom(feat)
lyr.CreateFeature(dst_feat)
feat = shp_lyr.GetNextFeature()
dst_feat.Destroy()
return 'success'
|
def ogr_sde_2():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
shp_ds = ogr.Open('data/poly.shp')
gdaltest.shp_ds = shp_ds
shp_lyr = shp_ds.GetLayer(0)
ds = ogr.Open(base, update=1)
lyr = ds.CreateLayer('SDE.TPOLY', geom_type=ogr.wkbPolygon, srs=shp_lyr.GetSpatialRef(), options=['OVERWRITE=YES'])
ogrtest.quick_create_layer_def(lyr, [('AREA', ogr.OFTReal), ('EAS_ID', ogr.OFTInteger), ('PRFEDEA', ogr.OFTString), ('WHEN', ogr.OFTDateTime)])
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
feat = shp_lyr.GetNextFeature()
gdaltest.poly_feat = []
while (feat is not None):
gdaltest.poly_feat.append(feat)
dst_feat.SetFrom(feat)
lyr.CreateFeature(dst_feat)
feat = shp_lyr.GetNextFeature()
dst_feat.Destroy()
return 'success'<|docstring|>Test creation of a layer<|endoftext|>
|
686743e6074cefebeef7f977d268b3a5406a4c9c8c6e0bef44eda147e3678e06
|
def ogr_sde_3():
'Test basic version locking'
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base, update=1)
ds2 = ogr.Open(base, update=1)
if (ds2 is not None):
gdaltest.post_reason('A locked version was able to be opened')
return 'fail'
ds.Destroy()
return 'success'
|
Test basic version locking
|
autotest/ogr/ogr_sde.py
|
ogr_sde_3
|
praiskup/gdal
| 9 |
python
|
def ogr_sde_3():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base, update=1)
ds2 = ogr.Open(base, update=1)
if (ds2 is not None):
gdaltest.post_reason('A locked version was able to be opened')
return 'fail'
ds.Destroy()
return 'success'
|
def ogr_sde_3():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base, update=1)
ds2 = ogr.Open(base, update=1)
if (ds2 is not None):
gdaltest.post_reason('A locked version was able to be opened')
return 'fail'
ds.Destroy()
return 'success'<|docstring|>Test basic version locking<|endoftext|>
|
0f97d689f07c10de84d49ab31cee6c55a92284700d2a770dab3efc625fe55fe2
|
def ogr_sde_4():
'Test basic version creation'
if (gdaltest.sde_dr is None):
return 'skip'
version_name = 'TESTING'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'TRUE')
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, version_name))
ds = ogr.Open(base, update=1)
ds.Destroy()
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, version_name))
ds = ogr.Open(base, update=1)
ds.Destroy()
return 'success'
|
Test basic version creation
|
autotest/ogr/ogr_sde.py
|
ogr_sde_4
|
praiskup/gdal
| 9 |
python
|
def ogr_sde_4():
if (gdaltest.sde_dr is None):
return 'skip'
version_name = 'TESTING'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'TRUE')
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, version_name))
ds = ogr.Open(base, update=1)
ds.Destroy()
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, version_name))
ds = ogr.Open(base, update=1)
ds.Destroy()
return 'success'
|
def ogr_sde_4():
if (gdaltest.sde_dr is None):
return 'skip'
version_name = 'TESTING'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'TRUE')
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, version_name))
ds = ogr.Open(base, update=1)
ds.Destroy()
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, version_name))
ds = ogr.Open(base, update=1)
ds.Destroy()
return 'success'<|docstring|>Test basic version creation<|endoftext|>
|
39a39838f9d36e7e4e28e81a04556807cb50331e66362907554094243030cbd0
|
def ogr_sde_5():
'Test versioned editing'
if (gdaltest.sde_dr is None):
return 'skip'
version_name = 'TESTING'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'TRUE')
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, version_name))
ds = ogr.Open(base, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY')
f1 = l1.GetFeature(1)
f1.SetField('PRFEDEA', 'SDE.TESTING')
l1.SetFeature(f1)
ds.Destroy()
del ds
default = 'DEFAULT'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
default = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, default))
ds2 = ogr.Open(default, update=1)
l2 = ds2.GetLayerByName('SDE.TPOLY')
f2 = l2.GetFeature(1)
f2.SetField('PRFEDEA', 'SDE.DEFAULT')
f2.SetField('WHEN', 2008, 3, 19, 16, 15, 0, 0)
l2.SetFeature(f2)
ds2.Destroy()
del ds2
ds3 = ogr.Open(base)
l3 = ds3.GetLayerByName('SDE.TPOLY')
f3 = l3.GetFeature(1)
if (f3.GetField('PRFEDEA') != 'SDE.TESTING'):
gdaltest.post_reason('versioned editing failed for child version SDE.TESTING')
return 'fail'
ds3.Destroy()
del ds3
ds4 = ogr.Open(default)
l4 = ds4.GetLayerByName('SDE.TPOLY')
f4 = l4.GetFeature(1)
if (f4.GetField('PRFEDEA') != 'SDE.DEFAULT'):
gdaltest.post_reason('versioned editing failed for parent version SDE.DEFAULT')
return 'fail'
idx = f4.GetFieldIndex('WHEN')
df = f4.GetField(idx)
if (df != '2008/03/19 16:15:00'):
gdaltest.post_reason(("datetime handling did not work -- expected '2008/03/19 16:15:00' got '%s' " % df))
ds4.Destroy()
del ds4
return 'success'
|
Test versioned editing
|
autotest/ogr/ogr_sde.py
|
ogr_sde_5
|
praiskup/gdal
| 9 |
python
|
def ogr_sde_5():
if (gdaltest.sde_dr is None):
return 'skip'
version_name = 'TESTING'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'TRUE')
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, version_name))
ds = ogr.Open(base, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY')
f1 = l1.GetFeature(1)
f1.SetField('PRFEDEA', 'SDE.TESTING')
l1.SetFeature(f1)
ds.Destroy()
del ds
default = 'DEFAULT'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
default = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, default))
ds2 = ogr.Open(default, update=1)
l2 = ds2.GetLayerByName('SDE.TPOLY')
f2 = l2.GetFeature(1)
f2.SetField('PRFEDEA', 'SDE.DEFAULT')
f2.SetField('WHEN', 2008, 3, 19, 16, 15, 0, 0)
l2.SetFeature(f2)
ds2.Destroy()
del ds2
ds3 = ogr.Open(base)
l3 = ds3.GetLayerByName('SDE.TPOLY')
f3 = l3.GetFeature(1)
if (f3.GetField('PRFEDEA') != 'SDE.TESTING'):
gdaltest.post_reason('versioned editing failed for child version SDE.TESTING')
return 'fail'
ds3.Destroy()
del ds3
ds4 = ogr.Open(default)
l4 = ds4.GetLayerByName('SDE.TPOLY')
f4 = l4.GetFeature(1)
if (f4.GetField('PRFEDEA') != 'SDE.DEFAULT'):
gdaltest.post_reason('versioned editing failed for parent version SDE.DEFAULT')
return 'fail'
idx = f4.GetFieldIndex('WHEN')
df = f4.GetField(idx)
if (df != '2008/03/19 16:15:00'):
gdaltest.post_reason(("datetime handling did not work -- expected '2008/03/19 16:15:00' got '%s' " % df))
ds4.Destroy()
del ds4
return 'success'
|
def ogr_sde_5():
if (gdaltest.sde_dr is None):
return 'skip'
version_name = 'TESTING'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'TRUE')
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, version_name))
ds = ogr.Open(base, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY')
f1 = l1.GetFeature(1)
f1.SetField('PRFEDEA', 'SDE.TESTING')
l1.SetFeature(f1)
ds.Destroy()
del ds
default = 'DEFAULT'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
default = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, default))
ds2 = ogr.Open(default, update=1)
l2 = ds2.GetLayerByName('SDE.TPOLY')
f2 = l2.GetFeature(1)
f2.SetField('PRFEDEA', 'SDE.DEFAULT')
f2.SetField('WHEN', 2008, 3, 19, 16, 15, 0, 0)
l2.SetFeature(f2)
ds2.Destroy()
del ds2
ds3 = ogr.Open(base)
l3 = ds3.GetLayerByName('SDE.TPOLY')
f3 = l3.GetFeature(1)
if (f3.GetField('PRFEDEA') != 'SDE.TESTING'):
gdaltest.post_reason('versioned editing failed for child version SDE.TESTING')
return 'fail'
ds3.Destroy()
del ds3
ds4 = ogr.Open(default)
l4 = ds4.GetLayerByName('SDE.TPOLY')
f4 = l4.GetFeature(1)
if (f4.GetField('PRFEDEA') != 'SDE.DEFAULT'):
gdaltest.post_reason('versioned editing failed for parent version SDE.DEFAULT')
return 'fail'
idx = f4.GetFieldIndex('WHEN')
df = f4.GetField(idx)
if (df != '2008/03/19 16:15:00'):
gdaltest.post_reason(("datetime handling did not work -- expected '2008/03/19 16:15:00' got '%s' " % df))
ds4.Destroy()
del ds4
return 'success'<|docstring|>Test versioned editing<|endoftext|>
|
b13615f0b24ebd1cde32fd5e1288e4c622a5de3173ee41cb25873a6474f155f8
|
def ogr_sde_6():
'Extent fetching'
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY')
extent = l1.GetExtent(force=0)
if (extent != (0.0, 2147483645.0, 0.0, 2147483645.0)):
gdaltest.post_reason('unforced extent did not equal expected value')
extent = l1.GetExtent(force=1)
if (extent != (478316.0, 481645.0, 4762881.0, 4765611.0)):
gdaltest.post_reason('forced extent did not equal expected value')
return 'success'
|
Extent fetching
|
autotest/ogr/ogr_sde.py
|
ogr_sde_6
|
praiskup/gdal
| 9 |
python
|
def ogr_sde_6():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY')
extent = l1.GetExtent(force=0)
if (extent != (0.0, 2147483645.0, 0.0, 2147483645.0)):
gdaltest.post_reason('unforced extent did not equal expected value')
extent = l1.GetExtent(force=1)
if (extent != (478316.0, 481645.0, 4762881.0, 4765611.0)):
gdaltest.post_reason('forced extent did not equal expected value')
return 'success'
|
def ogr_sde_6():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY')
extent = l1.GetExtent(force=0)
if (extent != (0.0, 2147483645.0, 0.0, 2147483645.0)):
gdaltest.post_reason('unforced extent did not equal expected value')
extent = l1.GetExtent(force=1)
if (extent != (478316.0, 481645.0, 4762881.0, 4765611.0)):
gdaltest.post_reason('forced extent did not equal expected value')
return 'success'<|docstring|>Extent fetching<|endoftext|>
|
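The two GetExtent calls above differ only in the force flag: force=0 returns whatever cheap bounds metadata the server already holds (here an uninitialized 32-bit sentinel), while force=1 asks OGR to compute the real bounds from the features. The same check works against any OGR layer; a minimal sketch:

from osgeo import ogr  # the tests above use the older bare "import ogr"

ds = ogr.Open('data/poly.shp')
lyr = ds.GetLayer(0)
print(lyr.GetExtent(force=0))  # fast, possibly approximate, driver-dependent
print(lyr.GetExtent(force=1))  # exact (minx, maxx, miny, maxy), scanned if needed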
34e7b790d1dbf919e1cc5f16813abc09ec38d29187275ad6889828848637268b
|
def ogr_sde_7():
'Bad layer test'
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY2')
if l1:
gdaltest.post_reason('we got a layer when we should not have')
ds.Destroy()
default = 'DEFAULT'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
default = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, default))
ds = ogr.Open(default, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY2')
if l1:
gdaltest.post_reason('we got a layer when we should not have')
ds.Destroy()
default = 'DEFAULT'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
default = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(default)
l1 = ds.GetLayerByName('SDE.TPOLY2')
if l1:
gdaltest.post_reason('we got a layer when we should not have')
ds.Destroy()
return 'success'
|
Bad layer test
|
autotest/ogr/ogr_sde.py
|
ogr_sde_7
|
praiskup/gdal
| 9 |
python
|
def ogr_sde_7():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY2')
if l1:
gdaltest.post_reason('we got a layer when we should not have')
ds.Destroy()
default = 'DEFAULT'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
default = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, default))
ds = ogr.Open(default, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY2')
if l1:
gdaltest.post_reason('we got a layer when we should not have')
ds.Destroy()
default = 'DEFAULT'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
default = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(default)
l1 = ds.GetLayerByName('SDE.TPOLY2')
if l1:
gdaltest.post_reason('we got a layer when we should not have')
ds.Destroy()
return 'success'
|
def ogr_sde_7():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(base, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY2')
if l1:
gdaltest.post_reason('we got a layer when we should not have')
ds.Destroy()
default = 'DEFAULT'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
default = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, default))
ds = ogr.Open(default, update=1)
l1 = ds.GetLayerByName('SDE.TPOLY2')
if l1:
gdaltest.post_reason('we got a layer when we should not have')
ds.Destroy()
default = 'DEFAULT'
gdal.SetConfigOption('SDE_VERSIONOVERWRITE', 'FALSE')
default = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
ds = ogr.Open(default)
l1 = ds.GetLayerByName('SDE.TPOLY2')
if l1:
gdaltest.post_reason('we got a layer when we should not have')
ds.Destroy()
return 'success'<|docstring|>Bad layer test<|endoftext|>
|
6e8c0ca9f64bfc7257f66a116eed42644d25901cfcd9c9ca59ae5aad482c9915
|
def ogr_sde_8():
'Test spatial references'
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
shp_ds = ogr.Open('data/poly.shp')
gdaltest.shp_ds = shp_ds
shp_lyr = shp_ds.GetLayer(0)
ref = osr.SpatialReference()
ref.ImportFromWkt('LOCAL_CS["IMAGE"]')
ds = ogr.Open(base, update=1)
lyr = ds.CreateLayer('SDE.TPOLY', geom_type=ogr.wkbPolygon, srs=ref, options=['OVERWRITE=YES'])
ref.ImportFromEPSG(4326)
lyr = ds.CreateLayer('SDE.TPOLY', geom_type=ogr.wkbPolygon, srs=ref, options=['OVERWRITE=YES'])
ogrtest.quick_create_layer_def(lyr, [('AREA', ogr.OFTReal), ('EAS_ID', ogr.OFTInteger), ('PRFEDEA', ogr.OFTString), ('WHEN', ogr.OFTDateTime)])
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
feat = shp_lyr.GetNextFeature()
gdaltest.poly_feat = []
while (feat is not None):
gdaltest.poly_feat.append(feat)
dst_feat.SetFrom(feat)
lyr.CreateFeature(dst_feat)
feat = shp_lyr.GetNextFeature()
dst_feat.Destroy()
return 'success'
|
Test spatial references
|
autotest/ogr/ogr_sde.py
|
ogr_sde_8
|
praiskup/gdal
| 9 |
python
|
def ogr_sde_8():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
shp_ds = ogr.Open('data/poly.shp')
gdaltest.shp_ds = shp_ds
shp_lyr = shp_ds.GetLayer(0)
ref = osr.SpatialReference()
ref.ImportFromWkt('LOCAL_CS["IMAGE"]')
ds = ogr.Open(base, update=1)
lyr = ds.CreateLayer('SDE.TPOLY', geom_type=ogr.wkbPolygon, srs=ref, options=['OVERWRITE=YES'])
ref.ImportFromEPSG(4326)
lyr = ds.CreateLayer('SDE.TPOLY', geom_type=ogr.wkbPolygon, srs=ref, options=['OVERWRITE=YES'])
ogrtest.quick_create_layer_def(lyr, [('AREA', ogr.OFTReal), ('EAS_ID', ogr.OFTInteger), ('PRFEDEA', ogr.OFTString), ('WHEN', ogr.OFTDateTime)])
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
feat = shp_lyr.GetNextFeature()
gdaltest.poly_feat = []
while (feat is not None):
gdaltest.poly_feat.append(feat)
dst_feat.SetFrom(feat)
lyr.CreateFeature(dst_feat)
feat = shp_lyr.GetNextFeature()
dst_feat.Destroy()
return 'success'
|
def ogr_sde_8():
if (gdaltest.sde_dr is None):
return 'skip'
base = ('SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password))
shp_ds = ogr.Open('data/poly.shp')
gdaltest.shp_ds = shp_ds
shp_lyr = shp_ds.GetLayer(0)
ref = osr.SpatialReference()
ref.ImportFromWkt('LOCAL_CS["IMAGE"]')
ds = ogr.Open(base, update=1)
lyr = ds.CreateLayer('SDE.TPOLY', geom_type=ogr.wkbPolygon, srs=ref, options=['OVERWRITE=YES'])
ref.ImportFromEPSG(4326)
lyr = ds.CreateLayer('SDE.TPOLY', geom_type=ogr.wkbPolygon, srs=ref, options=['OVERWRITE=YES'])
ogrtest.quick_create_layer_def(lyr, [('AREA', ogr.OFTReal), ('EAS_ID', ogr.OFTInteger), ('PRFEDEA', ogr.OFTString), ('WHEN', ogr.OFTDateTime)])
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
feat = shp_lyr.GetNextFeature()
gdaltest.poly_feat = []
while (feat is not None):
gdaltest.poly_feat.append(feat)
dst_feat.SetFrom(feat)
lyr.CreateFeature(dst_feat)
feat = shp_lyr.GetNextFeature()
dst_feat.Destroy()
return 'success'<|docstring|>Test spatial references<|endoftext|>
|
b70319abf7ae06ad8663e2b737b405a4651e0faed67613d2a866db5afd798c97
|
def test_other_formats(image_examples):
'Test additional image examples in dials_data, not dials_regression'
for image in image_examples:
format_class = dxtbx.format.Registry.get_format_class_for_file(image)
reader = format_class.get_reader()([image])
N = len(reader)
for i in range(N):
reader.read(i)
assert format_class.get_imageset([image])
|
Test additional image examples in dials_data, not dials_regression
|
tests/test_imageset.py
|
test_other_formats
|
dials/dxtbx
| 3 |
python
|
def test_other_formats(image_examples):
for image in image_examples:
format_class = dxtbx.format.Registry.get_format_class_for_file(image)
reader = format_class.get_reader()([image])
N = len(reader)
for i in range(N):
reader.read(i)
assert format_class.get_imageset([image])
|
def test_other_formats(image_examples):
for image in image_examples:
format_class = dxtbx.format.Registry.get_format_class_for_file(image)
reader = format_class.get_reader()([image])
N = len(reader)
for i in range(N):
reader.read(i)
assert format_class.get_imageset([image])<|docstring|>Test additional image examples in dials_data, not dials_regression<|endoftext|>
|
7fd65f05322c47386c3af9da081ecea92c0256638b619cf8d4d206c0482317c8
|
def download_cifar():
"Download the CIFAR-10 dataset if it's not already available."
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
dir_name = 'cifar-10-batches-bin'
filename = 'cifar-10-binary.tar.gz'
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'Datasets')
filepath = os.path.join(data_dir, dir_name)
if (not os.path.exists(filepath)):
with tempfile.TemporaryDirectory(dir=data_dir) as tmpdirname:
tmpfilepath = os.path.join(tmpdirname, filename)
print('Downloading', filename, 'to', tmpfilepath)
error_count = 0
while True:
try:
(tmpfilepath, _) = request.urlretrieve(DATA_URL, tmpfilepath)
break
                except Exception:
error_count += 1
if (error_count > 5):
print("Couldn't download", DATA_URL)
raise
time.sleep(5)
print('Successfully downloaded, extracting to', tmpdirname)
tarfile.open(tmpfilepath, 'r:gz').extractall(tmpdirname)
print('Moving', tmpdirname, 'to', data_dir)
try:
os.rename(os.path.join(tmpdirname, dir_name), os.path.join(data_dir, dir_name))
except OSError:
pass
return os.path.join(data_dir, dir_name)
|
Download the CIFAR-10 dataset if it's not already available.
|
Graphcore/benchmarks/resnet/implementations/tensorflow/test/test_common.py
|
download_cifar
|
CaoZhongZ/training_results_v1.0
| 27 |
python
|
def download_cifar():
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
dir_name = 'cifar-10-batches-bin'
filename = 'cifar-10-binary.tar.gz'
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'Datasets')
filepath = os.path.join(data_dir, dir_name)
if (not os.path.exists(filepath)):
with tempfile.TemporaryDirectory(dir=data_dir) as tmpdirname:
tmpfilepath = os.path.join(tmpdirname, filename)
print('Downloading', filename, 'to', tmpfilepath)
error_count = 0
while True:
try:
(tmpfilepath, _) = request.urlretrieve(DATA_URL, tmpfilepath)
break
                except Exception:
error_count += 1
if (error_count > 5):
print("Couldn't download", DATA_URL)
raise
time.sleep(5)
print('Successfully downloaded, extracting to', tmpdirname)
tarfile.open(tmpfilepath, 'r:gz').extractall(tmpdirname)
print('Moving', tmpdirname, 'to', data_dir)
try:
os.rename(os.path.join(tmpdirname, dir_name), os.path.join(data_dir, dir_name))
except OSError:
pass
return os.path.join(data_dir, dir_name)
|
def download_cifar():
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
dir_name = 'cifar-10-batches-bin'
filename = 'cifar-10-binary.tar.gz'
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'Datasets')
filepath = os.path.join(data_dir, dir_name)
if (not os.path.exists(filepath)):
with tempfile.TemporaryDirectory(dir=data_dir) as tmpdirname:
tmpfilepath = os.path.join(tmpdirname, filename)
print('Downloading', filename, 'to', tmpfilepath)
error_count = 0
while True:
try:
(tmpfilepath, _) = request.urlretrieve(DATA_URL, tmpfilepath)
break
                except Exception:
error_count += 1
if (error_count > 5):
print("Couldn't download", DATA_URL)
raise
time.sleep(5)
print('Successfully downloaded, extracting to', tmpdirname)
tarfile.open(tmpfilepath, 'r:gz').extractall(tmpdirname)
print('Moving', tmpdirname, 'to', data_dir)
try:
os.rename(os.path.join(tmpdirname, dir_name), os.path.join(data_dir, dir_name))
except OSError:
pass
return os.path.join(data_dir, dir_name)<|docstring|>Download the CIFAR-10 dataset if it's not already available.<|endoftext|>
|
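download_cifar above is an instance of the download-then-atomic-rename pattern: fetch and extract inside a tempfile.TemporaryDirectory created on the same filesystem as the destination, then os.rename the finished result into place, so concurrent readers never observe a half-extracted dataset (the OSError pass covers losing that race to another process). The kernel of the pattern, stripped down (url and paths are hypothetical):

import os
import tempfile
from urllib import request

def fetch_atomically(url, data_dir, name):
    target = os.path.join(data_dir, name)
    if not os.path.exists(target):
        # same filesystem as target, so the final rename is atomic
        with tempfile.TemporaryDirectory(dir=data_dir) as tmpdir:
            tmpfile = os.path.join(tmpdir, name)
            request.urlretrieve(url, tmpfile)
            try:
                os.rename(tmpfile, target)
            except OSError:
                pass  # another process already installed it; theirs is fine
    return target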
2ccd002ef5b9dc32a183fb984ad2460c02aec7430fc439645e07375ddec53306
|
def sysinfo():
    '\n Print the system and dependency information\n '
pyndl = pkg_resources.working_set.by_key['pyndl']
dependencies = [r.project_name for r in pyndl.requires()]
header = 'Pyndl Information\n=================\n\n'
general = 'General Information\n-------------------\nPython version: {}\nPyndl version: {}\n\n'.format(sys.version.split()[0], __version__)
uname = os.uname()
osinfo = 'Operating System\n----------------\nOS: {s.sysname} {s.machine}\nKernel: {s.release}\nCPU: {cpu_count}\n'.format(s=uname, cpu_count=mp.cpu_count())
if (uname.sysname == 'Linux'):
(_, *lines) = os.popen('free -m').readlines()
for identifier in ('Mem:', 'Swap:'):
memory = [line for line in lines if (identifier in line)]
if (len(memory) > 0):
(_, total, used, *_) = memory[0].split()
else:
(total, used) = ('?', '?')
osinfo += '{} {}MiB/{}MiB\n'.format(identifier, used, total)
osinfo += '\n'
deps = 'Dependencies\n------------\n'
deps += '\n'.join(('{pkg.__name__}: {pkg.__version__}'.format(pkg=__import__(dep)) for dep in dependencies))
print((((header + general) + osinfo) + deps))
|
Print the system and dependency information
|
pyndl/__init__.py
|
sysinfo
|
pn2200/pyndl
| 0 |
python
|
def sysinfo():
'\n \n '
pyndl = pkg_resources.working_set.by_key['pyndl']
dependencies = [r.project_name for r in pyndl.requires()]
header = 'Pyndl Information\n=================\n\n'
general = 'General Information\n-------------------\nPython version: {}\nPyndl version: {}\n\n'.format(sys.version.split()[0], __version__)
uname = os.uname()
osinfo = 'Operating System\n----------------\nOS: {s.sysname} {s.machine}\nKernel: {s.release}\nCPU: {cpu_count}\n'.format(s=uname, cpu_count=mp.cpu_count())
if (uname.sysname == 'Linux'):
(_, *lines) = os.popen('free -m').readlines()
for identifier in ('Mem:', 'Swap:'):
memory = [line for line in lines if (identifier in line)]
if (len(memory) > 0):
(_, total, used, *_) = memory[0].split()
else:
(total, used) = ('?', '?')
osinfo += '{} {}MiB/{}MiB\n'.format(identifier, used, total)
osinfo += '\n'
deps = 'Dependencies\n------------\n'
deps += '\n'.join(('{pkg.__name__}: {pkg.__version__}'.format(pkg=__import__(dep)) for dep in dependencies))
print((((header + general) + osinfo) + deps))
|
def sysinfo():
'\n \n '
pyndl = pkg_resources.working_set.by_key['pyndl']
dependencies = [r.project_name for r in pyndl.requires()]
header = 'Pyndl Information\n=================\n\n'
general = 'General Information\n-------------------\nPython version: {}\nPyndl version: {}\n\n'.format(sys.version.split()[0], __version__)
uname = os.uname()
osinfo = 'Operating System\n----------------\nOS: {s.sysname} {s.machine}\nKernel: {s.release}\nCPU: {cpu_count}\n'.format(s=uname, cpu_count=mp.cpu_count())
if (uname.sysname == 'Linux'):
(_, *lines) = os.popen('free -m').readlines()
for identifier in ('Mem:', 'Swap:'):
memory = [line for line in lines if (identifier in line)]
if (len(memory) > 0):
(_, total, used, *_) = memory[0].split()
else:
(total, used) = ('?', '?')
osinfo += '{} {}MiB/{}MiB\n'.format(identifier, used, total)
osinfo += '\n'
deps = 'Dependencies\n------------\n'
deps += '\n'.join(('{pkg.__name__}: {pkg.__version__}'.format(pkg=__import__(dep)) for dep in dependencies))
    print((((header + general) + osinfo) + deps))<|docstring|>Print the system and dependency information<|endoftext|>
|
49b80051b15400aeacba72b0ecfc2e8f8bee390a897e43a7354f14700e7f3d35
|
def __init__(self, temboo_session):
'\n Create a new instance of the ListBatchUnsubscribe Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n '
super(ListBatchUnsubscribe, self).__init__(temboo_session, '/Library/MailChimp/ListBatchUnsubscribe')
|
Create a new instance of the ListBatchUnsubscribe Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
|
temboo/core/Library/MailChimp/ListBatchUnsubscribe.py
|
__init__
|
jordanemedlock/psychtruths
| 7 |
python
|
def __init__(self, temboo_session):
'\n Create a new instance of the ListBatchUnsubscribe Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n '
super(ListBatchUnsubscribe, self).__init__(temboo_session, '/Library/MailChimp/ListBatchUnsubscribe')
|
def __init__(self, temboo_session):
'\n Create a new instance of the ListBatchUnsubscribe Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n '
super(ListBatchUnsubscribe, self).__init__(temboo_session, '/Library/MailChimp/ListBatchUnsubscribe')<|docstring|>Create a new instance of the ListBatchUnsubscribe Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.<|endoftext|>
|
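The records that follow are generated setter/getter shims over one input and one output dictionary, so a single usage sketch covers them all (hedged: new_input_set() and execute_with_results() are assumed from the Temboo SDK's Choreography base class, the import path follows this record's path field, and every credential below is a placeholder):

from temboo.core.session import TembooSession
from temboo.core.Library.MailChimp.ListBatchUnsubscribe import ListBatchUnsubscribe

session = TembooSession('ACCOUNT', 'APP_KEY_NAME', 'APP_KEY_VALUE')
choreo = ListBatchUnsubscribe(session)
inputs = choreo.new_input_set()
inputs.set_APIKey('your-mailchimp-api-key')
inputs.set_ListId('your-list-id')
inputs.set_Email('a@example.com,b@example.com')
results = choreo.execute_with_results(inputs)
print(results.get_SuccessList())
print(results.get_ErrorList())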
fce89bb8622135cbdfae9e2c7103e2847588bf3cc97deaea181d2d5af0848f40
|
def set_APIKey(self, value):
'\n Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp)\n '
super(ListBatchUnsubscribeInputSet, self)._set_input('APIKey', value)
|
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp)
|
temboo/core/Library/MailChimp/ListBatchUnsubscribe.py
|
set_APIKey
|
jordanemedlock/psychtruths
| 7 |
python
|
def set_APIKey(self, value):
'\n \n '
super(ListBatchUnsubscribeInputSet, self)._set_input('APIKey', value)
|
def set_APIKey(self, value):
'\n \n '
super(ListBatchUnsubscribeInputSet, self)._set_input('APIKey', value)<|docstring|>Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp)<|endoftext|>
|
e685ba4f379573fe34fee7d6c0dfd321a5077e26df9bca71122a6b437e7609a0
|
def set_DeleteMember(self, value):
"\n Set the value of the DeleteMember input for this Choreo. ((optional, boolean) A flag used to completely delete the member from your list instead of just unsubscribing. Specify '1' (true) or '0' (false). Defaults to 0.)\n "
super(ListBatchUnsubscribeInputSet, self)._set_input('DeleteMember', value)
|
Set the value of the DeleteMember input for this Choreo. ((optional, boolean) A flag used to completely delete the member from your list instead of just unsubscribing. Specify '1' (true) or '0' (false). Defaults to 0.)
|
temboo/core/Library/MailChimp/ListBatchUnsubscribe.py
|
set_DeleteMember
|
jordanemedlock/psychtruths
| 7 |
python
|
def set_DeleteMember(self, value):
"\n \n "
super(ListBatchUnsubscribeInputSet, self)._set_input('DeleteMember', value)
|
def set_DeleteMember(self, value):
"\n \n "
super(ListBatchUnsubscribeInputSet, self)._set_input('DeleteMember', value)<|docstring|>Set the value of the DeleteMember input for this Choreo. ((optional, boolean) A flag used to completely delete the member from your list instead of just unsubscribing. Specify '1' (true) or '0' (false). Defaults to 0.)<|endoftext|>
|
3b42dd6956705449f77fbd2fc40edb2196328988cdfbbdc908d52d21d5c6c86a
|
def set_Email(self, value):
    '\n Set the value of the Email input for this Choreo. ((required, string) The email address to unsubscribe from a Mailchimp list. Multiple emails can be supplied, separated by commas.)\n '
super(ListBatchUnsubscribeInputSet, self)._set_input('Email', value)
|
Set the value of the Email input for this Choreo. ((required, string) The email address to unsubscribe from a Mailchimp list. Multiple emails can be supplied, separated by commas.)
|
temboo/core/Library/MailChimp/ListBatchUnsubscribe.py
|
set_Email
|
jordanemedlock/psychtruths
| 7 |
python
|
def set_Email(self, value):
'\n \n '
super(ListBatchUnsubscribeInputSet, self)._set_input('Email', value)
|
def set_Email(self, value):
'\n \n '
    super(ListBatchUnsubscribeInputSet, self)._set_input('Email', value)<|docstring|>Set the value of the Email input for this Choreo. ((required, string) The email address to unsubscribe from a Mailchimp list. Multiple emails can be supplied, separated by commas.)<|endoftext|>
|
d7f8e566c299c4f76603114c7905471bab0b49abfc15c5b3d43cae23a239d970
|
def set_ListId(self, value):
'\n Set the value of the ListId input for this Choreo. ((required, string) The Mailchimp List ID)\n '
super(ListBatchUnsubscribeInputSet, self)._set_input('ListId', value)
|
Set the value of the ListId input for this Choreo. ((required, string) The Mailchimp List ID)
|
temboo/core/Library/MailChimp/ListBatchUnsubscribe.py
|
set_ListId
|
jordanemedlock/psychtruths
| 7 |
python
|
def set_ListId(self, value):
'\n \n '
super(ListBatchUnsubscribeInputSet, self)._set_input('ListId', value)
|
def set_ListId(self, value):
'\n \n '
super(ListBatchUnsubscribeInputSet, self)._set_input('ListId', value)<|docstring|>Set the value of the ListId input for this Choreo. ((required, string) The Mailchimp List ID)<|endoftext|>
|
c920970d992d7c762229a3980ad79a2e2ee50e9211b2eb5e7346b8a826ddad57
|
def set_SendGoodbye(self, value):
"\n Set the value of the SendGoodbye input for this Choreo. ((optional, boolean) A flag used to send the goodbye email to the email address. Specify '1' (true) or '0' (false). Defaults to 0.)\n "
super(ListBatchUnsubscribeInputSet, self)._set_input('SendGoodbye', value)
|
Set the value of the SendGoodbye input for this Choreo. ((optional, boolean) A flag used to send the goodbye email to the email address. Specify '1' (true) or '0' (false). Defaults to 0.)
|
temboo/core/Library/MailChimp/ListBatchUnsubscribe.py
|
set_SendGoodbye
|
jordanemedlock/psychtruths
| 7 |
python
|
def set_SendGoodbye(self, value):
"\n \n "
super(ListBatchUnsubscribeInputSet, self)._set_input('SendGoodbye', value)
|
def set_SendGoodbye(self, value):
"\n \n "
super(ListBatchUnsubscribeInputSet, self)._set_input('SendGoodbye', value)<|docstring|>Set the value of the SendGoodbye input for this Choreo. ((optional, boolean) A flag used to send the goodbye email to the email address. Specify '1' (true) or '0' (false). Defaults to 0.)<|endoftext|>
|
505c43393c2f1ab9bb3698e7fdbdaa7bf774290f45f83c50284cb62bdd140b0b
|
def set_SendNotify(self, value):
"\n Set the value of the SendNotify input for this Choreo. ((optional, boolean) A flag used to send the unsubscribe notification email to the address defined in the list email notification settings. Specify '1' (true) or '0' (false). Defaults to 0.)\n "
super(ListBatchUnsubscribeInputSet, self)._set_input('SendNotify', value)
|
Set the value of the SendNotify input for this Choreo. ((optional, boolean) A flag used to send the unsubscribe notification email to the address defined in the list email notification settings. Specify '1' (true) or '0' (false). Defaults to 0.)
|
temboo/core/Library/MailChimp/ListBatchUnsubscribe.py
|
set_SendNotify
|
jordanemedlock/psychtruths
| 7 |
python
|
def set_SendNotify(self, value):
"\n \n "
super(ListBatchUnsubscribeInputSet, self)._set_input('SendNotify', value)
|
def set_SendNotify(self, value):
"\n \n "
super(ListBatchUnsubscribeInputSet, self)._set_input('SendNotify', value)<|docstring|>Set the value of the SendNotify input for this Choreo. ((optional, boolean) A flag used to send the unsubscribe notification email to the address defined in the list email notification settings. Specify '1' (true) or '0' (false). Defaults to 0.)<|endoftext|>
|
5805c1f4c1c586a855fe7014311d8c23d7ad1c5ad88b23cb908272520be1cb73
|
def set_SupressErrors(self, value):
    '\n Set the value of the SupressErrors input for this Choreo. ((optional, boolean) Whether or not to suppress errors that arise from attempting to unsubscribe an email address. Defaults to 0 (false). Set to 1 (true) to suppress errors.)\n '
super(ListBatchUnsubscribeInputSet, self)._set_input('SupressErrors', value)
|
Set the value of the SupressErrors input for this Choreo. ((optional, boolean) Whether or not to suppress errors that arise from attempting to unsubscribe an email address. Defaults to 0 (false). Set to 1 (true) to suppress errors.)
|
temboo/core/Library/MailChimp/ListBatchUnsubscribe.py
|
set_SupressErrors
|
jordanemedlock/psychtruths
| 7 |
python
|
def set_SupressErrors(self, value):
'\n \n '
super(ListBatchUnsubscribeInputSet, self)._set_input('SupressErrors', value)
|
def set_SupressErrors(self, value):
'\n \n '
    super(ListBatchUnsubscribeInputSet, self)._set_input('SupressErrors', value)<|docstring|>Set the value of the SupressErrors input for this Choreo. ((optional, boolean) Whether or not to suppress errors that arise from attempting to unsubscribe an email address. Defaults to 0 (false). Set to 1 (true) to suppress errors.)<|endoftext|>
|
ef2c40650a3354a2276184959caaea3bbda7abc22ec99aff7d3c74f4a9992b79
|
def get_ErrorList(self):
'\n Retrieve the value for the "ErrorList" output from this Choreo execution. ((json) A list of emails that were not successfully unsubscribed.)\n '
return self._output.get('ErrorList', None)
|
Retrieve the value for the "ErrorList" output from this Choreo execution. ((json) A list of emails that were not successfully unsubscribed.)
|
temboo/core/Library/MailChimp/ListBatchUnsubscribe.py
|
get_ErrorList
|
jordanemedlock/psychtruths
| 7 |
python
|
def get_ErrorList(self):
'\n \n '
return self._output.get('ErrorList', None)
|
def get_ErrorList(self):
'\n \n '
return self._output.get('ErrorList', None)<|docstring|>Retrieve the value for the "ErrorList" output from this Choreo execution. ((json) A list of emails that were not successfully unsubscribed.)<|endoftext|>
|
e032aec101f8ff0cbea3cc9c42df075c959be28a4dadd213b093b6fef5167624
|
def get_SuccessList(self):
    '\n Retrieve the value for the "SuccessList" output from this Choreo execution. ((json) A list of emails successfully unsubscribed.)\n '
return self._output.get('SuccessList', None)
|
Retrieve the value for the "SuccessList" output from this Choreo execution. ((json) A list of emails successfully unsubscribed.)
|
temboo/core/Library/MailChimp/ListBatchUnsubscribe.py
|
get_SuccessList
|
jordanemedlock/psychtruths
| 7 |
python
|
def get_SuccessList(self):
'\n \n '
return self._output.get('SuccessList', None)
|
def get_SuccessList(self):
'\n \n '
    return self._output.get('SuccessList', None)<|docstring|>Retrieve the value for the "SuccessList" output from this Choreo execution. ((json) A list of emails successfully unsubscribed.)<|endoftext|>
|
1ccc5ab131d4597e3a457a0f3b00fb4591498cc5b6fa6f1cc0e2c980df49a70e
|
def get_max_preds(batch_heatmaps):
'\n get predictions from score maps\n heatmaps: numpy.ndarray([batch_size, num_joints, height, width])\n '
assert isinstance(batch_heatmaps, np.ndarray), 'batch_heatmaps should be numpy.ndarray'
assert (batch_heatmaps.ndim == 4), 'batch_images should be 4-ndim'
batch_size = batch_heatmaps.shape[0]
num_joints = batch_heatmaps.shape[1]
width = batch_heatmaps.shape[3]
heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, (- 1)))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
    preds[:, :, 0] = (preds[:, :, 0] % width)
    preds[:, :, 1] = np.floor((preds[:, :, 1] / width))
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return (preds, maxvals)
|
get predictions from score maps
heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
|
lib/core/inference.py
|
get_max_preds
|
MickaelCormier/MIPNet
| 35 |
python
|
def get_max_preds(batch_heatmaps):
'\n get predictions from score maps\n heatmaps: numpy.ndarray([batch_size, num_joints, height, width])\n '
assert isinstance(batch_heatmaps, np.ndarray), 'batch_heatmaps should be numpy.ndarray'
assert (batch_heatmaps.ndim == 4), 'batch_images should be 4-ndim'
batch_size = batch_heatmaps.shape[0]
num_joints = batch_heatmaps.shape[1]
width = batch_heatmaps.shape[3]
heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, (- 1)))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
    preds[:, :, 0] = (preds[:, :, 0] % width)
    preds[:, :, 1] = np.floor((preds[:, :, 1] / width))
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return (preds, maxvals)
|
def get_max_preds(batch_heatmaps):
'\n get predictions from score maps\n heatmaps: numpy.ndarray([batch_size, num_joints, height, width])\n '
assert isinstance(batch_heatmaps, np.ndarray), 'batch_heatmaps should be numpy.ndarray'
assert (batch_heatmaps.ndim == 4), 'batch_images should be 4-ndim'
batch_size = batch_heatmaps.shape[0]
num_joints = batch_heatmaps.shape[1]
width = batch_heatmaps.shape[3]
heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, (- 1)))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
    preds[:, :, 0] = (preds[:, :, 0] % width)
    preds[:, :, 1] = np.floor((preds[:, :, 1] / width))
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return (preds, maxvals)<|docstring|>get predictions from score maps
heatmaps: numpy.ndarray([batch_size, num_joints, height, width])<|endoftext|>
|
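A worked example of the decode step above: with width = 4, a flat argmax index of 6 splits into x = 6 % 4 = 2 and y = floor(6 / 4) = 1, and the maxvals > 0 mask zeroes out predictions whose heatmap peak is non-positive. A toy check (shapes are illustrative only):

import numpy as np

hm = np.zeros((1, 1, 3, 4), dtype=np.float32)  # batch 1, 1 joint, 3x4 heatmap
hm[0, 0, 1, 2] = 1.0                           # peak at row y=1, column x=2
preds, maxvals = get_max_preds(hm)
print(preds)    # [[[2. 1.]]] -> (x, y) of the peak
print(maxvals)  # [[[1.]]]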
969b645ead9e2686ead9b74bc2841b1b8ebb730ab7d5e52e5bd22b0b363cd2d9
|
def get_norm(cfg):
'\n Args:\n cfg (CfgNode): model building configs, details are in the comments of\n the config file.\n Returns:\n nn.Module: the normalization layer.\n '
norm_type = cfg.MODEL.NORM.TYPE
if (norm_type == 'BatchNorm2d'):
return nn.BatchNorm2d
elif (norm_type == 'BatchNorm3d'):
return nn.BatchNorm3d
elif (norm_type == 'GroupNorm'):
num_groups = cfg.MODEL.NORM.GROUPS
return partial(GroupNormWrapper, num_groups=num_groups)
else:
raise NotImplementedError('Norm type {} is not supported'.format(norm_type))
|
Args:
cfg (CfgNode): model building configs, details are in the comments of
the config file.
Returns:
nn.Module: the normalization layer.
|
zcls/model/norm_helper.py
|
get_norm
|
zjykzj/PyCls
| 110 |
python
|
def get_norm(cfg):
'\n Args:\n cfg (CfgNode): model building configs, details are in the comments of\n the config file.\n Returns:\n nn.Module: the normalization layer.\n '
norm_type = cfg.MODEL.NORM.TYPE
if (norm_type == 'BatchNorm2d'):
return nn.BatchNorm2d
elif (norm_type == 'BatchNorm3d'):
return nn.BatchNorm3d
elif (norm_type == 'GroupNorm'):
num_groups = cfg.MODEL.NORM.GROUPS
return partial(GroupNormWrapper, num_groups=num_groups)
else:
raise NotImplementedError('Norm type {} is not supported'.format(norm_type))
|
def get_norm(cfg):
'\n Args:\n cfg (CfgNode): model building configs, details are in the comments of\n the config file.\n Returns:\n nn.Module: the normalization layer.\n '
norm_type = cfg.MODEL.NORM.TYPE
if (norm_type == 'BatchNorm2d'):
return nn.BatchNorm2d
elif (norm_type == 'BatchNorm3d'):
return nn.BatchNorm3d
elif (norm_type == 'GroupNorm'):
num_groups = cfg.MODEL.NORM.GROUPS
return partial(GroupNormWrapper, num_groups=num_groups)
else:
raise NotImplementedError('Norm type {} is not supported'.format(norm_type))<|docstring|>Args:
cfg (CfgNode): model building configs, details are in the comments of
the config file.
Returns:
nn.Module: the normalization layer.<|endoftext|>
|
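The return value above is a constructor, not a module instance; GroupNorm merely has its group count pre-bound via functools.partial so that all three branches can be called the same way. A usage sketch (assumption: GroupNormWrapper is a shim defined elsewhere in this module that gives nn.GroupNorm a BatchNorm-like num_features signature):

norm_layer = get_norm(cfg)  # e.g. nn.BatchNorm2d when TYPE='BatchNorm2d'
bn = norm_layer(64)         # built exactly like nn.BatchNorm2d(64)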
73815795f2db6c2c0017c6579dd9e9276e94fe1c49980e22f1e63be64765c62b
|
@manager.command
def test(coverage=False):
'Run the unit tests.'
if (coverage and (not os.environ.get('FLASK_COVERAGE'))):
import sys
os.environ['FLASK_COVERAGE'] = '1'
os.execvp(sys.executable, ([sys.executable] + sys.argv))
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print(('HTML version: file://%s/index.html' % covdir))
COV.erase()
|
Run the unit tests.
|
manage.py
|
test
|
kongyinfang/funny
| 327 |
python
|
@manager.command
def test(coverage=False):
if (coverage and (not os.environ.get('FLASK_COVERAGE'))):
import sys
os.environ['FLASK_COVERAGE'] = '1'
os.execvp(sys.executable, ([sys.executable] + sys.argv))
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print(('HTML version: file://%s/index.html' % covdir))
COV.erase()
|
@manager.command
def test(coverage=False):
if (coverage and (not os.environ.get('FLASK_COVERAGE'))):
import sys
os.environ['FLASK_COVERAGE'] = '1'
os.execvp(sys.executable, ([sys.executable] + sys.argv))
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print(('HTML version: file://%s/index.html' % covdir))
COV.erase()<|docstring|>Run the unit tests.<|endoftext|>
|
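One subtlety in the test command above: coverage has to start before the application package is imported, which a command function runs too late to do. Hence the re-exec trick, presumably letting the module-level code that defines COV start coverage at import time in the restarted process. The kernel of that trick in isolation:

import os
import sys

if not os.environ.get('FLASK_COVERAGE'):
    os.environ['FLASK_COVERAGE'] = '1'
    # replace this process with a fresh interpreter running the same argv;
    # the restart sees the flag during imports and can start coverage early
    os.execvp(sys.executable, [sys.executable] + sys.argv)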
3cb34dd52687e3b4a226c37ac0fbb698176347e0375d7bf7af31cf95c04d4ccd
|
@manager.command
def profile(length=25, profile_dir=None):
'Start the application under the code profiler.'
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length], profile_dir=profile_dir)
app.run()
|
Start the application under the code profiler.
|
manage.py
|
profile
|
kongyinfang/funny
| 327 |
python
|
@manager.command
def profile(length=25, profile_dir=None):
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length], profile_dir=profile_dir)
app.run()
|
@manager.command
def profile(length=25, profile_dir=None):
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length], profile_dir=profile_dir)
app.run()<|docstring|>Start the application under the code profiler.<|endoftext|>
|
9dbd55e9913eecca3e43a293cc1709f2e9f36d4fb62f1e413951c47d11f56ac6
|
@manager.command
def deploy():
'Run deployment tasks.'
from flask.ext.migrate import upgrade
from app.models import Role, User
upgrade()
Role.insert_roles()
User.add_self_follows()
|
Run deployment tasks.
|
manage.py
|
deploy
|
kongyinfang/funny
| 327 |
python
|
@manager.command
def deploy():
from flask.ext.migrate import upgrade
from app.models import Role, User
upgrade()
Role.insert_roles()
User.add_self_follows()
|
@manager.command
def deploy():
from flask.ext.migrate import upgrade
from app.models import Role, User
upgrade()
Role.insert_roles()
User.add_self_follows()<|docstring|>Run deployment tasks.<|endoftext|>
|
b3aa10a5b55121b868bf0507d584f9d701915563d21d1e6d1b0a72e0a8fc6244
|
def estimate(self, D: Tensor):
'\n Compute the median trick to estimate a kernel bandwidth parameter.\n Args:\n D: Distance matrix\n\n Returns: Float64\n '
lower = tfp.stats.percentile(D, 50.0, interpolation='lower')
upper = tfp.stats.percentile(D, 50.0, interpolation='higher')
n = D.shape[0]
median = tf.cast(((lower + upper) / 2.0), tf.float64)
h = tf.sqrt(((0.5 * median) / tf.cast(tf.math.log((n + 1.0)), dtype=tf.float64)))
tf.stop_gradient(h)
self.history.append(h)
return h
|
Compute the median trick to estimate a kernel bandwidth parameter.
Args:
D: Distance matrix
Returns: Float64
|
steingp/bandwidths.py
|
estimate
|
thomaspinder/SteinGP
| 6 |
python
|
def estimate(self, D: Tensor):
'\n Compute the median trick to estimate a kernel bandwidth parameter.\n Args:\n D: Distance matrix\n\n Returns: Float64\n '
lower = tfp.stats.percentile(D, 50.0, interpolation='lower')
upper = tfp.stats.percentile(D, 50.0, interpolation='higher')
n = D.shape[0]
median = tf.cast(((lower + upper) / 2.0), tf.float64)
h = tf.sqrt(((0.5 * median) / tf.cast(tf.math.log((n + 1.0)), dtype=tf.float64)))
tf.stop_gradient(h)
self.history.append(h)
return h
|
def estimate(self, D: Tensor):
'\n Compute the median trick to estimate a kernel bandwidth parameter.\n Args:\n D: Distance matrix\n\n Returns: Float64\n '
lower = tfp.stats.percentile(D, 50.0, interpolation='lower')
upper = tfp.stats.percentile(D, 50.0, interpolation='higher')
n = D.shape[0]
median = tf.cast(((lower + upper) / 2.0), tf.float64)
h = tf.sqrt(((0.5 * median) / tf.cast(tf.math.log((n + 1.0)), dtype=tf.float64)))
tf.stop_gradient(h)
self.history.append(h)
return h<|docstring|>Compute the median trick to estimate a kernel bandwidth parameter.
Args:
D: Distance matrix
Returns: Float64<|endoftext|>
|
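Written out, the estimator above is the standard median trick, h = sqrt(median(D) / (2 * log(n + 1))), where D holds pairwise distances and n is the number of points; the lower/higher percentile pair is just an explicit median for even-sized inputs. A plain NumPy restatement of the same formula (a sketch, independent of the class above):

import numpy as np

def median_trick(D):
    n = D.shape[0]
    med = np.median(D)
    return np.sqrt(0.5 * med / np.log(n + 1.0))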
0dfb4939e06ef1249498822830752a4ecfe3ff977d2d3adcbc5b983430d09a3f
|
@coroutine
def snmp_get(address, oids, port=161, community='public', version=SNMP_v2c, timeout=10, tos=None, ioloop=None, udp_socket=None, raw_varbinds=False):
    '\n Perform an SNMP GET request and return a Future to be used\n inside @tornado.gen.coroutine\n '
oid_map = {}
if isinstance(oids, six.string_types):
oids = [oids]
elif isinstance(oids, dict):
oid_map = dict(((oids[k], k) for k in oids))
oids = list(six.itervalues(oids))
else:
raise ValueError('oids must be either string or dict')
logger.debug('[%s] SNMP GET %s', address, oids)
pdu = get_pdu(community=community, oids=oids, version=version)
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=oids[0])
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oids[0])
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oids[0])
finally:
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
try:
if raw_varbinds:
resp = parse_get_response_raw(data)
else:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oids[0])
if (resp.error_status == NO_ERROR):
if oid_map:
result = {}
for (k, v) in resp.varbinds:
if (k in oid_map):
result[oid_map[k]] = v
else:
logger.error('[%s] Invalid oid %s returned in reply', address, k)
else:
result = resp.varbinds[0][1]
logger.debug('[%s] GET result: %r', address, result)
raise Return(result)
elif ((resp.error_status == NO_SUCH_NAME) and (len(oids) > 1)):
b_idx = (resp.error_index - 1)
logger.debug('[%s] Invalid oid %s detected, trying to exclude', address, resp.varbinds[b_idx][0])
result = {}
oid_parts = []
if b_idx:
oid_parts += [[vb[0] for vb in resp.varbinds[:b_idx]]]
if (b_idx < (len(resp.varbinds) - 1)):
oid_parts += [[vb[0] for vb in resp.varbinds[(b_idx + 1):]]]
for new_oids in oid_parts:
try:
new_result = (yield snmp_get(address=address, oids=dict(((k, k) for k in new_oids)), port=port, community=community, version=version, timeout=timeout, tos=tos, ioloop=ioloop, udp_socket=sock))
except SNMPError as e:
if ((e.code == NO_SUCH_NAME) and (len(new_oids) == 1)):
new_result = {}
else:
raise
for k in new_result:
if (k in oid_map):
result[oid_map[k]] = new_result[k]
else:
logger.info('[%s] Invalid oid %s returned in reply', address, k)
if result:
logger.debug('[%s] GET result: %r', address, result)
raise Return(result)
else:
logger.debug('[%s] All oids are broken', address)
raise SNMPError(code=NO_SUCH_NAME, oid=oids[0])
else:
oid = None
if (resp.error_index and resp.varbinds):
if (resp.error_index & 32768):
oid = resp.varbinds[(65536 - resp.error_index)][0]
else:
oid = resp.varbinds[(resp.error_index - 1)][0]
logger.debug('[%s] SNMP error: %s %s', address, oid, resp.error_status)
raise SNMPError(code=resp.error_status, oid=oid)
|
Perform an SNMP GET request and return a Future to be used
inside @tornado.gen.coroutine
|
core/ioloop/snmp.py
|
snmp_get
|
xUndero/noc
| 1 |
python
|
@coroutine
def snmp_get(address, oids, port=161, community='public', version=SNMP_v2c, timeout=10, tos=None, ioloop=None, udp_socket=None, raw_varbinds=False):
    '\n Perform an SNMP GET request and return a Future to be used\n inside @tornado.gen.coroutine\n '
oid_map = {}
if isinstance(oids, six.string_types):
oids = [oids]
elif isinstance(oids, dict):
oid_map = dict(((oids[k], k) for k in oids))
oids = list(six.itervalues(oids))
else:
raise ValueError('oids must be either string or dict')
logger.debug('[%s] SNMP GET %s', address, oids)
pdu = get_pdu(community=community, oids=oids, version=version)
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=oids[0])
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oids[0])
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oids[0])
finally:
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
try:
if raw_varbinds:
resp = parse_get_response_raw(data)
else:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oids[0])
if (resp.error_status == NO_ERROR):
if oid_map:
result = {}
for (k, v) in resp.varbinds:
if (k in oid_map):
result[oid_map[k]] = v
else:
logger.error('[%s] Invalid oid %s returned in reply', address, k)
else:
result = resp.varbinds[0][1]
logger.debug('[%s] GET result: %r', address, result)
raise Return(result)
elif ((resp.error_status == NO_SUCH_NAME) and (len(oids) > 1)):
b_idx = (resp.error_index - 1)
logger.debug('[%s] Invalid oid %s detected, trying to exclude', address, resp.varbinds[b_idx][0])
result = {}
oid_parts = []
if b_idx:
oid_parts += [[vb[0] for vb in resp.varbinds[:b_idx]]]
if (b_idx < (len(resp.varbinds) - 1)):
oid_parts += [[vb[0] for vb in resp.varbinds[(b_idx + 1):]]]
for new_oids in oid_parts:
try:
new_result = (yield snmp_get(address=address, oids=dict(((k, k) for k in new_oids)), port=port, community=community, version=version, timeout=timeout, tos=tos, ioloop=ioloop, udp_socket=sock))
except SNMPError as e:
if ((e.code == NO_SUCH_NAME) and (len(new_oids) == 1)):
new_result = {}
else:
raise
for k in new_result:
if (k in oid_map):
result[oid_map[k]] = new_result[k]
else:
logger.info('[%s] Invalid oid %s returned in reply', address, k)
if result:
logger.debug('[%s] GET result: %r', address, result)
raise Return(result)
else:
logger.debug('[%s] All oids are broken', address)
raise SNMPError(code=NO_SUCH_NAME, oid=oids[0])
else:
oid = None
if (resp.error_index and resp.varbinds):
if (resp.error_index & 32768):
oid = resp.varbinds[(65536 - resp.error_index)][0]
else:
oid = resp.varbinds[(resp.error_index - 1)][0]
logger.debug('[%s] SNMP error: %s %s', address, oid, resp.error_status)
raise SNMPError(code=resp.error_status, oid=oid)
|
@coroutine
def snmp_get(address, oids, port=161, community='public', version=SNMP_v2c, timeout=10, tos=None, ioloop=None, udp_socket=None, raw_varbinds=False):
    '\n Perform an SNMP GET request and return a Future to be used\n inside @tornado.gen.coroutine\n '
oid_map = {}
if isinstance(oids, six.string_types):
oids = [oids]
elif isinstance(oids, dict):
oid_map = dict(((oids[k], k) for k in oids))
oids = list(six.itervalues(oids))
else:
raise ValueError('oids must be either string or dict')
logger.debug('[%s] SNMP GET %s', address, oids)
pdu = get_pdu(community=community, oids=oids, version=version)
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=oids[0])
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oids[0])
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oids[0])
finally:
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
try:
if raw_varbinds:
resp = parse_get_response_raw(data)
else:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oids[0])
if (resp.error_status == NO_ERROR):
if oid_map:
result = {}
for (k, v) in resp.varbinds:
if (k in oid_map):
result[oid_map[k]] = v
else:
logger.error('[%s] Invalid oid %s returned in reply', address, k)
else:
result = resp.varbinds[0][1]
logger.debug('[%s] GET result: %r', address, result)
raise Return(result)
elif ((resp.error_status == NO_SUCH_NAME) and (len(oids) > 1)):
b_idx = (resp.error_index - 1)
logger.debug('[%s] Invalid oid %s detected, trying to exclude', address, resp.varbinds[b_idx][0])
result = {}
oid_parts = []
if b_idx:
oid_parts += [[vb[0] for vb in resp.varbinds[:b_idx]]]
if (b_idx < (len(resp.varbinds) - 1)):
oid_parts += [[vb[0] for vb in resp.varbinds[(b_idx + 1):]]]
for new_oids in oid_parts:
try:
new_result = (yield snmp_get(address=address, oids=dict(((k, k) for k in new_oids)), port=port, community=community, version=version, timeout=timeout, tos=tos, ioloop=ioloop, udp_socket=sock))
except SNMPError as e:
if ((e.code == NO_SUCH_NAME) and (len(new_oids) == 1)):
new_result = {}
else:
raise
for k in new_result:
if (k in oid_map):
result[oid_map[k]] = new_result[k]
else:
logger.info('[%s] Invalid oid %s returned in reply', address, k)
if result:
logger.debug('[%s] GET result: %r', address, result)
raise Return(result)
else:
logger.debug('[%s] All oids are broken', address)
raise SNMPError(code=NO_SUCH_NAME, oid=oids[0])
else:
oid = None
if (resp.error_index and resp.varbinds):
if (resp.error_index & 32768):
oid = resp.varbinds[(65536 - resp.error_index)][0]
else:
oid = resp.varbinds[(resp.error_index - 1)][0]
logger.debug('[%s] SNMP error: %s %s', address, oid, resp.error_status)
        raise SNMPError(code=resp.error_status, oid=oid)<|docstring|>Perform an SNMP GET request and return a Future to be used
inside @tornado.gen.coroutine<|endoftext|>
|
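A usage sketch for the coroutine above (UDPSocket, the PDU builders and the error codes are internal to this module; the OIDs are the standard sysDescr/sysName, used purely for illustration). Passing a dict maps caller-chosen labels to OIDs and keys the result by label, as the oid_map logic above shows:

from tornado.gen import coroutine
from tornado.ioloop import IOLoop

@coroutine
def poll(address):
    result = yield snmp_get(
        address,
        {'descr': '1.3.6.1.2.1.1.1.0', 'name': '1.3.6.1.2.1.1.5.0'},
        community='public', timeout=5)
    print(result)  # e.g. {'descr': ..., 'name': ...}

IOLoop.current().run_sync(lambda: poll('192.0.2.1'))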
2a23335b5b67cbd045b94b5c8979d78a002c2582928673ba6be99d96c542228c
|
@coroutine
def snmp_count(address, oid, port=161, community='public', version=SNMP_v2c, timeout=10, bulk=False, filter=None, max_repetitions=BULK_MAX_REPETITIONS, tos=None, ioloop=None, udp_socket=None):
    '\n Walk an SNMP subtree, counting matching varbinds, and return a Future to be used\n inside @tornado.gen.coroutine\n '
def true(x, y):
        return True
logger.debug('[%s] SNMP COUNT %s', address, oid)
if (not filter):
filter = true
poid = (oid + '.')
result = 0
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
while True:
if bulk:
pdu = getbulk_pdu(community, oid, max_repetitions=max_repetitions, version=version)
else:
pdu = getnext_pdu(community, oid, version=version)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=oid)
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oid)
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oid)
finally:
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
try:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oid)
if (resp.error_status == NO_SUCH_NAME):
break
elif (resp.error_status != NO_ERROR):
raise SNMPError(code=resp.error_status, oid=oid)
else:
for (oid, v) in resp.varbinds:
if oid.startswith(poid):
if filter(oid, v):
result += 1
else:
logger.debug('[%s] COUNT result: %s', address, result)
sock.close()
raise Return(result)
|
Walk an SNMP subtree, counting matching varbinds, and return a Future to be used
inside @tornado.gen.coroutine
|
core/ioloop/snmp.py
|
snmp_count
|
xUndero/noc
| 1 |
python
|
@coroutine
def snmp_count(address, oid, port=161, community='public', version=SNMP_v2c, timeout=10, bulk=False, filter=None, max_repetitions=BULK_MAX_REPETITIONS, tos=None, ioloop=None, udp_socket=None):
    '\n Walk an SNMP subtree, counting matching varbinds, and return a Future to be used\n inside @tornado.gen.coroutine\n '
def true(x, y):
        return True
logger.debug('[%s] SNMP COUNT %s', address, oid)
if (not filter):
filter = true
poid = (oid + '.')
result = 0
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
while True:
if bulk:
pdu = getbulk_pdu(community, oid, max_repetitions=max_repetitions, version=version)
else:
pdu = getnext_pdu(community, oid, version=version)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=oid)
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oid)
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oid)
finally:
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
try:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oid)
if (resp.error_status == NO_SUCH_NAME):
break
elif (resp.error_status != NO_ERROR):
raise SNMPError(code=resp.error_status, oid=oid)
else:
for (oid, v) in resp.varbinds:
if oid.startswith(poid):
if filter(oid, v):
result += 1
else:
logger.debug('[%s] COUNT result: %s', address, result)
sock.close()
raise Return(result)
|
@coroutine
def snmp_count(address, oid, port=161, community='public', version=SNMP_v2c, timeout=10, bulk=False, filter=None, max_repetitions=BULK_MAX_REPETITIONS, tos=None, ioloop=None, udp_socket=None):
    '\n Walk an SNMP subtree, counting matching varbinds, and return a Future to be used\n inside @tornado.gen.coroutine\n '
def true(x, y):
        return True
logger.debug('[%s] SNMP COUNT %s', address, oid)
if (not filter):
filter = true
poid = (oid + '.')
result = 0
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
while True:
if bulk:
pdu = getbulk_pdu(community, oid, max_repetitions=max_repetitions, version=version)
else:
pdu = getnext_pdu(community, oid, version=version)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=oid)
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oid)
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=oid)
finally:
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
try:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oid)
if (resp.error_status == NO_SUCH_NAME):
break
elif (resp.error_status != NO_ERROR):
raise SNMPError(code=resp.error_status, oid=oid)
else:
for (oid, v) in resp.varbinds:
if oid.startswith(poid):
if filter(oid, v):
result += 1
else:
logger.debug('[%s] COUNT result: %s', address, result)
sock.close()
                raise Return(result)<|docstring|>Walk an SNMP subtree, counting matching varbinds, and return a Future to be used
inside @tornado.gen.coroutine<|endoftext|>
|
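The filter callable above receives every (oid, value) pair under the walked subtree and decides whether it counts, with a constant-true default. A sketch counting operationally-up interfaces via the standard IF-MIB ifOperStatus column (1 means up):

from tornado.gen import Return, coroutine

@coroutine
def count_up_interfaces(address):
    n = yield snmp_count(
        address, '1.3.6.1.2.1.2.2.1.8',        # IF-MIB::ifOperStatus
        filter=lambda oid, value: value == 1,  # count only 'up(1)'
        community='public')
    raise Return(n)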
b665de4a5bf0d28eb40dc8f9283b5339ff2e7d4891d1bda338a01cbffe9dc967
|
@coroutine
def snmp_getnext(address, oid, port=161, community='public', version=SNMP_v2c, timeout=10, bulk=False, filter=None, max_repetitions=BULK_MAX_REPETITIONS, only_first=False, tos=None, ioloop=None, udp_socket=None, max_retries=0, raw_varbinds=False):
    '\n Perform an SNMP GETNEXT/BULK request and return a Future to be used\n inside @tornado.gen.coroutine\n '
def true(x, y):
return True
def close_socket():
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
logger.debug('[%s] SNMP GETNEXT %s', address, oid)
if (not filter):
filter = true
poid = (oid + '.')
result = []
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
last_oid = None
while True:
if bulk:
pdu = getbulk_pdu(community, oid, max_repetitions=(max_repetitions or BULK_MAX_REPETITIONS), version=version)
else:
pdu = getnext_pdu(community, oid, version=version)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
if (not max_retries):
close_socket()
raise SNMPError(code=TIMED_OUT, oid=oid)
max_retries -= 1
continue
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
close_socket()
raise SNMPError(code=UNREACHABLE, oid=oid)
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
close_socket()
raise SNMPError(code=UNREACHABLE, oid=oid)
try:
if raw_varbinds:
resp = parse_get_response_raw(data)
else:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oid)
if (resp.error_status == NO_SUCH_NAME):
break
elif (resp.error_status != NO_ERROR):
close_socket()
raise SNMPError(code=resp.error_status, oid=oid)
else:
for (oid, v) in resp.varbinds:
if (oid.startswith(poid) and (not (only_first and result)) and (oid != last_oid)):
if filter(oid, v):
result += [(oid, v)]
last_oid = oid
else:
logger.debug('[%s] GETNEXT result: %s', address, result)
close_socket()
raise Return(result)
close_socket()
|
Perform SNMP GETNEXT/BULK request and returns Future to be used
inside @tornado.gen.coroutine
|
core/ioloop/snmp.py
|
snmp_getnext
|
xUndero/noc
| 1 |
python
|
@coroutine
def snmp_getnext(address, oid, port=161, community='public', version=SNMP_v2c, timeout=10, bulk=False, filter=None, max_repetitions=BULK_MAX_REPETITIONS, only_first=False, tos=None, ioloop=None, udp_socket=None, max_retries=0, raw_varbinds=False):
'\n Perform SNMP GETNEXT/BULK request and returns Future to be used\n inside @tornado.gen.coroutine\n '
def true(x, y):
return True
def close_socket():
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
logger.debug('[%s] SNMP GETNEXT %s', address, oid)
if (not filter):
filter = true
poid = (oid + '.')
result = []
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
last_oid = None
while True:
if bulk:
pdu = getbulk_pdu(community, oid, max_repetitions=(max_repetitions or BULK_MAX_REPETITIONS), version=version)
else:
pdu = getnext_pdu(community, oid, version=version)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
if (not max_retries):
close_socket()
raise SNMPError(code=TIMED_OUT, oid=oid)
max_retries -= 1
continue
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
close_socket()
raise SNMPError(code=UNREACHABLE, oid=oid)
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
close_socket()
raise SNMPError(code=UNREACHABLE, oid=oid)
try:
if raw_varbinds:
resp = parse_get_response_raw(data)
else:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oid)
if (resp.error_status == NO_SUCH_NAME):
break
elif (resp.error_status != NO_ERROR):
close_socket()
raise SNMPError(code=resp.error_status, oid=oid)
else:
for (oid, v) in resp.varbinds:
if (oid.startswith(poid) and (not (only_first and result)) and (oid != last_oid)):
if filter(oid, v):
result += [(oid, v)]
last_oid = oid
else:
logger.debug('[%s] GETNEXT result: %s', address, result)
close_socket()
raise Return(result)
close_socket()
|
@coroutine
def snmp_getnext(address, oid, port=161, community='public', version=SNMP_v2c, timeout=10, bulk=False, filter=None, max_repetitions=BULK_MAX_REPETITIONS, only_first=False, tos=None, ioloop=None, udp_socket=None, max_retries=0, raw_varbinds=False):
'\n Perform SNMP GETNEXT/BULK request and returns Future to be used\n inside @tornado.gen.coroutine\n '
def true(x, y):
return True
def close_socket():
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
logger.debug('[%s] SNMP GETNEXT %s', address, oid)
if (not filter):
filter = true
poid = (oid + '.')
result = []
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
last_oid = None
while True:
if bulk:
pdu = getbulk_pdu(community, oid, max_repetitions=(max_repetitions or BULK_MAX_REPETITIONS), version=version)
else:
pdu = getnext_pdu(community, oid, version=version)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
if (not max_retries):
close_socket()
raise SNMPError(code=TIMED_OUT, oid=oid)
max_retries -= 1
continue
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
close_socket()
raise SNMPError(code=UNREACHABLE, oid=oid)
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
close_socket()
raise SNMPError(code=UNREACHABLE, oid=oid)
try:
if raw_varbinds:
resp = parse_get_response_raw(data)
else:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oid)
if (resp.error_status == NO_SUCH_NAME):
break
elif (resp.error_status != NO_ERROR):
close_socket()
raise SNMPError(code=resp.error_status, oid=oid)
else:
for (oid, v) in resp.varbinds:
if (oid.startswith(poid) and (not (only_first and result)) and (oid != last_oid)):
if filter(oid, v):
result += [(oid, v)]
last_oid = oid
else:
logger.debug('[%s] GETNEXT result: %s', address, result)
close_socket()
raise Return(result)
close_socket()<|docstring|>Perform SNMP GETNEXT/BULK request and returns Future to be used
inside @tornado.gen.coroutine<|endoftext|>
|
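A similar hedged sketch for snmp_getnext: walk the standard ifDescr subtree with GETBULK, keep only non-empty values via the filter callback, and allow one timeout retry; everything except the OID is illustrative.

from tornado.gen import coroutine, Return

@coroutine
def walk_if_descr(address):
    pairs = (yield snmp_getnext(
        address, '1.3.6.1.2.1.2.2.1.2',
        bulk=True,                       # GETBULK instead of plain GETNEXT
        filter=lambda oid, v: bool(v),   # drop empty descriptions
        max_retries=1))                  # tolerate one timeout
    raise Return(pairs)                  # list of (oid, value) tuples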
a6791fe4d7280bee1fa3b260edbd2cc53473810067b33ed518b35e0c18e57b86
|
@coroutine
def snmp_set(address, varbinds, port=161, community='public', version=SNMP_v2c, timeout=10, tos=None, ioloop=None, udp_socket=None):
'\n Perform SNMP set request and returns Future to be used\n inside @tornado.gen.coroutine\n '
logger.debug('[%s] SNMP SET %s', address, varbinds)
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
pdu = set_pdu(community=community, varbinds=varbinds, version=version)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=varbinds[0][0])
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=varbinds[0][0])
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=varbinds[0][0])
finally:
if udp_socket:
sock.settimeout(None)
else:
sock.close()
try:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=varbinds[0][0])
if (resp.error_status != NO_ERROR):
oid = None
if (resp.error_index and resp.varbinds):
oid = resp.varbinds[(resp.error_index - 1)][0]
logger.debug('[%s] SNMP error: %s %s', address, oid, resp.error_status)
raise SNMPError(code=resp.error_status, oid=oid)
else:
logger.debug('[%s] SET result: OK', address)
raise Return(True)
|
Perform SNMP set request and returns Future to be used
inside @tornado.gen.coroutine
|
core/ioloop/snmp.py
|
snmp_set
|
xUndero/noc
| 1 |
python
|
@coroutine
def snmp_set(address, varbinds, port=161, community='public', version=SNMP_v2c, timeout=10, tos=None, ioloop=None, udp_socket=None):
'\n Perform SNMP set request and returns Future to be used\n inside @tornado.gen.coroutine\n '
logger.debug('[%s] SNMP SET %s', address, varbinds)
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
pdu = set_pdu(community=community, varbinds=varbinds, version=version)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=varbinds[0][0])
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=varbinds[0][0])
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=varbinds[0][0])
finally:
if udp_socket:
sock.settimeout(None)
else:
sock.close()
try:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=varbinds[0][0])
if (resp.error_status != NO_ERROR):
oid = None
if (resp.error_index and resp.varbinds):
oid = resp.varbinds[(resp.error_index - 1)][0]
logger.debug('[%s] SNMP error: %s %s', address, oid, resp.error_status)
raise SNMPError(code=resp.error_status, oid=oid)
else:
logger.debug('[%s] SET result: OK', address)
raise Return(True)
|
@coroutine
def snmp_set(address, varbinds, port=161, community='public', version=SNMP_v2c, timeout=10, tos=None, ioloop=None, udp_socket=None):
'\n Perform SNMP set request and returns Future to be used\n inside @tornado.gen.coroutine\n '
logger.debug('[%s] SNMP SET %s', address, varbinds)
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
pdu = set_pdu(community=community, varbinds=varbinds, version=version)
try:
(yield sock.sendto(pdu, (address, port)))
(data, addr) = (yield sock.recvfrom(4096))
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=varbinds[0][0])
except socket.gaierror as e:
logger.debug('[%s] Cannot resolve address: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=varbinds[0][0])
except socket.error as e:
logger.debug('[%s] Socket error: %s', address, e)
raise SNMPError(code=UNREACHABLE, oid=varbinds[0][0])
finally:
if udp_socket:
sock.settimeout(None)
else:
sock.close()
try:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=varbinds[0][0])
if (resp.error_status != NO_ERROR):
oid = None
if (resp.error_index and resp.varbinds):
oid = resp.varbinds[(resp.error_index - 1)][0]
logger.debug('[%s] SNMP error: %s %s', address, oid, resp.error_status)
raise SNMPError(code=resp.error_status, oid=oid)
else:
logger.debug('[%s] SET result: OK', address)
raise Return(True)<|docstring|>Perform SNMP set request and returns Future to be used
inside @tornado.gen.coroutine<|endoftext|>
|
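A hedged sketch for snmp_set: varbinds is a list of (oid, value) pairs, the coroutine resolves to True on success and raises SNMPError (carrying the failing OID) otherwise; sysContact.0 is a standard OID, the rest is made up.

from tornado.gen import coroutine, Return

@coroutine
def set_contact(address):
    ok = (yield snmp_set(address,
                         [('1.3.6.1.2.1.1.4.0', 'noc@example.com')],
                         community='private'))
    raise Return(ok)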
084e5f4f0762ceac225fb871bbf570968aaea465aeffe2ce84f8e2c6f468ef0f
|
async def parse(self):
'Gets the page and runs all parsing operations'
page = (await self.get_page())
if page:
self.get_meta(page)
self.get_post(page)
if self.post:
self.get_name()
self.get_avlink()
self.get_time()
self.twitter_embed()
self.format_images()
self.youtube_embed()
self.format_quotes()
self.format_spoilers()
self.get_contents()
|
Gets the page and runs all parsing operations
|
forum_parser.py
|
parse
|
SMorgan4/Guy.Robot
| 0 |
python
|
async def parse(self):
page = (await self.get_page())
if page:
self.get_meta(page)
self.get_post(page)
if self.post:
self.get_name()
self.get_avlink()
self.get_time()
self.twitter_embed()
self.format_images()
self.youtube_embed()
self.format_quotes()
self.format_spoilers()
self.get_contents()
|
async def parse(self):
page = (await self.get_page())
if page:
self.get_meta(page)
self.get_post(page)
if self.post:
self.get_name()
self.get_avlink()
self.get_time()
self.twitter_embed()
self.format_images()
self.youtube_embed()
self.format_quotes()
self.format_spoilers()
self.get_contents()<|docstring|>Gets the page and runs all parsing operations<|endoftext|>
|
7cde85eb3cbfeed86739789b356b2e46d7f18fca83a48babc1947de999f269ea
|
async def get_page(self):
'Gets the forum page'
    async with aiohttp.ClientSession() as a_session:
async with a_session.get(self.link.url) as response:
if (response.status == 200):
page = BeautifulSoup((await response.text()), 'html.parser')
return page
|
Gets the forum page
|
forum_parser.py
|
get_page
|
SMorgan4/Guy.Robot
| 0 |
python
|
async def get_page(self):
    async with aiohttp.ClientSession() as a_session:
async with a_session.get(self.link.url) as response:
if (response.status == 200):
page = BeautifulSoup((await response.text()), 'html.parser')
return page
|
async def get_page(self):
    async with aiohttp.ClientSession() as a_session:
async with a_session.get(self.link.url) as response:
if (response.status == 200):
page = BeautifulSoup((await response.text()), 'html.parser')
return page<|docstring|>Gets the forum page<|endoftext|>
|
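A minimal standalone sketch of get_page with the async context-manager fix applied above (aiohttp.ClientSession must be entered with "async with"); the URL is a placeholder.

import asyncio
import aiohttp
from bs4 import BeautifulSoup

async def fetch_page(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            if (response.status == 200):
                return BeautifulSoup((await response.text()), 'html.parser')

# page = asyncio.run(fetch_page('https://example.com/threads/123'))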
dffdf73c313548b73a89b3f2ea5b0d4c1e4bca7d5eb5f3ba7f8de2f543837acc
|
def get_meta(self, page):
'Gets page, icon and title from metatags, should work for all forums'
self.title = page.find('meta', property='og:title')['content']
if page.find('meta', property='og:image'):
self.icon = page.find('meta', property='og:image')['content']
elif page.find('link', rel='icon'):
self.icon = page.find('link', rel='icon')['href']
self.site_name = page.find('meta', property='og:site_name')['content']
|
Gets page, icon and title from metatags, should work for all forums
|
forum_parser.py
|
get_meta
|
SMorgan4/Guy.Robot
| 0 |
python
|
def get_meta(self, page):
self.title = page.find('meta', property='og:title')['content']
if page.find('meta', property='og:image'):
self.icon = page.find('meta', property='og:image')['content']
elif page.find('link', rel='icon'):
self.icon = page.find('link', rel='icon')['href']
self.site_name = page.find('meta', property='og:site_name')['content']
|
def get_meta(self, page):
self.title = page.find('meta', property='og:title')['content']
if page.find('meta', property='og:image'):
self.icon = page.find('meta', property='og:image')['content']
elif page.find('link', rel='icon'):
self.icon = page.find('link', rel='icon')['href']
self.site_name = page.find('meta', property='og:site_name')['content']<|docstring|>Gets page, icon and title from metatags, should work for all forums<|endoftext|>
|
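A self-contained illustration of the OpenGraph lookups get_meta performs; the HTML is a toy document, not taken from either forum.

from bs4 import BeautifulSoup

html = '<html><head><meta property="og:title" content="Example thread"/><meta property="og:site_name" content="Example Forum"/><link rel="icon" href="/favicon.ico"/></head></html>'
page = BeautifulSoup(html, 'html.parser')
print(page.find('meta', property='og:title')['content'])      # Example thread
print(page.find('link', rel='icon')['href'])                  # /favicon.ico
print(page.find('meta', property='og:site_name')['content'])  # Example Forum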
a633739be323c7d3e924101a0512476feb09820d768ef7f748a9ef2df5a6917f
|
def get_post(self, page):
'Gets a post from a page'
if (self.link.site == 'era'):
if (self.link.type == 'post'):
self.post = page.find('article', id=f'js-{self.link.post_id}')
else:
self.post = page.find('article')
elif (self.link.site == 'gaf'):
if (self.link.type == 'post'):
self.post = page.find('article', {'data-content': self.link.post_id})
else:
self.post = page.find('span', class_='thread-op').parent.parent
if (not self.post):
print(f'Error identifying post in {self.link.site} {self.link.type}: {self.link.url}')
|
Gets a post from a page
|
forum_parser.py
|
get_post
|
SMorgan4/Guy.Robot
| 0 |
python
|
def get_post(self, page):
if (self.link.site == 'era'):
if (self.link.type == 'post'):
self.post = page.find('article', id=f'js-{self.link.post_id}')
else:
self.post = page.find('article')
elif (self.link.site == 'gaf'):
if (self.link.type == 'post'):
self.post = page.find('article', {'data-content': self.link.post_id})
else:
self.post = page.find('span', class_='thread-op').parent.parent
if (not self.post):
print(f'Error identifying post in {self.link.site} {self.link.type}: {self.link.url}')
|
def get_post(self, page):
if (self.link.site == 'era'):
if (self.link.type == 'post'):
self.post = page.find('article', id=f'js-{self.link.post_id}')
else:
self.post = page.find('article')
elif (self.link.site == 'gaf'):
if (self.link.type == 'post'):
self.post = page.find('article', {'data-content': self.link.post_id})
else:
self.post = page.find('span', class_='thread-op').parent.parent
if (not self.post):
print(f'Error identifying post in {self.link.site} {self.link.type}: {self.link.url}')<|docstring|>Gets a post from a page<|endoftext|>
|
63b892c7c1649d10b1a7157bb68abf3ccf285b83bb300f9f035d2ed9dc3e4856
|
def get_avlink(self):
"Gets the link to the poster's avatar"
avlink = self.post.find('a', class_='avatar')
avlink = avlink.find('img')
if avlink:
self.avlink = (self.base_url + avlink['src'])
avlink.decompose()
|
Gets the link to the poster's avatar
|
forum_parser.py
|
get_avlink
|
SMorgan4/Guy.Robot
| 0 |
python
|
def get_avlink(self):
avlink = self.post.find('a', class_='avatar')
avlink = avlink.find('img')
if avlink:
self.avlink = (self.base_url + avlink['src'])
avlink.decompose()
|
def get_avlink(self):
avlink = self.post.find('a', class_='avatar')
avlink = avlink.find('img')
if avlink:
self.avlink = (self.base_url + avlink['src'])
avlink.decompose()<|docstring|>Gets the link to the poster's avatar<|endoftext|>
|
8f14726b4310e6a9f7f589800bf559f8dc98755454e50cd5e1138f2f2c6e97a7
|
def get_contents(self):
'Gets the post text'
if (self.link.site == 'era'):
self.content = self.post.find('div', class_='bbWrapper')
else:
self.content = self.post.find('div', class_='bbWrapper')
for tag in self.content.findAll('script'):
tag.decompose()
self.content = self.mark_down_links(self.content)
self.content = self.content.get_text().strip()
self.content = re.sub('\n+', '\n', self.content)
|
Gets the post text
|
forum_parser.py
|
get_contents
|
SMorgan4/Guy.Robot
| 0 |
python
|
def get_contents(self):
if (self.link.site == 'era'):
self.content = self.post.find('div', class_='bbWrapper')
else:
self.content = self.post.find('div', class_='bbWrapper')
for tag in self.content.findAll('script'):
tag.decompose()
self.content = self.mark_down_links(self.content)
self.content = self.content.get_text().strip()
self.content = re.sub('\n+', '\n', self.content)
|
def get_contents(self):
if (self.link.site == 'era'):
self.content = self.post.find('div', class_='bbWrapper')
else:
self.content = self.post.find('div', class_='bbWrapper')
for tag in self.content.findAll('script'):
tag.decompose()
self.content = self.mark_down_links(self.content)
self.content = self.content.get_text().strip()
self.content = re.sub('\n+', '\n', self.content)<|docstring|>Gets the post text<|endoftext|>
|
0a2dd26a03b2c2a0c221553a01754718aec3575c75ae90cadd557012c9b683c3
|
def mark_down_links(self, content):
'Marks down all links in a post. Runs after format quotes as to not mark down links within quotes because\n marked down links are not supported within the code blocks the bot uses for quotes.'
for tag in content.findAll('a', href=True):
if (tag.get_text() != ''):
mark_down = f"[{tag.get_text()}]({tag['href']})"
tag.replace_with(mark_down)
return content
|
Marks down all links in a post. Runs after format quotes as to not mark down links within quotes because
marked down links are not supported within the code blocks the bot uses for quotes.
|
forum_parser.py
|
mark_down_links
|
SMorgan4/Guy.Robot
| 0 |
python
|
def mark_down_links(self, content):
'Marks down all links in a post. Runs after format quotes as to not mark down links within quotes because\n marked down links are not supported within the code blocks the bot uses for quotes.'
for tag in content.findAll('a', href=True):
        if (tag.get_text() != ''):
mark_down = f"[{tag.get_text()}]({tag['href']})"
tag.replace_with(mark_down)
return content
|
def mark_down_links(self, content):
'Marks down all links in a post. Runs after format quotes as to not mark down links within quotes because\n marked down links are not supported within the code blocks the bot uses for quotes.'
for tag in content.findAll('a', href=True):
        if (tag.get_text() != ''):
mark_down = f"[{tag.get_text()}]({tag['href']})"
tag.replace_with(mark_down)
return content<|docstring|>Marks down all links in a post. Runs after format quotes as to not mark down links within quotes because
marked down links are not supported within the code blocks the bot uses for quotes.<|endoftext|>
|
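The same transformation on a toy fragment: anchor tags are replaced in place by Markdown links, and anchors with empty text are skipped (the restored != '' check above).

from bs4 import BeautifulSoup

content = BeautifulSoup('<p>See the <a href="https://example.com/docs">docs</a>.</p>', 'html.parser')
for tag in content.findAll('a', href=True):
    if (tag.get_text() != ''):
        tag.replace_with(f"[{tag.get_text()}]({tag['href']})")
print(content.get_text())  # See the [docs](https://example.com/docs).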
224411c85c8f90303cb41772471cd71e83b1ddbb9c6966bcea2c0822380cc847
|
def format_quotes(self):
'"Wraps quotes in code tag for aesthetics. Adds quote attribution link where possible.'
if (self.link.site == 'era'):
for attribution in self.post.findAll('div', class_='attribution type'):
self.attribute_quote(attribution)
for tag in self.post.findAll('div', class_='quote'):
tag.replace_with(f'```{tag.get_text().strip()}```')
for tag in self.post.findAll('div', class_='quoteExpand'):
tag.decompose()
if (self.link.site == 'gaf'):
for tag in self.post.findAll('div', class_=re.compile('bbCodeBlock.+quote')):
attribution = tag.find('a', class_='bbCodeBlock-sourceJump')
if attribution:
self.attribute_quote(attribution)
quote = tag.find('div', class_='bbCodeBlock-expandContent')
quote.replace_with(f'```{quote.get_text().strip()}```')
tag.find('div', class_='bbCodeBlock-expandLink').decompose()
|
"Wraps quotes in code tag for aesthetics. Adds quote attribution link where possible.
|
forum_parser.py
|
format_quotes
|
SMorgan4/Guy.Robot
| 0 |
python
|
def format_quotes(self):
if (self.link.site == 'era'):
for attribution in self.post.findAll('div', class_='attribution type'):
self.attribute_quote(attribution)
for tag in self.post.findAll('div', class_='quote'):
tag.replace_with(f'```{tag.get_text().strip()}```')
for tag in self.post.findAll('div', class_='quoteExpand'):
tag.decompose()
if (self.link.site == 'gaf'):
for tag in self.post.findAll('div', class_=re.compile('bbCodeBlock.+quote')):
attribution = tag.find('a', class_='bbCodeBlock-sourceJump')
if attribution:
self.attribute_quote(attribution)
quote = tag.find('div', class_='bbCodeBlock-expandContent')
quote.replace_with(f'```{quote.get_text().strip()}```')
tag.find('div', class_='bbCodeBlock-expandLink').decompose()
|
def format_quotes(self):
if (self.link.site == 'era'):
for attribution in self.post.findAll('div', class_='attribution type'):
self.attribute_quote(attribution)
for tag in self.post.findAll('div', class_='quote'):
tag.replace_with(f'```{tag.get_text().strip()}```')
for tag in self.post.findAll('div', class_='quoteExpand'):
tag.decompose()
if (self.link.site == 'gaf'):
for tag in self.post.findAll('div', class_=re.compile('bbCodeBlock.+quote')):
attribution = tag.find('a', class_='bbCodeBlock-sourceJump')
if attribution:
self.attribute_quote(attribution)
quote = tag.find('div', class_='bbCodeBlock-expandContent')
quote.replace_with(f'```{quote.get_text().strip()}```')
            tag.find('div', class_='bbCodeBlock-expandLink').decompose()<|docstring|>Wraps quotes in code tag for aesthetics. Adds quote attribution link where possible.<|endoftext|>
|
90c5776f3ff347560a4dc8d37d0de53797cff4498ef1f852a916ade74532783d
|
def attribute_quote(self, tag):
'Gets the original poster and links to the original post if available'
text = tag.get_text()
text = text.split('said:')[0]
if (self.link.site == 'era'):
link = tag.find('a', href=True)['href']
elif (self.link.site == 'gaf'):
link = tag['href']
if link:
tag.replace_with(f'[{text} said:]({(self.base_url + link)})')
else:
tag.replace_with(f'{text} said:')
|
Gets the original poster and links to the original post if available
|
forum_parser.py
|
attribute_quote
|
SMorgan4/Guy.Robot
| 0 |
python
|
def attribute_quote(self, tag):
text = tag.get_text()
text = text.split('said:')[0]
if (self.link.site == 'era'):
link = tag.find('a', href=True)['href']
elif (self.link.site == 'gaf'):
link = tag['href']
if link:
tag.replace_with(f'[{text} said:]({(self.base_url + link)})')
else:
tag.replace_with(f'{text} said:')
|
def attribute_quote(self, tag):
text = tag.get_text()
text = text.split('said:')[0]
if (self.link.site == 'era'):
link = tag.find('a', href=True)['href']
elif (self.link.site == 'gaf'):
link = tag['href']
if link:
tag.replace_with(f'[{text} said:]({(self.base_url + link)})')
else:
tag.replace_with(f'{text} said:')<|docstring|>Gets the original poster and links to the original post if available<|endoftext|>
|
16c89f6ed0cae68be91199be6eb1f9dd2c48b9fd9accf392784a44f6f098755b
|
def format_images(self):
'Creates a list of images. Changes image tag to a URL.'
for tag in self.post.findAll('img', class_=re.compile('bb')):
self.images.append(tag['src'])
if (self.link.site == 'era'):
tag.replace_with(tag['src'])
else:
tag.decompose()
if (self.link.site == 'gaf'):
for (count, tag) in enumerate(self.post.findAll('img', class_='smilie')):
tag.replace_with((self.images[count] + '\n'))
tag.decompose()
|
Creates a list of images. Changes image tag to a URL.
|
forum_parser.py
|
format_images
|
SMorgan4/Guy.Robot
| 0 |
python
|
def format_images(self):
for tag in self.post.findAll('img', class_=re.compile('bb')):
self.images.append(tag['src'])
if (self.link.site == 'era'):
tag.replace_with(tag['src'])
else:
tag.decompose()
if (self.link.site == 'gaf'):
for (count, tag) in enumerate(self.post.findAll('img', class_='smilie')):
tag.replace_with((self.images[count] + '\n'))
tag.decompose()
|
def format_images(self):
for tag in self.post.findAll('img', class_=re.compile('bb')):
self.images.append(tag['src'])
if (self.link.site == 'era'):
tag.replace_with(tag['src'])
else:
tag.decompose()
if (self.link.site == 'gaf'):
for (count, tag) in enumerate(self.post.findAll('img', class_='smilie')):
tag.replace_with((self.images[count] + '\n'))
tag.decompose()<|docstring|>Creates a list of images. Changes image tag to a URL.<|endoftext|>
|
e333b192c9d2c8d8ddd34b51d0661119f55fff09008e08f441fb686176d5581a
|
def twitter_embed(self):
'Creates a link to Twitter from a Twitter embed.'
for tag in self.post.findAll('iframe', attrs={'data-s9e-mediaembed': 'twitter'}):
if (self.link.site == 'era'):
tweet_id = tag['data-s9e-lazyload-src'].split('.html#')[1]
else:
tweet_id = tag['src'].split('.html#')[1]
tag.replace_with(('https://twitter.com/user/status/' + tweet_id))
|
Creates a link to Twitter from a Twitter embed.
|
forum_parser.py
|
twitter_embed
|
SMorgan4/Guy.Robot
| 0 |
python
|
def twitter_embed(self):
for tag in self.post.findAll('iframe', attrs={'data-s9e-mediaembed': 'twitter'}):
if (self.link.site == 'era'):
tweet_id = tag['data-s9e-lazyload-src'].split('.html#')[1]
else:
tweet_id = tag['src'].split('.html#')[1]
tag.replace_with(('https://twitter.com/user/status/' + tweet_id))
|
def twitter_embed(self):
for tag in self.post.findAll('iframe', attrs={'data-s9e-mediaembed': 'twitter'}):
if (self.link.site == 'era'):
tweet_id = tag['data-s9e-lazyload-src'].split('.html#')[1]
else:
tweet_id = tag['src'].split('.html#')[1]
tag.replace_with(('https://twitter.com/user/status/' + tweet_id))<|docstring|>Creates a link to Twitter from a Twitter embed.<|endoftext|>
|
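How the tweet id is recovered: it is whatever follows '.html#' in the embed iframe's src; the src below is a made-up example of the s9e embed URL shape.

src = 'https://s9e.github.io/iframe/twitter.min.html#1234567890123456789'
tweet_id = src.split('.html#')[1]
print(('https://twitter.com/user/status/' + tweet_id))
# https://twitter.com/user/status/1234567890123456789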
b07878018fbdf3c47d40837d4779a80720d27f16835b9ab678669c1a3075c453
|
def youtube_embed(self):
    'Creates a link to YouTube from a YouTube embed. Adds to a list of video links.'
tags = None
if (self.link.site == 'era'):
tags = self.post.findAll('span', attrs={'data-s9e-mediaembed': 'youtube'})
elif (self.link.site == 'gaf'):
tags = self.post.findAll('div', class_='bbMediaWrapper')
for tag in tags:
url = tag.find('iframe')['src']
self.videos.append(url)
tag.replace_with(url)
|
Creates a link to YouTube from a YouTube embed. Adds to a list of video links.
|
forum_parser.py
|
youtube_embed
|
SMorgan4/Guy.Robot
| 0 |
python
|
def youtube_embed(self):
tags = None
if (self.link.site == 'era'):
tags = self.post.findAll('span', attrs={'data-s9e-mediaembed': 'youtube'})
elif (self.link.site == 'gaf'):
tags = self.post.findAll('div', class_='bbMediaWrapper')
for tag in tags:
url = tag.find('iframe')['src']
self.videos.append(url)
tag.replace_with(url)
|
def youtube_embed(self):
tags = None
if (self.link.site == 'era'):
tags = self.post.findAll('span', attrs={'data-s9e-mediaembed': 'youtube'})
elif (self.link.site == 'gaf'):
tags = self.post.findAll('div', class_='bbMediaWrapper')
for tag in tags:
url = tag.find('iframe')['src']
self.videos.append(url)
        tag.replace_with(url)<|docstring|>Creates a link to YouTube from a YouTube embed. Adds to a list of video links.<|endoftext|>
|
779fc57b1c5455cd4d6603d5e2036a426619c22d0e58ef17cf90a172992f497b
|
def InitializePyMol(self):
' does some initializations to set up PyMol according to our\n tastes\n \n '
self.server.do('set valence,1')
self.server.do('set stick_rad,0.15')
self.server.do('set mouse_selection_mode,0')
self.server.do('set line_width,2')
self.server.do('set selection_width,10')
self.server.do('set auto_zoom,0')
|
does some initializations to set up PyMol according to our
tastes
|
hivwholeseq/utils/ipymol.py
|
InitializePyMol
|
iosonofabio/hivwholeseq
| 3 |
python
|
def InitializePyMol(self):
' does some initializations to set up PyMol according to our\n tastes\n \n '
self.server.do('set valence,1')
self.server.do('set stick_rad,0.15')
self.server.do('set mouse_selection_mode,0')
self.server.do('set line_width,2')
self.server.do('set selection_width,10')
self.server.do('set auto_zoom,0')
|
def InitializePyMol(self):
' does some initializations to set up PyMol according to our\n tastes\n \n '
self.server.do('set valence,1')
self.server.do('set stick_rad,0.15')
self.server.do('set mouse_selection_mode,0')
self.server.do('set line_width,2')
self.server.do('set selection_width,10')
self.server.do('set auto_zoom,0')<|docstring|>does some initializations to set up PyMol according to our
tastes<|endoftext|>
|
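A hedged sketch of how such a server handle is usually obtained: PyMOL started with "pymol -R" exposes an XML-RPC server (port 9123 by default) whose do() executes a PyMOL command string; the port and local setup here are assumptions about the environment.

import xmlrpc.client

# Assumes a local PyMOL instance launched with its RPC server enabled.
server = xmlrpc.client.ServerProxy('http://localhost:9123')
server.do('set stick_rad,0.15')
server.do('set auto_zoom,0')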
415a21ddf7152d1da10ff3ce9f950376069ece7c8f9c642bfbcfd4a2b15087fa
|
def get_args():
'get the arguments as a main parser with subparsers\n for named required arguments and optional arguments\n '
parser = argparse.ArgumentParser(description=('This is used to combine regions extracted with ' + 'riboSnag, creating a single sequence'))
parser.add_argument('indir', help="Directory with fasta's to concatenate")
parser.add_argument('ext', help='Extension of files to concatenate')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('-o', '--output', help='output directory;default: %(default)s', default=os.getcwd(), type=str, dest='output')
optional = parser.add_argument_group('optional arguments')
optional.add_argument('-n', '--name', help='name for output fasta', default='concatenated_seq', type=str)
optional.add_argument('-v', '--verbose', action='store_true', help='output verbose status')
args = parser.parse_args()
return args
|
get the arguments as a main parser with subparsers
for named required arguments and optional arguments
|
scripts/concatToyGenome.py
|
get_args
|
nickp60/riboSeed
| 7 |
python
|
def get_args():
'get the arguments as a main parser with subparsers\n for named required arguments and optional arguments\n '
parser = argparse.ArgumentParser(description=('This is used to combine regions extracted with ' + 'riboSnag, creating a single sequence'))
parser.add_argument('indir', help="Directory with fasta's to concatenate")
parser.add_argument('ext', help='Extension of files to concatenate')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('-o', '--output', help='output directory;default: %(default)s', default=os.getcwd(), type=str, dest='output')
optional = parser.add_argument_group('optional arguments')
optional.add_argument('-n', '--name', help='name for output fasta', default='concatenated_seq', type=str)
optional.add_argument('-v', '--verbose', action='store_true', help='output verbose status')
args = parser.parse_args()
return args
|
def get_args():
'get the arguments as a main parser with subparsers\n for named required arguments and optional arguments\n '
parser = argparse.ArgumentParser(description=('This is used to combine regions extracted with ' + 'riboSnag, creating a single sequence'))
parser.add_argument('indir', help="Directory with fasta's to concatenate")
parser.add_argument('ext', help='Extension of files to concatenate')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('-o', '--output', help='output directory;default: %(default)s', default=os.getcwd(), type=str, dest='output')
optional = parser.add_argument_group('optional arguments')
optional.add_argument('-n', '--name', help='name for output fasta', default='concatenated_seq', type=str)
optional.add_argument('-v', '--verbose', action='store_true', help='output verbose status')
args = parser.parse_args()
return args<|docstring|>get the arguments as a main parser with subparsers
for named required arguments and optional arguments<|endoftext|>
|
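Driving the parser above without a shell by faking argv; all paths and names are placeholders.

import sys

sys.argv = ['concatToyGenome.py', 'regions/', '*.fasta',
            '-o', 'out/', '--name', 'toy_genome', '-v']
args = get_args()
print(args.indir, args.ext, args.name, args.verbose)
# regions/ *.fasta toy_genome True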
ef85189a65594c89f4bde3947e9cdd18d5ce3f4006c421d4dbe497de42c87331
|
def concat_genome(input_dir, ext, outpath, verbose=False):
'for each fasta, read in, add to existing string, and when finished,\n write out as single-entry fasta\n '
fastas = sorted(glob.glob(str((input_dir + ext))))
if (len(fastas) == 0):
if verbose:
print('No files found!')
return 1
if verbose:
print(str(('combining the following files matching extension ' + '{0}:{1}'.format(ext, ' '.join(fastas)))))
new_seq = ''
for filen in fastas:
print(('Adding %s to combined sequence' % filen))
with open(filen, 'r') as i_file:
seq_rec = list(SeqIO.parse(i_file, 'fasta'))[0]
new_seq = (new_seq + str(seq_rec.seq))
if verbose:
print(str(('Len of sequence:{0}\nLen of concatenated ' + 'sequence:{1}')).format(len(seq_rec), len(new_seq)))
try:
with open(outpath, 'w') as o_file:
success = SeqIO.write(SeqRecord(seq=Seq(new_seq, IUPAC.IUPACAmbiguousDNA()), description='from concatToyGenome', id='concatenated_genome'), o_file, 'fasta')
if success:
print('wrote out concatenated file!')
return 0
except Exception as e:
if verbose:
print(e)
return 1
|
for each fasta, read in, add to existing string, and when finished,
write out as single-entry fasta
|
scripts/concatToyGenome.py
|
concat_genome
|
nickp60/riboSeed
| 7 |
python
|
def concat_genome(input_dir, ext, outpath, verbose=False):
'for each fasta, read in, add to existing string, and when finished,\n write out as single-entry fasta\n '
fastas = sorted(glob.glob(str((input_dir + ext))))
if (len(fastas) == 0):
if verbose:
print('No files found!')
return 1
if verbose:
print(str(('combining the following files matching extension ' + '{0}:{1}'.format(ext, ' '.join(fastas)))))
    new_seq = ''
for filen in fastas:
print(('Adding %s to combined sequence' % filen))
with open(filen, 'r') as i_file:
seq_rec = list(SeqIO.parse(i_file, 'fasta'))[0]
new_seq = (new_seq + str(seq_rec.seq))
if verbose:
print(str(('Len of sequence:{0}\nLen of concatenated ' + 'sequence:{1}')).format(len(seq_rec), len(new_seq)))
try:
with open(outpath, 'w') as o_file:
success = SeqIO.write(SeqRecord(seq=Seq(new_seq, IUPAC.IUPACAmbiguousDNA()), description='from concatToyGenome', id='concatenated_genome'), o_file, 'fasta')
if success:
print('wrote out concatenated file!')
return 0
except Exception as e:
if verbose:
print(e)
return 1
|
def concat_genome(input_dir, ext, outpath, verbose=False):
'for each fasta, read in, add to existing string, and when finished,\n write out as single-entry fasta\n '
fastas = sorted(glob.glob(str((input_dir + ext))))
if (len(fastas) == 0):
if verbose:
print('No files found!')
return 1
if verbose:
print(str(('combining the following files matching extension ' + '{0}:{1}'.format(ext, ' '.join(fastas)))))
    new_seq = ''
for filen in fastas:
print(('Adding %s to combined sequence' % filen))
with open(filen, 'r') as i_file:
seq_rec = list(SeqIO.parse(i_file, 'fasta'))[0]
new_seq = (new_seq + str(seq_rec.seq))
if verbose:
print(str(('Len of sequence:{0}\nLen of concatenated ' + 'sequence:{1}')).format(len(seq_rec), len(new_seq)))
try:
with open(outpath, 'w') as o_file:
success = SeqIO.write(SeqRecord(seq=Seq(new_seq, IUPAC.IUPACAmbiguousDNA()), description='from concatToyGenome', id='concatenated_genome'), o_file, 'fasta')
if success:
print('wrote out concatenated file!')
return 0
except Exception as e:
if verbose:
print(e)
return 1<|docstring|>for each fasta, read in, add to existing string, and when finished,
write out as single-entry fasta<|endoftext|>
|
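A hedged call sketch (assumes Biopython is importable in the module above). Note the glob pattern is built as str(input_dir + ext), so the directory argument must carry its own trailing separator; the paths are placeholders.

rc = concat_genome('regions/', '*.fasta', 'out/concatenated.fasta', verbose=True)
# rc is 0 on success, 1 when no files matched or the write failed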
d6053c5fb20f79c16ebe6f68126932be4441b09c0afe60d99dea9270de6993d4
|
def annotate_cmap(annotated, annotate_join_on, cell_id='unknown', perturbation_mode='none'):
'Annotates data frame with custom options according to CMAP specifications\n\n Parameters\n ----------\n annotated : pandas.core.frame.DataFrame\n DataFrame of profiles.\n annotate_join_on : str\n Typically the well metadata, but how to join external data\n cell_id : str, default "unknown"\n provide a string to annotate cell id column\n perturbation_mode : str, default "none"\n How to annotate CMAP specific data (options = ["chemical" , "genetic"])\n\n Returns\n -------\n annotated\n CMAP annotated data\n '
pert_opts = ['none', 'chemical', 'genetic']
assert (perturbation_mode in pert_opts), 'perturbation mode must be one of {}'.format(pert_opts)
assert ('Metadata_broad_sample' in annotated.columns), "Are you sure this is a CMAP file? 'Metadata_broad_sample column not found.'"
    annotated = annotated.assign(Metadata_pert_id=annotated.Metadata_broad_sample.str.extract('(BRD[-N][A-Z0-9]+)'), Metadata_pert_mfc_id=annotated.Metadata_broad_sample, Metadata_pert_well=annotated.loc[:, annotate_join_on], Metadata_pert_id_vendor='')
if ('Metadata_pert_iname' in annotated.columns):
annotated = annotated.assign(Metadata_pert_mfc_desc=annotated.Metadata_pert_iname, Metadata_pert_name=annotated.Metadata_pert_iname)
if ('Metadata_cell_id' not in annotated.columns):
annotated = annotated.assign(Metadata_cell_id=cell_id)
if (perturbation_mode == 'chemical'):
annotated = annotated.assign(Metadata_broad_sample_type=[('control' if (x in ['DMSO', np.nan]) else 'trt') for x in annotated.Metadata_broad_sample])
annotated.loc[((annotated.Metadata_broad_sample_type == 'control'), 'Metadata_broad_sample')] = 'DMSO'
annotated.loc[((annotated.Metadata_broad_sample == 'empty'), 'Metadata_broad_sample_type')] = 'empty'
if ('Metadata_mmoles_per_liter' in annotated.columns):
annotated.loc[((annotated.Metadata_broad_sample_type == 'control'), 'Metadata_mmoles_per_liter')] = 0
if ('Metadata_solvent' in annotated.columns):
annotated = annotated.assign(Metadata_pert_vehicle=annotated.Metadata_solvent)
if ('Metadata_mg_per_ml' in annotated.columns):
annotated.loc[((annotated.Metadata_broad_sample_type == 'control'), 'Metadata_mg_per_ml')] = 0
if (perturbation_mode == 'genetic'):
if ('Metadata_pert_name' in annotated.columns):
annotated = annotated.assign(Metadata_broad_sample_type=[('control' if (x == 'EMPTY') else 'trt') for x in annotated.Metadata_pert_name])
if ('Metadata_broad_sample_type' in annotated.columns):
annotated = annotated.assign(Metadata_pert_type=annotated.Metadata_broad_sample_type)
else:
annotated = annotated.assign(Metadata_pert_type='', Metadata_broad_sample_type='')
return annotated
|
Annotates data frame with custom options according to CMAP specifications
Parameters
----------
annotated : pandas.core.frame.DataFrame
DataFrame of profiles.
annotate_join_on : str
Typically the well metadata, but how to join external data
cell_id : str, default "unknown"
provide a string to annotate cell id column
perturbation_mode : str, default "none"
How to annotate CMAP specific data (options = ["chemical" , "genetic"])
Returns
-------
annotated
CMAP annotated data
|
pycytominer/cyto_utils/annotate_custom.py
|
annotate_cmap
|
staylorx/pycytominer
| 0 |
python
|
def annotate_cmap(annotated, annotate_join_on, cell_id='unknown', perturbation_mode='none'):
'Annotates data frame with custom options according to CMAP specifications\n\n Parameters\n ----------\n annotated : pandas.core.frame.DataFrame\n DataFrame of profiles.\n annotate_join_on : str\n Typically the well metadata, but how to join external data\n cell_id : str, default "unknown"\n provide a string to annotate cell id column\n perturbation_mode : str, default "none"\n How to annotate CMAP specific data (options = ["chemical" , "genetic"])\n\n Returns\n -------\n annotated\n CMAP annotated data\n '
pert_opts = ['none', 'chemical', 'genetic']
assert (perturbation_mode in pert_opts), 'perturbation mode must be one of {}'.format(pert_opts)
assert ('Metadata_broad_sample' in annotated.columns), "Are you sure this is a CMAP file? 'Metadata_broad_sample column not found.'"
    annotated = annotated.assign(Metadata_pert_id=annotated.Metadata_broad_sample.str.extract('(BRD[-N][A-Z0-9]+)'), Metadata_pert_mfc_id=annotated.Metadata_broad_sample, Metadata_pert_well=annotated.loc[:, annotate_join_on], Metadata_pert_id_vendor='')
if ('Metadata_pert_iname' in annotated.columns):
annotated = annotated.assign(Metadata_pert_mfc_desc=annotated.Metadata_pert_iname, Metadata_pert_name=annotated.Metadata_pert_iname)
if ('Metadata_cell_id' not in annotated.columns):
annotated = annotated.assign(Metadata_cell_id=cell_id)
if (perturbation_mode == 'chemical'):
annotated = annotated.assign(Metadata_broad_sample_type=[('control' if (x in ['DMSO', np.nan]) else 'trt') for x in annotated.Metadata_broad_sample])
annotated.loc[((annotated.Metadata_broad_sample_type == 'control'), 'Metadata_broad_sample')] = 'DMSO'
annotated.loc[((annotated.Metadata_broad_sample == 'empty'), 'Metadata_broad_sample_type')] = 'empty'
if ('Metadata_mmoles_per_liter' in annotated.columns):
annotated.loc[((annotated.Metadata_broad_sample_type == 'control'), 'Metadata_mmoles_per_liter')] = 0
if ('Metadata_solvent' in annotated.columns):
annotated = annotated.assign(Metadata_pert_vehicle=annotated.Metadata_solvent)
if ('Metadata_mg_per_ml' in annotated.columns):
annotated.loc[((annotated.Metadata_broad_sample_type == 'control'), 'Metadata_mg_per_ml')] = 0
if (perturbation_mode == 'genetic'):
if ('Metadata_pert_name' in annotated.columns):
annotated = annotated.assign(Metadata_broad_sample_type=[('control' if (x == 'EMPTY') else 'trt') for x in annotated.Metadata_pert_name])
if ('Metadata_broad_sample_type' in annotated.columns):
annotated = annotated.assign(Metadata_pert_type=annotated.Metadata_broad_sample_type)
else:
        annotated = annotated.assign(Metadata_pert_type='', Metadata_broad_sample_type='')
return annotated
|
def annotate_cmap(annotated, annotate_join_on, cell_id='unknown', perturbation_mode='none'):
'Annotates data frame with custom options according to CMAP specifications\n\n Parameters\n ----------\n annotated : pandas.core.frame.DataFrame\n DataFrame of profiles.\n annotate_join_on : str\n Typically the well metadata, but how to join external data\n cell_id : str, default "unknown"\n provide a string to annotate cell id column\n perturbation_mode : str, default "none"\n How to annotate CMAP specific data (options = ["chemical" , "genetic"])\n\n Returns\n -------\n annotated\n CMAP annotated data\n '
pert_opts = ['none', 'chemical', 'genetic']
assert (perturbation_mode in pert_opts), 'perturbation mode must be one of {}'.format(pert_opts)
assert ('Metadata_broad_sample' in annotated.columns), "Are you sure this is a CMAP file? 'Metadata_broad_sample column not found.'"
    annotated = annotated.assign(Metadata_pert_id=annotated.Metadata_broad_sample.str.extract('(BRD[-N][A-Z0-9]+)'), Metadata_pert_mfc_id=annotated.Metadata_broad_sample, Metadata_pert_well=annotated.loc[:, annotate_join_on], Metadata_pert_id_vendor='')
if ('Metadata_pert_iname' in annotated.columns):
annotated = annotated.assign(Metadata_pert_mfc_desc=annotated.Metadata_pert_iname, Metadata_pert_name=annotated.Metadata_pert_iname)
if ('Metadata_cell_id' not in annotated.columns):
annotated = annotated.assign(Metadata_cell_id=cell_id)
if (perturbation_mode == 'chemical'):
annotated = annotated.assign(Metadata_broad_sample_type=[('control' if (x in ['DMSO', np.nan]) else 'trt') for x in annotated.Metadata_broad_sample])
annotated.loc[((annotated.Metadata_broad_sample_type == 'control'), 'Metadata_broad_sample')] = 'DMSO'
annotated.loc[((annotated.Metadata_broad_sample == 'empty'), 'Metadata_broad_sample_type')] = 'empty'
if ('Metadata_mmoles_per_liter' in annotated.columns):
annotated.loc[((annotated.Metadata_broad_sample_type == 'control'), 'Metadata_mmoles_per_liter')] = 0
if ('Metadata_solvent' in annotated.columns):
annotated = annotated.assign(Metadata_pert_vehicle=annotated.Metadata_solvent)
if ('Metadata_mg_per_ml' in annotated.columns):
annotated.loc[((annotated.Metadata_broad_sample_type == 'control'), 'Metadata_mg_per_ml')] = 0
if (perturbation_mode == 'genetic'):
if ('Metadata_pert_name' in annotated.columns):
annotated = annotated.assign(Metadata_broad_sample_type=[('control' if (x == 'EMPTY') else 'trt') for x in annotated.Metadata_pert_name])
if ('Metadata_broad_sample_type' in annotated.columns):
annotated = annotated.assign(Metadata_pert_type=annotated.Metadata_broad_sample_type)
else:
        annotated = annotated.assign(Metadata_pert_type='', Metadata_broad_sample_type='')
return annotated<|docstring|>Annotates data frame with custom options according to CMAP specifications
Parameters
----------
annotated : pandas.core.frame.DataFrame
DataFrame of profiles.
annotate_join_on : str
Typically the well metadata, but how to join external data
cell_id : str, default "unknown"
provide a string to annotate cell id column
perturbation_mode : str, default "none"
How to annotate CMAP specific data (options = ["chemical" , "genetic"])
Returns
-------
annotated
CMAP annotated data<|endoftext|>
|
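A tiny illustrative input for annotate_cmap (valid after the slice-syntax repairs above); column names follow the docstring, while the BRD id and well labels are made up.

import numpy as np
import pandas as pd

df = pd.DataFrame({
    'Metadata_Well': ['A01', 'A02'],
    'Metadata_broad_sample': ['BRD-K12345678', np.nan],
})
out = annotate_cmap(df, annotate_join_on='Metadata_Well',
                    cell_id='A549', perturbation_mode='chemical')
print(out['Metadata_broad_sample_type'].tolist())  # expected: ['trt', 'control']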
d90b02788fcff2f63bfeb1a0e5b9dd8b9f249f05926aa017fad18395ea3b3328
|
def cp_clean(profiles):
'Specifically clean certain column names derived from different CellProfiler versions\n\n Parameters\n ----------\n profiles : pandas.core.frame.DataFrame\n DataFrame of profiles.\n\n Returns\n -------\n profiles\n Renamed to standard metadata\n '
profiles = profiles.rename({'Image_Metadata_Plate': 'Metadata_Plate', 'Image_Metadata_Well': 'Metadata_Well'}, axis='columns')
return profiles
|
Specifically clean certain column names derived from different CellProfiler versions
Parameters
----------
profiles : pandas.core.frame.DataFrame
DataFrame of profiles.
Returns
-------
profiles
Renamed to standard metadata
|
pycytominer/cyto_utils/annotate_custom.py
|
cp_clean
|
staylorx/pycytominer
| 0 |
python
|
def cp_clean(profiles):
'Specifically clean certain column names derived from different CellProfiler versions\n\n Parameters\n ----------\n profiles : pandas.core.frame.DataFrame\n DataFrame of profiles.\n\n Returns\n -------\n profiles\n Renamed to standard metadata\n '
profiles = profiles.rename({'Image_Metadata_Plate': 'Metadata_Plate', 'Image_Metadata_Well': 'Metadata_Well'}, axis='columns')
return profiles
|
def cp_clean(profiles):
'Specifically clean certain column names derived from different CellProfiler versions\n\n Parameters\n ----------\n profiles : pandas.core.frame.DataFrame\n DataFrame of profiles.\n\n Returns\n -------\n profiles\n Renamed to standard metadata\n '
profiles = profiles.rename({'Image_Metadata_Plate': 'Metadata_Plate', 'Image_Metadata_Well': 'Metadata_Well'}, axis='columns')
return profiles<|docstring|>Specifically clean certain column names derived from different CellProfiler versions
Parameters
----------
profiles : pandas.core.frame.DataFrame
DataFrame of profiles.
Returns
-------
profiles
Renamed to standard metadata<|endoftext|>
|
9098d3d60e33740d5635581fac171aa136635a109a77baa133bba5d8c3b86000
|
def DataWranglerDf(self, df):
    '\n        This function cleans the provided Dataframe\n        :return: Cleaned Dataframe (removed_sp_char_Df)\n        '
removed_sp_char_Df = df
removed_sp_char_Df['Wikipedia_Paragraphs'] = [re.sub('[^a-zA-Z0-9]+', ' ', doc) for doc in df['Wikipedia_Paragraphs']]
return removed_sp_char_Df
|
This function cleans the provided Dataframe
:return: Cleaned Dataframe (removed_sp_char_Df)
|
streamlit_application/DataWrangling.py
|
DataWranglerDf
|
coryroyce/wiki_based_nlp_chat_bot
| 2 |
python
|
def DataWranglerDf(self, df):
    '\n        This function cleans the provided Dataframe\n        :return: Cleaned Dataframe (removed_sp_char_Df)\n        '
removed_sp_char_Df = df
removed_sp_char_Df['Wikipedia_Paragraphs'] = [re.sub('[^a-zA-Z0-9]+', ' ', doc) for doc in df['Wikipedia_Paragraphs']]
return removed_sp_char_Df
|
def DataWranglerDf(self, df):
    '\n        This function cleans the provided Dataframe\n        :return: Cleaned Dataframe (removed_sp_char_Df)\n        '
removed_sp_char_Df = df
removed_sp_char_Df['Wikipedia_Paragraphs'] = [re.sub('[^a-zA-Z0-9]+', ' ', doc) for doc in df['Wikipedia_Paragraphs']]
    return removed_sp_char_Df<|docstring|>This function cleans the provided Dataframe
:return: Cleaned Dataframe (removed_sp_char_Df)<|endoftext|>
|
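The same regex pass on a plain list, for illustration: every run of non-alphanumeric characters collapses to a single space (note this also strips accented letters).

import re

docs = ['Hello, world! (2020)', 'naïve café']
print([re.sub('[^a-zA-Z0-9]+', ' ', d) for d in docs])
# ['Hello world 2020 ', 'na ve caf ']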
90efa3a3069b2c02260c495d17becaeb46652aecdaee68f8ccb3deb801d1aca5
|
def lemmatization(self, corpus):
'\n This Function removes stop words and lemmatizes the provided corpus\n :return: Stop word removed & Canonical form corpus (canonical)\n '
canonical = []
for x in corpus:
temp = []
for token in self.nlp(x):
if (not token.is_stop):
temp.append(token.lemma_)
canonical.append(' '.join(temp))
return canonical
|
This Function removes stop words and lemmatizes the provided corpus
:return: Stop word removed & Canonical form corpus (canonical)
|
streamlit_application/DataWrangling.py
|
lemmatization
|
coryroyce/wiki_based_nlp_chat_bot
| 2 |
python
|
def lemmatization(self, corpus):
'\n This Function removes stop words and lemmatizes the provided corpus\n :return: Stop word removed & Canonical form corpus (canonical)\n '
canonical = []
for x in corpus:
temp = []
for token in self.nlp(x):
if (not token.is_stop):
temp.append(token.lemma_)
canonical.append(' '.join(temp))
return canonical
|
def lemmatization(self, corpus):
'\n This Function removes stop words and lemmatizes the provided corpus\n :return: Stop word removed & Canonical form corpus (canonical)\n '
canonical = []
for x in corpus:
temp = []
for token in self.nlp(x):
if (not token.is_stop):
temp.append(token.lemma_)
canonical.append(' '.join(temp))
return canonical<|docstring|>This Function removes stop words and lemmatizes the provided corpus
:return: Stop word removed & Canonical form corpus (canonical)<|endoftext|>
|
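A standalone equivalent of the method above; it requires a spaCy model (python -m spacy download en_core_web_sm), and the exact output varies slightly across spaCy versions since stop lists and lemmas differ.

import spacy

nlp = spacy.load('en_core_web_sm')

def lemmatize(corpus):
    # Drop stop words, keep lemmas, rejoin into one string per document.
    return [' '.join((tok.lemma_ for tok in nlp(text) if (not tok.is_stop)))
            for text in corpus]

print(lemmatize(['The cats were running quickly']))  # e.g. ['cat run quickly']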
ab20b930f463a883099d316266c29d144758572c9d996ff1448577732cbe1bf8
|
def configure(self, project):
'\n Called when plugin is initialized to perform any pre-configuration.\n '
|
Called when plugin is initialized to perform any pre-configuration.
|
sentry/plugins/__init__.py
|
configure
|
Kronuz/django-sentry
| 0 |
python
|
def configure(self, project):
'\n \n '
|
def configure(self, project):
'\n \n '<|docstring|>Called when plugin is initialized to perform any pre-configuration.<|endoftext|>
|
aab761fc07e15cb3b39b3379e33a0cd2c0e62cb236642234108c62ae46bf454d
|
def view(self, group, **kwargs):
'\n Handles the view logic. If no response is given, we continue to the next action provider.\n '
|
Handles the view logic. If no response is given, we continue to the next action provider.
|
sentry/plugins/__init__.py
|
view
|
Kronuz/django-sentry
| 0 |
python
|
def view(self, group, **kwargs):
'\n \n '
|
def view(self, group, **kwargs):
'\n \n '<|docstring|>Handles the view logic. If no response is given, we continue to the next action provider.<|endoftext|>
|
6ecce5e4683a3feb27ed8cf2511160419a51cd20504816204ff85aee03100b4f
|
def before_events(self, group_list, **kwargs):
'\n Allows preprocessing of groups in the list view.\n\n This is generally useful if you need to cache lookups\n for something like ``tags`` which would otherwise do\n multiple queries.\n '
|
Allows preprocessing of groups in the list view.
This is generally useful if you need to cache lookups
for something like ``tags`` which would otherwise do
multiple queries.
|
sentry/plugins/__init__.py
|
before_events
|
Kronuz/django-sentry
| 0 |
python
|
def before_events(self, group_list, **kwargs):
'\n Allows preprocessing of groups in the list view.\n\n This is generally useful if you need to cache lookups\n for something like ``tags`` which would otherwise do\n multiple queries.\n '
|
def before_events(self, group_list, **kwargs):
'\n Allows preprocessing of groups in the list view.\n\n This is generally useful if you need to cache lookups\n for something like ``tags`` which would otherwise do\n multiple queries.\n '<|docstring|>Allows preprocessing of groups in the list view.
This is generally useful if you need to cache lookups
for something like ``tags`` which would otherwise do
multiple queries.<|endoftext|>
|
09baa20bde11ab908a71c81edea1ee05215a23ab845714b70eceeaf3d051bb64
|
def tags(self, group, tag_list, **kwargs):
'Modifies the tag list for a grouped message.'
return tag_list
|
Modifies the tag list for a grouped message.
|
sentry/plugins/__init__.py
|
tags
|
Kronuz/django-sentry
| 0 |
python
|
def tags(self, group, tag_list, **kwargs):
return tag_list
|
def tags(self, group, tag_list, **kwargs):
return tag_list<|docstring|>Modifies the tag list for a grouped message.<|endoftext|>
|
c5636535e57850a2c8a85cfaa1462163a05bf391ca4d613b67ba14a16ce7c621
|
def actions(self, group, action_list, **kwargs):
'Modifies the action list for a grouped message.'
return action_list
|
Modifies the action list for a grouped message.
|
sentry/plugins/__init__.py
|
actions
|
Kronuz/django-sentry
| 0 |
python
|
def actions(self, group, action_list, **kwargs):
return action_list
|
def actions(self, group, action_list, **kwargs):
return action_list<|docstring|>Modifies the action list for a grouped message.<|endoftext|>
|
85b9e206ae0818fe4fccb00c9555902ce8ec5fb650191778aec488e279ad70d6
|
def panels(self, group, panel_list, **kwargs):
'Modifies the panel list for a grouped message.'
return panel_list
|
Modifies the panel list for a grouped message.
|
sentry/plugins/__init__.py
|
panels
|
Kronuz/django-sentry
| 0 |
python
|
def panels(self, group, panel_list, **kwargs):
return panel_list
|
def panels(self, group, panel_list, **kwargs):
return panel_list<|docstring|>Modifies the panel list for a grouped message.<|endoftext|>
|
0a9ac97f4171c3401dc57a420e6cef9253a2ad09d875f33ac8514bc09e93b2b5
|
def widget(self, group, **kwargs):
'\n Renders as a widget in the group details sidebar.\n '
|
Renders as a widget in the group details sidebar.
|
sentry/plugins/__init__.py
|
widget
|
Kronuz/django-sentry
| 0 |
python
|
def widget(self, group, **kwargs):
'\n \n '
|
def widget(self, group, **kwargs):
'\n \n '<|docstring|>Renders as a widget in the group details sidebar.<|endoftext|>
|
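A hypothetical subclass showing how these no-op hooks are meant to be overridden; the Plugin base-class name and the URL layout are assumptions drawn from context, not from the dataset rows.

class TagBadgePlugin(Plugin):
    def tags(self, group, tag_list, **kwargs):
        # Hooks that modify a list must return it.
        return (tag_list + ['triaged'])

    def actions(self, group, action_list, **kwargs):
        return (action_list + [('Mute', ('/mute/%s/' % group.pk))])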
afb27acedc951a0418c26c732af82f6e134d26f7d4207ac8cbbe03e7f440b1aa
|
def block_until_scheduler_wait_call(self):
"Blocks until the clock's wait method is called or test times out."
self.scheduler_wait_event.wait(TEST_TIMEOUT_SECONDS)
self.scheduler_wait_event.clear()
|
Blocks until the clock's wait method is called or test times out.
|
tests/test_poller.py
|
block_until_scheduler_wait_call
|
JakeDean101/Greenpithumb
| 83 |
python
|
def block_until_scheduler_wait_call(self):
self.scheduler_wait_event.wait(TEST_TIMEOUT_SECONDS)
self.scheduler_wait_event.clear()
|
def block_until_scheduler_wait_call(self):
self.scheduler_wait_event.wait(TEST_TIMEOUT_SECONDS)
self.scheduler_wait_event.clear()<|docstring|>Blocks until the clock's wait method is called or test times out.<|endoftext|>
|
fa4c81297fa1f66f4f1df528d0e1014ee025136fd076a530a46a2a07fe212efe
|
def block_until_poll_completes(self):
'Blocks until the poller has completed one poll event.'
self.block_until_scheduler_wait_call()
self.block_until_scheduler_wait_call()
|
Blocks until the poller has completed one poll event.
|
tests/test_poller.py
|
block_until_poll_completes
|
JakeDean101/Greenpithumb
| 83 |
python
|
def block_until_poll_completes(self):
self.block_until_scheduler_wait_call()
self.block_until_scheduler_wait_call()
|
def block_until_poll_completes(self):
self.block_until_scheduler_wait_call()
self.block_until_scheduler_wait_call()<|docstring|>Blocks until the poller has completed one poll event.<|endoftext|>
|
69ec5903322a75ef341d8067d030897f2790d5ce95958d116672529bfcc9d9d3
|
def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False):
'Add shared or separable branch\n\n convs -> avg pool (optional) -> fcs\n '
last_layer_dim = in_channels
branch_convs = nn.ModuleList()
if (num_branch_convs > 0):
for i in range(num_branch_convs):
conv_in_channels = (last_layer_dim if (i == 0) else self.conv_out_channels)
branch_convs.append(ConvModule(conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
branch_fcs = nn.ModuleList()
if (num_branch_fcs > 0):
if ((is_shared or (self.num_shared_fcs == 0)) and (not self.with_avg_pool)):
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return (branch_convs, branch_fcs, last_layer_dim)
|
Add shared or separable branch
convs -> avg pool (optional) -> fcs
|
mmdet/models/bbox_heads/carcls_rot_head.py
|
_add_conv_fc_branch
|
tyunist/Kaggle_PKU_Baidu
| 59 |
python
|
def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False):
'Add shared or separable branch\n\n convs -> avg pool (optional) -> fcs\n '
last_layer_dim = in_channels
branch_convs = nn.ModuleList()
if (num_branch_convs > 0):
for i in range(num_branch_convs):
conv_in_channels = (last_layer_dim if (i == 0) else self.conv_out_channels)
branch_convs.append(ConvModule(conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
branch_fcs = nn.ModuleList()
if (num_branch_fcs > 0):
if ((is_shared or (self.num_shared_fcs == 0)) and (not self.with_avg_pool)):
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return (branch_convs, branch_fcs, last_layer_dim)
|
def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False):
'Add shared or separable branch\n\n convs -> avg pool (optional) -> fcs\n '
last_layer_dim = in_channels
branch_convs = nn.ModuleList()
if (num_branch_convs > 0):
for i in range(num_branch_convs):
conv_in_channels = (last_layer_dim if (i == 0) else self.conv_out_channels)
branch_convs.append(ConvModule(conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
branch_fcs = nn.ModuleList()
if (num_branch_fcs > 0):
if ((is_shared or (self.num_shared_fcs == 0)) and (not self.with_avg_pool)):
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return (branch_convs, branch_fcs, last_layer_dim)<|docstring|>Add shared or separable branch
convs -> avg pool (optional) -> fcs<|endoftext|>
|
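A hedged sketch of the conv-then-fc branch construction that _add_conv_fc_branch performs, stripped down so it runs without mmdet: plain Conv2d + ReLU stands in for mmdet's ConvModule, and all sizes (in_channels=256, fc_out_channels=1024, 7x7 RoI features) are illustrative assumptions, not the repository's configuration:

import torch
import torch.nn as nn

class TinyBranchHead(nn.Module):
    # Simplified stand-in for the head above: convs keep the spatial size,
    # then the feature map is flattened and pushed through fully connected layers.
    def __init__(self, in_channels=256, conv_out_channels=256, fc_out_channels=1024,
                 roi_feat_size=7, num_convs=2, num_fcs=1):
        super().__init__()
        self.roi_feat_area = roi_feat_size * roi_feat_size
        convs, fcs = nn.ModuleList(), nn.ModuleList()
        last_dim = in_channels
        for i in range(num_convs):
            convs.append(nn.Sequential(
                nn.Conv2d(last_dim if i == 0 else conv_out_channels,
                          conv_out_channels, 3, padding=1),
                nn.ReLU(inplace=True)))
            last_dim = conv_out_channels
        last_dim *= self.roi_feat_area  # flatten before the fully connected layers
        for i in range(num_fcs):
            fcs.append(nn.Linear(last_dim if i == 0 else fc_out_channels, fc_out_channels))
            last_dim = fc_out_channels
        self.convs, self.fcs = convs, fcs

    def forward(self, x):
        for conv in self.convs:
            x = conv(x)
        x = x.flatten(1)
        for fc in self.fcs:
            x = torch.relu(fc(x))
        return x

feats = torch.randn(4, 256, 7, 7)    # 4 RoI feature maps
print(TinyBranchHead()(feats).shape)  # torch.Size([4, 1024])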
dd561cc9a90b5790f4502167e0b28b8a555d5f401beb0c07e86cceeefe8320f1
|
def forward(self, x, res_feat=None, return_logits=False, return_feat=False, return_last=False):
    '\n\n :param x:\n :param res_feat:\n :param return_logits:\n :param return_feat: used for interleaving (currently no use!)\n :param return_last: used for translation estimation\n :return:\n '
if (self.num_shared_convs > 0):
for conv in self.shared_convs:
x = conv(x)
if (self.num_shared_fcs > 0):
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.view(x.size(0), (- 1))
for fc in self.shared_fcs:
x = self.relu(fc(x))
x_cls = x
x_reg = x
last_feat = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if (x_cls.dim() > 2):
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.view(x_cls.size(0), (- 1))
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if (x_reg.dim() > 2):
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.view(x_reg.size(0), (- 1))
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
car_cls_score_pred = (self.fc_cls(x_cls) if self.with_cls else None)
quaternion_pred = (self.fc_reg(x_reg) if self.with_reg else None)
quaternion_pred = nn.functional.normalize(quaternion_pred, p=2, dim=1)
outs = []
if return_logits:
outs.append(car_cls_score_pred)
outs.append(quaternion_pred)
if return_feat:
raise NotImplementedError
if return_last:
outs.append(last_feat)
return (outs if (len(outs) > 1) else outs[0])
|
:param x:
:param res_feat:
:param return_logits:
:param return_feat: used for interleaving (currently no use!)
:param return_last: used for translation estimation
:return:
|
mmdet/models/bbox_heads/carcls_rot_head.py
|
forward
|
tyunist/Kaggle_PKU_Baidu
| 59 |
python
|
def forward(self, x, res_feat=None, return_logits=False, return_feat=False, return_last=False):
    '\n\n :param x:\n :param res_feat:\n :param return_logits:\n :param return_feat: used for interleaving (currently no use!)\n :param return_last: used for translation estimation\n :return:\n '
if (self.num_shared_convs > 0):
for conv in self.shared_convs:
x = conv(x)
if (self.num_shared_fcs > 0):
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.view(x.size(0), (- 1))
for fc in self.shared_fcs:
x = self.relu(fc(x))
x_cls = x
x_reg = x
last_feat = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if (x_cls.dim() > 2):
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.view(x_cls.size(0), (- 1))
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if (x_reg.dim() > 2):
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.view(x_reg.size(0), (- 1))
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
car_cls_score_pred = (self.fc_cls(x_cls) if self.with_cls else None)
quaternion_pred = (self.fc_reg(x_reg) if self.with_reg else None)
quaternion_pred = nn.functional.normalize(quaternion_pred, p=2, dim=1)
outs = []
if return_logits:
outs.append(car_cls_score_pred)
outs.append(quaternion_pred)
if return_feat:
raise NotImplementedError
if return_last:
outs.append(last_feat)
return (outs if (len(outs) > 1) else outs[0])
|
def forward(self, x, res_feat=None, return_logits=False, return_feat=False, return_last=False):
    '\n\n :param x:\n :param res_feat:\n :param return_logits:\n :param return_feat: used for interleaving (currently no use!)\n :param return_last: used for translation estimation\n :return:\n '
if (self.num_shared_convs > 0):
for conv in self.shared_convs:
x = conv(x)
if (self.num_shared_fcs > 0):
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.view(x.size(0), (- 1))
for fc in self.shared_fcs:
x = self.relu(fc(x))
x_cls = x
x_reg = x
last_feat = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if (x_cls.dim() > 2):
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.view(x_cls.size(0), (- 1))
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if (x_reg.dim() > 2):
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.view(x_reg.size(0), (- 1))
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
car_cls_score_pred = (self.fc_cls(x_cls) if self.with_cls else None)
quaternion_pred = (self.fc_reg(x_reg) if self.with_reg else None)
quaternion_pred = nn.functional.normalize(quaternion_pred, p=2, dim=1)
outs = []
if return_logits:
outs.append(car_cls_score_pred)
outs.append(quaternion_pred)
if return_feat:
raise NotImplementedError
if return_last:
outs.append(last_feat)
return (outs if (len(outs) > 1) else outs[0])<|docstring|>:param x:
:param res_feat:
:param return_logits:
:param return_feat: used for interleaving (currently no use!)
:param return_last: used for translation estimation
:return:<|endoftext|>
|
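One detail worth noting in the forward pass above: the regression output is projected onto the unit sphere with nn.functional.normalize(..., p=2, dim=1), so every predicted quaternion has unit norm, as a valid rotation quaternion must. A minimal illustration of that call:

import torch
import torch.nn.functional as F

raw = torch.tensor([[2.0, 0.0, 0.0, 0.0],
                    [1.0, 1.0, 1.0, 1.0]])
quat = F.normalize(raw, p=2, dim=1)  # divide each row by its L2 norm
print(quat)                          # rows [1, 0, 0, 0] and [0.5, 0.5, 0.5, 0.5]
print(quat.norm(dim=1))              # both norms are 1.0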
793a27a3365a47c8d170a7353b20417aae2733d74436dbdfbda282002bc4a4b4
|
def get_allowed_auths(self, username):
    'List available auth mechanisms.'
return 'password,publickey'
|
List available auth mechanisms.
|
sftpServer/StubServer.py
|
get_allowed_auths
|
wengyan123/SFTPServer
| 0 |
python
|
def get_allowed_auths(self, username):
return 'password,publickey'
|
def get_allowed_auths(self, username):
    return 'password,publickey'<|docstring|>List available auth mechanisms.<|endoftext|>
|
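get_allowed_auths is a paramiko.ServerInterface hook: it returns the comma-separated list of authentication methods the server advertises after a failed attempt. A minimal sketch of the surrounding stub server; the accept-everything password check is an assumption for illustration, not the repository's actual policy:

import paramiko

class StubServer(paramiko.ServerInterface):
    # Toy SSH server interface that advertises password and publickey auth.

    def get_allowed_auths(self, username):
        # Comma-separated list of methods offered to the client.
        return 'password,publickey'

    def check_auth_password(self, username, password):
        # Accept any password -- fine for a test stub, never for production.
        return paramiko.AUTH_SUCCESSFUL

    def check_channel_request(self, kind, chanid):
        # Allow session channels so an SFTP subsystem can be opened on top.
        if kind == 'session':
            return paramiko.OPEN_SUCCEEDED
        return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED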
a4ec26e9c4fe83a69fd4faaa79e29a4f2e466f689ab69f5ce8a989dc42b74289
|
def heapify(arr, n, i):
'\n Heapifies subtree rooted at index i\n n - size of the heap\n '
largest = i
l = ((2 * i) + 1)
r = ((2 * i) + 2)
if ((l < n) and (arr[largest] < arr[l])):
largest = l
if ((r < n) and (arr[largest] < arr[r])):
largest = r
if (largest != i):
(arr[i], arr[largest]) = (arr[largest], arr[i])
heapify(arr, n, largest)
|
Heapifies subtree rooted at index i
n - size of the heap
|
Heapsort/heapsort.py
|
heapify
|
billyateallcookies/python
| 1 |
python
|
def heapify(arr, n, i):
'\n Heapifies subtree rooted at index i\n n - size of the heap\n '
largest = i
l = ((2 * i) + 1)
r = ((2 * i) + 2)
if ((l < n) and (arr[largest] < arr[l])):
largest = l
if ((r < n) and (arr[largest] < arr[r])):
largest = r
if (largest != i):
(arr[i], arr[largest]) = (arr[largest], arr[i])
heapify(arr, n, largest)
|
def heapify(arr, n, i):
'\n Heapifies subtree rooted at index i\n n - size of the heap\n '
largest = i
l = ((2 * i) + 1)
r = ((2 * i) + 2)
if ((l < n) and (arr[largest] < arr[l])):
largest = l
if ((r < n) and (arr[largest] < arr[r])):
largest = r
if (largest != i):
(arr[i], arr[largest]) = (arr[largest], arr[i])
heapify(arr, n, largest)<|docstring|>Heapifies subtree rooted at index i
n - size of the heap<|endoftext|>
|
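heapify only repairs a single subtree; a full heapsort builds the max-heap bottom-up and then repeatedly swaps the root with the last element of the shrinking heap. A short driver that reuses the heapify above:

def heapsort(arr):
    n = len(arr)
    # Build a max-heap by heapifying every internal node, last parent first.
    for i in range(n // 2 - 1, -1, -1):
        heapify(arr, n, i)
    # Move the current max to the end, then restore the heap on the prefix.
    for end in range(n - 1, 0, -1):
        arr[0], arr[end] = arr[end], arr[0]
        heapify(arr, end, 0)

data = [12, 11, 13, 5, 6, 7]
heapsort(data)
print(data)  # [5, 6, 7, 11, 12, 13]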
83976c6c1ae5ca25e2eb07784ec47d8daf94be8034ad471471367ec5dfdbda2b
|
def dis(chars):
'A :func:`dis.dis` for terminals.\n\n >>> dis(u"\x07")\n BELL\n >>> dis(u"\x9b20m")\n SELECT-GRAPHIC-RENDITION 20\n '
if isinstance(chars, str):
chars = chars.encode('utf-8')
return DebugStream().feed(chars)
|
A :func:`dis.dis` for terminals.
>>> dis(u"")
BELL
>>> dis(u"20m")
SELECT-GRAPHIC-RENDITION 20
|
pyte/__init__.py
|
dis
|
bitst0rm-st3/TerminalView
| 948 |
python
|
def dis(chars):
'A :func:`dis.dis` for terminals.\n\n >>> dis(u"\x07")\n BELL\n >>> dis(u"\x9b20m")\n SELECT-GRAPHIC-RENDITION 20\n '
if isinstance(chars, str):
chars = chars.encode('utf-8')
return DebugStream().feed(chars)
|
def dis(chars):
'A :func:`dis.dis` for terminals.\n\n >>> dis(u"\x07")\n BELL\n >>> dis(u"\x9b20m")\n SELECT-GRAPHIC-RENDITION 20\n '
if isinstance(chars, str):
chars = chars.encode('utf-8')
return DebugStream().feed(chars)<|docstring|>A :func:`dis.dis` for terminals.
>>> dis(u"")
BELL
>>> dis(u"20m")
SELECT-GRAPHIC-RENDITION 20<|endoftext|>
|
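The doctest in dis uses literal control bytes (BEL and CSI) that the rendered docstring above had stripped; calling it with their escaped forms shows the intended behaviour. This assumes a pyte installation that ships the helper in pyte/__init__.py, as the path field indicates:

import pyte

pyte.dis(u"\x07")     # prints: BELL
pyte.dis(u"\x9b20m")  # prints: SELECT-GRAPHIC-RENDITION 20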
d0848da0e4ae585032838c3f400d2df3ea67a50b710674cde06cd809c0197b86
|
def test_create_tag(self):
'Test case for create_tag\n\n Create a new tag # noqa: E501\n '
pass
|
Test case for create_tag
Create a new tag # noqa: E501
|
test/test_tags_api.py
|
test_create_tag
|
p-fruck/python-contabo
| 2 |
python
|
def test_create_tag(self):
'Test case for create_tag\n\n Create a new tag # noqa: E501\n '
pass
|
def test_create_tag(self):
'Test case for create_tag\n\n Create a new tag # noqa: E501\n '
pass<|docstring|>Test case for create_tag
Create a new tag # noqa: E501<|endoftext|>
|
e4eb9a8a1dc2cd2ea18873c8c0f39dad076a5ee9f11a850fbf3f7c52cbb98300
|
def test_delete_tag(self):
'Test case for delete_tag\n\n Delete existing tag by id # noqa: E501\n '
pass
|
Test case for delete_tag
Delete existing tag by id # noqa: E501
|
test/test_tags_api.py
|
test_delete_tag
|
p-fruck/python-contabo
| 2 |
python
|
def test_delete_tag(self):
'Test case for delete_tag\n\n Delete existing tag by id # noqa: E501\n '
pass
|
def test_delete_tag(self):
'Test case for delete_tag\n\n Delete existing tag by id # noqa: E501\n '
pass<|docstring|>Test case for delete_tag
Delete existing tag by id # noqa: E501<|endoftext|>
|
3e2b3936c5c2d2e9ccf952e32cca516573f9bd33aa8c1f3abc2996232ee3448e
|
def test_retrieve_tag(self):
'Test case for retrieve_tag\n\n Get specific tag by id # noqa: E501\n '
pass
|
Test case for retrieve_tag
Get specific tag by id # noqa: E501
|
test/test_tags_api.py
|
test_retrieve_tag
|
p-fruck/python-contabo
| 2 |
python
|
def test_retrieve_tag(self):
'Test case for retrieve_tag\n\n Get specific tag by id # noqa: E501\n '
pass
|
def test_retrieve_tag(self):
'Test case for retrieve_tag\n\n Get specific tag by id # noqa: E501\n '
pass<|docstring|>Test case for retrieve_tag
Get specific tag by id # noqa: E501<|endoftext|>
|
42125c3833b4b0f4b6cfdf65c9fc2a32e5063ba89122893115cbb7d34d3bd3d6
|
def test_retrieve_tag_list(self):
'Test case for retrieve_tag_list\n\n List tags # noqa: E501\n '
pass
|
Test case for retrieve_tag_list
List tags # noqa: E501
|
test/test_tags_api.py
|
test_retrieve_tag_list
|
p-fruck/python-contabo
| 2 |
python
|
def test_retrieve_tag_list(self):
'Test case for retrieve_tag_list\n\n List tags # noqa: E501\n '
pass
|
def test_retrieve_tag_list(self):
'Test case for retrieve_tag_list\n\n List tags # noqa: E501\n '
pass<|docstring|>Test case for retrieve_tag_list
List tags # noqa: E501<|endoftext|>
|
020c1cc63dd143b5cc00bd0f4eda4a2ca2b2bd9bda152534214a40e4aa05da3b
|
def test_update_tag(self):
'Test case for update_tag\n\n Update specific tag by id # noqa: E501\n '
pass
|
Test case for update_tag
Update specific tag by id # noqa: E501
|
test/test_tags_api.py
|
test_update_tag
|
p-fruck/python-contabo
| 2 |
python
|
def test_update_tag(self):
'Test case for update_tag\n\n Update specific tag by id # noqa: E501\n '
pass
|
def test_update_tag(self):
'Test case for update_tag\n\n Update specific tag by id # noqa: E501\n '
pass<|docstring|>Test case for update_tag
Update specific tag by id # noqa: E501<|endoftext|>
|
6cfcad68afd7070ae7d755f1527600b0f3af076213e2becfba5fdcb78bc3981d
|
@pytest.fixture
def model(self):
'A model with a foreign key to Page\n which we want to render as a page chooser\n '
return PageChooserModel
|
A model with a foreign key to Page
which we want to render as a page chooser
|
tests/test_modeladminutils/test_edit_handlers.py
|
model
|
kinaklub/next.filmfest.by
| 7 |
python
|
@pytest.fixture
def model(self):
'A model with a foreign key to Page\n which we want to render as a page chooser\n '
return PageChooserModel
|
@pytest.fixture
def model(self):
'A model with a foreign key to Page\n which we want to render as a page chooser\n '
return PageChooserModel<|docstring|>A model with a foreign key to Page
which we want to render as a page chooser<|endoftext|>
|
62b668c3945a8952c58337797cdf3cf4c808bed6920caed9f8a4206efbd6e375
|
@pytest.fixture
def edit_handler_class(self, model):
"A AdminModelChooserPanel class that works on\n PageChooserModel's 'page' field\n "
object_list = ObjectList([AdminModelChooserPanel('page')])
return object_list.bind_to_model(model)
|
An AdminModelChooserPanel class that works on
PageChooserModel's 'page' field
|
tests/test_modeladminutils/test_edit_handlers.py
|
edit_handler_class
|
kinaklub/next.filmfest.by
| 7 |
python
|
@pytest.fixture
def edit_handler_class(self, model):
"A AdminModelChooserPanel class that works on\n PageChooserModel's 'page' field\n "
object_list = ObjectList([AdminModelChooserPanel('page')])
return object_list.bind_to_model(model)
|
@pytest.fixture
def edit_handler_class(self, model):
"A AdminModelChooserPanel class that works on\n PageChooserModel's 'page' field\n "
object_list = ObjectList([AdminModelChooserPanel('page')])
    return object_list.bind_to_model(model)<|docstring|>An AdminModelChooserPanel class that works on
PageChooserModel's 'page' field<|endoftext|>
|
472af7c6357d3a654b9db860d9142f8e79b9f561b17ae48b798284d2863f7d34
|
@pytest.fixture
def form_class(self, model, edit_handler_class):
'A form class containing the fields\n that MyPageChooserPanel wants\n '
return edit_handler_class.get_form_class(model)
|
A form class containing the fields
that MyPageChooserPanel wants
|
tests/test_modeladminutils/test_edit_handlers.py
|
form_class
|
kinaklub/next.filmfest.by
| 7 |
python
|
@pytest.fixture
def form_class(self, model, edit_handler_class):
'A form class containing the fields\n that MyPageChooserPanel wants\n '
return edit_handler_class.get_form_class(model)
|
@pytest.fixture
def form_class(self, model, edit_handler_class):
'A form class containing the fields\n that MyPageChooserPanel wants\n '
return edit_handler_class.get_form_class(model)<|docstring|>A form class containing the fields
that MyPageChooserPanel wants<|endoftext|>
|
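A hedged sketch of a test that would consume the three fixtures above; the assertion is illustrative (the bound form class should expose the foreign key as a 'page' form field), not a test taken from the repository:

def test_chooser_panel_exposes_page_field(self, form_class):
    # get_form_class(model) builds a ModelForm limited to the panel's fields,
    # so the 'page' foreign key should appear as a form field.
    form = form_class()
    assert 'page' in form.fields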