Dataset schema (min/max are string lengths in characters unless noted):

| column | type | min | max |
|---|---|---|---|
| body_hash | stringlengths | 64 | 64 |
| body | stringlengths | 23 | 109k |
| docstring | stringlengths | 1 | 57k |
| path | stringlengths | 4 | 198 |
| name | stringlengths | 1 | 115 |
| repository_name | stringlengths | 7 | 111 |
| repository_stars | float64 | 0 | 191k |
| lang | stringclasses (1 value) | — | — |
| body_without_docstring | stringlengths | 14 | 108k |
| unified | stringlengths | 45 | 133k |
49efe8e2b62956d4e7a80a04ad69832b505d38ef4ac50f446f051acfe410f7d0
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05): ' Randomly jitter points. jittering is per point.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, jittered batch of point clouds\n ' (B, N, C) = batch_data.shape assert (clip > 0) jittered_data = np.clip((sigma * np.random.randn(B, N, C)), ((- 1) * clip), clip) jittered_data += batch_data return jittered_data
Randomly jitter points. Jittering is per point. Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, jittered batch of point clouds
torchpcp/datasets/S3DIS/utils/provider.py
jitter_point_cloud
Obarads/torch_point_cloud
1
python
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05): ' Randomly jitter points. jittering is per point.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, jittered batch of point clouds\n ' (B, N, C) = batch_data.shape assert (clip > 0) jittered_data = np.clip((sigma * np.random.randn(B, N, C)), ((- 1) * clip), clip) jittered_data += batch_data return jittered_data
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05): ' Randomly jitter points. jittering is per point.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, jittered batch of point clouds\n ' (B, N, C) = batch_data.shape assert (clip > 0) jittered_data = np.clip((sigma * np.random.randn(B, N, C)), ((- 1) * clip), clip) jittered_data += batch_data return jittered_data<|docstring|>Randomly jitter points. jittering is per point. Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, jittered batch of point clouds<|endoftext|>
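For readers skimming the first sample, here is the same per-point jitter augmentation unflattened into a runnable sketch with a quick sanity check; it assumes only `numpy`, and the batch dimensions are arbitrary.

```python
import numpy as np

def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
    """Add zero-mean Gaussian noise to every point, clipped to [-clip, clip]."""
    B, N, C = batch_data.shape
    assert clip > 0
    noise = np.clip(sigma * np.random.randn(B, N, C), -clip, clip)
    return batch_data + noise

batch = np.random.rand(2, 1024, 3)              # B=2 clouds, N=1024 points, xyz
jittered = jitter_point_cloud(batch)
assert jittered.shape == batch.shape
assert np.abs(jittered - batch).max() <= 0.05   # no point moves farther than clip
```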
caa8426af58ddbd818b02c638dc254846a249ac58a34dbcc925aba581b132721
def simpledistance(seq_1, seq_2): 'From two binary sequences, compute their distance.' differences = (1 for (a, b) in zip(seq_1, seq_2) if (a != b)) return sum(differences)
From two binary sequences, compute their distance.
phylogeny/.ipynb_checkpoints/clocklike_reconstruction-checkpoint.py
simpledistance
Ad115/Phylogeny
2
python
def simpledistance(seq_1, seq_2): differences = (1 for (a, b) in zip(seq_1, seq_2) if (a != b)) return sum(differences)
def simpledistance(seq_1, seq_2): differences = (1 for (a, b) in zip(seq_1, seq_2) if (a != b)) return sum(differences)<|docstring|>From two binary sequences, compute their distance.<|endoftext|>
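`simpledistance` is a Hamming distance over any two equal-length iterables; a standalone check, with one caveat worth knowing:

```python
def simpledistance(seq_1, seq_2):
    """Count positions where two binary sequences differ."""
    return sum(1 for a, b in zip(seq_1, seq_2) if a != b)

assert simpledistance('0110', '0011') == 2
assert simpledistance([0, 1, 1], [0, 1, 1]) == 0
# Caveat: zip() stops at the shorter input, so sequences of unequal
# length are silently truncated rather than rejected.
```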
f16cbc0ae4cb0c3d584c1ce54ba623e571ac83a9a5fbb127367acac98dffbdc4
def __getitem__(self, index): '\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n ' (img, _) = (self.data[index], int(self.targets[index])) if (self.train is False): angle = self.rotations_legit[(index % len(self.rotations_legit))] else: angle = random.choice(self.rotations_legit) angle_deg = ((angle * 180) / np.pi) img = Image.fromarray(img.numpy(), mode='L') img_rot = TF.rotate(img, angle_deg) if (self.transform is not None): img = self.transform(img) img_rot = self.transform(img_rot) return (img_rot, angle, img)
Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class.
src/dataset_utils.py
__getitem__
kobybibas/CyroEM_rotation_invariant
2
python
def __getitem__(self, index): '\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n ' (img, _) = (self.data[index], int(self.targets[index])) if (self.train is False): angle = self.rotations_legit[(index % len(self.rotations_legit))] else: angle = random.choice(self.rotations_legit) angle_deg = ((angle * 180) / np.pi) img = Image.fromarray(img.numpy(), mode='L') img_rot = TF.rotate(img, angle_deg) if (self.transform is not None): img = self.transform(img) img_rot = self.transform(img_rot) return (img_rot, angle, img)
def __getitem__(self, index): '\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n ' (img, _) = (self.data[index], int(self.targets[index])) if (self.train is False): angle = self.rotations_legit[(index % len(self.rotations_legit))] else: angle = random.choice(self.rotations_legit) angle_deg = ((angle * 180) / np.pi) img = Image.fromarray(img.numpy(), mode='L') img_rot = TF.rotate(img, angle_deg) if (self.transform is not None): img = self.transform(img) img_rot = self.transform(img_rot) return (img_rot, angle, img)<|docstring|>Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class.<|endoftext|>
6980fecca44f302f7663d412d7b38fca684c256a5f083a383cefc6d384ab8635
def __getitem__(self, index): '\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n ' (img_rot, angle_deg, img) = (self.imgs_rot[index], self.targets[index], self.imgs[index]) if (angle_deg > 180): angle_deg = (- (360 - angle_deg)) img = Image.fromarray(img, mode='P') img_rot = Image.fromarray(img_rot, mode='P') if (self.transform is not None): img = self.transform(img) img_rot = self.transform(img_rot) angle_rad = ((np.pi * angle_deg) / 180) return (img_rot, angle_rad, img)
Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class.
src/dataset_utils.py
__getitem__
kobybibas/CyroEM_rotation_invariant
2
python
def __getitem__(self, index): '\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n ' (img_rot, angle_deg, img) = (self.imgs_rot[index], self.targets[index], self.imgs[index]) if (angle_deg > 180): angle_deg = (- (360 - angle_deg)) img = Image.fromarray(img, mode='P') img_rot = Image.fromarray(img_rot, mode='P') if (self.transform is not None): img = self.transform(img) img_rot = self.transform(img_rot) angle_rad = ((np.pi * angle_deg) / 180) return (img_rot, angle_rad, img)
def __getitem__(self, index): '\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n ' (img_rot, angle_deg, img) = (self.imgs_rot[index], self.targets[index], self.imgs[index]) if (angle_deg > 180): angle_deg = (- (360 - angle_deg)) img = Image.fromarray(img, mode='P') img_rot = Image.fromarray(img_rot, mode='P') if (self.transform is not None): img = self.transform(img) img_rot = self.transform(img_rot) angle_rad = ((np.pi * angle_deg) / 180) return (img_rot, angle_rad, img)<|docstring|>Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class.<|endoftext|>
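Both `__getitem__` samples hinge on the same small angle arithmetic: fold degrees above 180 onto the equivalent negative rotation, then convert between degrees and radians. A standalone sketch of just that step:

```python
import numpy as np

def wrap_deg(angle_deg):
    # Same branch as the second __getitem__: map (180, 360) onto the
    # equivalent negative rotation so angles live in (-180, 180].
    if angle_deg > 180:
        angle_deg = -(360 - angle_deg)
    return angle_deg

assert wrap_deg(270) == -90
assert wrap_deg(90) == 90
assert np.isclose(np.pi * wrap_deg(270) / 180, -np.pi / 2)  # degrees -> radians
```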
47617a2ad61e511a4ca2ea90f688811e9ee93430f19ef1ad1fd3972b0ee06347
def get_test_family(): 'Get font family with actual size equal to given size\n ' for family in teek.Font.families(): font = teek.Font((family, 42)) if (font.size == 42): return family return teek.Font.families()[0]
Get font family with actual size equal to given size
tests/test_font.py
get_test_family
codetent/teek
23
python
def get_test_family(): '\n ' for family in teek.Font.families(): font = teek.Font((family, 42)) if (font.size == 42): return family return teek.Font.families()[0]
def get_test_family(): '\n ' for family in teek.Font.families(): font = teek.Font((family, 42)) if (font.size == 42): return family return teek.Font.families()[0]<|docstring|>Get font family with actual size equal to given size<|endoftext|>
c8de05a95c713a3bdc965f028d1a934abdf02a2577fbf179da8a05891626ce00
@classmethod def import_handlers(cls, directory): 'A text file named "manifest" can be placed in the dir we are importing the handlers from.\n It can contain the list of the files to import, the bot will import only these\n modules as ordered in the manifest file.\n Inline comments are allowed, they must start by #' paths_to_import = list() manifest_modules = cls._load_manifest(os.path.join(directory, 'manifest')) if manifest_modules: target_dir_path = os.path.splitext(directory)[0] target_dir_import_path_list = list() while target_dir_path: (target_dir_path, tail) = os.path.split(target_dir_path) target_dir_import_path_list.insert(0, tail) base_import_path = '.'.join(target_dir_import_path_list) for module in manifest_modules: import_path = (base_import_path + module) logger.debug('importing module: %s', import_path) paths_to_import.append(import_path) else: for path in sorted(Path(directory).rglob('*.py')): file_path = os.path.splitext(str(path))[0] import_path = [] while file_path: (file_path, tail) = os.path.split(file_path) import_path.insert(0, tail) import_path = '.'.join(import_path) paths_to_import.append(import_path) for import_path in paths_to_import: logger.debug('importing module: %s', import_path) importlib.import_module(import_path)
A text file named "manifest" can be placed in the dir we are importing the handlers from. It can contain the list of the files to import; the bot will import only these modules, as ordered in the manifest file. Inline comments are allowed; they must start with #
bot/bot.py
import_handlers
cheriimoya/sticker-thief
0
python
@classmethod def import_handlers(cls, directory): 'A text file named "manifest" can be placed in the dir we are importing the handlers from.\n It can contain the list of the files to import, the bot will import only these\n modules as ordered in the manifest file.\n Inline comments are allowed, they must start by #' paths_to_import = list() manifest_modules = cls._load_manifest(os.path.join(directory, 'manifest')) if manifest_modules: target_dir_path = os.path.splitext(directory)[0] target_dir_import_path_list = list() while target_dir_path: (target_dir_path, tail) = os.path.split(target_dir_path) target_dir_import_path_list.insert(0, tail) base_import_path = '.'.join(target_dir_import_path_list) for module in manifest_modules: import_path = (base_import_path + module) logger.debug('importing module: %s', import_path) paths_to_import.append(import_path) else: for path in sorted(Path(directory).rglob('*.py')): file_path = os.path.splitext(str(path))[0] import_path = [] while file_path: (file_path, tail) = os.path.split(file_path) import_path.insert(0, tail) import_path = '.'.join(import_path) paths_to_import.append(import_path) for import_path in paths_to_import: logger.debug('importing module: %s', import_path) importlib.import_module(import_path)
@classmethod def import_handlers(cls, directory): 'A text file named "manifest" can be placed in the dir we are importing the handlers from.\n It can contain the list of the files to import, the bot will import only these\n modules as ordered in the manifest file.\n Inline comments are allowed, they must start by #' paths_to_import = list() manifest_modules = cls._load_manifest(os.path.join(directory, 'manifest')) if manifest_modules: target_dir_path = os.path.splitext(directory)[0] target_dir_import_path_list = list() while target_dir_path: (target_dir_path, tail) = os.path.split(target_dir_path) target_dir_import_path_list.insert(0, tail) base_import_path = '.'.join(target_dir_import_path_list) for module in manifest_modules: import_path = (base_import_path + module) logger.debug('importing module: %s', import_path) paths_to_import.append(import_path) else: for path in sorted(Path(directory).rglob('*.py')): file_path = os.path.splitext(str(path))[0] import_path = [] while file_path: (file_path, tail) = os.path.split(file_path) import_path.insert(0, tail) import_path = '.'.join(import_path) paths_to_import.append(import_path) for import_path in paths_to_import: logger.debug('importing module: %s', import_path) importlib.import_module(import_path)<|docstring|>A text file named "manifest" can be placed in the dir we are importing the handlers from. It can contain the list of the files to import, the bot will import only these modules as ordered in the manifest file. Inline comments are allowed, they must start by #<|endoftext|>
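A hedged usage sketch for `import_handlers`: the directory layout, module names, and the `StickerBot` class name below are hypothetical, chosen only to illustrate the manifest convention the docstring describes.

```python
# Hypothetical layout:
#   bot/handlers/manifest    <- optional; one module per line, '#' comments allowed
#       start                # /start command
#       stickers             # sticker handling
#   bot/handlers/start.py
#   bot/handlers/stickers.py

StickerBot.import_handlers('bot/handlers')
# With a manifest: imports only the listed modules, in manifest order.
# Without one: imports every *.py under the directory, in sorted path order.
```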
ad94a10c735d1ec4cf351cddad7c75a7d19c6551570fb3554e0193a8272bbf14
def proc_test_match(self, status, entry): '\n @status\n @entry\n ' self.amend_test_match(entry) return self.status
@status @entry
stage_check/stage_check/OutputDeviceState.py
proc_test_match
128technology/stage_check
2
python
def proc_test_match(self, status, entry): '\n @status\n @entry\n ' self.amend_test_match(entry) return self.status
def proc_test_match(self, status, entry): '\n @status\n @entry\n ' self.amend_test_match(entry) return self.status<|docstring|>@status @entry<|endoftext|>
32c58283ffe95d94277b011d1710713f8c63b50ba6f728762a4c957889a2374f
def amend_test_match(self, entry): '\n @status\n @entry\n ' return True
@status @entry
stage_check/stage_check/OutputDeviceState.py
amend_test_match
128technology/stage_check
2
python
def amend_test_match(self, entry): '\n @status\n @entry\n ' return True
def amend_test_match(self, entry): '\n @status\n @entry\n ' return True<|docstring|>@status @entry<|endoftext|>
4cfa23c6fd2b8c4b3149f8152da72b25b9112eeae1b744e995cabad6d3ffa8e2
def proc_test_fail_result(self, count): '\n @count\n ' self.status = Output.Status.FAIL self.amend_test_fail_result(count) return self.status
@count
stage_check/stage_check/OutputDeviceState.py
proc_test_fail_result
128technology/stage_check
2
python
def proc_test_fail_result(self, count): '\n \n ' self.status = Output.Status.FAIL self.amend_test_fail_result(count) return self.status
def proc_test_fail_result(self, count): '\n \n ' self.status = Output.Status.FAIL self.amend_test_fail_result(count) return self.status<|docstring|>@count<|endoftext|>
05e9bfa51dd4182466ca1e88e60fd7dd6e0b87159a52d7f5714d8319c31ea508
def amend_test_fail_result(self, count): '\n @count\n ' return True
@count
stage_check/stage_check/OutputDeviceState.py
amend_test_fail_result
128technology/stage_check
2
python
def amend_test_fail_result(self, count): '\n \n ' return True
def amend_test_fail_result(self, count): '\n \n ' return True<|docstring|>@count<|endoftext|>
a7d7e0999c01a32caee8dad61f78cb19d1eccd6f5e276f5496fe3b54c422a767
def proc_test_warn_result(self, count): '\n @count\n ' self.status = Output.Status.WARN self.amend_test_warn_result(count) return self.status
@count
stage_check/stage_check/OutputDeviceState.py
proc_test_warn_result
128technology/stage_check
2
python
def proc_test_warn_result(self, count): '\n \n ' self.status = Output.Status.WARN self.amend_test_warn_result(count) return self.status
def proc_test_warn_result(self, count): '\n \n ' self.status = Output.Status.WARN self.amend_test_warn_result(count) return self.status<|docstring|>@count<|endoftext|>
e6b4eb411503e050469424cbe667e161cff3a860402c3c6a67594be21ef732e2
def amend_test_warn_result(self, count): '\n @count\n ' return True
@count
stage_check/stage_check/OutputDeviceState.py
amend_test_warn_result
128technology/stage_check
2
python
def amend_test_warn_result(self, count): '\n \n ' return True
def amend_test_warn_result(self, count): '\n \n ' return True<|docstring|>@count<|endoftext|>
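The six stage_check samples above share one template-method shape: each `proc_*` method fixes `self.status` (FAIL, WARN, or the match status) and then calls a matching `amend_*` hook that returns True by default. A minimal sketch of overriding such a hook (both class names are assumptions, the base name inferred from the sample's file path; only the hook signature comes from the samples):

```python
class MyDeviceState(OutputDeviceState):  # hypothetical subclass
    def amend_test_fail_result(self, count):
        # Called by proc_test_fail_result after self.status is set to
        # Output.Status.FAIL; subclasses add reporting here without
        # touching the status-transition logic.
        print(f'{count} device test(s) failed')
        return True
```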
c8a3be1d99070ee5d140c5ae69a23add2dadbca1d432a8cc00d13c3e43e41d03
def test_login_required(self): 'Test that login is required for retrieving tags' res = self.client.get(TAGS_URL) self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
Test that login is required for retrieving tags
app/recipe/tests/test_tags_api.py
test_login_required
ViniciusTaborda/recipe-app-api
1
python
def test_login_required(self): res = self.client.get(TAGS_URL) self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_login_required(self): res = self.client.get(TAGS_URL) self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)<|docstring|>Test that login is required for retrieving tags<|endoftext|>
7dec0d303d6c808bd6abbb3c61989a2aa79cf2e9eedf4df938467843e2f56b0c
def test_retrieve_tags(self): 'Test retrieving tags' Tag.objects.create(user=self.user, name='Vegan') Tag.objects.create(user=self.user, name='Dessert') res = self.client.get(TAGS_URL) tags = Tag.objects.all().order_by('-name') serializer = TagSerializer(tags, many=True) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, serializer.data)
Test retrieving tags
app/recipe/tests/test_tags_api.py
test_retrieve_tags
ViniciusTaborda/recipe-app-api
1
python
def test_retrieve_tags(self): Tag.objects.create(user=self.user, name='Vegan') Tag.objects.create(user=self.user, name='Dessert') res = self.client.get(TAGS_URL) tags = Tag.objects.all().order_by('-name') serializer = TagSerializer(tags, many=True) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, serializer.data)
def test_retrieve_tags(self): Tag.objects.create(user=self.user, name='Vegan') Tag.objects.create(user=self.user, name='Dessert') res = self.client.get(TAGS_URL) tags = Tag.objects.all().order_by('-name') serializer = TagSerializer(tags, many=True) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, serializer.data)<|docstring|>Test retrieving tags<|endoftext|>
22e7c2d82547f62c7d08901e13b55b18a50073b2a1ed565b0433140e2f1bb77c
def test_tags_limited_to_user(self): 'Test that tags returned are for the authenticated user' user2 = get_user_model().objects.create_user('[email protected]', 'testpass') Tag.objects.create(user=user2, name='Fruity') tag = Tag.objects.create(user=self.user, name='Comfort Food') res = self.client.get(TAGS_URL) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(len(res.data), 1) self.assertEqual(res.data[0]['name'], tag.name)
Test that tags returned are for the authenticated user
app/recipe/tests/test_tags_api.py
test_tags_limited_to_user
ViniciusTaborda/recipe-app-api
1
python
def test_tags_limited_to_user(self): user2 = get_user_model().objects.create_user('[email protected]', 'testpass') Tag.objects.create(user=user2, name='Fruity') tag = Tag.objects.create(user=self.user, name='Comfort Food') res = self.client.get(TAGS_URL) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(len(res.data), 1) self.assertEqual(res.data[0]['name'], tag.name)
def test_tags_limited_to_user(self): user2 = get_user_model().objects.create_user('[email protected]', 'testpass') Tag.objects.create(user=user2, name='Fruity') tag = Tag.objects.create(user=self.user, name='Comfort Food') res = self.client.get(TAGS_URL) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(len(res.data), 1) self.assertEqual(res.data[0]['name'], tag.name)<|docstring|>Test that tags returned are for the authenticated user<|endoftext|>
726b888001375bce3e2cb87f9fadef5a077a264cbf1451f90aeabd487f476f46
def handle_nan_entries(data, mean_cols=[], median_cols=[], mode_cols=[], n_cols=[], skip_cols=[]): '\n Generic handling strategy for Nan entries in columns with 3 strategies\n fill with mean, \n fill with mode\n fill with "N"\n By default if no columns are provided in any of the options the following is assumed\n All numericals are filled with mean\n All categoricals and dates are filled with Mode\n All strings are filled with N\n\n skip_cols is for columns you want to manually handle with a different strategy and so should not be messed with in this run\n returns None. Is an inplace function\n ' l = _make_strategy_lists(data, mean_cols, median_cols, mode_cols, n_cols, skip_cols) for (i, strategy) in enumerate(l): for col in strategy: val = None if (i == 0): val = data[col].mean() elif (i == 1): val = data[col].median() elif (i == 2): val = data[col].mode() elif (i == 3): val = 'N' if (i == 4): continue else: data[col].fillna(val, inplace=True) return
Generic handling strategy for Nan entries in columns with 3 strategies fill with mean, fill with mode fill with "N" By default if no columns are provided in any of the options the following is assumed All numericals are filled with mean All categoricals and dates are filled with Mode All strings are filled with N skip_cols is for columns you want to manually handle with a different strategy and so should not be messed with in this run returns None. Is an inplace function
KagglePipeline/preprocessing/cleanup/NanCleanup.py
handle_nan_entries
DhananjayAshok/KagglePipeline
0
python
def handle_nan_entries(data, mean_cols=[], median_cols=[], mode_cols=[], n_cols=[], skip_cols=[]): '\n Generic handling strategy for Nan entries in columns with 3 strategies\n fill with mean, \n fill with mode\n fill with "N"\n By default if no columns are provided in any of the options the following is assumed\n All numericals are filled with mean\n All categoricals and dates are filled with Mode\n All strings are filled with N\n\n skip_cols is for columns you want to manually handle with a different strategy and so should not be messed with in this run\n returns None. Is an inplace function\n ' l = _make_strategy_lists(data, mean_cols, median_cols, mode_cols, n_cols, skip_cols) for (i, strategy) in enumerate(l): for col in strategy: val = None if (i == 0): val = data[col].mean() elif (i == 1): val = data[col].median() elif (i == 2): val = data[col].mode() elif (i == 3): val = 'N' if (i == 4): continue else: data[col].fillna(val, inplace=True) return
def handle_nan_entries(data, mean_cols=[], median_cols=[], mode_cols=[], n_cols=[], skip_cols=[]): '\n Generic handling strategy for Nan entries in columns with 3 strategies\n fill with mean, \n fill with mode\n fill with "N"\n By default if no columns are provided in any of the options the following is assumed\n All numericals are filled with mean\n All categoricals and dates are filled with Mode\n All strings are filled with N\n\n skip_cols is for columns you want to manually handle with a different strategy and so should not be messed with in this run\n returns None. Is an inplace function\n ' l = _make_strategy_lists(data, mean_cols, median_cols, mode_cols, n_cols, skip_cols) for (i, strategy) in enumerate(l): for col in strategy: val = None if (i == 0): val = data[col].mean() elif (i == 1): val = data[col].median() elif (i == 2): val = data[col].mode() elif (i == 3): val = 'N' if (i == 4): continue else: data[col].fillna(val, inplace=True) return<|docstring|>Generic handling strategy for Nan entries in columns with 3 strategies fill with mean, fill with mode fill with "N" By default if no columns are provided in any of the options the following is assumed All numericals are filled with mean All categoricals and dates are filled with Mode All strings are filled with N skip_cols is for columns you want to manually handle with a different strategy and so should not be messed with in this run returns None. Is an inplace function<|endoftext|>
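A toy run of `handle_nan_entries`, assuming it and its private `_make_strategy_lists` helper are imported from the NanCleanup module shown above; the frame is hypothetical.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'age': [20.0, np.nan, 40.0], 'city': ['NY', 'NY', None]})
handle_nan_entries(df, mean_cols=['age'], n_cols=['city'])  # fills in place
assert df['age'].iloc[1] == 30.0    # mean of 20 and 40
assert df['city'].iloc[2] == 'N'    # constant-'N' fill
# Caveat: the mode strategy passes data[col].mode() (a Series) to fillna,
# which pandas aligns by index; .mode()[0] would be the safer scalar.
```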
79072a3d33efd08916a38354df9f6866ad4fea44fa10306940c20035e061042e
def handle_test_nan_entries(train, test, mean_cols=[], median_cols=[], mode_cols=[], n_cols=[], skip_cols=[]): '\n Same as handle nan entries but meant for testing data, this will use the mode/mean of the training data to fill testing data to ensure integrity of statistics\n Do this step before you encode the training data in order to ensure that the guessing of numericals, categoricals, strings and dates works\n\n Generic handling strategy for Nan entries in columns with 3 strategies\n fill with mean, \n fill with mode\n fill with "N"\n By default if no columns are provided in any of the options the following is assumed\n All numericals are filled with mean\n All categoricals and dates are filled with Mode\n All strings are filled with N\n\n skip_cols is for columns you want to manually handle with a different strategy and so should not be messed with in this run\n returns None. Is an inplace function\n ' l = _make_strategy_lists(train, mean_cols, median_cols, mode_cols, n_cols, skip_cols) for (i, strategy) in enumerate(l): for col in strategy: val = None if (i == 0): val = train[col].mean() elif (i == 1): val = train[col].median() elif (i == 2): val = train[col].mode() elif (i == 3): val = 'N' test[col].fillna(val, inplace=True) return
Same as handle nan entries but meant for testing data, this will use the mode/mean of the training data to fill testing data to ensure integrity of statistics Do this step before you encode the training data in order to ensure that the guessing of numericals, categoricals, strings and dates works Generic handling strategy for Nan entries in columns with 3 strategies fill with mean, fill with mode fill with "N" By default if no columns are provided in any of the options the following is assumed All numericals are filled with mean All categoricals and dates are filled with Mode All strings are filled with N skip_cols is for columns you want to manually handle with a different strategy and so should not be messed with in this run returns None. Is an inplace function
KagglePipeline/preprocessing/cleanup/NanCleanup.py
handle_test_nan_entries
DhananjayAshok/KagglePipeline
0
python
def handle_test_nan_entries(train, test, mean_cols=[], median_cols=[], mode_cols=[], n_cols=[], skip_cols=[]): '\n Same as handle nan entries but meant for testing data, this will use the mode/mean of the training data to fill testing data to ensure integrity of statistics\n Do this step before you encode the training data in order to ensure that the guessing of numericals, categoricals, strings and dates works\n\n Generic handling strategy for Nan entries in columns with 3 strategies\n fill with mean, \n fill with mode\n fill with "N"\n By default if no columns are provided in any of the options the following is assumed\n All numericals are filled with mean\n All categoricals and dates are filled with Mode\n All strings are filled with N\n\n skip_cols is for columns you want to manually handle with a different strategy and so should not be messed with in this run\n returns None. Is an inplace function\n ' l = _make_strategy_lists(train, mean_cols, median_cols, mode_cols, n_cols, skip_cols) for (i, strategy) in enumerate(l): for col in strategy: val = None if (i == 0): val = train[col].mean() elif (i == 1): val = train[col].median() elif (i == 2): val = train[col].mode() elif (i == 3): val = 'N' test[col].fillna(val, inplace=True) return
def handle_test_nan_entries(train, test, mean_cols=[], median_cols=[], mode_cols=[], n_cols=[], skip_cols=[]): '\n Same as handle nan entries but meant for testing data, this will use the mode/mean of the training data to fill testing data to ensure integrity of statistics\n Do this step before you encode the training data in order to ensure that the guessing of numericals, categoricals, strings and dates works\n\n Generic handling strategy for Nan entries in columns with 3 strategies\n fill with mean, \n fill with mode\n fill with "N"\n By default if no columns are provided in any of the options the following is assumed\n All numericals are filled with mean\n All categoricals and dates are filled with Mode\n All strings are filled with N\n\n skip_cols is for columns you want to manually handle with a different strategy and so should not be messed with in this run\n returns None. Is an inplace function\n ' l = _make_strategy_lists(train, mean_cols, median_cols, mode_cols, n_cols, skip_cols) for (i, strategy) in enumerate(l): for col in strategy: val = None if (i == 0): val = train[col].mean() elif (i == 1): val = train[col].median() elif (i == 2): val = train[col].mode() elif (i == 3): val = 'N' test[col].fillna(val, inplace=True) return<|docstring|>Same as handle nan entries but meant for testing data, this will use the mode/mean of the training data to fill testing data to ensure integrity of statistics Do this step before you encode the training data in order to ensure that the guessing of numericals, categoricals, strings and dates works Generic handling strategy for Nan entries in columns with 3 strategies fill with mean, fill with mode fill with "N" By default if no columns are provided in any of the options the following is assumed All numericals are filled with mean All categoricals and dates are filled with Mode All strings are filled with N skip_cols is for columns you want to manually handle with a different strategy and so should not be messed with in this run returns None. Is an inplace function<|endoftext|>
adf6e74fca8358ff762af3926623a6d6c5eec5c2aa69abcaa673d5fb000cde86
def categorical_fillna(data, col, value='N'): '\n Fills a categorical features Nan Values with the provided value, returns a new column. \n The pandas function sometimes does not work\n ' def helper(x): if isinstance(x, float): return value return x return data[col].apply(helper)
Fills a categorical feature's NaN values with the provided value; returns a new column. The pandas function sometimes does not work
KagglePipeline/preprocessing/cleanup/NanCleanup.py
categorical_fillna
DhananjayAshok/KagglePipeline
0
python
def categorical_fillna(data, col, value='N'): '\n Fills a categorical features Nan Values with the provided value, returns a new column. \n The pandas function sometimes does not work\n ' def helper(x): if isinstance(x, float): return value return x return data[col].apply(helper)
def categorical_fillna(data, col, value='N'): '\n Fills a categorical features Nan Values with the provided value, returns a new column. \n The pandas function sometimes does not work\n ' def helper(x): if isinstance(x, float): return value return x return data[col].apply(helper)<|docstring|>Fills a categorical features Nan Values with the provided value, returns a new column. The pandas function sometimes does not work<|endoftext|>
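`categorical_fillna` works because `float('nan')` is the only float in an otherwise string-valued column, so `isinstance(x, float)` flags exactly the missing entries. A self-contained demonstration, plus the obvious caution:

```python
import numpy as np
import pandas as pd

s = pd.Series(['a', np.nan, 'b'])
filled = s.apply(lambda x: 'N' if isinstance(x, float) else x)
assert list(filled) == ['a', 'N', 'b']
# Caution: in a column that legitimately mixes floats with strings,
# real float values would be overwritten too, not just NaN.
```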
2b3fdd9873cf1c0a00b205eee0112e55467164daddf34624212c2b9542b905f1
def drop_nan_columns(data, ratio=1.0): "\n The ratio parameter (0.0<=ratio<1.0) lets you drop columns which has 'ratio'% of nans. (i.e if ratio is 0.8 then all columns with 80% or more entries being nan get dropped)\n Returns a new dataframe\n " col_list = [] na_df = data.isna() total_size = na_df.shape[0] for col in na_df: a = na_df[col].value_counts() if (False not in a.keys()): col_list.append(col) elif (True not in a.keys()): pass elif ((a[True] / total_size) >= ratio): col_list.append(col) print(f'{len(col_list)} columns dropped- {col_list}') return data.drop(col_list, axis=1)
The ratio parameter (0.0<=ratio<1.0) lets you drop columns which has 'ratio'% of nans. (i.e if ratio is 0.8 then all columns with 80% or more entries being nan get dropped) Returns a new dataframe
KagglePipeline/preprocessing/cleanup/NanCleanup.py
drop_nan_columns
DhananjayAshok/KagglePipeline
0
python
def drop_nan_columns(data, ratio=1.0): "\n The ratio parameter (0.0<=ratio<1.0) lets you drop columns which has 'ratio'% of nans. (i.e if ratio is 0.8 then all columns with 80% or more entries being nan get dropped)\n Returns a new dataframe\n " col_list = [] na_df = data.isna() total_size = na_df.shape[0] for col in na_df: a = na_df[col].value_counts() if (False not in a.keys()): col_list.append(col) elif (True not in a.keys()): pass elif ((a[True] / total_size) >= ratio): col_list.append(col) print(f'{len(col_list)} columns dropped- {col_list}') return data.drop(col_list, axis=1)
def drop_nan_columns(data, ratio=1.0): "\n The ratio parameter (0.0<=ratio<1.0) lets you drop columns which has 'ratio'% of nans. (i.e if ratio is 0.8 then all columns with 80% or more entries being nan get dropped)\n Returns a new dataframe\n " col_list = [] na_df = data.isna() total_size = na_df.shape[0] for col in na_df: a = na_df[col].value_counts() if (False not in a.keys()): col_list.append(col) elif (True not in a.keys()): pass elif ((a[True] / total_size) >= ratio): col_list.append(col) print(f'{len(col_list)} columns dropped- {col_list}') return data.drop(col_list, axis=1)<|docstring|>The ratio parameter (0.0<=ratio<1.0) lets you drop columns which has 'ratio'% of nans. (i.e if ratio is 0.8 then all columns with 80% or more entries being nan get dropped) Returns a new dataframe<|endoftext|>
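A quick usage sketch for `drop_nan_columns` (assumes the function above is in scope; the frame is hypothetical):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    'keep':   [1.0, 2.0, np.nan, 4.0],        # 25% NaN -> kept at ratio=0.5
    'mostly': [np.nan, np.nan, np.nan, 4.0],  # 75% NaN -> dropped
})
cleaned = drop_nan_columns(df, ratio=0.5)     # prints the dropped columns
assert list(cleaned.columns) == ['keep']
```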
6a5e1c76c77b208da1abcb175b89875131ffa360d79e7b9ba9d54bbe705c30c6
def xgb_fillna(train, test, col_to_fill, classification, target_col, real_test=None): '\n Uses an XGB Regressor or Classifier to populate the Nan values of the dataframe.\n Returns a list with train, test, real_test new dataframes with the Nans filled. or if real_test is none just returns train and test\n\n -> Do this before splitting\n -> Every other column in the dataframes should be ready to put into an XGBoost Model\n -> If the column to fill is a classification / categorical then you must ensure that the non NaN values are numerically encoded and the NaN values are NaN (Not some constant N etc)\n -> Assumes there are more than 2 columns in data and also assumes that the ordering of columns is the same in train, test and real_test\n -> \n ' import pandas as pd l = [train, test] if (real_test is not None): l.append(real_test) for d in range(len(l)): l[d] = l[d][_col_list_adapter(l[d].columns.tolist(), col_to_fill, target_col)] model = None if classification: from xgboost import XGBClassifier model = XGBClassifier() else: from xgboost import XGBRegressor model = XGBRegressor() temptotal = pd.concat(l, join='inner') temptrain = temptotal.loc[temptotal[col_to_fill].notnull()] y = temptrain[col_to_fill] x = temptrain.drop(col_to_fill, axis=1) model.fit(x, y) for d in range(len(l)): nans = l[d].loc[l[d][col_to_fill].isnull()] nanx = nans.drop(col_to_fill, axis=1) try: nanx = nanx.drop(target_col, axis=1) except: pass l[d].loc[(l[d][col_to_fill].isnull(), col_to_fill)] = model.predict(nanx) return l
Uses an XGB Regressor or Classifier to populate the Nan values of the dataframe. Returns a list with train, test, real_test new dataframes with the Nans filled. or if real_test is none just returns train and test -> Do this before splitting -> Every other column in the dataframes should be ready to put into an XGBoost Model -> If the column to fill is a classification / categorical then you must ensure that the non NaN values are numerically encoded and the NaN values are NaN (Not some constant N etc) -> Assumes there are more than 2 columns in data and also assumes that the ordering of columns is the same in train, test and real_test ->
KagglePipeline/preprocessing/cleanup/NanCleanup.py
xgb_fillna
DhananjayAshok/KagglePipeline
0
python
def xgb_fillna(train, test, col_to_fill, classification, target_col, real_test=None): '\n Uses an XGB Regressor or Classifier to populate the Nan values of the dataframe.\n Returns a list with train, test, real_test new dataframes with the Nans filled. or if real_test is none just returns train and test\n\n -> Do this before splitting\n -> Every other column in the dataframes should be ready to put into an XGBoost Model\n -> If the column to fill is a classification / categorical then you must ensure that the non NaN values are numerically encoded and the NaN values are NaN (Not some constant N etc)\n -> Assumes there are more than 2 columns in data and also assumes that the ordering of columns is the same in train, test and real_test\n -> \n ' import pandas as pd l = [train, test] if (real_test is not None): l.append(real_test) for d in range(len(l)): l[d] = l[d][_col_list_adapter(l[d].columns.tolist(), col_to_fill, target_col)] model = None if classification: from xgboost import XGBClassifier model = XGBClassifier() else: from xgboost import XGBRegressor model = XGBRegressor() temptotal = pd.concat(l, join='inner') temptrain = temptotal.loc[temptotal[col_to_fill].notnull()] y = temptrain[col_to_fill] x = temptrain.drop(col_to_fill, axis=1) model.fit(x, y) for d in range(len(l)): nans = l[d].loc[l[d][col_to_fill].isnull()] nanx = nans.drop(col_to_fill, axis=1) try: nanx = nanx.drop(target_col, axis=1) except: pass l[d].loc[(l[d][col_to_fill].isnull(), col_to_fill)] = model.predict(nanx) return l
def xgb_fillna(train, test, col_to_fill, classification, target_col, real_test=None): '\n Uses an XGB Regressor or Classifier to populate the Nan values of the dataframe.\n Returns a list with train, test, real_test new dataframes with the Nans filled. or if real_test is none just returns train and test\n\n -> Do this before splitting\n -> Every other column in the dataframes should be ready to put into an XGBoost Model\n -> If the column to fill is a classification / categorical then you must ensure that the non NaN values are numerically encoded and the NaN values are NaN (Not some constant N etc)\n -> Assumes there are more than 2 columns in data and also assumes that the ordering of columns is the same in train, test and real_test\n -> \n ' import pandas as pd l = [train, test] if (real_test is not None): l.append(real_test) for d in range(len(l)): l[d] = l[d][_col_list_adapter(l[d].columns.tolist(), col_to_fill, target_col)] model = None if classification: from xgboost import XGBClassifier model = XGBClassifier() else: from xgboost import XGBRegressor model = XGBRegressor() temptotal = pd.concat(l, join='inner') temptrain = temptotal.loc[temptotal[col_to_fill].notnull()] y = temptrain[col_to_fill] x = temptrain.drop(col_to_fill, axis=1) model.fit(x, y) for d in range(len(l)): nans = l[d].loc[l[d][col_to_fill].isnull()] nanx = nans.drop(col_to_fill, axis=1) try: nanx = nanx.drop(target_col, axis=1) except: pass l[d].loc[(l[d][col_to_fill].isnull(), col_to_fill)] = model.predict(nanx) return l<|docstring|>Uses an XGB Regressor or Classifier to populate the Nan values of the dataframe. Returns a list with train, test, real_test new dataframes with the Nans filled. or if real_test is none just returns train and test -> Do this before splitting -> Every other column in the dataframes should be ready to put into an XGBoost Model -> If the column to fill is a classification / categorical then you must ensure that the non NaN values are numerically encoded and the NaN values are NaN (Not some constant N etc) -> Assumes there are more than 2 columns in data and also assumes that the ordering of columns is the same in train, test and real_test -><|endoftext|>
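A call-shape sketch for `xgb_fillna`; it needs `xgboost` installed, the frames and column names are hypothetical, and, as the docstring requires, every column other than the one being filled is assumed to be already numeric.

```python
import numpy as np
import pandas as pd
# Assumes xgb_fillna (and its private _col_list_adapter helper) are
# imported from the NanCleanup module above.

train = pd.DataFrame({'age': [20.0, np.nan, 40.0, 30.0],
                      'income': [1.0, 2.0, 3.0, 2.5],
                      'price': [10.0, 20.0, 30.0, 25.0]})
test = pd.DataFrame({'age': [np.nan, 25.0],
                     'income': [1.5, 2.0],
                     'price': [12.0, 18.0]})

train_filled, test_filled = xgb_fillna(
    train, test, col_to_fill='age',
    classification=False,    # numeric column, so an XGBRegressor is fitted
    target_col='price')      # dropped from the features at predict time
assert train_filled['age'].notnull().all()
```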
200fd8cc68612a9995e38dec712eee4fddc199ac4d5cdad98b6f460dd46bdccd
@pytest.fixture(scope='session') def ip_file(tmpdir_factory) -> Path: '\n Generates the temporary ip file\n ' tmp_file = tmpdir_factory.mktemp(DEFAULT_DIR).join(TEST_IP_FILE) tmp_file.write('8.8.8.8') return tmp_file
Generates the temporary ip file
src/tests/test_ui.py
ip_file
t-a-y-l-o-r/checkip
0
python
@pytest.fixture(scope='session') def ip_file(tmpdir_factory) -> Path: '\n \n ' tmp_file = tmpdir_factory.mktemp(DEFAULT_DIR).join(TEST_IP_FILE) tmp_file.write('8.8.8.8') return tmp_file
@pytest.fixture(scope='session') def ip_file(tmpdir_factory) -> Path: '\n \n ' tmp_file = tmpdir_factory.mktemp(DEFAULT_DIR).join(TEST_IP_FILE) tmp_file.write('8.8.8.8') return tmp_file<|docstring|>Generates the temporary ip file<|endoftext|>
8211d31ea2aa96f98409ada77d7e2336dafb062f5a4859e31bf2c0eedfc64fd0
@pytest.fixture def ui_obj() -> ui.UI: '\n A simple ui test object\n ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8']) ui_obj = ui.UI(conf) return ui_obj
A simple ui test object
src/tests/test_ui.py
ui_obj
t-a-y-l-o-r/checkip
0
python
@pytest.fixture def ui_obj() -> ui.UI: '\n \n ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8']) ui_obj = ui.UI(conf) return ui_obj
@pytest.fixture def ui_obj() -> ui.UI: '\n \n ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8']) ui_obj = ui.UI(conf) return ui_obj<|docstring|>A simple ui test object<|endoftext|>
bf41d1745a02efb476d240e00073d04dc410cf348f149a422fbef1426ce03f0b
def test_argument_setup(ui_obj) -> None: '\n Ensures the ui.UI class constructs the correct arguments\n ' args = set(ui_obj.args.keys()) message = ''.join([f'EXPECTED: {UI_EXPECTED_ARGS} does not match ', f'ACTUAL: {args} for UI(): {ui_obj}']) assert (UI_EXPECTED_ARGS == args), message
Ensures the ui.UI class constructs the correct arguments
src/tests/test_ui.py
test_argument_setup
t-a-y-l-o-r/checkip
0
python
def test_argument_setup(ui_obj) -> None: '\n \n ' args = set(ui_obj.args.keys()) message = ''.join([f'EXPECTED: {UI_EXPECTED_ARGS} does not match ', f'ACTUAL: {args} for UI(): {ui_obj}']) assert (UI_EXPECTED_ARGS == args), message
def test_argument_setup(ui_obj) -> None: '\n \n ' args = set(ui_obj.args.keys()) message = ''.join([f'EXPECTED: {UI_EXPECTED_ARGS} does not match ', f'ACTUAL: {args} for UI(): {ui_obj}']) assert (UI_EXPECTED_ARGS == args), message<|docstring|>Ensures the ui.UI class constructs the correct arguments<|endoftext|>
bc075a5df5f2b1fc814fc82e88e64f601de1b9b601a022256c22d03d8bcebc84
def test_ip_already_set(ui_obj) -> None: '\n Tests the base case for the ip property\n Ensuring the inner value is always provided when set\n ' ip_list = ['0.0.0.0', '1.1.1.1', '8.8.8.8', '127.0.0.1'] for ip in ip_list: ui_obj._ip = ip message = ''.join([f'EXPECTED: {ip} does not match ', f'ACTUAL: {ui_obj.ip} for UI(): {ui_obj}']) assert (ui_obj.ip == ip), message
Tests the base case for the ip property Ensuring the inner value is always provided when set
src/tests/test_ui.py
test_ip_already_set
t-a-y-l-o-r/checkip
0
python
def test_ip_already_set(ui_obj) -> None: '\n Tests the base case for the ip property\n Ensuring the inner value is always provided when set\n ' ip_list = ['0.0.0.0', '1.1.1.1', '8.8.8.8', '127.0.0.1'] for ip in ip_list: ui_obj._ip = ip message = ''.join([f'EXPECTED: {ip} does not match ', f'ACTUAL: {ui_obj.ip} for UI(): {ui_obj}']) assert (ui_obj.ip == ip), message
def test_ip_already_set(ui_obj) -> None: '\n Tests the base case for the ip property\n Ensuring the inner value is always provided when set\n ' ip_list = ['0.0.0.0', '1.1.1.1', '8.8.8.8', '127.0.0.1'] for ip in ip_list: ui_obj._ip = ip message = ''.join([f'EXPECTED: {ip} does not match ', f'ACTUAL: {ui_obj.ip} for UI(): {ui_obj}']) assert (ui_obj.ip == ip), message<|docstring|>Tests the base case for the ip property Ensuring the inner value is always provided when set<|endoftext|>
c8669b9ac7ed90e3dce31967c33842b5ddde9c3c2d35aca06c3d2caaee370a76
def test_ip_raw_ip() -> None: '\n Ensures that a raw ip address from the `user` is\n identical to the stored property\n ' ip_list = ['0.0.0.0', '1.1.1.1', '8.8.8.8', '127.0.0.1'] for ip in ip_list: conf = ui.UI_Config(testing=True, args=['-ip', ip]) ui_obj = ui.UI(config=conf) message = ''.join([f'EXPECTED: {ip} does not match ', f'ACTUAL: {ui_obj.ip} for UI(): {ui_obj}']) assert (ui_obj.ip == ip), message
Ensures that a raw ip address from the `user` is identical to the stored property
src/tests/test_ui.py
test_ip_raw_ip
t-a-y-l-o-r/checkip
0
python
def test_ip_raw_ip() -> None: '\n Ensures that a raw ip address from the `user` is\n identical to the stored property\n ' ip_list = ['0.0.0.0', '1.1.1.1', '8.8.8.8', '127.0.0.1'] for ip in ip_list: conf = ui.UI_Config(testing=True, args=['-ip', ip]) ui_obj = ui.UI(config=conf) message = ''.join([f'EXPECTED: {ip} does not match ', f'ACTUAL: {ui_obj.ip} for UI(): {ui_obj}']) assert (ui_obj.ip == ip), message
def test_ip_raw_ip() -> None: '\n Ensures that a raw ip address from the `user` is\n identical to the stored property\n ' ip_list = ['0.0.0.0', '1.1.1.1', '8.8.8.8', '127.0.0.1'] for ip in ip_list: conf = ui.UI_Config(testing=True, args=['-ip', ip]) ui_obj = ui.UI(config=conf) message = ''.join([f'EXPECTED: {ip} does not match ', f'ACTUAL: {ui_obj.ip} for UI(): {ui_obj}']) assert (ui_obj.ip == ip), message<|docstring|>Ensures that a raw ip address from the `user` is identical to the stored property<|endoftext|>
ab42c35727c3f96cd24726237deba39feeb50c85590566d7c316f6900321b7cb
def test_ip_from_host() -> None: '\n Ensures the ip property is identical to the\n ip resolved for a user input url\n ' host_list = ['google.com', 'nmap.com', 'github.com', 'gitlab.com'] for host in host_list: conf = ui.UI_Config(testing=True, args=['--host', host]) ip = socket.gethostbyname(host) ui_obj = ui.UI(config=conf) message = ''.join([f'EXPECTED: {ip} does not match ', f'ACTUAL: {ui_obj.ip} for UI(): {ui_obj}']) assert (ui_obj.ip == ip), message
Ensures the ip property is identical to the ip resolved for a user input url
src/tests/test_ui.py
test_ip_from_host
t-a-y-l-o-r/checkip
0
python
def test_ip_from_host() -> None: '\n Ensures the ip property is identical to the\n ip resolved for a user input url\n ' host_list = ['google.com', 'nmap.com', 'github.com', 'gitlab.com'] for host in host_list: conf = ui.UI_Config(testing=True, args=['--host', host]) ip = socket.gethostbyname(host) ui_obj = ui.UI(config=conf) message = ''.join([f'EXPECTED: {ip} does not match ', f'ACTUAL: {ui_obj.ip} for UI(): {ui_obj}']) assert (ui_obj.ip == ip), message
def test_ip_from_host() -> None: '\n Ensures the ip property is identical to the\n ip resolved for a user input url\n ' host_list = ['google.com', 'nmap.com', 'github.com', 'gitlab.com'] for host in host_list: conf = ui.UI_Config(testing=True, args=['--host', host]) ip = socket.gethostbyname(host) ui_obj = ui.UI(config=conf) message = ''.join([f'EXPECTED: {ip} does not match ', f'ACTUAL: {ui_obj.ip} for UI(): {ui_obj}']) assert (ui_obj.ip == ip), message<|docstring|>Ensures the ip property is identical to the ip resolved for a user input url<|endoftext|>
ca1e97e0f74517cc224d2d93906b852dbdf6e1c26ebc5d3c63ea1216a2da140e
def test_ip_from_host_failure() -> None: '\n Ensures the ip property throws an error\n ' host_list = ['google///.com', 'nmap.comasdasldjnhasd', 'asdhajlsdnljsagithub.com'] for host in host_list: conf = ui.UI_Config(testing=True, args=['--host', host]) ui_obj = ui.UI(config=conf) with pytest.raises(ValueError): ip = ui_obj.ip
Ensures the ip property throws an error
src/tests/test_ui.py
test_ip_from_host_failure
t-a-y-l-o-r/checkip
0
python
def test_ip_from_host_failure() -> None: '\n \n ' host_list = ['google///.com', 'nmap.comasdasldjnhasd', 'asdhajlsdnljsagithub.com'] for host in host_list: conf = ui.UI_Config(testing=True, args=['--host', host]) ui_obj = ui.UI(config=conf) with pytest.raises(ValueError): ip = ui_obj.ip
def test_ip_from_host_failure() -> None: '\n \n ' host_list = ['google///.com', 'nmap.comasdasldjnhasd', 'asdhajlsdnljsagithub.com'] for host in host_list: conf = ui.UI_Config(testing=True, args=['--host', host]) ui_obj = ui.UI(config=conf) with pytest.raises(ValueError): ip = ui_obj.ip<|docstring|>Ensures the ip property throws an error<|endoftext|>
31251fdf5230975ba5dc293cfa80c5c612611dc20b5a5ba7b693720f742fc010
def test_ip_no_ip_no_host(ui_obj) -> None: '\n Ensures that `None` is returned when there is no\n appropriate ip / host found\n ' expected = None ip_flag = ui.UI_Args.IP.value host_flag = ui.UI_Args.HOST.value ui_obj.args ui_obj.args[ip_flag] = None ui_obj.args[host_flag] = None actual = ui_obj.ip message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that `None` is returned when there is no appropriate ip / host found
src/tests/test_ui.py
test_ip_no_ip_no_host
t-a-y-l-o-r/checkip
0
python
def test_ip_no_ip_no_host(ui_obj) -> None: '\n Ensures that `None` is returned when there is no\n appropriate ip / host found\n ' expected = None ip_flag = ui.UI_Args.IP.value host_flag = ui.UI_Args.HOST.value ui_obj.args ui_obj.args[ip_flag] = None ui_obj.args[host_flag] = None actual = ui_obj.ip message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_ip_no_ip_no_host(ui_obj) -> None: '\n Ensures that `None` is returned when there is no\n appropriate ip / host found\n ' expected = None ip_flag = ui.UI_Args.IP.value host_flag = ui.UI_Args.HOST.value ui_obj.args ui_obj.args[ip_flag] = None ui_obj.args[host_flag] = None actual = ui_obj.ip message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that `None` is returned when there is no appropriate ip / host found<|endoftext|>
796494e2fe45d4e84f630a9a43574497fea2e74d257183666feacb7a38b942ee
def test_ip_file_set(ui_obj, ip_file) -> None: '\n Ensures that the correct argument value is returned when\n the inner argument is already set\n ' file_str = str(ip_file) ui_obj._ip_file = file_str actual = ui_obj.ip_file expected = file_str message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the correct argument value is returned when the inner argument is already set
src/tests/test_ui.py
test_ip_file_set
t-a-y-l-o-r/checkip
0
python
def test_ip_file_set(ui_obj, ip_file) -> None: '\n Ensures that the correct argument value is returned when\n the inner argument is already set\n ' file_str = str(ip_file) ui_obj._ip_file = file_str actual = ui_obj.ip_file expected = file_str message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_ip_file_set(ui_obj, ip_file) -> None: '\n Ensures that the correct argument value is returned when\n the inner argument is already set\n ' file_str = str(ip_file) ui_obj._ip_file = file_str actual = ui_obj.ip_file expected = file_str message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the correct argument value is returned when the inner argument is already set<|endoftext|>
d067b712656cb4ae2d2cc98f4f5430ec9754726ea7c9c05fd0a73014c86106d9
def test_ip_file_no_file(ui_obj) -> None: '\n Ensures that the correct argument value is returned when\n the inner argument is empty\n ' actual = ui_obj.ip_file expected = None message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the correct argument value is returned when the inner argument is empty
src/tests/test_ui.py
test_ip_file_no_file
t-a-y-l-o-r/checkip
0
python
def test_ip_file_no_file(ui_obj) -> None: '\n Ensures that the correct argument value is returned when\n the inner argument is empty\n ' actual = ui_obj.ip_file expected = None message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_ip_file_no_file(ui_obj) -> None: '\n Ensures that the correct argument value is returned when\n the inner argument is empty\n ' actual = ui_obj.ip_file expected = None message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the correct argument value is returned when the inner argument is empty<|endoftext|>
9ed6b2c26d680506bac131f637b2109878d4700cf6a4628e16ae70d83899118c
def test_ip_file_has_file(ip_file) -> None: '\n Ensures that the correct argument value is returned when\n the file is provided properly\n ' file_str = str(ip_file) conf = ui.UI_Config(testing=True, args=['--input-file', str(file_str)]) ui_obj = ui.UI(config=conf) actual = ui_obj.ip_file expected = file_str message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the correct argument value is returned when the file is provided properly
src/tests/test_ui.py
test_ip_file_has_file
t-a-y-l-o-r/checkip
0
python
def test_ip_file_has_file(ip_file) -> None: '\n Ensures that the correct argument value is returned when\n the file is provided properly\n ' file_str = str(ip_file) conf = ui.UI_Config(testing=True, args=['--input-file', str(file_str)]) ui_obj = ui.UI(config=conf) actual = ui_obj.ip_file expected = file_str message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_ip_file_has_file(ip_file) -> None: '\n Ensures that the correct argument value is returned when\n the file is provided properly\n ' file_str = str(ip_file) conf = ui.UI_Config(testing=True, args=['--input-file', str(file_str)]) ui_obj = ui.UI(config=conf) actual = ui_obj.ip_file expected = file_str message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the correct argument value is returned when the file is provided properly<|endoftext|>
5909cc4842a5a1df6bed381ff6953271ea70562ad4f9d6c35055adafd7169426
def test_ip_file_invalid_file() -> None: '\n Ensures that the correct argument value is returned when\n the file is invalid\n ' conf = ui.UI_Config(testing=True, args=['--input-file', 'hasldjhalsjdn']) ui_obj = ui.UI(config=conf) actual = ui_obj.ip_file expected = None message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the correct argument value is returned when the file is invalid
src/tests/test_ui.py
test_ip_file_invalid_file
t-a-y-l-o-r/checkip
0
python
def test_ip_file_invalid_file() -> None: '\n Ensures that the correct argument value is returned when\n the file is invalid\n ' conf = ui.UI_Config(testing=True, args=['--input-file', 'hasldjhalsjdn']) ui_obj = ui.UI(config=conf) actual = ui_obj.ip_file expected = None message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_ip_file_invalid_file() -> None: '\n Ensures that the correct argument value is returned when\n the file is invalid\n ' conf = ui.UI_Config(testing=True, args=['--input-file', 'hasldjhalsjdn']) ui_obj = ui.UI(config=conf) actual = ui_obj.ip_file expected = None message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the correct argument value is returned when the file is invalid<|endoftext|>
dd99a72d9b171654fcb2401e2f3f2f93287f0d63aee4bd39f610e3cd33749cb3
def test_ui_args_unique() -> None: '\n Ensures that all values for UI_Args are unique\n ' count_of_args: Dict[(Any, int)] = {} for arg in ui.UI_Args: message = ''.join([f'WARNING: {arg} was found in UI_Args more than once']) assert (arg not in count_of_args), message count_of_args.setdefault(arg, 1)
Ensures that all values for UI_Args are unique
src/tests/test_ui.py
test_ui_args_unique
t-a-y-l-o-r/checkip
0
python
def test_ui_args_unique() -> None: '\n \n ' count_of_args: Dict[(Any, int)] = {} for arg in ui.UI_Args: message = ''.join([f'WARNING: {arg} was found in UI_Args more than once']) assert (arg not in count_of_args), message count_of_args.setdefault(arg, 1)
def test_ui_args_unique() -> None: '\n \n ' count_of_args: Dict[(Any, int)] = {} for arg in ui.UI_Args: message = ''.join([f'WARNING: {arg} was found in UI_Args more than once']) assert (arg not in count_of_args), message count_of_args.setdefault(arg, 1)<|docstring|>Ensures that all values for UI_Args are unique<|endoftext|>
2cf01ff5516605f474a830f56cf233c6e69fcb64d5c90780b297f4f510b4d498
def test_ui_args_match_ui_args(ui_obj) -> None: '\n Ensures that the UI_Args always exist within UI.args\n ' enum_args: Dict[(Any, int)] = {} for arg in ui.UI_Args: value = arg.value enum_args[value] = (enum_args.setdefault(value, 0) + 1) obj_args: Dict[(Any, int)] = {} for arg in ui_obj.args: obj_args[arg] = (obj_args.setdefault(arg, 0) + 1) message = ''.join([f'EXPECTED: {enum_args} does not match ', f'ACTUAL: {obj_args} for UI(): {ui_obj}']) assert (enum_args == obj_args), message
Ensures that the UI_Args always exist within UI.args
src/tests/test_ui.py
test_ui_args_match_ui_args
t-a-y-l-o-r/checkip
0
python
def test_ui_args_match_ui_args(ui_obj) -> None: '\n \n ' enum_args: Dict[(Any, int)] = {} for arg in ui.UI_Args: value = arg.value enum_args[value] = (enum_args.setdefault(value, 0) + 1) obj_args: Dict[(Any, int)] = {} for arg in ui_obj.args: obj_args[arg] = (obj_args.setdefault(arg, 0) + 1) message = ''.join([f'EXPECTED: {enum_args} does not match ', f'ACTUAL: {obj_args} for UI(): {ui_obj}']) assert (enum_args == obj_args), message
def test_ui_args_match_ui_args(ui_obj) -> None: '\n \n ' enum_args: Dict[(Any, int)] = {} for arg in ui.UI_Args: value = arg.value enum_args[value] = (enum_args.setdefault(value, 0) + 1) obj_args: Dict[(Any, int)] = {} for arg in ui_obj.args: obj_args[arg] = (obj_args.setdefault(arg, 0) + 1) message = ''.join([f'EXPECTED: {enum_args} does not match ', f'ACTUAL: {obj_args} for UI(): {ui_obj}']) assert (enum_args == obj_args), message<|docstring|>Ensures that the UI_Args always exist within UI.args<|endoftext|>
cc25412584c449464bae53eee6a5ff7ea25563783ee847ca18ca405024d6afa6
def test_force_manual(ui_obj) -> None: '\n Ensures that all branches of the ui.force\n execute as expected.\n Providing any existing value if there is one\n ' ui_obj._force = True actual = ui_obj.force expected = True message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message ui_obj._force = False actual = ui_obj.force expected = False message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that all branches of the ui.force execute as expected. Providing any existing value if there is one
src/tests/test_ui.py
test_force_manual
t-a-y-l-o-r/checkip
0
python
def test_force_manual(ui_obj) -> None: '\n    Ensures that all branches of the ui.force\n    execute as expeted.\n    Providing any existing value if there is one\n    ' ui_obj._force = True actual = ui_obj.force expected = True message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message ui_obj._force = False actual = ui_obj.force expected = False message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_force_manual(ui_obj) -> None: '\n    Ensures that all branches of the ui.force\n    execute as expeted.\n    Providing any existing value if there is one\n    ' ui_obj._force = True actual = ui_obj.force expected = True message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message ui_obj._force = False actual = ui_obj.force expected = False message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that all branches of the ui.force execute as expeted. Providing any existing value if there is one<|endoftext|>
8f9cc8f5ac2b1e8b1eab6ec47dd3e89dd88976249e311d1ff913eeadce7079e1
def test_force_args() -> None: '\n Ensures that all branches of the ui.force\n execute as expeted.\n Given the value is NOT already in memory and must be calculated\n ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) actual = ui_obj.force expected = True message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8']) ui_obj = ui.UI(conf) actual = ui_obj.force expected = False message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that all branches of the ui.force execute as expeted. Given the value is NOT already in memory and must be calculated
src/tests/test_ui.py
test_force_args
t-a-y-l-o-r/checkip
0
python
def test_force_args() -> None: '\n    Ensures that all branches of the ui.force\n    execute as expeted.\n    Given the value is NOT already in memory and must be calculated\n    ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) actual = ui_obj.force expected = True message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8']) ui_obj = ui.UI(conf) actual = ui_obj.force expected = False message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_force_args() -> None: '\n    Ensures that all branches of the ui.force\n    execute as expeted.\n    Given the value is NOT already in memory and must be calculated\n    ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) actual = ui_obj.force expected = True message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8']) ui_obj = ui.UI(conf) actual = ui_obj.force expected = False message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that all branches of the ui.force execute as expeted. Given the value is NOT already in memory and must be calculated<|endoftext|>
4638cc35f5580e60c674ce02281ea1b4a3374303a2075bc89d1863199d8e0af9
def test_validate_ip_empty() -> None: '\n Ensures that the ui._validate_ip()\n call returns `False` when a falsey value is provided\n ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) falsey_values = ['', None] expected = False for value in falsey_values: actual = ui_obj._validate_ip(value) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the ui._validate_ip() call returns `False` when a falsey value is provided
src/tests/test_ui.py
test_validate_ip_empty
t-a-y-l-o-r/checkip
0
python
def test_validate_ip_empty() -> None: '\n    Ensures that the ui._validate_ip()\n    call returns `False` when a falsey value is provided\n    ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) falsey_values = ['', None] expected = False for value in falsey_values: actual = ui_obj._validate_ip(value) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_validate_ip_empty() -> None: '\n    Ensures that the ui._validate_ip()\n    call returns `False` when a falsey value is provided\n    ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) falsey_values = ['', None] expected = False for value in falsey_values: actual = ui_obj._validate_ip(value) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the ui._validate_ip() call returns `False` when a falsey value is provided<|endoftext|>
7b85e3c1fff507871ef316d71dc50199b8d7d71bcc8cc8c5cf3753d4b8c2345a
def test_validate_ip_no_match() -> None: '\n Ensures that the ui._validate_ip()\n call returns `False` when a a bad pattern is provided\n ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) bad_values = ['8.0', 'abc', '-1,000'] expected = False for value in bad_values: actual = ui_obj._validate_ip(value) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the ui._validate_ip() call returns `False` when a a bad pattern is provided
src/tests/test_ui.py
test_validate_ip_no_match
t-a-y-l-o-r/checkip
0
python
def test_validate_ip_no_match() -> None: '\n    Ensures that the ui._validate_ip()\n    call returns `False` when a a bad pattern is provided\n    ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) bad_values = ['8.0', 'abc', '-1,000'] expected = False for value in bad_values: actual = ui_obj._validate_ip(value) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_validate_ip_no_match() -> None: '\n    Ensures that the ui._validate_ip()\n    call returns `False` when a a bad pattern is provided\n    ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) bad_values = ['8.0', 'abc', '-1,000'] expected = False for value in bad_values: actual = ui_obj._validate_ip(value) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the ui._validate_ip() call returns `False` when a a bad pattern is provided<|endoftext|>
acbb3483ad545414bdc6d0848ad7419e430256a46f6093664178b7516a1753bf
def test_validate_ip_passes() -> None: '\n Ensures that the ui._validate_ip()\n call returns `True` when a a good pattern is provided\n ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) bad_values = ['8.8.8.8', '127.0.0.1', '192.168.0.1'] expected = True for value in bad_values: actual = ui_obj._validate_ip(value) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the ui._validate_ip() call returns `True` when a a good pattern is provided
src/tests/test_ui.py
test_validate_ip_passes
t-a-y-l-o-r/checkip
0
python
def test_validate_ip_passes() -> None: '\n    Ensures that the ui._validate_ip()\n    call returns `True` when a a good pattern is provided\n    ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) bad_values = ['8.8.8.8', '127.0.0.1', '192.168.0.1'] expected = True for value in bad_values: actual = ui_obj._validate_ip(value) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_validate_ip_passes() -> None: '\n    Ensures that the ui._validate_ip()\n    call returns `True` when a a good pattern is provided\n    ' conf = ui.UI_Config(testing=True, args=['-ip', '8.8.8.8', '--force']) ui_obj = ui.UI(conf) bad_values = ['8.8.8.8', '127.0.0.1', '192.168.0.1'] expected = True for value in bad_values: actual = ui_obj._validate_ip(value) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the ui._validate_ip() call returns `True` when a a good pattern is provided<|endoftext|>
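The three _validate_ip records above cover falsey input, malformed strings, and well-formed addresses, but the validator under test is not part of this dump. A minimal sketch of the kind of check those tests imply — the helper name and regex are assumptions for illustration, not the checkip repository's actual implementation:

import re

# Hypothetical validator consistent with the three tests above: reject '' and
# None, reject non-dotted-quad strings, accept in-range IPv4 addresses.
_IPV4_RE = re.compile(r'^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$')

def validate_ip(candidate) -> bool:
    if not candidate:   # '' and None (test_validate_ip_empty)
        return False
    match = _IPV4_RE.match(candidate)
    if not match:       # '8.0', 'abc', '-1,000' (test_validate_ip_no_match)
        return False
    return all(0 <= int(octet) <= 255 for octet in match.groups())

assert validate_ip('8.8.8.8') and validate_ip('192.168.0.1')
assert not validate_ip('') and not validate_ip('8.0')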
1dc0457f1ec981ddac157882b58657e8f7d8b358131490659ab2441ad00cea9f
def test_args_already_set(ui_obj) -> None: '\n Ensures that the arugments provided are\n the values stored when already in memory\n ' args = {'ip': '1.1.1.1', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False} ui_obj._args = args actual = ui_obj.args message = ''.join([f'EXPECTED: {args} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (args == actual), message
Ensures that the arugments provided are the values stored when already in memory
src/tests/test_ui.py
test_args_already_set
t-a-y-l-o-r/checkip
0
python
def test_args_already_set(ui_obj) -> None: '\n    Ensures that the arugments provided are\n    the values stored when already in memory\n    ' args = {'ip': '1.1.1.1', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False} ui_obj._args = args actual = ui_obj.args message = ''.join([f'EXPECTED: {args} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (args == actual), message
def test_args_already_set(ui_obj) -> None: '\n    Ensures that the arugments provided are\n    the values stored when already in memory\n    ' args = {'ip': '1.1.1.1', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False} ui_obj._args = args actual = ui_obj.args message = ''.join([f'EXPECTED: {args} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (args == actual), message<|docstring|>Ensures that the arugments provided are the values stored when already in memory<|endoftext|>
06488c7cf208c42a2ff62d89e14bc42b31a1e8455245bf34fbb2e705c5a9ac20
def test_args_from_config(ui_obj) -> None: '\n Ensures that the arugments provided are\n the same as the config object passed in\n ' args = {'ip': '8.8.8.8', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False} actual = ui_obj.args message = ''.join([f'EXPECTED: {args} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (args == actual), message
Ensures that the arugments provided are the same as the config object passed in
src/tests/test_ui.py
test_args_from_config
t-a-y-l-o-r/checkip
0
python
def test_args_from_config(ui_obj) -> None: '\n    Ensures that the arugments provided are\n    the same as the config object passed in\n    ' args = {'ip': '8.8.8.8', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False} actual = ui_obj.args message = ''.join([f'EXPECTED: {args} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (args == actual), message
def test_args_from_config(ui_obj) -> None: '\n    Ensures that the arugments provided are\n    the same as the config object passed in\n    ' args = {'ip': '8.8.8.8', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False} actual = ui_obj.args message = ''.join([f'EXPECTED: {args} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (args == actual), message<|docstring|>Ensures that the arugments provided are the same as the config object passed in<|endoftext|>
0b451dda8526dd0875a873c752974d412a1ec81f03a51584b914cd8c49a6fe8f
def test_args_from_user_input() -> None: '\n Ensures that the arugments provided are\n the same as the config object passed in\n ' root = './src/checkip.py' argument_list = [{'args': [root, '-ip', '8.8.8.8'], 'expected': {'ip': '8.8.8.8', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False}}, {'args': [root, '-ip', '1.1.1.1'], 'expected': {'ip': '1.1.1.1', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False}}, {'args': [root, '-u', 'google.com'], 'expected': {'ip': None, 'input_file': None, 'host': 'google.com', 'force': False, 'silent': False, 'verbose': False}}] for arg_set in argument_list: arguments = arg_set['args'] expected = arg_set['expected'] sys.argv = list(arguments) conf = ui.UI_Config(testing=True, args=None) ui_obj = ui.UI(conf) actual = ui_obj.args message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the arugments provided are the same as the config object passed in
src/tests/test_ui.py
test_args_from_user_input
t-a-y-l-o-r/checkip
0
python
def test_args_from_user_input() -> None: '\n    Ensures that the arugments provided are\n    the same as the config object passed in\n    ' root = './src/checkip.py' argument_list = [{'args': [root, '-ip', '8.8.8.8'], 'expected': {'ip': '8.8.8.8', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False}}, {'args': [root, '-ip', '1.1.1.1'], 'expected': {'ip': '1.1.1.1', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False}}, {'args': [root, '-u', 'google.com'], 'expected': {'ip': None, 'input_file': None, 'host': 'google.com', 'force': False, 'silent': False, 'verbose': False}}] for arg_set in argument_list: arguments = arg_set['args'] expected = arg_set['expected'] sys.argv = list(arguments) conf = ui.UI_Config(testing=True, args=None) ui_obj = ui.UI(conf) actual = ui_obj.args message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_args_from_user_input() -> None: '\n    Ensures that the arugments provided are\n    the same as the config object passed in\n    ' root = './src/checkip.py' argument_list = [{'args': [root, '-ip', '8.8.8.8'], 'expected': {'ip': '8.8.8.8', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False}}, {'args': [root, '-ip', '1.1.1.1'], 'expected': {'ip': '1.1.1.1', 'input_file': None, 'host': None, 'force': False, 'silent': False, 'verbose': False}}, {'args': [root, '-u', 'google.com'], 'expected': {'ip': None, 'input_file': None, 'host': 'google.com', 'force': False, 'silent': False, 'verbose': False}}] for arg_set in argument_list: arguments = arg_set['args'] expected = arg_set['expected'] sys.argv = list(arguments) conf = ui.UI_Config(testing=True, args=None) ui_obj = ui.UI(conf) actual = ui_obj.args message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the arugments provided are the same as the config object passed in<|endoftext|>
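test_args_from_user_input above assigns to the global sys.argv directly, which can leak into later tests. A minimal sketch of the same check using pytest's built-in monkeypatch fixture, which restores sys.argv automatically when the test finishes; it assumes the same ui module import as the rest of the file:

import sys

def test_args_from_user_input_patched(monkeypatch):
    # the patch is undone automatically at test teardown
    monkeypatch.setattr(sys, 'argv', ['./src/checkip.py', '-ip', '8.8.8.8'])
    conf = ui.UI_Config(testing=True, args=None)
    assert ui.UI(conf).args['ip'] == '8.8.8.8'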
afde05a8d8ba49312002f7abf35c2b0f44ef5047ea1c7d1575d2719168672f74
def test_bad_ip_exit_not_silent(capsys) -> None: '\n Ensures appropriate input when silent is not passed\n ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['-ip', ip]) ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_ip_exit(ip) actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'''{ip} is an invalid ipv4 address ''']) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures appropriate input when silent is not passed
src/tests/test_ui.py
test_bad_ip_exit_not_silent
t-a-y-l-o-r/checkip
0
python
def test_bad_ip_exit_not_silent(capsys) -> None: '\n    \n    ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['-ip', ip]) ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_ip_exit(ip) actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'{ip} is an invalid ipv4 address ']) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_bad_ip_exit_not_silent(capsys) -> None: '\n    \n    ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['-ip', ip]) ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_ip_exit(ip) actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'{ip} is an invalid ipv4 address ']) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures appropriate input when silent is not passed<|endoftext|>
c3dd3efc65085de747f25c585e15a1482aa6b44ab5eb84fbcd336c88b8446f19
def test_bad_ip_exit_silent(capsys) -> None: '\n Ensures appropriate input when silent is not passed\n ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['-ip', ip]) ui_obj = ui.UI(conf) ui_obj._silent = True ui_obj._bad_ip_exit(ip) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures appropriate input when silent is not passed
src/tests/test_ui.py
test_bad_ip_exit_silent
t-a-y-l-o-r/checkip
0
python
def test_bad_ip_exit_silent(capsys) -> None: '\n    \n    ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['-ip', ip]) ui_obj = ui.UI(conf) ui_obj._silent = True ui_obj._bad_ip_exit(ip) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_bad_ip_exit_silent(capsys) -> None: '\n    \n    ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['-ip', ip]) ui_obj = ui.UI(conf) ui_obj._silent = True ui_obj._bad_ip_exit(ip) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures appropriate input when silent is not passed<|endoftext|>
a51092bfd167847a397872a242641464d6cf84bf6d221203bf941232871fd248
def test_bad_ip_exit_not_silent_not_testing() -> None: '\n Ensures appropriate input when silent is not passed\n ' ip = 'google.com' conf = ui.UI_Config(testing=False, args=['-ip', ip]) with pytest.raises(SystemExit): ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_ip_exit(ip)
Ensures appropriate input when silent is not passed
src/tests/test_ui.py
test_bad_ip_exit_not_silent_not_testing
t-a-y-l-o-r/checkip
0
python
def test_bad_ip_exit_not_silent_not_testing() -> None: '\n \n ' ip = 'google.com' conf = ui.UI_Config(testing=False, args=['-ip', ip]) with pytest.raises(SystemExit): ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_ip_exit(ip)
def test_bad_ip_exit_not_silent_not_testing() -> None: '\n \n ' ip = 'google.com' conf = ui.UI_Config(testing=False, args=['-ip', ip]) with pytest.raises(SystemExit): ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_ip_exit(ip)<|docstring|>Ensures appropriate input when silent is not passed<|endoftext|>
c9e0111bba4716bc91a6e73067449fa9172c6413c2cb3204e0f3f31a67f932e2
def test_silent_set(ui_obj) -> None: '\n Ensures that an already set `silent`\n value is properly provided\n ' silent_sets = [{'bool': True, 'expected': True}, {'bool': False, 'expected': False}] for pairs in silent_sets: set_to = pairs['bool'] expected = pairs['expected'] ui_obj._silent = set_to actual = ui_obj.silent message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that an already set `silent` value is properly provided
src/tests/test_ui.py
test_silent_set
t-a-y-l-o-r/checkip
0
python
def test_silent_set(ui_obj) -> None: '\n    Ensures that an already set `silent`\n    value is properly provided\n    ' silent_sets = [{'bool': True, 'expected': True}, {'bool': False, 'expected': False}] for pairs in silent_sets: set_to = pairs['bool'] expected = pairs['expected'] ui_obj._silent = set_to actual = ui_obj.silent message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_silent_set(ui_obj) -> None: '\n    Ensures that an already set `silent`\n    value is properly provided\n    ' silent_sets = [{'bool': True, 'expected': True}, {'bool': False, 'expected': False}] for pairs in silent_sets: set_to = pairs['bool'] expected = pairs['expected'] ui_obj._silent = set_to actual = ui_obj.silent message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that an already set `silent` value is properly provided<|endoftext|>
ef3f2dc0e5e20bc06fdf8a9b71a3c8fb958d40c0a7d40b8b44dc20578809064b
def test_silent_not_set() -> None: '\n Ensures that an already set `silent`\n value is properly provided\n ' silent_sets = [{'bool': True, 'expected': True}, {'bool': False, 'expected': False}] for pairs in silent_sets: silent = pairs['bool'] arg_list = ['-ip', '8.8.8.8'] if silent: arg_list.append('--silent') conf = ui.UI_Config(testing=True, args=arg_list) ui_obj = ui.UI(conf) expected = pairs['expected'] actual = ui_obj.silent message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that an already set `silent` value is properly provided
src/tests/test_ui.py
test_silent_not_set
t-a-y-l-o-r/checkip
0
python
def test_silent_not_set() -> None: '\n    Ensures that an already set `silent`\n    value is properly provided\n    ' silent_sets = [{'bool': True, 'expected': True}, {'bool': False, 'expected': False}] for pairs in silent_sets: silent = pairs['bool'] arg_list = ['-ip', '8.8.8.8'] if silent: arg_list.append('--silent') conf = ui.UI_Config(testing=True, args=arg_list) ui_obj = ui.UI(conf) expected = pairs['expected'] actual = ui_obj.silent message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_silent_not_set() -> None: '\n    Ensures that an already set `silent`\n    value is properly provided\n    ' silent_sets = [{'bool': True, 'expected': True}, {'bool': False, 'expected': False}] for pairs in silent_sets: silent = pairs['bool'] arg_list = ['-ip', '8.8.8.8'] if silent: arg_list.append('--silent') conf = ui.UI_Config(testing=True, args=arg_list) ui_obj = ui.UI(conf) expected = pairs['expected'] actual = ui_obj.silent message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that an already set `silent` value is properly provided<|endoftext|>
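The dict-of-pairs loops in test_silent_set and test_silent_not_set stop reporting at the first failing pair. A sketch of the same cases written with pytest.mark.parametrize, which runs and reports each case independently (the test name is invented for illustration):

import pytest

@pytest.mark.parametrize('silent, expected', [(True, True), (False, False)])
def test_silent_parametrized(silent, expected):
    arg_list = ['-ip', '8.8.8.8'] + (['--silent'] if silent else [])
    conf = ui.UI_Config(testing=True, args=arg_list)
    assert ui.UI(conf).silent == expected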
3e0b37fd93d44de0612a0e95f1743a94696cc67bba3255f10ad720f27dafc78e
def test_bad_file_exit_not_silent(capsys) -> None: '\n Ensures appropriate input when silent is not set\n ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['--input-file', ip]) ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_file_exit(ip) actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'''{ip} is an invalid file ''']) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures appropriate input when silent is not set
src/tests/test_ui.py
test_bad_file_exit_not_silent
t-a-y-l-o-r/checkip
0
python
def test_bad_file_exit_not_silent(capsys) -> None: '\n    \n    ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['--input-file', ip]) ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_file_exit(ip) actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'{ip} is an invalid file ']) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_bad_file_exit_not_silent(capsys) -> None: '\n    \n    ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['--input-file', ip]) ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_file_exit(ip) actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'{ip} is an invalid file ']) message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures appropriate input when silent is not set<|endoftext|>
1ba99ccf3cf0f36ed5dff37d508bc387df0c7c616646ccf3d83bb8a600eef60f
def test_bad_file_exit_silent(capsys) -> None: '\n Ensures appropriate input when silent is set\n ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['--input-file', ip]) ui_obj = ui.UI(conf) ui_obj._silent = True ui_obj._bad_file_exit(ip) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures appropriate input when silent is set
src/tests/test_ui.py
test_bad_file_exit_silent
t-a-y-l-o-r/checkip
0
python
def test_bad_file_exit_silent(capsys) -> None: '\n    \n    ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['--input-file', ip]) ui_obj = ui.UI(conf) ui_obj._silent = True ui_obj._bad_file_exit(ip) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_bad_file_exit_silent(capsys) -> None: '\n    \n    ' ip = 'google.com' conf = ui.UI_Config(testing=True, args=['--input-file', ip]) ui_obj = ui.UI(conf) ui_obj._silent = True ui_obj._bad_file_exit(ip) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures appropriate input when silent is set<|endoftext|>
39ef73f8a84af85ab5d0dd17425a39cde3607e5f145700a171e11d39abbada16
def test_bad_file_exit_not_silent_not_testing() -> None: '\n Ensures appropriate input when silent is not passed\n ' ip = 'google.com' conf = ui.UI_Config(testing=False, args=['--input-file', ip]) with pytest.raises(SystemExit): ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_file_exit(ip)
Ensures appropriate input when silent is not passed
src/tests/test_ui.py
test_bad_file_exit_not_silent_not_testing
t-a-y-l-o-r/checkip
0
python
def test_bad_file_exit_not_silent_not_testing() -> None: '\n \n ' ip = 'google.com' conf = ui.UI_Config(testing=False, args=['--input-file', ip]) with pytest.raises(SystemExit): ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_file_exit(ip)
def test_bad_file_exit_not_silent_not_testing() -> None: '\n \n ' ip = 'google.com' conf = ui.UI_Config(testing=False, args=['--input-file', ip]) with pytest.raises(SystemExit): ui_obj = ui.UI(conf) ui_obj._silent = False ui_obj._bad_file_exit(ip)<|docstring|>Ensures appropriate input when silent is not passed<|endoftext|>
ae2144eeb107c725bb5f5510d7ce71094209098b388efe00e62732564ad98b01
def test_valid_ip_file_empty(ui_obj, capsys) -> None: '\n Ensures that `False` is returned when a no file is provided\n Also ensure the propery message is provided\n ' provided_file = None actual = ui_obj._valid_ip_file(provided_file) expected: Any = False message = ''.join([f'EXPECTED: {str(expected)} does not match ', f'ACTUAL: {str(actual)} for UI(): {ui_obj}']) assert (expected == actual), message actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'''{provided_file} is not a valid file! ''']) assert (expected == actual), message
Ensures that `False` is returned when a no file is provided Also ensure the propery message is provided
src/tests/test_ui.py
test_valid_ip_file_empty
t-a-y-l-o-r/checkip
0
python
def test_valid_ip_file_empty(ui_obj, capsys) -> None: '\n    Ensures that `False` is returned when a no file is provided\n    Also ensure the propery message is provided\n    ' provided_file = None actual = ui_obj._valid_ip_file(provided_file) expected: Any = False message = ''.join([f'EXPECTED: {str(expected)} does not match ', f'ACTUAL: {str(actual)} for UI(): {ui_obj}']) assert (expected == actual), message actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'{provided_file} is not a valid file! ']) assert (expected == actual), message
def test_valid_ip_file_empty(ui_obj, capsys) -> None: '\n    Ensures that `False` is returned when a no file is provided\n    Also ensure the propery message is provided\n    ' provided_file = None actual = ui_obj._valid_ip_file(provided_file) expected: Any = False message = ''.join([f'EXPECTED: {str(expected)} does not match ', f'ACTUAL: {str(actual)} for UI(): {ui_obj}']) assert (expected == actual), message actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'{provided_file} is not a valid file! ']) assert (expected == actual), message<|docstring|>Ensures that `False` is returned when a no file is provided Also ensure the propery message is provided<|endoftext|>
43a7ad4d0274a59f8297491e357b056ff32eb932e636a4c25f42bf5bc7c4d4a4
def test_valid_ip_file_doesnt_exist(ui_obj, capsys) -> None: '\n Ensures that `False` is returned when a none existent file is provided\n Also ensure the propery message is provided\n ' provided_file = 'some_dumby_file.txt' actual: Any = ui_obj._valid_ip_file(provided_file) expected: Any = False message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'''{provided_file} is not a valid file! ''']) assert (expected == actual), message
Ensures that `False` is returned when a none existent file is provided Also ensure the propery message is provided
src/tests/test_ui.py
test_valid_ip_file_doesnt_exist
t-a-y-l-o-r/checkip
0
python
def test_valid_ip_file_doesnt_exist(ui_obj, capsys) -> None: '\n    Ensures that `False` is returned when a none existent file is provided\n    Also ensure the propery message is provided\n    ' provided_file = 'some_dumby_file.txt' actual: Any = ui_obj._valid_ip_file(provided_file) expected: Any = False message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'{provided_file} is not a valid file! ']) assert (expected == actual), message
def test_valid_ip_file_doesnt_exist(ui_obj, capsys) -> None: '\n    Ensures that `False` is returned when a none existent file is provided\n    Also ensure the propery message is provided\n    ' provided_file = 'some_dumby_file.txt' actual: Any = ui_obj._valid_ip_file(provided_file) expected: Any = False message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message actual = capsys.readouterr().out expected = ''.join([f'{ui.RED}[*] Warning:{ui.CLEAR} ', f'{provided_file} is not a valid file! ']) assert (expected == actual), message<|docstring|>Ensures that `False` is returned when a none existent file is provided Also ensure the propery message is provided<|endoftext|>
b7ff6a285bbb917a4f9bccde5b0091c4f29dee52d6f32bc7a672ed809cfb15ff
def test_valid_ip_file_does_exist(ui_obj, ip_file) -> None: '\n Ensures that `True` is returned when a real file is provided\n ' actual = ui_obj._valid_ip_file(ip_file) expected = True message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that `True` is returned when a real file is provided
src/tests/test_ui.py
test_valid_ip_file_does_exist
t-a-y-l-o-r/checkip
0
python
def test_valid_ip_file_does_exist(ui_obj, ip_file) -> None: '\n    \n    ' actual = ui_obj._valid_ip_file(ip_file) expected = True message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_valid_ip_file_does_exist(ui_obj, ip_file) -> None: '\n    \n    ' actual = ui_obj._valid_ip_file(ip_file) expected = True message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that `True` is returned when a real file is provided<|endoftext|>
7ae736db2288b2474c313ab0462671aa1150058954d9297828308464a8050c69
def test_display_silent(ui_obj, capsys) -> None: '\n Ensures that there is NO output when silent is set\n ' ui_obj._silent = True header = '' ui_obj.display_report(header) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that there is NO output when silent is set
src/tests/test_ui.py
test_display_silent
t-a-y-l-o-r/checkip
0
python
def test_display_silent(ui_obj, capsys) -> None: '\n    \n    ' ui_obj._silent = True header = '' ui_obj.display_report(header) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message
def test_display_silent(ui_obj, capsys) -> None: '\n    \n    ' ui_obj._silent = True header = '' ui_obj.display_report(header) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {expected} does not match ', f'ACTUAL: {actual} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that there is NO output when silent is set<|endoftext|>
f1975f32a782a1e77c58503e55e85940b849774ca0334d032a373e77b3da35fc
def test_display_ip(ui_obj, capsys) -> None: '\n Ensures that the correct output is printed when silent is set\n ' ui_obj._silent = False ip = '8.8.8.8' ui_obj.display_ip(ip) actual = capsys.readouterr().out expected = ''.join([f''' {ui._TITLE_OFFSET}============================= ''', f'{ui._TITLE_OFFSET}[ip] {ip} [ip]', f''' {ui._TITLE_OFFSET}============================= ''']) message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the correct output is printed when silent is set
src/tests/test_ui.py
test_display_ip
t-a-y-l-o-r/checkip
0
python
def test_display_ip(ui_obj, capsys) -> None: '\n    \n    ' ui_obj._silent = False ip = '8.8.8.8' ui_obj.display_ip(ip) actual = capsys.readouterr().out expected = ''.join([f' {ui._TITLE_OFFSET}============================= ', f'{ui._TITLE_OFFSET}[ip] {ip} [ip]', f' {ui._TITLE_OFFSET}============================= ']) message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message
def test_display_ip(ui_obj, capsys) -> None: '\n    \n    ' ui_obj._silent = False ip = '8.8.8.8' ui_obj.display_ip(ip) actual = capsys.readouterr().out expected = ''.join([f' {ui._TITLE_OFFSET}============================= ', f'{ui._TITLE_OFFSET}[ip] {ip} [ip]', f' {ui._TITLE_OFFSET}============================= ']) message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the correct output is printed when silent is set<|endoftext|>
d40f412aef1915aec8f45c347232f51b7820d05c409f05bac5767d415e3fd888
def test_display_excluded_ips_silent(ui_obj, capsys) -> None: '\n Ensures that nothing is printed when silent\n ' ui_obj._silent = True ips = None ui_obj.display_excluded_ips(ips) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that nothing is printed when silent
src/tests/test_ui.py
test_display_excluded_ips_silent
t-a-y-l-o-r/checkip
0
python
def test_display_excluded_ips_silent(ui_obj, capsys) -> None: '\n    \n    ' ui_obj._silent = True ips = None ui_obj.display_excluded_ips(ips) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message
def test_display_excluded_ips_silent(ui_obj, capsys) -> None: '\n    \n    ' ui_obj._silent = True ips = None ui_obj.display_excluded_ips(ips) actual = capsys.readouterr().out expected = '' message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that nothing is printed when silent<|endoftext|>
531e82ef1f6117433962fa7cae2627fac5d3144ecd0b60092df8407c4cffe555
def test_display_excluded_ips_not_silent(ui_obj, capsys) -> None: '\n Ensures that the dict is printed when ips are provided\n ' ui_obj._silent = False notes = {'notes': 'N/A'} ip_dict = {'8.8.8.8': notes, '1.1.1.1': notes, '2.2.2.2': notes} ui_obj.display_excluded_ips(ip_dict) actual = capsys.readouterr().out ip_json = json.dumps(ip_dict, indent=4, sort_keys=True) expected = ''.join([f'[*]{ui.YELLOW} Notice: {ui.CLEAR} ', f'''the following ips will NOT be scanned: {ip_json} ''']) message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the dict is printed when ips are provided
src/tests/test_ui.py
test_display_excluded_ips_not_silent
t-a-y-l-o-r/checkip
0
python
def test_display_excluded_ips_not_silent(ui_obj, capsys) -> None: '\n    \n    ' ui_obj._silent = False notes = {'notes': 'N/A'} ip_dict = {'8.8.8.8': notes, '1.1.1.1': notes, '2.2.2.2': notes} ui_obj.display_excluded_ips(ip_dict) actual = capsys.readouterr().out ip_json = json.dumps(ip_dict, indent=4, sort_keys=True) expected = ''.join([f'[*]{ui.YELLOW} Notice: {ui.CLEAR} ', f'the following ips will NOT be scanned: {ip_json} ']) message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message
def test_display_excluded_ips_not_silent(ui_obj, capsys) -> None: '\n    \n    ' ui_obj._silent = False notes = {'notes': 'N/A'} ip_dict = {'8.8.8.8': notes, '1.1.1.1': notes, '2.2.2.2': notes} ui_obj.display_excluded_ips(ip_dict) actual = capsys.readouterr().out ip_json = json.dumps(ip_dict, indent=4, sort_keys=True) expected = ''.join([f'[*]{ui.YELLOW} Notice: {ui.CLEAR} ', f'the following ips will NOT be scanned: {ip_json} ']) message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the dict is printed when ips are provided<|endoftext|>
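test_display_excluded_ips_not_silent can compare against a freshly serialised dict because json.dumps(..., sort_keys=True) makes the output independent of insertion order, as this standalone check shows:

import json

a = json.dumps({'8.8.8.8': 1, '1.1.1.1': 2}, indent=4, sort_keys=True)
b = json.dumps({'1.1.1.1': 2, '8.8.8.8': 1}, indent=4, sort_keys=True)
assert a == b  # key order in the literal does not matter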
610908d69b8821768fa16ccf94ae6eb5416be0f7cc313b0118a1564d2f70a54d
def test_display_help(ui_obj, capsys) -> None: '\n Ensures that the proper output happens for\n help messages\n ' ui_obj.display_help() actual = capsys.readouterr().out expected = ui_obj._parser.format_help() message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message
Ensures that the proper output happens for help messages
src/tests/test_ui.py
test_display_help
t-a-y-l-o-r/checkip
0
python
def test_display_help(ui_obj, capsys) -> None: '\n    Ensures that the proper output happens for\n    help messages\n    ' ui_obj.display_help() actual = capsys.readouterr().out expected = ui_obj._parser.format_help() message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message
def test_display_help(ui_obj, capsys) -> None: '\n    Ensures that the proper output happens for\n    help messages\n    ' ui_obj.display_help() actual = capsys.readouterr().out expected = ui_obj._parser.format_help() message = ''.join([f'EXPECTED: {repr(expected)} does not match ', f'ACTUAL: {repr(actual)} for UI(): {ui_obj}']) assert (expected == actual), message<|docstring|>Ensures that the proper output happens for help messages<|endoftext|>
f32958abae55b8a0abaaea9a203b5e0c71fee1329e31457337ec34bea3124733
def words(text): 'An iterator over tokens (words) in text. Replace this with a\n stemmer or other smarter logic.\n ' for word in text.split(): normed = re.sub('[^a-z]', '', word.lower()) if normed: (yield normed)
An iterator over tokens (words) in text. Replace this with a stemmer or other smarter logic.
category_predictor/category_predictor.py
words
lenmorld/Yackathon_keyword_map
1,008
python
def words(text): 'An iterator over tokens (words) in text. Replace this with a\n    stemmer or other smarter logic.\n    ' for word in text.split(): normed = re.sub('[^a-z]', '', word.lower()) if normed: (yield normed)
def words(text): 'An iterator over tokens (words) in text. Replace this with a\n    stemmer or other smarter logic.\n    ' for word in text.split(): normed = re.sub('[^a-z]', '', word.lower()) if normed: (yield normed)<|docstring|>An iterator over tokens (words) in text. Replace this with a stemmer or other smarter logic.<|endoftext|>
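A quick illustration of the tokenizer's behaviour: lowercase each whitespace-split token, strip everything outside a-z, and drop tokens that normalise to the empty string:

print(list(words('Best BBQ in town!! 10/10')))
# ['best', 'bbq', 'in', 'town']  -- '10/10' normalises to '' and is dropped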
5afc547265be030940c187f64a5b4a418068482d907c0ed2546f698ea62269b0
def review_category_mapper(self, _, data): 'Visit reviews and businesses, yielding out (business_id,\n (review or category)).\n ' if (data['type'] == 'review'): (yield (data['business_id'], ('review', data['text']))) elif (data['type'] == 'business'): (yield (data['business_id'], ('categories', data['categories'])))
Visit reviews and businesses, yielding out (business_id, (review or category)).
category_predictor/category_predictor.py
review_category_mapper
lenmorld/Yackathon_keyword_map
1,008
python
def review_category_mapper(self, _, data): 'Visit reviews and businesses, yielding out (business_id,\n (review or category)).\n ' if (data['type'] == 'review'): (yield (data['business_id'], ('review', data['text']))) elif (data['type'] == 'business'): (yield (data['business_id'], ('categories', data['categories'])))
def review_category_mapper(self, _, data): 'Visit reviews and businesses, yielding out (business_id,\n (review or category)).\n ' if (data['type'] == 'review'): (yield (data['business_id'], ('review', data['text']))) elif (data['type'] == 'business'): (yield (data['business_id'], ('categories', data['categories'])))<|docstring|>Visit reviews and businesses, yielding out (business_id, (review or category)).<|endoftext|>
1c02246a9ec4f91976c9772c503f907bdc66ac9333744a11b5022b42eeeb1c64
def add_categories_to_reviews_reducer(self, business_id, reviews_or_categories): "Yield out (category, review) for each category-review\n pair. We'll do the actual review tokenizing in the next\n mapper, since you typically have much more map-capacity than\n reduce-capacity.\n " categories = None reviews = [] for (data_type, data) in reviews_or_categories: if (data_type == 'review'): reviews.append(data) else: categories = data if (not categories): return (yield ('all', dict(((cat, len(reviews)) for cat in categories)))) for category in categories: for review in reviews: (yield (category, review))
Yield out (category, review) for each category-review pair. We'll do the actual review tokenizing in the next mapper, since you typically have much more map-capacity than reduce-capacity.
category_predictor/category_predictor.py
add_categories_to_reviews_reducer
lenmorld/Yackathon_keyword_map
1,008
python
def add_categories_to_reviews_reducer(self, business_id, reviews_or_categories): "Yield out (category, review) for each category-review\n pair. We'll do the actual review tokenizing in the next\n mapper, since you typically have much more map-capacity than\n reduce-capacity.\n " categories = None reviews = [] for (data_type, data) in reviews_or_categories: if (data_type == 'review'): reviews.append(data) else: categories = data if (not categories): return (yield ('all', dict(((cat, len(reviews)) for cat in categories)))) for category in categories: for review in reviews: (yield (category, review))
def add_categories_to_reviews_reducer(self, business_id, reviews_or_categories): "Yield out (category, review) for each category-review\n pair. We'll do the actual review tokenizing in the next\n mapper, since you typically have much more map-capacity than\n reduce-capacity.\n " categories = None reviews = [] for (data_type, data) in reviews_or_categories: if (data_type == 'review'): reviews.append(data) else: categories = data if (not categories): return (yield ('all', dict(((cat, len(reviews)) for cat in categories)))) for category in categories: for review in reviews: (yield (category, review))<|docstring|>Yield out (category, review) for each category-review pair. We'll do the actual review tokenizing in the next mapper, since you typically have much more map-capacity than reduce-capacity.<|endoftext|>
be0c7774cef039e5a4aa54974678b273640973ca59b0d2cd8cbc3133c4c35ece
def tokenize_reviews_mapper(self, category, review): "Split reviews into words, yielding out (category, {word: count}) and\n ('all', {word: count}). We yield out a dictionary of counts\n rather than a single entry per-word to reduce the amount of\n i/o between mapper and reducer.\n " if (category == 'all'): (yield (category, review)) return counts = {} for word in words(review): counts[word] = (counts.get(word, 0) + 1) (yield (category, counts))
Split reviews into words, yielding out (category, {word: count}) and ('all', {word: count}). We yield out a dictionary of counts rather than a single entry per-word to reduce the amount of i/o between mapper and reducer.
category_predictor/category_predictor.py
tokenize_reviews_mapper
lenmorld/Yackathon_keyword_map
1,008
python
def tokenize_reviews_mapper(self, category, review): "Split reviews into words, yielding out (category, {word: count}) and\n ('all', {word: count}). We yield out a dictionary of counts\n rather than a single entry per-word to reduce the amount of\n i/o between mapper and reducer.\n " if (category == 'all'): (yield (category, review)) return counts = {} for word in words(review): counts[word] = (counts.get(word, 0) + 1) (yield (category, counts))
def tokenize_reviews_mapper(self, category, review): "Split reviews into words, yielding out (category, {word: count}) and\n ('all', {word: count}). We yield out a dictionary of counts\n rather than a single entry per-word to reduce the amount of\n i/o between mapper and reducer.\n " if (category == 'all'): (yield (category, review)) return counts = {} for word in words(review): counts[word] = (counts.get(word, 0) + 1) (yield (category, counts))<|docstring|>Split reviews into words, yielding out (category, {word: count}) and ('all', {word: count}). We yield out a dictionary of counts rather than a single entry per-word to reduce the amount of i/o between mapper and reducer.<|endoftext|>
971ea285542db688b7ed9e625d66f16a197ac3214aaf1cdad4c3b117c1c769cc
def sum_counts(self, category, counts): 'Sum up dictionaries of counts, filter out rare words\n (bucketing them into an unknown word bucket), and yield the\n counts.\n ' raw_count = {} for word_count in counts: for (word, count) in word_count.iteritems(): raw_count[word] = (raw_count.get(word, 0) + count) if (category == 'all'): (yield (category, raw_count)) return filtered_counts = {} for (word, count) in raw_count.iteritems(): if (count > MINIMUM_OCCURENCES): filtered_counts[word] = count if (not filtered_counts): return filtered_counts['UNK'] = 0.01 (yield (category, filtered_counts))
Sum up dictionaries of counts, filter out rare words (bucketing them into an unknown word bucket), and yield the counts.
category_predictor/category_predictor.py
sum_counts
lenmorld/Yackathon_keyword_map
1,008
python
def sum_counts(self, category, counts): 'Sum up dictionaries of counts, filter out rare words\n (bucketing them into an unknown word bucket), and yield the\n counts.\n ' raw_count = {} for word_count in counts: for (word, count) in word_count.iteritems(): raw_count[word] = (raw_count.get(word, 0) + count) if (category == 'all'): (yield (category, raw_count)) return filtered_counts = {} for (word, count) in raw_count.iteritems(): if (count > MINIMUM_OCCURENCES): filtered_counts[word] = count if (not filtered_counts): return filtered_counts['UNK'] = 0.01 (yield (category, filtered_counts))
def sum_counts(self, category, counts): 'Sum up dictionaries of counts, filter out rare words\n (bucketing them into an unknown word bucket), and yield the\n counts.\n ' raw_count = {} for word_count in counts: for (word, count) in word_count.iteritems(): raw_count[word] = (raw_count.get(word, 0) + count) if (category == 'all'): (yield (category, raw_count)) return filtered_counts = {} for (word, count) in raw_count.iteritems(): if (count > MINIMUM_OCCURENCES): filtered_counts[word] = count if (not filtered_counts): return filtered_counts['UNK'] = 0.01 (yield (category, filtered_counts))<|docstring|>Sum up dictionaries of counts, filter out rare words (bucketing them into an unknown word bucket), and yield the counts.<|endoftext|>
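The four category_predictor records above are the stages of a two-step mrjob pipeline (the path suggests a copy of Yelp's classic category-predictor example). A sketch of how they could chain together; the class name and MRStep wiring are assumptions, since the dump shows only the methods, and older mrjob releases spell the same wiring with self.mr(...):

from mrjob.job import MRJob
from mrjob.step import MRStep

class CategoryPredictor(MRJob):
    def steps(self):
        return [
            # step 1: join each review with its business's categories
            MRStep(mapper=self.review_category_mapper,
                   reducer=self.add_categories_to_reviews_reducer),
            # step 2: count words per category, then bucket rare words
            MRStep(mapper=self.tokenize_reviews_mapper,
                   reducer=self.sum_counts),
        ]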
0dc7a8b534b8ce0d84c32066dddb1d2db95924e80ce7358d92249348208bea3c
def LSTM_Model(): '\n :param x: inputs of size [T, batch_size, input_size]\n :param W: matrix of fully-connected output layer weights\n :param b: vector of fully-connected output layer biases\n ' cell = rnn_cell.BasicLSTMCell(hidden_dim) (outputs, states) = rnn.dynamic_rnn(cell, x, dtype=tf.float32) num_examples = tf.shape(x)[0] W_repeated = tf.tile(tf.expand_dims(W_out, 0), [num_examples, 1, 1]) out = (tf.matmul(outputs, W_repeated) + b_out) out = tf.squeeze(out) return out
:param x: inputs of size [T, batch_size, input_size] :param W: matrix of fully-connected output layer weights :param b: vector of fully-connected output layer biases
Chapter09/LSTM_Time_Series/TimeSeriesPredictor.py
LSTM_Model
eric-erki/Predictive-Analytics-with-TensorFlow
73
python
def LSTM_Model(): '\n :param x: inputs of size [T, batch_size, input_size]\n :param W: matrix of fully-connected output layer weights\n :param b: vector of fully-connected output layer biases\n ' cell = rnn_cell.BasicLSTMCell(hidden_dim) (outputs, states) = rnn.dynamic_rnn(cell, x, dtype=tf.float32) num_examples = tf.shape(x)[0] W_repeated = tf.tile(tf.expand_dims(W_out, 0), [num_examples, 1, 1]) out = (tf.matmul(outputs, W_repeated) + b_out) out = tf.squeeze(out) return out
def LSTM_Model(): '\n :param x: inputs of size [T, batch_size, input_size]\n :param W: matrix of fully-connected output layer weights\n :param b: vector of fully-connected output layer biases\n ' cell = rnn_cell.BasicLSTMCell(hidden_dim) (outputs, states) = rnn.dynamic_rnn(cell, x, dtype=tf.float32) num_examples = tf.shape(x)[0] W_repeated = tf.tile(tf.expand_dims(W_out, 0), [num_examples, 1, 1]) out = (tf.matmul(outputs, W_repeated) + b_out) out = tf.squeeze(out) return out<|docstring|>:param x: inputs of size [T, batch_size, input_size] :param W: matrix of fully-connected output layer weights :param b: vector of fully-connected output layer biases<|endoftext|>
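LSTM_Model() closes over x, W_out, b_out, hidden_dim and the rnn/rnn_cell modules, none of which appear in this record. A hedged sketch of the TF 1.x-era graph setup it seems to assume — the import location and sizes are guesses for illustration (these modules moved between TensorFlow releases), not the chapter's actual values. Note also that the docstring describes x as [T, batch_size, input_size], while dynamic_rnn defaults to batch-major input unless time_major=True is passed:

import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell  # location varies across TF 1.x

input_dim, hidden_dim = 1, 100  # illustrative sizes only
x = tf.placeholder(tf.float32, [None, None, input_dim])  # [batch, T, features]
W_out = tf.Variable(tf.random_normal([hidden_dim, 1]), name='W_out')
b_out = tf.Variable(tf.random_normal([1]), name='b_out')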
f6c2390256d423d9946b10b5658c6fb71f3f56139990d21386e0d5179be2f933
def __init__(self, model): '\n Class to calculate probability (pmf) values specific to Bayesian Models\n\n Parameters\n ----------\n model: Bayesian Model\n model on which inference queries will be computed\n ' super(BayesianModelProbability, self).__init__(model)
Class to calculate probability (pmf) values specific to Bayesian Models Parameters ---------- model: Bayesian Model model on which inference queries will be computed
pgmpy/metrics/bn_inference.py
__init__
vbob/pgmpy
2,144
python
def __init__(self, model): '\n Class to calculate probability (pmf) values specific to Bayesian Models\n\n Parameters\n ----------\n model: Bayesian Model\n model on which inference queries will be computed\n ' super(BayesianModelProbability, self).__init__(model)
def __init__(self, model): '\n Class to calculate probability (pmf) values specific to Bayesian Models\n\n Parameters\n ----------\n model: Bayesian Model\n model on which inference queries will be computed\n ' super(BayesianModelProbability, self).__init__(model)<|docstring|>Class to calculate probability (pmf) values specific to Bayesian Models Parameters ---------- model: Bayesian Model model on which inference queries will be computed<|endoftext|>
3d22b9fc08f4cc3c40816b1a3c6d2d60864f8acc3d38b797ce1f3fe4c42dc0cb
def _log_probability_node(self, data, ordering, node): '\n        Evaluate the log probability of each datapoint for a specific node.\n\n        Internal function used by log_probability().\n\n        Parameters\n        ----------\n        data: array_like, shape (n_samples, n_features)\n            List of n_features-dimensional data points. Each row\n            corresponds to a single data point.\n\n        ordering: list\n            ordering of columns in data, used by the Bayesian model.\n            default is topological ordering used by model.\n\n        node: Bayesian Model Node\n            node from the Bayesian network.\n\n        Returns\n        -------\n        ndarray: having shape (n_samples,)\n            The array of log(density) evaluations. These are normalized to be\n            probability densities, so values will be low for high-dimensional\n            data.\n        ' def vec_translate(a, my_dict): return np.vectorize(my_dict.__getitem__)(a) cpd = self.model.get_cpds(node) current = cpd.variables[0] current_idx = ordering.index(current) current_val = data[:, current_idx] current_no = vec_translate(current_val, cpd.name_to_no[current]) evidence = cpd.variables[:0:(- 1)] evidence_idx = [ordering.index(ev) for ev in evidence] evidence_val = data[:, evidence_idx] evidence_no = np.empty_like(evidence_val, dtype=int) for (i, ev) in enumerate(evidence): evidence_no[:, i] = vec_translate(evidence_val[:, i], cpd.name_to_no[ev]) if evidence: (state_to_index, index_to_weight) = self.pre_compute_reduce_maps(variable=node) (unique, inverse) = np.unique(evidence_no, axis=0, return_inverse=True) weights = np.array([index_to_weight[state_to_index[tuple(u)]] for u in unique])[inverse] else: weights = np.array(([cpd.values] * len(data))) probability_node = np.array([weights[i][cn] for (i, cn) in enumerate(current_no)]) return np.log(probability_node)
Evaluate the log probability of each datapoint for a specific node. Internal function used by log_probability(). Parameters ---------- data: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. ordering: list ordering of columns in data, used by the Bayesian model. default is topological ordering used by model. node: Bayesian Model Node node from the Bayesian network. Returns ------- ndarray: having shape (n_samples,) The array of log(density) evaluations. These are normalized to be probability densities, so values will be low for high-dimensional data.
pgmpy/metrics/bn_inference.py
_log_probability_node
vbob/pgmpy
2,144
python
def _log_probability_node(self, data, ordering, node): '\n        Evaluate the log probability of each datapoint for a specific node.\n\n        Internal function used by log_probability().\n\n        Parameters\n        ----------\n        data: array_like, shape (n_samples, n_features)\n            List of n_features-dimensional data points. Each row\n            corresponds to a single data point.\n\n        ordering: list\n            ordering of columns in data, used by the Bayesian model.\n            default is topological ordering used by model.\n\n        node: Bayesian Model Node\n            node from the Bayesian network.\n\n        Returns\n        -------\n        ndarray: having shape (n_samples,)\n            The array of log(density) evaluations. These are normalized to be\n            probability densities, so values will be low for high-dimensional\n            data.\n        ' def vec_translate(a, my_dict): return np.vectorize(my_dict.__getitem__)(a) cpd = self.model.get_cpds(node) current = cpd.variables[0] current_idx = ordering.index(current) current_val = data[:, current_idx] current_no = vec_translate(current_val, cpd.name_to_no[current]) evidence = cpd.variables[:0:(- 1)] evidence_idx = [ordering.index(ev) for ev in evidence] evidence_val = data[:, evidence_idx] evidence_no = np.empty_like(evidence_val, dtype=int) for (i, ev) in enumerate(evidence): evidence_no[:, i] = vec_translate(evidence_val[:, i], cpd.name_to_no[ev]) if evidence: (state_to_index, index_to_weight) = self.pre_compute_reduce_maps(variable=node) (unique, inverse) = np.unique(evidence_no, axis=0, return_inverse=True) weights = np.array([index_to_weight[state_to_index[tuple(u)]] for u in unique])[inverse] else: weights = np.array(([cpd.values] * len(data))) probability_node = np.array([weights[i][cn] for (i, cn) in enumerate(current_no)]) return np.log(probability_node)
def _log_probability_node(self, data, ordering, node): '\n        Evaluate the log probability of each datapoint for a specific node.\n\n        Internal function used by log_probability().\n\n        Parameters\n        ----------\n        data: array_like, shape (n_samples, n_features)\n            List of n_features-dimensional data points. Each row\n            corresponds to a single data point.\n\n        ordering: list\n            ordering of columns in data, used by the Bayesian model.\n            default is topological ordering used by model.\n\n        node: Bayesian Model Node\n            node from the Bayesian network.\n\n        Returns\n        -------\n        ndarray: having shape (n_samples,)\n            The array of log(density) evaluations. These are normalized to be\n            probability densities, so values will be low for high-dimensional\n            data.\n        ' def vec_translate(a, my_dict): return np.vectorize(my_dict.__getitem__)(a) cpd = self.model.get_cpds(node) current = cpd.variables[0] current_idx = ordering.index(current) current_val = data[:, current_idx] current_no = vec_translate(current_val, cpd.name_to_no[current]) evidence = cpd.variables[:0:(- 1)] evidence_idx = [ordering.index(ev) for ev in evidence] evidence_val = data[:, evidence_idx] evidence_no = np.empty_like(evidence_val, dtype=int) for (i, ev) in enumerate(evidence): evidence_no[:, i] = vec_translate(evidence_val[:, i], cpd.name_to_no[ev]) if evidence: (state_to_index, index_to_weight) = self.pre_compute_reduce_maps(variable=node) (unique, inverse) = np.unique(evidence_no, axis=0, return_inverse=True) weights = np.array([index_to_weight[state_to_index[tuple(u)]] for u in unique])[inverse] else: weights = np.array(([cpd.values] * len(data))) probability_node = np.array([weights[i][cn] for (i, cn) in enumerate(current_no)]) return np.log(probability_node)<|docstring|>Evaluate the log probability of each datapoint for a specific node.

Internal function used by log_probability().

Parameters
----------
data: array_like, shape (n_samples, n_features)
    List of n_features-dimensional data points. Each row
    corresponds to a single data point.

ordering: list
    ordering of columns in data, used by the Bayesian model.
    default is topological ordering used by model.

node: Bayesian Model Node
    node from the Bayesian network.

Returns
-------
ndarray: having shape (n_samples,)
    The array of log(density) evaluations. These are normalized to be
    probability densities, so values will be low for high-dimensional
    data.<|endoftext|>
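Editor's note: the `vec_translate` helper above maps an array of state names to the integer indices a CPD expects by vectorizing a plain dict lookup. A minimal standalone sketch of the same trick (names here are illustrative, not part of pgmpy):

```python
import numpy as np

# Map categorical state names to integer state numbers.
name_to_no = {"low": 0, "high": 1}
vals = np.array(["low", "high", "high"])

# Same idiom as vec_translate: vectorize the dict's __getitem__.
nos = np.vectorize(name_to_no.__getitem__)(vals)
print(nos)  # -> [0 1 1]
```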
73182a6e0a0a73d88c8cdefab3c2b7a7e69d24e0a994141dd5e8a1cadc6148a8
def log_probability(self, data, ordering=None): '\n        Evaluate the logarithmic probability of each point in a data set.\n\n        Parameters\n        ----------\n        data: pandas dataframe OR array_like, shape (n_samples, n_features)\n            List of n_features-dimensional data points. Each row\n            corresponds to a single data point.\n\n        ordering: list\n            ordering of columns in data, used by the Bayesian model.\n            default is topological ordering used by model.\n\n        Returns\n        -------\n        ndarray: having shape (n_samples,)\n            The array of log(density) evaluations. These are normalized to be\n            probability densities, so values will be low for high-dimensional\n            data.\n        ' if isinstance(data, pd.DataFrame): ordering = data.columns.to_list() data = data.values if (ordering is None): ordering = self.topological_order logp = np.array([self._log_probability_node(data, ordering, node) for node in ordering]) return np.sum(logp, axis=0)
Evaluate the logarithmic probability of each point in a data set. Parameters ---------- data: pandas dataframe OR array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. ordering: list ordering of columns in data, used by the Bayesian model. default is topological ordering used by model. Returns ------- ndarray: having shape (n_samples,) The array of log(density) evaluations. These are normalized to be probability densities, so values will be low for high-dimensional data.
pgmpy/metrics/bn_inference.py
log_probability
vbob/pgmpy
2,144
python
def log_probability(self, data, ordering=None): '\n        Evaluate the logarithmic probability of each point in a data set.\n\n        Parameters\n        ----------\n        data: pandas dataframe OR array_like, shape (n_samples, n_features)\n            List of n_features-dimensional data points. Each row\n            corresponds to a single data point.\n\n        ordering: list\n            ordering of columns in data, used by the Bayesian model.\n            default is topological ordering used by model.\n\n        Returns\n        -------\n        ndarray: having shape (n_samples,)\n            The array of log(density) evaluations. These are normalized to be\n            probability densities, so values will be low for high-dimensional\n            data.\n        ' if isinstance(data, pd.DataFrame): ordering = data.columns.to_list() data = data.values if (ordering is None): ordering = self.topological_order logp = np.array([self._log_probability_node(data, ordering, node) for node in ordering]) return np.sum(logp, axis=0)
def log_probability(self, data, ordering=None): '\n        Evaluate the logarithmic probability of each point in a data set.\n\n        Parameters\n        ----------\n        data: pandas dataframe OR array_like, shape (n_samples, n_features)\n            List of n_features-dimensional data points. Each row\n            corresponds to a single data point.\n\n        ordering: list\n            ordering of columns in data, used by the Bayesian model.\n            default is topological ordering used by model.\n\n        Returns\n        -------\n        ndarray: having shape (n_samples,)\n            The array of log(density) evaluations. These are normalized to be\n            probability densities, so values will be low for high-dimensional\n            data.\n        ' if isinstance(data, pd.DataFrame): ordering = data.columns.to_list() data = data.values if (ordering is None): ordering = self.topological_order logp = np.array([self._log_probability_node(data, ordering, node) for node in ordering]) return np.sum(logp, axis=0)<|docstring|>Evaluate the logarithmic probability of each point in a data set.

Parameters
----------
data: pandas dataframe OR array_like, shape (n_samples, n_features)
    List of n_features-dimensional data points. Each row
    corresponds to a single data point.

ordering: list
    ordering of columns in data, used by the Bayesian model.
    default is topological ordering used by model.

Returns
-------
ndarray: having shape (n_samples,)
    The array of log(density) evaluations. These are normalized to be
    probability densities, so values will be low for high-dimensional
    data.<|endoftext|>
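Editor's note: a hedged usage sketch for the two scoring methods above. The class name and constructor are assumed from the file context (pgmpy/metrics/bn_inference.py); older pgmpy releases may call the model class `BayesianModel` instead. The network, column names, and data are made up for illustration:

```python
import numpy as np
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.metrics.bn_inference import BayesianModelProbability  # assumed import path

# Tiny A -> B network fitted on synthetic binary data.
rng = np.random.default_rng(0)
a = rng.integers(0, 2, size=500)
b = a ^ rng.integers(0, 2, size=500)  # noisy copy of A
df = pd.DataFrame({"A": a, "B": b})

model = BayesianNetwork([("A", "B")])
model.fit(df)  # maximum-likelihood CPDs by default

scorer = BayesianModelProbability(model)
logp = scorer.log_probability(df)   # per-row log-densities, shape (500,)
print(logp[:3], scorer.score(df))   # score() (below) sums the per-row values
```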
0d86eab3838b8032a775f92bbab79831db72bb5e838a39a3aaaddbfe88b13d0c
def score(self, data, ordering=None): '\n Compute the total log probability density under the model.\n\n Parameters\n ----------\n data: pandas dataframe OR array_like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n ordering: list\n ordering of columns in data, used by the Bayesian model.\n default is topological ordering used by model.\n\n Returns\n -------\n float: total log-likelihood of the data in data.\n This is normalized to be a probability density, so the value\n will be low for high-dimensional data.\n ' return np.sum(self.log_probability(data, ordering))
Compute the total log probability density under the model. Parameters ---------- data: pandas dataframe OR array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. ordering: list ordering of columns in data, used by the Bayesian model. default is topological ordering used by model. Returns ------- float: total log-likelihood of the data in data. This is normalized to be a probability density, so the value will be low for high-dimensional data.
pgmpy/metrics/bn_inference.py
score
vbob/pgmpy
2,144
python
def score(self, data, ordering=None): '\n Compute the total log probability density under the model.\n\n Parameters\n ----------\n data: pandas dataframe OR array_like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n ordering: list\n ordering of columns in data, used by the Bayesian model.\n default is topological ordering used by model.\n\n Returns\n -------\n float: total log-likelihood of the data in data.\n This is normalized to be a probability density, so the value\n will be low for high-dimensional data.\n ' return np.sum(self.log_probability(data, ordering))
def score(self, data, ordering=None): '\n Compute the total log probability density under the model.\n\n Parameters\n ----------\n data: pandas dataframe OR array_like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n ordering: list\n ordering of columns in data, used by the Bayesian model.\n default is topological ordering used by model.\n\n Returns\n -------\n float: total log-likelihood of the data in data.\n This is normalized to be a probability density, so the value\n will be low for high-dimensional data.\n ' return np.sum(self.log_probability(data, ordering))<|docstring|>Compute the total log probability density under the model. Parameters ---------- data: pandas dataframe OR array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. ordering: list ordering of columns in data, used by the Bayesian model. default is topological ordering used by model. Returns ------- float: total log-likelihood of the data in data. This is normalized to be a probability density, so the value will be low for high-dimensional data.<|endoftext|>
5a6febc13f6602e93dcea733c3c3c604ca34d39741188154293010d192e956a1
def register_model(name: str, rest: str, body: ProtoModel, response: ProtoModel) -> None: '\n Registers a new REST model.\n\n Parameters\n ----------\n name : str\n A regular expression describing the rest endpoint.\n rest : str\n The REST endpoint type.\n body : ProtoModel\n The REST query body model.\n response : ProtoModel\n The REST query response model.\n\n ' rest = rest.upper() if ((name in __rest_models) and (rest in __rest_models[name])): raise KeyError(f'Model name {name} already registered.') if (name not in __rest_models): __rest_models[name] = {} __rest_models[name][rest] = (body, response)
Registers a new REST model. Parameters ---------- name : str A regular expression describing the rest endpoint. rest : str The REST endpoint type. body : ProtoModel The REST query body model. response : ProtoModel The REST query response model.
qcfractal/interface/models/rest_models.py
register_model
Lnaden/QCFractal
113
python
def register_model(name: str, rest: str, body: ProtoModel, response: ProtoModel) -> None: '\n Registers a new REST model.\n\n Parameters\n ----------\n name : str\n A regular expression describing the rest endpoint.\n rest : str\n The REST endpoint type.\n body : ProtoModel\n The REST query body model.\n response : ProtoModel\n The REST query response model.\n\n ' rest = rest.upper() if ((name in __rest_models) and (rest in __rest_models[name])): raise KeyError(f'Model name {name} already registered.') if (name not in __rest_models): __rest_models[name] = {} __rest_models[name][rest] = (body, response)
def register_model(name: str, rest: str, body: ProtoModel, response: ProtoModel) -> None: '\n Registers a new REST model.\n\n Parameters\n ----------\n name : str\n A regular expression describing the rest endpoint.\n rest : str\n The REST endpoint type.\n body : ProtoModel\n The REST query body model.\n response : ProtoModel\n The REST query response model.\n\n ' rest = rest.upper() if ((name in __rest_models) and (rest in __rest_models[name])): raise KeyError(f'Model name {name} already registered.') if (name not in __rest_models): __rest_models[name] = {} __rest_models[name][rest] = (body, response)<|docstring|>Registers a new REST model. Parameters ---------- name : str A regular expression describing the rest endpoint. rest : str The REST endpoint type. body : ProtoModel The REST query body model. response : ProtoModel The REST query response model.<|endoftext|>
804466eafd16677e1eda478376e4e61e4b18f5f46d7c80e46933c8450e23677b
@functools.lru_cache(1000, typed=True) def rest_model(resource: str, rest: str) -> Tuple[(ProtoModel, ProtoModel)]: '\n Acquires a REST Model.\n\n Parameters\n ----------\n resource : str\n The REST endpoint resource name.\n rest : str\n The REST endpoint type: GET, POST, PUT, DELETE\n\n Returns\n -------\n Tuple[ProtoModel, ProtoModel]\n The (body, response) models of the REST request.\n\n ' rest = rest.upper() matches = [] for model_re in __rest_models.keys(): if re.fullmatch(model_re, resource): try: matches.append(__rest_models[model_re][rest]) except KeyError: pass if (len(matches) == 0): raise KeyError(f'REST Model for endpoint {resource} could not be found.') if (len(matches) > 1): warnings.warn(f'Multiple REST models were matched for {rest} request at endpoint {resource}. The following models will be used: {matches[0][0]}, {matches[0][1]}.', RuntimeWarning) return matches[0]
Acquires a REST Model. Parameters ---------- resource : str The REST endpoint resource name. rest : str The REST endpoint type: GET, POST, PUT, DELETE Returns ------- Tuple[ProtoModel, ProtoModel] The (body, response) models of the REST request.
qcfractal/interface/models/rest_models.py
rest_model
Lnaden/QCFractal
113
python
@functools.lru_cache(1000, typed=True) def rest_model(resource: str, rest: str) -> Tuple[(ProtoModel, ProtoModel)]: '\n Acquires a REST Model.\n\n Parameters\n ----------\n resource : str\n The REST endpoint resource name.\n rest : str\n The REST endpoint type: GET, POST, PUT, DELETE\n\n Returns\n -------\n Tuple[ProtoModel, ProtoModel]\n The (body, response) models of the REST request.\n\n ' rest = rest.upper() matches = [] for model_re in __rest_models.keys(): if re.fullmatch(model_re, resource): try: matches.append(__rest_models[model_re][rest]) except KeyError: pass if (len(matches) == 0): raise KeyError(f'REST Model for endpoint {resource} could not be found.') if (len(matches) > 1): warnings.warn(f'Multiple REST models were matched for {rest} request at endpoint {resource}. The following models will be used: {matches[0][0]}, {matches[0][1]}.', RuntimeWarning) return matches[0]
@functools.lru_cache(1000, typed=True) def rest_model(resource: str, rest: str) -> Tuple[(ProtoModel, ProtoModel)]: '\n Acquires a REST Model.\n\n Parameters\n ----------\n resource : str\n The REST endpoint resource name.\n rest : str\n The REST endpoint type: GET, POST, PUT, DELETE\n\n Returns\n -------\n Tuple[ProtoModel, ProtoModel]\n The (body, response) models of the REST request.\n\n ' rest = rest.upper() matches = [] for model_re in __rest_models.keys(): if re.fullmatch(model_re, resource): try: matches.append(__rest_models[model_re][rest]) except KeyError: pass if (len(matches) == 0): raise KeyError(f'REST Model for endpoint {resource} could not be found.') if (len(matches) > 1): warnings.warn(f'Multiple REST models were matched for {rest} request at endpoint {resource}. The following models will be used: {matches[0][0]}, {matches[0][1]}.', RuntimeWarning) return matches[0]<|docstring|>Acquires a REST Model. Parameters ---------- resource : str The REST endpoint resource name. rest : str The REST endpoint type: GET, POST, PUT, DELETE Returns ------- Tuple[ProtoModel, ProtoModel] The (body, response) models of the REST request.<|endoftext|>
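Editor's note: to illustrate how `register_model` and `rest_model` fit together, here is a hypothetical round trip. `DemoBody` and `DemoResponse` are stand-ins for illustration, not real QCFractal models; real code registers `ProtoModel` subclasses:

```python
from pydantic import BaseModel  # ProtoModel is pydantic-based; a BaseModel stands in here

class DemoBody(BaseModel):
    name: str

class DemoResponse(BaseModel):
    ok: bool

# `name` is treated as a regular expression matched with re.fullmatch.
register_model("demo_endpoint", "get", DemoBody, DemoResponse)

body_model, response_model = rest_model("demo_endpoint", "GET")
assert body_model is DemoBody and response_model is DemoResponse
```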
f1bbea4d1a5e7fb674d025bf1b56225a11719b3c3e4d3676daf48dd97ca0a6b5
def merge(self, other: 'ComputeResponse') -> 'ComputeResponse': 'Merges two ComputeResponse objects together. The first takes precedence and order is maintained.\n\n Parameters\n ----------\n other : ComputeResponse\n The compute response to merge\n\n Returns\n -------\n ComputeResponse\n The merged compute response\n ' return ComputeResponse(ids=(self.ids + other.ids), submitted=(self.submitted + other.submitted), existing=(self.existing + other.existing))
Merges two ComputeResponse objects together. The first takes precedence and order is maintained. Parameters ---------- other : ComputeResponse The compute response to merge Returns ------- ComputeResponse The merged compute response
qcfractal/interface/models/rest_models.py
merge
Lnaden/QCFractal
113
python
def merge(self, other: 'ComputeResponse') -> 'ComputeResponse': 'Merges two ComputeResponse objects together. The first takes precedence and order is maintained.\n\n Parameters\n ----------\n other : ComputeResponse\n The compute response to merge\n\n Returns\n -------\n ComputeResponse\n The merged compute response\n ' return ComputeResponse(ids=(self.ids + other.ids), submitted=(self.submitted + other.submitted), existing=(self.existing + other.existing))
def merge(self, other: 'ComputeResponse') -> 'ComputeResponse': 'Merges two ComputeResponse objects together. The first takes precedence and order is maintained.\n\n Parameters\n ----------\n other : ComputeResponse\n The compute response to merge\n\n Returns\n -------\n ComputeResponse\n The merged compute response\n ' return ComputeResponse(ids=(self.ids + other.ids), submitted=(self.submitted + other.submitted), existing=(self.existing + other.existing))<|docstring|>Merges two ComputeResponse objects together. The first takes precedence and order is maintained. Parameters ---------- other : ComputeResponse The compute response to merge Returns ------- ComputeResponse The merged compute response<|endoftext|>
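Editor's note: a small illustration of the `merge` semantics (field values are made up; in practice these are lists of object ids):

```python
a = ComputeResponse(ids=["1", "2"], submitted=["1"], existing=["2"])
b = ComputeResponse(ids=["3"], submitted=["3"], existing=[])

merged = a.merge(b)
# The left operand's entries come first and order is preserved:
# merged.ids       == ["1", "2", "3"]
# merged.submitted == ["1", "3"]
# merged.existing  == ["2"]
```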
e5b854d099be30d6661acdf9f7cfdc96c19ddc6add87b6e65b115e8c1d0e0eed
@shared_task def get_wpt_audit_configurations(): 'Gets all the available audit locations and browsers from the WebPageTest (WPT) API.' response = requests.get('https://www.webpagetest.org/getLocations.php?f=json&k=A') if (response.status_code != 200): logging.error('Invalid response from WebPageTest API: non-200 response code') return try: data = response.json()['data'] except KeyError: logging.error("Invalid response from WebPageTest API: 'data' key is not present") return for available_audit_parameter in AvailableAuditParameters.objects.all(): available_audit_parameter.is_active = False available_audit_parameter.save() for (location, location_data) in data.items(): browsers = location_data['Browsers'].split(',') group = location_data['group'] label = location_data['labelShort'] for browser in browsers: (configuration, created) = AvailableAuditParameters.objects.update_or_create(browser=browser, location=location, defaults={'location_label': label, 'location_group': group, 'is_active': True})
Gets all the available audit locations and browsers from the WebPageTest (WPT) API.
backend/audits/tasks.py
get_wpt_audit_configurations
donroyco/falco
1
python
@shared_task def get_wpt_audit_configurations(): response = requests.get('https://www.webpagetest.org/getLocations.php?f=json&k=A') if (response.status_code != 200): logging.error('Invalid response from WebPageTest API: non-200 response code') return try: data = response.json()['data'] except KeyError: logging.error("Invalid response from WebPageTest API: 'data' key is not present") return for available_audit_parameter in AvailableAuditParameters.objects.all(): available_audit_parameter.is_active = False available_audit_parameter.save() for (location, location_data) in data.items(): browsers = location_data['Browsers'].split(',') group = location_data['group'] label = location_data['labelShort'] for browser in browsers: (configuration, created) = AvailableAuditParameters.objects.update_or_create(browser=browser, location=location, defaults={'location_label': label, 'location_group': group, 'is_active': True})
@shared_task def get_wpt_audit_configurations(): response = requests.get('https://www.webpagetest.org/getLocations.php?f=json&k=A') if (response.status_code != 200): logging.error('Invalid response from WebPageTest API: non-200 response code') return try: data = response.json()['data'] except KeyError: logging.error("Invalid response from WebPageTest API: 'data' key is not present") return for available_audit_parameter in AvailableAuditParameters.objects.all(): available_audit_parameter.is_active = False available_audit_parameter.save() for (location, location_data) in data.items(): browsers = location_data['Browsers'].split(',') group = location_data['group'] label = location_data['labelShort'] for browser in browsers: (configuration, created) = AvailableAuditParameters.objects.update_or_create(browser=browser, location=location, defaults={'location_label': label, 'location_group': group, 'is_active': True})<|docstring|>Gets all the available audit locations and browsers from the WebPageTest (WPT) API.<|endoftext|>
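Editor's note: for reference, the shape of the `response.json()['data']` payload the task iterates over looks roughly like this (keys match the code; the values are illustrative):

```python
# One entry of the WPT getLocations payload, keyed by location id:
data = {
    "Dulles:Chrome": {
        "Browsers": "Chrome,Firefox",   # comma-separated browser list
        "group": "North America",       # location group
        "labelShort": "Dulles, VA",     # short display label
    },
}

for location, location_data in data.items():
    browsers = location_data["Browsers"].split(",")
    print(location, browsers)
```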
abca46c46a5bc542494a81a93265f5dead49df690aceaf76c125bd126968d401
def testGetData(self): '\n        Tests getData.\n        :return:\n        ' self.listTemplate1.set_background_image('http://back-img.com') listTemplateItem1 = ListTemplateItem() listTemplateItem1.set_image('http://item-img1.com', '123', '345') listTemplateItem1.set_plain_primary_text('Plain Primary Text') listTemplateItem1.set_plain_secondary_text('Plain Secondary Text') listTemplateItem1.set_tertiary_text('Plain Tertiary Text') listTemplateItem1.data['token'] = 'token' listTemplateItem2 = ListTemplateItem() listTemplateItem2.set_image('http://item-img2.com', '12', '45') listTemplateItem2.set_plain_primary_text('Plain Primary Text') listTemplateItem2.set_plain_secondary_text('Plain Secondary Text') listTemplateItem2.set_tertiary_text('Plain Tertiary Text') listTemplateItem2.data['token'] = 'token' self.listTemplate1.add_item(listTemplateItem1) self.listTemplate1.add_item(listTemplateItem2) data = self.listTemplate1.get_data() data['token'] = 'token' ret = {'type': 'ListTemplate1', 'token': 'token', 'backgroundImage': {'url': 'http://back-img.com'}, 'listItems': [{'token': 'token', 'image': {'url': 'http://item-img1.com', 'widthPixels': '123', 'heightPixels': '345'}, 'textContent': {'primaryText': {'type': 'PlainText', 'text': 'Plain Primary Text'}, 'secondaryText': {'type': 'PlainText', 'text': 'Plain Secondary Text'}, 'tertiaryText': {'type': 'PlainText', 'text': 'Plain Tertiary Text'}}}, {'token': 'token', 'image': {'url': 'http://item-img2.com', 'widthPixels': '12', 'heightPixels': '45'}, 'textContent': {'primaryText': {'type': 'PlainText', 'text': 'Plain Primary Text'}, 'secondaryText': {'type': 'PlainText', 'text': 'Plain Secondary Text'}, 'tertiaryText': {'type': 'PlainText', 'text': 'Plain Tertiary Text'}}}]} self.assertEqual(self.listTemplate1.get_data(), ret)
Tests getData. :return:
dueros/tests/directive/display/template/ListTemplate1Test.py
testGetData
ayxue/BaiduSaxoOpenAPI
0
python
def testGetData(self): '\n        Tests getData.\n        :return:\n        ' self.listTemplate1.set_background_image('http://back-img.com') listTemplateItem1 = ListTemplateItem() listTemplateItem1.set_image('http://item-img1.com', '123', '345') listTemplateItem1.set_plain_primary_text('Plain Primary Text') listTemplateItem1.set_plain_secondary_text('Plain Secondary Text') listTemplateItem1.set_tertiary_text('Plain Tertiary Text') listTemplateItem1.data['token'] = 'token' listTemplateItem2 = ListTemplateItem() listTemplateItem2.set_image('http://item-img2.com', '12', '45') listTemplateItem2.set_plain_primary_text('Plain Primary Text') listTemplateItem2.set_plain_secondary_text('Plain Secondary Text') listTemplateItem2.set_tertiary_text('Plain Tertiary Text') listTemplateItem2.data['token'] = 'token' self.listTemplate1.add_item(listTemplateItem1) self.listTemplate1.add_item(listTemplateItem2) data = self.listTemplate1.get_data() data['token'] = 'token' ret = {'type': 'ListTemplate1', 'token': 'token', 'backgroundImage': {'url': 'http://back-img.com'}, 'listItems': [{'token': 'token', 'image': {'url': 'http://item-img1.com', 'widthPixels': '123', 'heightPixels': '345'}, 'textContent': {'primaryText': {'type': 'PlainText', 'text': 'Plain Primary Text'}, 'secondaryText': {'type': 'PlainText', 'text': 'Plain Secondary Text'}, 'tertiaryText': {'type': 'PlainText', 'text': 'Plain Tertiary Text'}}}, {'token': 'token', 'image': {'url': 'http://item-img2.com', 'widthPixels': '12', 'heightPixels': '45'}, 'textContent': {'primaryText': {'type': 'PlainText', 'text': 'Plain Primary Text'}, 'secondaryText': {'type': 'PlainText', 'text': 'Plain Secondary Text'}, 'tertiaryText': {'type': 'PlainText', 'text': 'Plain Tertiary Text'}}}]} self.assertEqual(self.listTemplate1.get_data(), ret)
def testGetData(self): '\n        Tests getData.\n        :return:\n        ' self.listTemplate1.set_background_image('http://back-img.com') listTemplateItem1 = ListTemplateItem() listTemplateItem1.set_image('http://item-img1.com', '123', '345') listTemplateItem1.set_plain_primary_text('Plain Primary Text') listTemplateItem1.set_plain_secondary_text('Plain Secondary Text') listTemplateItem1.set_tertiary_text('Plain Tertiary Text') listTemplateItem1.data['token'] = 'token' listTemplateItem2 = ListTemplateItem() listTemplateItem2.set_image('http://item-img2.com', '12', '45') listTemplateItem2.set_plain_primary_text('Plain Primary Text') listTemplateItem2.set_plain_secondary_text('Plain Secondary Text') listTemplateItem2.set_tertiary_text('Plain Tertiary Text') listTemplateItem2.data['token'] = 'token' self.listTemplate1.add_item(listTemplateItem1) self.listTemplate1.add_item(listTemplateItem2) data = self.listTemplate1.get_data() data['token'] = 'token' ret = {'type': 'ListTemplate1', 'token': 'token', 'backgroundImage': {'url': 'http://back-img.com'}, 'listItems': [{'token': 'token', 'image': {'url': 'http://item-img1.com', 'widthPixels': '123', 'heightPixels': '345'}, 'textContent': {'primaryText': {'type': 'PlainText', 'text': 'Plain Primary Text'}, 'secondaryText': {'type': 'PlainText', 'text': 'Plain Secondary Text'}, 'tertiaryText': {'type': 'PlainText', 'text': 'Plain Tertiary Text'}}}, {'token': 'token', 'image': {'url': 'http://item-img2.com', 'widthPixels': '12', 'heightPixels': '45'}, 'textContent': {'primaryText': {'type': 'PlainText', 'text': 'Plain Primary Text'}, 'secondaryText': {'type': 'PlainText', 'text': 'Plain Secondary Text'}, 'tertiaryText': {'type': 'PlainText', 'text': 'Plain Tertiary Text'}}}]} self.assertEqual(self.listTemplate1.get_data(), ret)<|docstring|>Tests getData. :return:<|endoftext|>
1b3783de347bb9620deb04f90817aef81d0a5914ec74b30ae46ae3f108abf81b
def es_home(): '\n    :return: Returns URLs for ElasticSearch services\n    ' path = "URLs for ElasticSearch services:\n '/start/': 'elasticsearch_start'\n '/stop/': 'elasticsearch_stop'\n '/restart/': 'elasticsearch_restart'" return path
:return: Returns URLs for ElasticSearch services
agent/src/ElasticSearch/run_es_services.py
es_home
ekbanasolutions/aditas
2
python
def es_home(): '\n    \n    ' path = "URLs for ElasticSearch services:\n '/start/': 'elasticsearch_start'\n '/stop/': 'elasticsearch_stop'\n '/restart/': 'elasticsearch_restart'" return path
def es_home(): '\n    \n    ' path = "URLs for ElasticSearch services:\n '/start/': 'elasticsearch_start'\n '/stop/': 'elasticsearch_stop'\n '/restart/': 'elasticsearch_restart'" return path<|docstring|>:return: Returns URLs for ElasticSearch services<|endoftext|>
a2992f742e5fc0f5b64a7f91c2612b1870a38b2e5e053d2688b2b6cd22efcb56
def elasticsearch_start(): '\n    Starts Elasticsearch.\n    :return: Returns success if the es_start_cmd command executes successfully, otherwise an error message\n    ' log.info('\nStarting elasticsearch\n') header_key = request.headers.get('API-KEY') api_status = check_apiKey(header_key) if (api_status == 'success'): loaded_json = json.loads(request.data.decode()) cluster_id = loaded_json['cluster_id'] return es_start(cluster_id) else: return api_status
Starts Elasticsearch. :return: Returns success if the es_start_cmd command executes successfully, otherwise an error message
agent/src/ElasticSearch/run_es_services.py
elasticsearch_start
ekbanasolutions/aditas
2
python
def elasticsearch_start(): '\n    Starts Elasticsearch.\n    :return: Returns success if the es_start_cmd command executes successfully, otherwise an error message\n    ' log.info('\nStarting elasticsearch\n') header_key = request.headers.get('API-KEY') api_status = check_apiKey(header_key) if (api_status == 'success'): loaded_json = json.loads(request.data.decode()) cluster_id = loaded_json['cluster_id'] return es_start(cluster_id) else: return api_status
def elasticsearch_start(): '\n    Starts Elasticsearch.\n    :return: Returns success if the es_start_cmd command executes successfully, otherwise an error message\n    ' log.info('\nStarting elasticsearch\n') header_key = request.headers.get('API-KEY') api_status = check_apiKey(header_key) if (api_status == 'success'): loaded_json = json.loads(request.data.decode()) cluster_id = loaded_json['cluster_id'] return es_start(cluster_id) else: return api_status<|docstring|>Starts Elasticsearch. :return: Returns success if the es_start_cmd command executes successfully, otherwise an error message<|endoftext|>
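Editor's note: a hedged client-side sketch of calling these handlers. Host, port, and the key value are placeholders; the '/start/' route comes from the es_home listing above, and the same pattern applies to the '/stop/' and '/restart/' handlers below:

```python
import requests

resp = requests.post(
    "http://agent-host:5000/start/",        # placeholder host/port
    headers={"API-KEY": "<your-api-key>"},  # validated by check_apiKey
    json={"cluster_id": 1},                 # parsed via json.loads(request.data)
)
print(resp.text)  # e.g. '{"success": 1, ...}'
```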
dfa68d1df3cb3e23f745df4c4b9c3183b2df68863645ffcc662e7d022708182c
def elasticsearch_stop(): '\n    Stops Elasticsearch.\n    :return: Returns success if the es_stop_cmd command executes successfully, otherwise an error message\n    ' log.info('\nStopping elasticsearch\n') header_key = request.headers.get('API-KEY') api_status = check_apiKey(header_key) if (api_status == 'success'): loaded_json = json.loads(request.data.decode()) cluster_id = loaded_json['cluster_id'] return es_stop(cluster_id) else: return api_status
Stops Elasticsearch. :return: Returns success if the es_stop_cmd command executes successfully, otherwise an error message
agent/src/ElasticSearch/run_es_services.py
elasticsearch_stop
ekbanasolutions/aditas
2
python
def elasticsearch_stop(): '\n    Stops Elasticsearch.\n    :return: Returns success if the es_stop_cmd command executes successfully, otherwise an error message\n    ' log.info('\nStopping elasticsearch\n') header_key = request.headers.get('API-KEY') api_status = check_apiKey(header_key) if (api_status == 'success'): loaded_json = json.loads(request.data.decode()) cluster_id = loaded_json['cluster_id'] return es_stop(cluster_id) else: return api_status
def elasticsearch_stop(): '\n    Stops Elasticsearch.\n    :return: Returns success if the es_stop_cmd command executes successfully, otherwise an error message\n    ' log.info('\nStopping elasticsearch\n') header_key = request.headers.get('API-KEY') api_status = check_apiKey(header_key) if (api_status == 'success'): loaded_json = json.loads(request.data.decode()) cluster_id = loaded_json['cluster_id'] return es_stop(cluster_id) else: return api_status<|docstring|>Stops Elasticsearch. :return: Returns success if the es_stop_cmd command executes successfully, otherwise an error message<|endoftext|>
3af917aa136f3a723d3922068d526e61121d66b59afe2077f42689eaa5428f26
def elasticsearch_restart(): '\n    Restarts Elasticsearch.\n    ' log.info('\nRestarting ES\n') header_key = request.headers.get('API-KEY') api_status = check_apiKey(header_key) if (api_status == 'success'): loaded_json = json.loads(request.data.decode()) cluster_id = loaded_json['cluster_id'] stop = json.loads(es_stop(cluster_id)) if stop['success']: start = json.loads(es_start(cluster_id)) if start['success']: return '{"success": 1, "msg": ["Successfully Restarted Elasticsearch"]}' else: return '{"success": 0, "msg": ["Error Restarting Elasticsearch!!!"]}' else: return '{"success": 0, "msg": ["Error Restarting Elasticsearch!!!"]}' else: return api_status
Restarts Elasticsearch.
agent/src/ElasticSearch/run_es_services.py
elasticsearch_restart
ekbanasolutions/aditas
2
python
def elasticsearch_restart(): '\n \n ' log.info('\nRestarting ES\n') header_key = request.headers.get('API-KEY') api_status = check_apiKey(header_key) if (api_status == 'success'): loaded_json = json.loads(request.data.decode()) cluster_id = loaded_json['cluster_id'] stop = json.loads(es_stop(cluster_id)) if stop['success']: start = json.loads(es_start(cluster_id)) if start['success']: return '{"success": 1, "msg": ["Successfully Restarted Elasticsearch"]}' else: return '{"success": 0, "msg": ["Error Restarting Elasticsearch!!!"]}' else: return '{"success": 0, "msg": ["Error Restarting Elasticsearch!!!"]}' else: return api_status
def elasticsearch_restart(): '\n    \n    ' log.info('\nRestarting ES\n') header_key = request.headers.get('API-KEY') api_status = check_apiKey(header_key) if (api_status == 'success'): loaded_json = json.loads(request.data.decode()) cluster_id = loaded_json['cluster_id'] stop = json.loads(es_stop(cluster_id)) if stop['success']: start = json.loads(es_start(cluster_id)) if start['success']: return '{"success": 1, "msg": ["Successfully Restarted Elasticsearch"]}' else: return '{"success": 0, "msg": ["Error Restarting Elasticsearch!!!"]}' else: return '{"success": 0, "msg": ["Error Restarting Elasticsearch!!!"]}' else: return api_status<|docstring|>Restarts Elasticsearch.<|endoftext|>
d1a014a6b61cbeabf7c2cb3b3ee36a6214dd7662999943dcb4b335c81fb503e2
def count_lf_r10(z, loglir, area=0, zrange=[0.0, 0.0]): 'Count how many objects are above loglir.\n    area and zrange must be set simultaneously.\n    ' if (area > 0): skyfrac = (float(area) / 41253.0) k = (0.0089 * ((1 + z) ** 1.1)) sigma = 0.7 lstar = (1770000000.0 * ((1 + z) ** 2.7)) lf = np.zeros((len(z), len(loglir)), dtype='float32') for i in np.arange(len(z)): lf[i, :] = ((k[i] * (((10 ** loglir) / lstar[i]) ** (1 - 1.2))) * np.exp((((- 1) / (2 * (sigma ** 2))) * (np.log10((1 + ((10 ** loglir) / lstar[i]))) ** 2)))) return lf
Count how many objects are above loglir. area and zrange must be set simultaneously.
ctc_lf.py
count_lf_r10
CTJChen/ctc_astropylib
0
python
def count_lf_r10(z, loglir, area=0, zrange=[0.0, 0.0]): 'Count how many objects are above loglir.\n    area and zrange must be set simultaneously.\n    ' if (area > 0): skyfrac = (float(area) / 41253.0) k = (0.0089 * ((1 + z) ** 1.1)) sigma = 0.7 lstar = (1770000000.0 * ((1 + z) ** 2.7)) lf = np.zeros((len(z), len(loglir)), dtype='float32') for i in np.arange(len(z)): lf[i, :] = ((k[i] * (((10 ** loglir) / lstar[i]) ** (1 - 1.2))) * np.exp((((- 1) / (2 * (sigma ** 2))) * (np.log10((1 + ((10 ** loglir) / lstar[i]))) ** 2)))) return lf
def count_lf_r10(z, loglir, area=0, zrange=[0.0, 0.0]): 'Count how many objects are above loglir.\n    area and zrange must be set simultaneously.\n    ' if (area > 0): skyfrac = (float(area) / 41253.0) k = (0.0089 * ((1 + z) ** 1.1)) sigma = 0.7 lstar = (1770000000.0 * ((1 + z) ** 2.7)) lf = np.zeros((len(z), len(loglir)), dtype='float32') for i in np.arange(len(z)): lf[i, :] = ((k[i] * (((10 ** loglir) / lstar[i]) ** (1 - 1.2))) * np.exp((((- 1) / (2 * (sigma ** 2))) * (np.log10((1 + ((10 ** loglir) / lstar[i]))) ** 2)))) return lf<|docstring|>Count how many objects are above loglir. area and zrange must be set simultaneously.<|endoftext|>
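Editor's note: for clarity, the loop above evaluates a Saunders-style modified-Schechter infrared luminosity function. In the notation implied by the code (with the faint-end slope α = 1.2 and width σ = 0.7 fixed, and redshift-dependent normalisation and knee):

```latex
\Phi(L, z) = \Phi^{*}(z)\,
  \left(\frac{L}{L^{*}(z)}\right)^{1-\alpha}
  \exp\!\left[-\frac{1}{2\sigma^{2}}\,
  \log_{10}^{2}\!\left(1+\frac{L}{L^{*}(z)}\right)\right],
\qquad
\Phi^{*}(z) = 0.0089\,(1+z)^{1.1},
\quad
L^{*}(z) = 1.77\times10^{9}\,(1+z)^{2.7}.
```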
01d6f638d017063965dc3332676ff628b6db0b4c22c24fc93933ada4ae4d7250
def Start(self): 'Get information about the file from the client.' self.state.max_chunk_number = max(2, (self.args.read_length // self.CHUNK_SIZE)) self.state.current_chunk_number = 0 self.state.file_size = 0 self.state.blobs = [] self.state.stat_entry = None self.state.num_bytes_collected = 0 self.state.target_pathspec = self.args.pathspec.Copy() if ((not self.client_version) or (self.client_version >= 3221)): stub = server_stubs.GetFileStat request = rdf_client_action.GetFileStatRequest(pathspec=self.state.target_pathspec, follow_symlink=True) else: stub = server_stubs.StatFile request = rdf_client_action.ListDirRequest(pathspec=self.state.target_pathspec) self.CallClient(stub, request, next_state=compatibility.GetName(self.Stat))
Get information about the file from the client.
grr/server/grr_response_server/flows/general/transfer.py
Start
max-vogler/grr
4,238
python
def Start(self): self.state.max_chunk_number = max(2, (self.args.read_length // self.CHUNK_SIZE)) self.state.current_chunk_number = 0 self.state.file_size = 0 self.state.blobs = [] self.state.stat_entry = None self.state.num_bytes_collected = 0 self.state.target_pathspec = self.args.pathspec.Copy() if ((not self.client_version) or (self.client_version >= 3221)): stub = server_stubs.GetFileStat request = rdf_client_action.GetFileStatRequest(pathspec=self.state.target_pathspec, follow_symlink=True) else: stub = server_stubs.StatFile request = rdf_client_action.ListDirRequest(pathspec=self.state.target_pathspec) self.CallClient(stub, request, next_state=compatibility.GetName(self.Stat))
def Start(self): self.state.max_chunk_number = max(2, (self.args.read_length // self.CHUNK_SIZE)) self.state.current_chunk_number = 0 self.state.file_size = 0 self.state.blobs = [] self.state.stat_entry = None self.state.num_bytes_collected = 0 self.state.target_pathspec = self.args.pathspec.Copy() if ((not self.client_version) or (self.client_version >= 3221)): stub = server_stubs.GetFileStat request = rdf_client_action.GetFileStatRequest(pathspec=self.state.target_pathspec, follow_symlink=True) else: stub = server_stubs.StatFile request = rdf_client_action.ListDirRequest(pathspec=self.state.target_pathspec) self.CallClient(stub, request, next_state=compatibility.GetName(self.Stat))<|docstring|>Get information about the file from the client.<|endoftext|>
4fa52a20d0acf625655a1a5c07a84778299a15215b579aa469ffa132e21085fa
def Stat(self, responses): 'Fix up the pathspec of the file.' response = responses.First() file_size_known = True if (responses.success and response): if stat.S_ISDIR(int(response.st_mode)): raise ValueError('`GetFile` called on a directory') if ((not stat.S_ISREG(int(response.st_mode))) and (response.st_size == 0)): file_size_known = False self.state.stat_entry = response else: if (not self.args.ignore_stat_failure): raise IOError(('Error: %s' % responses.status)) self.state.stat_entry = rdf_client_fs.StatEntry(pathspec=self.state.target_pathspec) file_size_known = False if (not file_size_known): if ((not self.state.target_pathspec.HasField('file_size_override')) and (not self.args.read_length)): raise ValueError("The file couldn't be stat-ed. Either read_length or pathspec.file_size_override has to be provided.") if (self.args.read_length == 0): self.state.stat_entry.st_size = self.state.target_pathspec.file_size_override else: self.state.stat_entry.st_size = (self.state.target_pathspec.offset + self.args.read_length) if (self.args.read_length == 0): self.state.file_size = max(0, (self.state.stat_entry.st_size - self.state.stat_entry.pathspec.offset)) else: self.state.file_size = self.args.read_length if (not self.state.target_pathspec.HasField('file_size_override')): self.state.target_pathspec.file_size_override = (self.state.target_pathspec.offset + self.args.read_length) self.state.max_chunk_number = ((self.state.file_size // self.CHUNK_SIZE) + 1) self.FetchWindow(min(self.WINDOW_SIZE, (self.state.max_chunk_number - self.state['current_chunk_number'])))
Fix up the pathspec of the file.
grr/server/grr_response_server/flows/general/transfer.py
Stat
max-vogler/grr
4,238
python
def Stat(self, responses): response = responses.First() file_size_known = True if (responses.success and response): if stat.S_ISDIR(int(response.st_mode)): raise ValueError('`GetFile` called on a directory') if ((not stat.S_ISREG(int(response.st_mode))) and (response.st_size == 0)): file_size_known = False self.state.stat_entry = response else: if (not self.args.ignore_stat_failure): raise IOError(('Error: %s' % responses.status)) self.state.stat_entry = rdf_client_fs.StatEntry(pathspec=self.state.target_pathspec) file_size_known = False if (not file_size_known): if ((not self.state.target_pathspec.HasField('file_size_override')) and (not self.args.read_length)): raise ValueError("The file couldn't be stat-ed. Either read_length or pathspec.file_size_override has to be provided.") if (self.args.read_length == 0): self.state.stat_entry.st_size = self.state.target_pathspec.file_size_override else: self.state.stat_entry.st_size = (self.state.target_pathspec.offset + self.args.read_length) if (self.args.read_length == 0): self.state.file_size = max(0, (self.state.stat_entry.st_size - self.state.stat_entry.pathspec.offset)) else: self.state.file_size = self.args.read_length if (not self.state.target_pathspec.HasField('file_size_override')): self.state.target_pathspec.file_size_override = (self.state.target_pathspec.offset + self.args.read_length) self.state.max_chunk_number = ((self.state.file_size // self.CHUNK_SIZE) + 1) self.FetchWindow(min(self.WINDOW_SIZE, (self.state.max_chunk_number - self.state['current_chunk_number'])))
def Stat(self, responses): response = responses.First() file_size_known = True if (responses.success and response): if stat.S_ISDIR(int(response.st_mode)): raise ValueError('`GetFile` called on a directory') if ((not stat.S_ISREG(int(response.st_mode))) and (response.st_size == 0)): file_size_known = False self.state.stat_entry = response else: if (not self.args.ignore_stat_failure): raise IOError(('Error: %s' % responses.status)) self.state.stat_entry = rdf_client_fs.StatEntry(pathspec=self.state.target_pathspec) file_size_known = False if (not file_size_known): if ((not self.state.target_pathspec.HasField('file_size_override')) and (not self.args.read_length)): raise ValueError("The file couldn't be stat-ed. Either read_length or pathspec.file_size_override has to be provided.") if (self.args.read_length == 0): self.state.stat_entry.st_size = self.state.target_pathspec.file_size_override else: self.state.stat_entry.st_size = (self.state.target_pathspec.offset + self.args.read_length) if (self.args.read_length == 0): self.state.file_size = max(0, (self.state.stat_entry.st_size - self.state.stat_entry.pathspec.offset)) else: self.state.file_size = self.args.read_length if (not self.state.target_pathspec.HasField('file_size_override')): self.state.target_pathspec.file_size_override = (self.state.target_pathspec.offset + self.args.read_length) self.state.max_chunk_number = ((self.state.file_size // self.CHUNK_SIZE) + 1) self.FetchWindow(min(self.WINDOW_SIZE, (self.state.max_chunk_number - self.state['current_chunk_number'])))<|docstring|>Fix up the pathspec of the file.<|endoftext|>
3fe70c30b2527830d0d02f16650ef030320cc341ab024c052b9bc3d768df6e4a
def FetchWindow(self, number_of_chunks_to_readahead): 'Read ahead a number of buffers to fill the window.' for _ in range(number_of_chunks_to_readahead): next_offset = (self.state.current_chunk_number * self.CHUNK_SIZE) if (next_offset >= self.state.file_size): return request = rdf_client.BufferReference(pathspec=self.state.target_pathspec, offset=next_offset, length=min((self.state.file_size - next_offset), self.CHUNK_SIZE)) self.CallClient(server_stubs.TransferBuffer, request, next_state=compatibility.GetName(self.ReadBuffer)) self.state.current_chunk_number += 1
Read ahead a number of buffers to fill the window.
grr/server/grr_response_server/flows/general/transfer.py
FetchWindow
max-vogler/grr
4,238
python
def FetchWindow(self, number_of_chunks_to_readahead): for _ in range(number_of_chunks_to_readahead): next_offset = (self.state.current_chunk_number * self.CHUNK_SIZE) if (next_offset >= self.state.file_size): return request = rdf_client.BufferReference(pathspec=self.state.target_pathspec, offset=next_offset, length=min((self.state.file_size - next_offset), self.CHUNK_SIZE)) self.CallClient(server_stubs.TransferBuffer, request, next_state=compatibility.GetName(self.ReadBuffer)) self.state.current_chunk_number += 1
def FetchWindow(self, number_of_chunks_to_readahead): for _ in range(number_of_chunks_to_readahead): next_offset = (self.state.current_chunk_number * self.CHUNK_SIZE) if (next_offset >= self.state.file_size): return request = rdf_client.BufferReference(pathspec=self.state.target_pathspec, offset=next_offset, length=min((self.state.file_size - next_offset), self.CHUNK_SIZE)) self.CallClient(server_stubs.TransferBuffer, request, next_state=compatibility.GetName(self.ReadBuffer)) self.state.current_chunk_number += 1<|docstring|>Read ahead a number of buffers to fill the window.<|endoftext|>
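Editor's note: a minimal, self-contained sketch of the offset/length arithmetic behind FetchWindow (CHUNK_SIZE here is an illustrative constant, not necessarily GRR's actual value):

```python
CHUNK_SIZE = 512 * 1024  # illustrative

def chunk_requests(file_size, current_chunk, window):
    """Yield (offset, length) for the next `window` read-ahead chunks."""
    for _ in range(window):
        offset = current_chunk * CHUNK_SIZE
        if offset >= file_size:
            return  # past end of file: stop scheduling
        yield offset, min(file_size - offset, CHUNK_SIZE)
        current_chunk += 1

print(list(chunk_requests(file_size=1_200_000, current_chunk=0, window=3)))
# [(0, 524288), (524288, 524288), (1048576, 151424)]
```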
21529f1abfbb13110f1273fe265762950e64ffa1128633fa83a9e08fdaf07942
def ReadBuffer(self, responses): 'Read the buffer and write to the file.' if (not responses.success): return response = responses.First() if (not response): raise IOError('TransferBuffer returned an empty response.') self.state.num_bytes_collected += response.length if (response.offset <= (self.state.max_chunk_number * self.CHUNK_SIZE)): self.state.blobs.append((response.data, response.length)) self.Log('Received blob hash %s', text.Hexify(response.data)) self.FetchWindow(1)
Read the buffer and write to the file.
grr/server/grr_response_server/flows/general/transfer.py
ReadBuffer
max-vogler/grr
4,238
python
def ReadBuffer(self, responses): if (not responses.success): return response = responses.First() if (not response): raise IOError('TransferBuffer returned an empty response.') self.state.num_bytes_collected += response.length if (response.offset <= (self.state.max_chunk_number * self.CHUNK_SIZE)): self.state.blobs.append((response.data, response.length)) self.Log('Received blob hash %s', text.Hexify(response.data)) self.FetchWindow(1)
def ReadBuffer(self, responses): if (not responses.success): return response = responses.First() if (not response): raise IOError('TransferBuffer returned an empty response.') self.state.num_bytes_collected += response.length if (response.offset <= (self.state.max_chunk_number * self.CHUNK_SIZE)): self.state.blobs.append((response.data, response.length)) self.Log('Received blob hash %s', text.Hexify(response.data)) self.FetchWindow(1)<|docstring|>Read the buffer and write to the file.<|endoftext|>
8832cb1896dd1fb7210bc64698caa56865932e14268acf8a97edabf6a6155c5a
def End(self, responses): 'Finalize reading the file.' if (self.state.num_bytes_collected >= 0): self._AddFileToFileStore() stat_entry = self.state.stat_entry if (self.state.num_bytes_collected >= self.state.file_size): self.Log('File %s transferred successfully.', stat_entry.AFF4Path(self.client_urn)) else: self.Log('File %s transferred partially (%d bytes out of %d).', stat_entry.AFF4Path(self.client_urn), self.state.num_bytes_collected, self.state.file_size) self.SendReply(stat_entry) else: self.Log('File transfer failed.') super().End(responses)
Finalize reading the file.
grr/server/grr_response_server/flows/general/transfer.py
End
max-vogler/grr
4,238
python
def End(self, responses): if (self.state.num_bytes_collected >= 0): self._AddFileToFileStore() stat_entry = self.state.stat_entry if (self.state.num_bytes_collected >= self.state.file_size): self.Log('File %s transferred successfully.', stat_entry.AFF4Path(self.client_urn)) else: self.Log('File %s transferred partially (%d bytes out of %d).', stat_entry.AFF4Path(self.client_urn), self.state.num_bytes_collected, self.state.file_size) self.SendReply(stat_entry) else: self.Log('File transfer failed.') super().End(responses)
def End(self, responses): if (self.state.num_bytes_collected >= 0): self._AddFileToFileStore() stat_entry = self.state.stat_entry if (self.state.num_bytes_collected >= self.state.file_size): self.Log('File %s transferred successfully.', stat_entry.AFF4Path(self.client_urn)) else: self.Log('File %s transferred partially (%d bytes out of %d).', stat_entry.AFF4Path(self.client_urn), self.state.num_bytes_collected, self.state.file_size) self.SendReply(stat_entry) else: self.Log('File transfer failed.') super().End(responses)<|docstring|>Finalize reading the file.<|endoftext|>
48ed430c306a3bfc844cea0536f55caa7757dd79509c7117642a9e1f94f7e442
def Start(self, file_size=0, maximum_pending_files=1000, use_external_stores=False): 'Initialize our state.' super().Start() self.state.files_hashed = 0 self.state.use_external_stores = use_external_stores self.state.file_size = file_size self.state.files_to_fetch = 0 self.state.files_fetched = 0 self.state.files_skipped = 0 self.state.files_failed = 0 self.state.files_hashed_since_check = 0 self.state.pending_hashes = {} self.state.pending_files = {} self.state.maximum_pending_files = maximum_pending_files self.state.indexed_pathspecs = [] self.state.request_data_list = [] self.state.next_pathspec_to_start = 0 self.state.blob_hashes_pending = 0
Initialize our state.
grr/server/grr_response_server/flows/general/transfer.py
Start
max-vogler/grr
4,238
python
def Start(self, file_size=0, maximum_pending_files=1000, use_external_stores=False): super().Start() self.state.files_hashed = 0 self.state.use_external_stores = use_external_stores self.state.file_size = file_size self.state.files_to_fetch = 0 self.state.files_fetched = 0 self.state.files_skipped = 0 self.state.files_failed = 0 self.state.files_hashed_since_check = 0 self.state.pending_hashes = {} self.state.pending_files = {} self.state.maximum_pending_files = maximum_pending_files self.state.indexed_pathspecs = [] self.state.request_data_list = [] self.state.next_pathspec_to_start = 0 self.state.blob_hashes_pending = 0
def Start(self, file_size=0, maximum_pending_files=1000, use_external_stores=False): super().Start() self.state.files_hashed = 0 self.state.use_external_stores = use_external_stores self.state.file_size = file_size self.state.files_to_fetch = 0 self.state.files_fetched = 0 self.state.files_skipped = 0 self.state.files_failed = 0 self.state.files_hashed_since_check = 0 self.state.pending_hashes = {} self.state.pending_files = {} self.state.maximum_pending_files = maximum_pending_files self.state.indexed_pathspecs = [] self.state.request_data_list = [] self.state.next_pathspec_to_start = 0 self.state.blob_hashes_pending = 0<|docstring|>Initialize our state.<|endoftext|>
cd4fba73e969650f1fc8c7d6639fd3eb6163e5fe28473ade1ea7565a49460917
def StartFileFetch(self, pathspec, request_data=None): 'The entry point for this flow mixin - Schedules new file transfer.' self.state.indexed_pathspecs.append(pathspec) self.state.request_data_list.append(request_data) self._TryToStartNextPathspec()
The entry point for this flow mixin - Schedules new file transfer.
grr/server/grr_response_server/flows/general/transfer.py
StartFileFetch
max-vogler/grr
4,238
python
def StartFileFetch(self, pathspec, request_data=None): self.state.indexed_pathspecs.append(pathspec) self.state.request_data_list.append(request_data) self._TryToStartNextPathspec()
def StartFileFetch(self, pathspec, request_data=None): self.state.indexed_pathspecs.append(pathspec) self.state.request_data_list.append(request_data) self._TryToStartNextPathspec()<|docstring|>The entry point for this flow mixin - Schedules new file transfer.<|endoftext|>
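Editor's note: a hypothetical flow showing how the mixin's entry point is meant to be driven. The class, base-class, and module names are assumptions for illustration and may differ from the repository's exact layout:

```python
class FetchTwoFiles(transfer.MultiGetFileLogic, flow_base.FlowBase):
  """Hypothetical flow: schedule two files through the mixin."""

  def Start(self):
    super().Start(file_size=0, maximum_pending_files=100)
    for path in ["/etc/hosts", "/etc/passwd"]:
      pathspec = rdf_paths.PathSpec(
          path=path, pathtype=rdf_paths.PathSpec.PathType.OS)
      # request_data is echoed back to the Receive*/Failed callbacks.
      self.StartFileFetch(pathspec, request_data={"reason": "demo"})
```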
7f279996bde43873d31e397783ab9fa210cf269361dc7a341f792bb2bdce9aeb
def _TryToStartNextPathspec(self): 'Try to schedule the next pathspec if there is enough capacity.' if (self.state.maximum_pending_files <= len(self.state.pending_files)): return if (self.state.maximum_pending_files <= len(self.state.pending_hashes)): return try: index = self.state.next_pathspec_to_start pathspec = self.state.indexed_pathspecs[index] self.state.next_pathspec_to_start = (index + 1) except IndexError: return self.state.pending_hashes[index] = {'index': index} if ((not self.client_version) or (self.client_version >= 3221)): stub = server_stubs.GetFileStat request = rdf_client_action.GetFileStatRequest(pathspec=pathspec) request.follow_symlink = True request_name = 'GetFileStat' else: stub = server_stubs.StatFile request = rdf_client_action.ListDirRequest(pathspec=pathspec) request_name = 'StatFile' self.CallClient(stub, request, next_state=compatibility.GetName(self._StoreStat), request_data=dict(index=index, request_name=request_name)) request = rdf_client_action.FingerprintRequest(pathspec=pathspec, max_filesize=self.state.file_size) request.AddRequest(fp_type=rdf_client_action.FingerprintTuple.Type.FPT_GENERIC, hashers=[rdf_client_action.FingerprintTuple.HashType.MD5, rdf_client_action.FingerprintTuple.HashType.SHA1, rdf_client_action.FingerprintTuple.HashType.SHA256]) self.CallClient(server_stubs.HashFile, request, next_state=compatibility.GetName(self._ReceiveFileHash), request_data=dict(index=index))
Try to schedule the next pathspec if there is enough capacity.
grr/server/grr_response_server/flows/general/transfer.py
_TryToStartNextPathspec
max-vogler/grr
4,238
python
def _TryToStartNextPathspec(self): if (self.state.maximum_pending_files <= len(self.state.pending_files)): return if (self.state.maximum_pending_files <= len(self.state.pending_hashes)): return try: index = self.state.next_pathspec_to_start pathspec = self.state.indexed_pathspecs[index] self.state.next_pathspec_to_start = (index + 1) except IndexError: return self.state.pending_hashes[index] = {'index': index} if ((not self.client_version) or (self.client_version >= 3221)): stub = server_stubs.GetFileStat request = rdf_client_action.GetFileStatRequest(pathspec=pathspec) request.follow_symlink = True request_name = 'GetFileStat' else: stub = server_stubs.StatFile request = rdf_client_action.ListDirRequest(pathspec=pathspec) request_name = 'StatFile' self.CallClient(stub, request, next_state=compatibility.GetName(self._StoreStat), request_data=dict(index=index, request_name=request_name)) request = rdf_client_action.FingerprintRequest(pathspec=pathspec, max_filesize=self.state.file_size) request.AddRequest(fp_type=rdf_client_action.FingerprintTuple.Type.FPT_GENERIC, hashers=[rdf_client_action.FingerprintTuple.HashType.MD5, rdf_client_action.FingerprintTuple.HashType.SHA1, rdf_client_action.FingerprintTuple.HashType.SHA256]) self.CallClient(server_stubs.HashFile, request, next_state=compatibility.GetName(self._ReceiveFileHash), request_data=dict(index=index))
def _TryToStartNextPathspec(self): if (self.state.maximum_pending_files <= len(self.state.pending_files)): return if (self.state.maximum_pending_files <= len(self.state.pending_hashes)): return try: index = self.state.next_pathspec_to_start pathspec = self.state.indexed_pathspecs[index] self.state.next_pathspec_to_start = (index + 1) except IndexError: return self.state.pending_hashes[index] = {'index': index} if ((not self.client_version) or (self.client_version >= 3221)): stub = server_stubs.GetFileStat request = rdf_client_action.GetFileStatRequest(pathspec=pathspec) request.follow_symlink = True request_name = 'GetFileStat' else: stub = server_stubs.StatFile request = rdf_client_action.ListDirRequest(pathspec=pathspec) request_name = 'StatFile' self.CallClient(stub, request, next_state=compatibility.GetName(self._StoreStat), request_data=dict(index=index, request_name=request_name)) request = rdf_client_action.FingerprintRequest(pathspec=pathspec, max_filesize=self.state.file_size) request.AddRequest(fp_type=rdf_client_action.FingerprintTuple.Type.FPT_GENERIC, hashers=[rdf_client_action.FingerprintTuple.HashType.MD5, rdf_client_action.FingerprintTuple.HashType.SHA1, rdf_client_action.FingerprintTuple.HashType.SHA256]) self.CallClient(server_stubs.HashFile, request, next_state=compatibility.GetName(self._ReceiveFileHash), request_data=dict(index=index))<|docstring|>Try to schedule the next pathspec if there is enough capacity.<|endoftext|>
018b884e16fa9029614220cbf6274c01ad30e6f76c416459d6e30c38a2f354c6
def _RemoveCompletedPathspec(self, index): 'Removes a pathspec from the list of pathspecs.' pathspec = self.state.indexed_pathspecs[index] request_data = self.state.request_data_list[index] self.state.indexed_pathspecs[index] = None self.state.request_data_list[index] = None self.state.pending_hashes.pop(index, None) self.state.pending_files.pop(index, None) self._TryToStartNextPathspec() return (pathspec, request_data)
Removes a pathspec from the list of pathspecs.
grr/server/grr_response_server/flows/general/transfer.py
_RemoveCompletedPathspec
max-vogler/grr
4,238
python
def _RemoveCompletedPathspec(self, index): pathspec = self.state.indexed_pathspecs[index] request_data = self.state.request_data_list[index] self.state.indexed_pathspecs[index] = None self.state.request_data_list[index] = None self.state.pending_hashes.pop(index, None) self.state.pending_files.pop(index, None) self._TryToStartNextPathspec() return (pathspec, request_data)
def _RemoveCompletedPathspec(self, index): pathspec = self.state.indexed_pathspecs[index] request_data = self.state.request_data_list[index] self.state.indexed_pathspecs[index] = None self.state.request_data_list[index] = None self.state.pending_hashes.pop(index, None) self.state.pending_files.pop(index, None) self._TryToStartNextPathspec() return (pathspec, request_data)<|docstring|>Removes a pathspec from the list of pathspecs.<|endoftext|>
bcc3f1af6927af70716f6f8138e942209962bdaa39d975f277780adeebf20fab
def _ReceiveFetchedFile(self, tracker, is_duplicate=False): 'Remove pathspec for this index and call the ReceiveFetchedFile method.' index = tracker['index'] (_, request_data) = self._RemoveCompletedPathspec(index) self.ReceiveFetchedFile(tracker['stat_entry'], tracker['hash_obj'], request_data=request_data, is_duplicate=is_duplicate)
Remove pathspec for this index and call the ReceiveFetchedFile method.
grr/server/grr_response_server/flows/general/transfer.py
_ReceiveFetchedFile
max-vogler/grr
4,238
python
def _ReceiveFetchedFile(self, tracker, is_duplicate=False): index = tracker['index'] (_, request_data) = self._RemoveCompletedPathspec(index) self.ReceiveFetchedFile(tracker['stat_entry'], tracker['hash_obj'], request_data=request_data, is_duplicate=is_duplicate)
def _ReceiveFetchedFile(self, tracker, is_duplicate=False): index = tracker['index'] (_, request_data) = self._RemoveCompletedPathspec(index) self.ReceiveFetchedFile(tracker['stat_entry'], tracker['hash_obj'], request_data=request_data, is_duplicate=is_duplicate)<|docstring|>Remove pathspec for this index and call the ReceiveFetchedFile method.<|endoftext|>
0ebc0d8dfca9f05f06bd967c4c5454c6e9dcc26efb2e290165698d7a72e33981
def ReceiveFetchedFile(self, stat_entry, file_hash, request_data=None, is_duplicate=False): "This method will be called for each new file successfully fetched.\n\n Args:\n stat_entry: rdf_client_fs.StatEntry object describing the file.\n file_hash: rdf_crypto.Hash object with file hashes.\n request_data: Arbitrary dictionary that was passed to the corresponding\n StartFileFetch call.\n is_duplicate: If True, the file wasn't actually collected as its hash was\n found in the filestore.\n "
This method will be called for each new file successfully fetched. Args: stat_entry: rdf_client_fs.StatEntry object describing the file. file_hash: rdf_crypto.Hash object with file hashes. request_data: Arbitrary dictionary that was passed to the corresponding StartFileFetch call. is_duplicate: If True, the file wasn't actually collected as its hash was found in the filestore.
grr/server/grr_response_server/flows/general/transfer.py
ReceiveFetchedFile
max-vogler/grr
4,238
python
def ReceiveFetchedFile(self, stat_entry, file_hash, request_data=None, is_duplicate=False): "This method will be called for each new file successfully fetched.\n\n Args:\n stat_entry: rdf_client_fs.StatEntry object describing the file.\n file_hash: rdf_crypto.Hash object with file hashes.\n request_data: Arbitrary dictionary that was passed to the corresponding\n StartFileFetch call.\n is_duplicate: If True, the file wasn't actually collected as its hash was\n found in the filestore.\n "
def ReceiveFetchedFile(self, stat_entry, file_hash, request_data=None, is_duplicate=False): "This method will be called for each new file successfully fetched.\n\n Args:\n stat_entry: rdf_client_fs.StatEntry object describing the file.\n file_hash: rdf_crypto.Hash object with file hashes.\n request_data: Arbitrary dictionary that was passed to the corresponding\n StartFileFetch call.\n is_duplicate: If True, the file wasn't actually collected as its hash was\n found in the filestore.\n "<|docstring|>This method will be called for each new file successfully fetched. Args: stat_entry: rdf_client_fs.StatEntry object describing the file. file_hash: rdf_crypto.Hash object with file hashes. request_data: Arbitrary dictionary that was passed to the corresponding StartFileFetch call. is_duplicate: If True, the file wasn't actually collected as its hash was found in the filestore.<|endoftext|>
c2c953719fab389c940f4fe3172c577c51e8670d3a4cf9e834b926a938ad0001
def _FileFetchFailed(self, index: int, status: Optional[rdf_flow_objects.FlowStatus]): 'Remove pathspec for this index and call the FileFetchFailed method.' (pathspec, request_data) = self._RemoveCompletedPathspec(index) if (pathspec is None): return self.state.files_failed += 1 self.FileFetchFailed(pathspec, request_data=request_data, status=status)
Remove pathspec for this index and call the FileFetchFailed method.
grr/server/grr_response_server/flows/general/transfer.py
_FileFetchFailed
max-vogler/grr
4,238
python
def _FileFetchFailed(self, index: int, status: Optional[rdf_flow_objects.FlowStatus]): (pathspec, request_data) = self._RemoveCompletedPathspec(index) if (pathspec is None): return self.state.files_failed += 1 self.FileFetchFailed(pathspec, request_data=request_data, status=status)
def _FileFetchFailed(self, index: int, status: Optional[rdf_flow_objects.FlowStatus]): (pathspec, request_data) = self._RemoveCompletedPathspec(index) if (pathspec is None): return self.state.files_failed += 1 self.FileFetchFailed(pathspec, request_data=request_data, status=status)<|docstring|>Remove pathspec for this index and call the FileFetchFailed method.<|endoftext|>
736be75cd4e9c86b5e62c83641237fecd1ee00222c8aaa6828b79e4f31301fe3
def FileFetchFailed(self, pathspec: rdf_paths.PathSpec, request_data: Any=None, status: Optional[rdf_flow_objects.FlowStatus]=None): 'This method will be called when stat or hash requests fail.\n\n Args:\n pathspec: Pathspec of a file that failed to be fetched.\n request_data: Arbitrary dictionary that was passed to the corresponding\n StartFileFetch call.\n status: FlowStatus that contains more error details.\n '
This method will be called when stat or hash requests fail. Args: pathspec: Pathspec of a file that failed to be fetched. request_data: Arbitrary dictionary that was passed to the corresponding StartFileFetch call. status: FlowStatus that contains more error details.
grr/server/grr_response_server/flows/general/transfer.py
FileFetchFailed
max-vogler/grr
4,238
python
def FileFetchFailed(self, pathspec: rdf_paths.PathSpec, request_data: Any=None, status: Optional[rdf_flow_objects.FlowStatus]=None): 'This method will be called when stat or hash requests fail.\n\n Args:\n pathspec: Pathspec of a file that failed to be fetched.\n request_data: Arbitrary dictionary that was passed to the corresponding\n StartFileFetch call.\n status: FlowStatus that contains more error details.\n '
def FileFetchFailed(self, pathspec: rdf_paths.PathSpec, request_data: Any=None, status: Optional[rdf_flow_objects.FlowStatus]=None): 'This method will be called when stat or hash requests fail.\n\n Args:\n pathspec: Pathspec of a file that failed to be fetched.\n request_data: Arbitrary dictionary that was passed to the corresponding\n StartFileFetch call.\n status: FlowStatus that contains more error details.\n '<|docstring|>This method will be called when stat or hash requests fail. Args: pathspec: Pathspec of a file that failed to be fetched. request_data: Arbitrary dictionary that was passed to the corresponding StartFileFetch call. status: FlowStatus that contains more error details.<|endoftext|>
04b2842c912c468285345f9962fc8f7535e321a761c6fe7c5fa052e312f7fb2b
def _StoreStat(self, responses): "Stores stat entry in the flow's state." index = responses.request_data['index'] if (not responses.success): self.Log('Failed to stat file: %s', responses.status) self._FileFetchFailed(index, status=responses.status) return tracker = self.state.pending_hashes[index] tracker['stat_entry'] = responses.First()
Stores stat entry in the flow's state.
grr/server/grr_response_server/flows/general/transfer.py
_StoreStat
max-vogler/grr
4,238
python
def _StoreStat(self, responses): index = responses.request_data['index'] if (not responses.success): self.Log('Failed to stat file: %s', responses.status) self._FileFetchFailed(index, status=responses.status) return tracker = self.state.pending_hashes[index] tracker['stat_entry'] = responses.First()
def _StoreStat(self, responses): index = responses.request_data['index'] if (not responses.success): self.Log('Failed to stat file: %s', responses.status) self._FileFetchFailed(index, status=responses.status) return tracker = self.state.pending_hashes[index] tracker['stat_entry'] = responses.First()<|docstring|>Stores stat entry in the flow's state.<|endoftext|>
85e06a2997d641f1e7c773d83c8dd24a0bdf06964bd0959401eabdb22fd4a611
def _ReceiveFileHash(self, responses): 'Add hash digest to tracker and check with filestore.' index = responses.request_data['index'] if (not responses.success): self.Log('Failed to hash file: %s', responses.status) self.state.pending_hashes.pop(index, None) self._FileFetchFailed(index, status=responses.status) return self.state.files_hashed += 1 response = responses.First() if response.HasField('hash'): hash_obj = response.hash else: hash_obj = rdf_crypto.Hash() if ((len(response.results) < 1) or (response.results[0]['name'] != 'generic')): self.Log('Failed to hash file: %s', self.state.indexed_pathspecs[index]) self.state.pending_hashes.pop(index, None) return result = response.results[0] try: for hash_type in ['md5', 'sha1', 'sha256']: value = result.GetItem(hash_type) setattr(hash_obj, hash_type, value) except AttributeError: self.Log('Failed to hash file: %s', self.state.indexed_pathspecs[index]) self.state.pending_hashes.pop(index, None) return try: tracker = self.state.pending_hashes[index] except KeyError: self._FileFetchFailed(index, status=responses.status) return tracker['hash_obj'] = hash_obj tracker['bytes_read'] = response.bytes_read self.state.files_hashed_since_check += 1 if (self.state.files_hashed_since_check >= self.MIN_CALL_TO_FILE_STORE): self._CheckHashesWithFileStore()
Add hash digest to tracker and check with filestore.
grr/server/grr_response_server/flows/general/transfer.py
_ReceiveFileHash
max-vogler/grr
4,238
python
def _ReceiveFileHash(self, responses): index = responses.request_data['index'] if (not responses.success): self.Log('Failed to hash file: %s', responses.status) self.state.pending_hashes.pop(index, None) self._FileFetchFailed(index, status=responses.status) return self.state.files_hashed += 1 response = responses.First() if response.HasField('hash'): hash_obj = response.hash else: hash_obj = rdf_crypto.Hash() if ((len(response.results) < 1) or (response.results[0]['name'] != 'generic')): self.Log('Failed to hash file: %s', self.state.indexed_pathspecs[index]) self.state.pending_hashes.pop(index, None) return result = response.results[0] try: for hash_type in ['md5', 'sha1', 'sha256']: value = result.GetItem(hash_type) setattr(hash_obj, hash_type, value) except AttributeError: self.Log('Failed to hash file: %s', self.state.indexed_pathspecs[index]) self.state.pending_hashes.pop(index, None) return try: tracker = self.state.pending_hashes[index] except KeyError: self._FileFetchFailed(index, status=responses.status) return tracker['hash_obj'] = hash_obj tracker['bytes_read'] = response.bytes_read self.state.files_hashed_since_check += 1 if (self.state.files_hashed_since_check >= self.MIN_CALL_TO_FILE_STORE): self._CheckHashesWithFileStore()
def _ReceiveFileHash(self, responses): index = responses.request_data['index'] if (not responses.success): self.Log('Failed to hash file: %s', responses.status) self.state.pending_hashes.pop(index, None) self._FileFetchFailed(index, status=responses.status) return self.state.files_hashed += 1 response = responses.First() if response.HasField('hash'): hash_obj = response.hash else: hash_obj = rdf_crypto.Hash() if ((len(response.results) < 1) or (response.results[0]['name'] != 'generic')): self.Log('Failed to hash file: %s', self.state.indexed_pathspecs[index]) self.state.pending_hashes.pop(index, None) return result = response.results[0] try: for hash_type in ['md5', 'sha1', 'sha256']: value = result.GetItem(hash_type) setattr(hash_obj, hash_type, value) except AttributeError: self.Log('Failed to hash file: %s', self.state.indexed_pathspecs[index]) self.state.pending_hashes.pop(index, None) return try: tracker = self.state.pending_hashes[index] except KeyError: self._FileFetchFailed(index, status=responses.status) return tracker['hash_obj'] = hash_obj tracker['bytes_read'] = response.bytes_read self.state.files_hashed_since_check += 1 if (self.state.files_hashed_since_check >= self.MIN_CALL_TO_FILE_STORE): self._CheckHashesWithFileStore()<|docstring|>Add hash digest to tracker and check with filestore.<|endoftext|>
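`_ReceiveFileHash` consumes a generic fingerprint carrying MD5, SHA-1 and SHA-256 digests for the file. A self-contained equivalent using only the standard library; `fingerprint_file` is a hypothetical helper for illustration, not the client action:

```python
import hashlib

def fingerprint_file(path, max_filesize=None, chunk_size=64 * 1024):
    """Return {'md5': ..., 'sha1': ..., 'sha256': ...} hex digests.

    Computes the same hasher triple requested in the records above;
    max_filesize bounds how many bytes are read, playing the role of
    the max_filesize field on the fingerprint request.
    """
    hashers = {name: hashlib.new(name) for name in ('md5', 'sha1', 'sha256')}
    bytes_read = 0
    with open(path, 'rb') as f:
        while True:
            budget = chunk_size
            if max_filesize is not None:
                budget = min(budget, max_filesize - bytes_read)
                if budget <= 0:
                    break
            chunk = f.read(budget)
            if not chunk:
                break
            bytes_read += len(chunk)
            for h in hashers.values():
                h.update(chunk)
    return {name: h.hexdigest() for name, h in hashers.items()}
```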
4fe0a0bb8127e7ff18a7b0619171914297a0c4e493b8e9db4504ecbc66e940b1
def _CheckHashesWithFileStore(self): "Check all queued up hashes for existence in the file store.\n\n Hashes which do not exist in the file store will be downloaded. This\n function flushes the entire queue (self.state.pending_hashes) in order to\n minimize the round trips to the file store.\n\n If a file was found in the file store it is not scheduled for collection\n and its PathInfo is written to the datastore pointing to the file store's\n hash. Otherwise, we request the client to hash every block in the file,\n and add it to the file tracking queue (self.state.pending_files).\n " if (not self.state.pending_hashes): return file_hashes = {} hash_to_tracker = {} for (index, tracker) in self.state.pending_hashes.items(): if (tracker.get('hash_obj') is None): continue hash_obj = tracker['hash_obj'] digest = hash_obj.sha256 file_hashes[index] = hash_obj hash_to_tracker.setdefault(rdf_objects.SHA256HashID(digest), []).append(tracker) files_in_filestore = set() statuses = file_store.CheckHashes([rdf_objects.SHA256HashID.FromSerializedBytes(ho.sha256.AsBytes()) for ho in file_hashes.values()]) for (hash_id, status) in statuses.items(): self.HeartBeat() if (not status): continue for tracker in hash_to_tracker[hash_id]: self.state.files_skipped += 1 file_hashes.pop(tracker['index']) files_in_filestore.add(hash_id) self.state.pending_hashes.pop(tracker['index']) self.state.files_hashed_since_check = 0 for hash_id in files_in_filestore: for file_tracker in hash_to_tracker.get(hash_id, []): stat_entry = file_tracker['stat_entry'] path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry) path_info.hash_entry = file_tracker['hash_obj'] data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) self._ReceiveFetchedFile(file_tracker, is_duplicate=True) for index in file_hashes: file_tracker = self.state.pending_hashes.pop(index) self.state.pending_files[index] = file_tracker if (file_tracker['bytes_read'] > 0): file_tracker['size_to_download'] = file_tracker['bytes_read'] else: file_tracker['size_to_download'] = file_tracker['stat_entry'].st_size expected_number_of_hashes = file_tracker['expected_chunks'] = ((file_tracker['size_to_download'] // self.CHUNK_SIZE) + 1) self.state.files_to_fetch += 1 for i in range(expected_number_of_hashes): if (i == (expected_number_of_hashes - 1)): length = (file_tracker['size_to_download'] % self.CHUNK_SIZE) else: length = self.CHUNK_SIZE self.CallClient(server_stubs.HashBuffer, pathspec=file_tracker['stat_entry'].pathspec, offset=(i * self.CHUNK_SIZE), length=length, next_state=compatibility.GetName(self._CheckHash), request_data=dict(index=index)) if ((self.state.files_hashed % 100) == 0): self.Log('Hashed %d files, skipped %s already stored.', self.state.files_hashed, self.state.files_skipped)
Check all queued up hashes for existence in the file store. Hashes which do not exist in the file store will be downloaded. This function flushes the entire queue (self.state.pending_hashes) in order to minimize the round trips to the file store. If a file was found in the file store it is not scheduled for collection and its PathInfo is written to the datastore pointing to the file store's hash. Otherwise, we request the client to hash every block in the file, and add it to the file tracking queue (self.state.pending_files).
grr/server/grr_response_server/flows/general/transfer.py
_CheckHashesWithFileStore
max-vogler/grr
4,238
python
def _CheckHashesWithFileStore(self): if (not self.state.pending_hashes): return file_hashes = {} hash_to_tracker = {} for (index, tracker) in self.state.pending_hashes.items(): if (tracker.get('hash_obj') is None): continue hash_obj = tracker['hash_obj'] digest = hash_obj.sha256 file_hashes[index] = hash_obj hash_to_tracker.setdefault(rdf_objects.SHA256HashID(digest), []).append(tracker) files_in_filestore = set() statuses = file_store.CheckHashes([rdf_objects.SHA256HashID.FromSerializedBytes(ho.sha256.AsBytes()) for ho in file_hashes.values()]) for (hash_id, status) in statuses.items(): self.HeartBeat() if (not status): continue for tracker in hash_to_tracker[hash_id]: self.state.files_skipped += 1 file_hashes.pop(tracker['index']) files_in_filestore.add(hash_id) self.state.pending_hashes.pop(tracker['index']) self.state.files_hashed_since_check = 0 for hash_id in files_in_filestore: for file_tracker in hash_to_tracker.get(hash_id, []): stat_entry = file_tracker['stat_entry'] path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry) path_info.hash_entry = file_tracker['hash_obj'] data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) self._ReceiveFetchedFile(file_tracker, is_duplicate=True) for index in file_hashes: file_tracker = self.state.pending_hashes.pop(index) self.state.pending_files[index] = file_tracker if (file_tracker['bytes_read'] > 0): file_tracker['size_to_download'] = file_tracker['bytes_read'] else: file_tracker['size_to_download'] = file_tracker['stat_entry'].st_size expected_number_of_hashes = file_tracker['expected_chunks'] = ((file_tracker['size_to_download'] // self.CHUNK_SIZE) + 1) self.state.files_to_fetch += 1 for i in range(expected_number_of_hashes): if (i == (expected_number_of_hashes - 1)): length = (file_tracker['size_to_download'] % self.CHUNK_SIZE) else: length = self.CHUNK_SIZE self.CallClient(server_stubs.HashBuffer, pathspec=file_tracker['stat_entry'].pathspec, offset=(i * self.CHUNK_SIZE), length=length, next_state=compatibility.GetName(self._CheckHash), request_data=dict(index=index)) if ((self.state.files_hashed % 100) == 0): self.Log('Hashed %d files, skipped %s already stored.', self.state.files_hashed, self.state.files_skipped)
def _CheckHashesWithFileStore(self): if (not self.state.pending_hashes): return file_hashes = {} hash_to_tracker = {} for (index, tracker) in self.state.pending_hashes.items(): if (tracker.get('hash_obj') is None): continue hash_obj = tracker['hash_obj'] digest = hash_obj.sha256 file_hashes[index] = hash_obj hash_to_tracker.setdefault(rdf_objects.SHA256HashID(digest), []).append(tracker) files_in_filestore = set() statuses = file_store.CheckHashes([rdf_objects.SHA256HashID.FromSerializedBytes(ho.sha256.AsBytes()) for ho in file_hashes.values()]) for (hash_id, status) in statuses.items(): self.HeartBeat() if (not status): continue for tracker in hash_to_tracker[hash_id]: self.state.files_skipped += 1 file_hashes.pop(tracker['index']) files_in_filestore.add(hash_id) self.state.pending_hashes.pop(tracker['index']) self.state.files_hashed_since_check = 0 for hash_id in files_in_filestore: for file_tracker in hash_to_tracker.get(hash_id, []): stat_entry = file_tracker['stat_entry'] path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry) path_info.hash_entry = file_tracker['hash_obj'] data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) self._ReceiveFetchedFile(file_tracker, is_duplicate=True) for index in file_hashes: file_tracker = self.state.pending_hashes.pop(index) self.state.pending_files[index] = file_tracker if (file_tracker['bytes_read'] > 0): file_tracker['size_to_download'] = file_tracker['bytes_read'] else: file_tracker['size_to_download'] = file_tracker['stat_entry'].st_size expected_number_of_hashes = file_tracker['expected_chunks'] = ((file_tracker['size_to_download'] // self.CHUNK_SIZE) + 1) self.state.files_to_fetch += 1 for i in range(expected_number_of_hashes): if (i == (expected_number_of_hashes - 1)): length = (file_tracker['size_to_download'] % self.CHUNK_SIZE) else: length = self.CHUNK_SIZE self.CallClient(server_stubs.HashBuffer, pathspec=file_tracker['stat_entry'].pathspec, offset=(i * self.CHUNK_SIZE), length=length, next_state=compatibility.GetName(self._CheckHash), request_data=dict(index=index)) if ((self.state.files_hashed % 100) == 0): self.Log('Hashed %d files, skipped %s already stored.', self.state.files_hashed, self.state.files_skipped)<|docstring|>Check all queued up hashes for existence in the file store. Hashes which do not exist in the file store will be downloaded. This function flushes the entire queue (self.state.pending_hashes) in order to minimize the round trips to the file store. If a file was found in the file store it is not scheduled for collection and its PathInfo is written to the datastore pointing to the file store's hash. Otherwise, we request the client to hash every block in the file, and add it to the file tracking queue (self.state.pending_files).<|endoftext|>
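The filestore check above is a batched dedup: one `CheckHashes` round trip decides which queued files can be skipped entirely. A stand-alone sketch of that partitioning, with hypothetical names; `known_hashes` stands in for the filestore's answer:

```python
# Split trackers into duplicates (already stored) and files to fetch,
# keyed by their sha256 digest, mirroring the batched check above.

def partition_by_store(trackers, known_hashes):
    duplicates, to_fetch = {}, {}
    for index, tracker in trackers.items():
        if tracker['sha256'] in known_hashes:
            duplicates[index] = tracker   # skip collection, reuse stored file
        else:
            to_fetch[index] = tracker     # request block hashes from client
    return duplicates, to_fetch

store = {'aa' * 32}
dups, fetch = partition_by_store(
    {0: {'sha256': 'aa' * 32}, 1: {'sha256': 'bb' * 32}}, store)
assert list(dups) == [0] and list(fetch) == [1]
```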
33555bd9174c3c9d7e0216169079446f2d5e74f815e2df63b1fc8a661ed8fa5e
def _CheckHash(self, responses): 'Adds the block hash to the file tracker responsible for this vfs URN.' index = responses.request_data['index'] if (index not in self.state.pending_files): return file_tracker = self.state.pending_files[index] hash_response = responses.First() if ((not responses.success) or (not hash_response)): urn = file_tracker['stat_entry'].pathspec.AFF4Path(self.client_urn) self.Log('Failed to read %s: %s', urn, responses.status) self._FileFetchFailed(index, status=responses.status) return file_tracker.setdefault('hash_list', []).append(hash_response) self.state.blob_hashes_pending += 1 if (self.state.blob_hashes_pending > self.MIN_CALL_TO_FILE_STORE): self._FetchFileContent()
Adds the block hash to the file tracker responsible for this vfs URN.
grr/server/grr_response_server/flows/general/transfer.py
_CheckHash
max-vogler/grr
4,238
python
def _CheckHash(self, responses): index = responses.request_data['index'] if (index not in self.state.pending_files): return file_tracker = self.state.pending_files[index] hash_response = responses.First() if ((not responses.success) or (not hash_response)): urn = file_tracker['stat_entry'].pathspec.AFF4Path(self.client_urn) self.Log('Failed to read %s: %s', urn, responses.status) self._FileFetchFailed(index, status=responses.status) return file_tracker.setdefault('hash_list', []).append(hash_response) self.state.blob_hashes_pending += 1 if (self.state.blob_hashes_pending > self.MIN_CALL_TO_FILE_STORE): self._FetchFileContent()
def _CheckHash(self, responses): index = responses.request_data['index'] if (index not in self.state.pending_files): return file_tracker = self.state.pending_files[index] hash_response = responses.First() if ((not responses.success) or (not hash_response)): urn = file_tracker['stat_entry'].pathspec.AFF4Path(self.client_urn) self.Log('Failed to read %s: %s', urn, responses.status) self._FileFetchFailed(index, status=responses.status) return file_tracker.setdefault('hash_list', []).append(hash_response) self.state.blob_hashes_pending += 1 if (self.state.blob_hashes_pending > self.MIN_CALL_TO_FILE_STORE): self._FetchFileContent()<|docstring|>Adds the block hash to the file tracker responsible for this vfs URN.<|endoftext|>
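`_CheckHash` illustrates threshold flushing: block hashes accumulate per file, and the expensive store check runs only once `MIN_CALL_TO_FILE_STORE` is exceeded. A minimal sketch of the same batching, assuming a `flush_fn` callback in place of `_FetchFileContent`:

```python
class BlockHashBuffer:
    """Hypothetical stand-in for the pending_files bookkeeping above."""
    MIN_CALL_TO_STORE = 100

    def __init__(self, flush_fn):
        self.pending = 0          # blob hashes queued since the last flush
        self.hash_lists = {}      # file index -> ordered block hashes
        self.flush_fn = flush_fn  # plays the role of _FetchFileContent

    def add(self, index, block_hash):
        self.hash_lists.setdefault(index, []).append(block_hash)
        self.pending += 1
        if self.pending > self.MIN_CALL_TO_STORE:
            # Only the counter resets; buffered lists stay attached until
            # their file completes, as in the records above.
            self.flush_fn(self.hash_lists)
            self.pending = 0
```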
4cc80f03b049cc419aaa7210ec47b6f1da3b40d1e31edd6c69b90d3db7232140
def _FetchFileContent(self): "Fetch as much of the file's content as possible.\n\n This drains the pending_files store by checking which blobs we already have\n in the store and issuing calls to the client to receive outstanding blobs.\n " if (not self.state.pending_files): return blob_hashes = [] for file_tracker in self.state.pending_files.values(): for hash_response in file_tracker.get('hash_list', []): blob_hashes.append(rdf_objects.BlobID.FromSerializedBytes(hash_response.data)) existing_blobs = data_store.BLOBS.CheckBlobsExist(blob_hashes) self.state.blob_hashes_pending = 0 for (index, file_tracker) in list(self.state.pending_files.items()): for (i, hash_response) in enumerate(file_tracker.get('hash_list', [])): hash_response.pathspec = file_tracker['stat_entry'].pathspec if existing_blobs[rdf_objects.BlobID.FromSerializedBytes(hash_response.data)]: self.CallStateInline(messages=[hash_response], next_state=compatibility.GetName(self._WriteBuffer), request_data=dict(index=index, blob_index=i)) else: self.CallClient(server_stubs.TransferBuffer, hash_response, next_state=compatibility.GetName(self._WriteBuffer), request_data=dict(index=index, blob_index=i))
Fetch as much of the file's content as possible. This drains the pending_files store by checking which blobs we already have in the store and issuing calls to the client to receive outstanding blobs.
grr/server/grr_response_server/flows/general/transfer.py
_FetchFileContent
max-vogler/grr
4,238
python
def _FetchFileContent(self): if (not self.state.pending_files): return blob_hashes = [] for file_tracker in self.state.pending_files.values(): for hash_response in file_tracker.get('hash_list', []): blob_hashes.append(rdf_objects.BlobID.FromSerializedBytes(hash_response.data)) existing_blobs = data_store.BLOBS.CheckBlobsExist(blob_hashes) self.state.blob_hashes_pending = 0 for (index, file_tracker) in list(self.state.pending_files.items()): for (i, hash_response) in enumerate(file_tracker.get('hash_list', [])): hash_response.pathspec = file_tracker['stat_entry'].pathspec if existing_blobs[rdf_objects.BlobID.FromSerializedBytes(hash_response.data)]: self.CallStateInline(messages=[hash_response], next_state=compatibility.GetName(self._WriteBuffer), request_data=dict(index=index, blob_index=i)) else: self.CallClient(server_stubs.TransferBuffer, hash_response, next_state=compatibility.GetName(self._WriteBuffer), request_data=dict(index=index, blob_index=i))
def _FetchFileContent(self): if (not self.state.pending_files): return blob_hashes = [] for file_tracker in self.state.pending_files.values(): for hash_response in file_tracker.get('hash_list', []): blob_hashes.append(rdf_objects.BlobID.FromSerializedBytes(hash_response.data)) existing_blobs = data_store.BLOBS.CheckBlobsExist(blob_hashes) self.state.blob_hashes_pending = 0 for (index, file_tracker) in list(self.state.pending_files.items()): for (i, hash_response) in enumerate(file_tracker.get('hash_list', [])): hash_response.pathspec = file_tracker['stat_entry'].pathspec if existing_blobs[rdf_objects.BlobID.FromSerializedBytes(hash_response.data)]: self.CallStateInline(messages=[hash_response], next_state=compatibility.GetName(self._WriteBuffer), request_data=dict(index=index, blob_index=i)) else: self.CallClient(server_stubs.TransferBuffer, hash_response, next_state=compatibility.GetName(self._WriteBuffer), request_data=dict(index=index, blob_index=i))<|docstring|>Fetch as much of the file's content as possible. This drains the pending_files store by checking which blobs we already have in the store and issuing calls to the client to receive outstanding blobs.<|endoftext|>
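`_FetchFileContent` routes each block hash one of two ways: blobs the store already holds are replayed inline, and missing ones are requested from the client. The same split in plain Python; `existing` plays the role of `CheckBlobsExist`'s result dict, and all names are illustrative:

```python
# Route blob hashes to a local replay or a remote fetch, mirroring
# the CallStateInline / CallClient branch in the record above.

def route_blobs(hash_list, existing):
    local, remote = [], []
    for blob_id in hash_list:
        (local if existing.get(blob_id, False) else remote).append(blob_id)
    return local, remote

existing = {'b1': True, 'b2': False}
local, remote = route_blobs(['b1', 'b2'], existing)
assert local == ['b1'] and remote == ['b2']
```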
ba2583451d17ce8f4fbfde4b9969be15363422d960f537bf4e84c72d72d3af77
def _WriteBuffer(self, responses): 'Write the hash received to the blob image.' index = responses.request_data['index'] if (index not in self.state.pending_files): return if (not responses.success): self._FileFetchFailed(index, status=responses.status) return response = responses.First() file_tracker = self.state.pending_files.get(index) if (not file_tracker): return blob_dict = file_tracker.setdefault('blobs', {}) blob_index = responses.request_data['blob_index'] blob_dict[blob_index] = (response.data, response.length) if (len(blob_dict) != file_tracker['expected_chunks']): return stat_entry = file_tracker['stat_entry'] path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry) blob_refs = [] offset = 0 for index in sorted(blob_dict): (digest, size) = blob_dict[index] blob_refs.append(rdf_objects.BlobReference(offset=offset, size=size, blob_id=rdf_objects.BlobID.FromSerializedBytes(digest))) offset += size hash_obj = file_tracker['hash_obj'] client_path = db.ClientPath.FromPathInfo(self.client_id, path_info) hash_id = file_store.AddFileWithUnknownHash(client_path, blob_refs, use_external_stores=self.state.use_external_stores) if (hash_id.AsBytes() == hash_obj.sha256): path_info.hash_entry = hash_obj else: path_info.hash_entry.sha256 = hash_id.AsBytes() path_info.hash_entry.num_bytes = offset data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) del file_tracker['blobs'] del file_tracker['hash_list'] self._ReceiveFetchedFile(file_tracker) self.state.files_fetched += 1 if (not (self.state.files_fetched % 100)): self.Log('Fetched %d of %d files.', self.state.files_fetched, self.state.files_to_fetch)
Write the hash received to the blob image.
grr/server/grr_response_server/flows/general/transfer.py
_WriteBuffer
max-vogler/grr
4,238
python
def _WriteBuffer(self, responses): index = responses.request_data['index'] if (index not in self.state.pending_files): return if (not responses.success): self._FileFetchFailed(index, status=responses.status) return response = responses.First() file_tracker = self.state.pending_files.get(index) if (not file_tracker): return blob_dict = file_tracker.setdefault('blobs', {}) blob_index = responses.request_data['blob_index'] blob_dict[blob_index] = (response.data, response.length) if (len(blob_dict) != file_tracker['expected_chunks']): return stat_entry = file_tracker['stat_entry'] path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry) blob_refs = [] offset = 0 for index in sorted(blob_dict): (digest, size) = blob_dict[index] blob_refs.append(rdf_objects.BlobReference(offset=offset, size=size, blob_id=rdf_objects.BlobID.FromSerializedBytes(digest))) offset += size hash_obj = file_tracker['hash_obj'] client_path = db.ClientPath.FromPathInfo(self.client_id, path_info) hash_id = file_store.AddFileWithUnknownHash(client_path, blob_refs, use_external_stores=self.state.use_external_stores) if (hash_id.AsBytes() == hash_obj.sha256): path_info.hash_entry = hash_obj else: path_info.hash_entry.sha256 = hash_id.AsBytes() path_info.hash_entry.num_bytes = offset data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) del file_tracker['blobs'] del file_tracker['hash_list'] self._ReceiveFetchedFile(file_tracker) self.state.files_fetched += 1 if (not (self.state.files_fetched % 100)): self.Log('Fetched %d of %d files.', self.state.files_fetched, self.state.files_to_fetch)
def _WriteBuffer(self, responses): index = responses.request_data['index'] if (index not in self.state.pending_files): return if (not responses.success): self._FileFetchFailed(index, status=responses.status) return response = responses.First() file_tracker = self.state.pending_files.get(index) if (not file_tracker): return blob_dict = file_tracker.setdefault('blobs', {}) blob_index = responses.request_data['blob_index'] blob_dict[blob_index] = (response.data, response.length) if (len(blob_dict) != file_tracker['expected_chunks']): return stat_entry = file_tracker['stat_entry'] path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry) blob_refs = [] offset = 0 for index in sorted(blob_dict): (digest, size) = blob_dict[index] blob_refs.append(rdf_objects.BlobReference(offset=offset, size=size, blob_id=rdf_objects.BlobID.FromSerializedBytes(digest))) offset += size hash_obj = file_tracker['hash_obj'] client_path = db.ClientPath.FromPathInfo(self.client_id, path_info) hash_id = file_store.AddFileWithUnknownHash(client_path, blob_refs, use_external_stores=self.state.use_external_stores) if (hash_id.AsBytes() == hash_obj.sha256): path_info.hash_entry = hash_obj else: path_info.hash_entry.sha256 = hash_id.AsBytes() path_info.hash_entry.num_bytes = offset data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) del file_tracker['blobs'] del file_tracker['hash_list'] self._ReceiveFetchedFile(file_tracker) self.state.files_fetched += 1 if (not (self.state.files_fetched % 100)): self.Log('Fetched %d of %d files.', self.state.files_fetched, self.state.files_to_fetch)<|docstring|>Write the hash received to the blob image.<|endoftext|>
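`_WriteBuffer` reassembles a file from chunks that may arrive out of order, keyed by `blob_index`, finalizes only once every expected chunk is present, and cross-checks the resulting digest. A self-contained sketch of that reassembly using `hashlib`; `assemble` is a hypothetical helper:

```python
import hashlib

def assemble(blob_dict, expected_chunks, expected_sha256):
    """Stitch out-of-order chunks together in index order and verify."""
    if len(blob_dict) != expected_chunks:
        return None                      # still waiting for chunks
    data = b''.join(blob_dict[i] for i in sorted(blob_dict))
    actual = hashlib.sha256(data).hexdigest()
    if actual != expected_sha256:
        raise ValueError(f'digest mismatch: {actual} != {expected_sha256}')
    return data

chunks = {1: b'world', 0: b'hello '}    # arrived out of order
digest = hashlib.sha256(b'hello world').hexdigest()
assert assemble(chunks, 2, digest) == b'hello world'
```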
fedeeee9f146cf45e1cd48e6c7dc2246969b357c51f1fc4e947bbb7eefea4dd4
def Start(self): 'Start state of the flow.' super().Start(file_size=self.args.file_size, maximum_pending_files=self.args.maximum_pending_files, use_external_stores=self.args.use_external_stores) unique_paths = set() self.state.pathspecs_progress = [PathSpecProgress(pathspec=p, status=PathSpecProgress.Status.IN_PROGRESS) for p in self.args.pathspecs] for (i, pathspec) in enumerate(self.args.pathspecs): vfs_urn = pathspec.AFF4Path(self.client_urn) if (vfs_urn not in unique_paths): unique_paths.add(vfs_urn) self.StartFileFetch(pathspec, request_data=i)
Start state of the flow.
grr/server/grr_response_server/flows/general/transfer.py
Start
max-vogler/grr
4,238
python
def Start(self): super().Start(file_size=self.args.file_size, maximum_pending_files=self.args.maximum_pending_files, use_external_stores=self.args.use_external_stores) unique_paths = set() self.state.pathspecs_progress = [PathSpecProgress(pathspec=p, status=PathSpecProgress.Status.IN_PROGRESS) for p in self.args.pathspecs] for (i, pathspec) in enumerate(self.args.pathspecs): vfs_urn = pathspec.AFF4Path(self.client_urn) if (vfs_urn not in unique_paths): unique_paths.add(vfs_urn) self.StartFileFetch(pathspec, request_data=i)
def Start(self): super().Start(file_size=self.args.file_size, maximum_pending_files=self.args.maximum_pending_files, use_external_stores=self.args.use_external_stores) unique_paths = set() self.state.pathspecs_progress = [PathSpecProgress(pathspec=p, status=PathSpecProgress.Status.IN_PROGRESS) for p in self.args.pathspecs] for (i, pathspec) in enumerate(self.args.pathspecs): vfs_urn = pathspec.AFF4Path(self.client_urn) if (vfs_urn not in unique_paths): unique_paths.add(vfs_urn) self.StartFileFetch(pathspec, request_data=i)<|docstring|>Start state of the flow.<|endoftext|>
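`Start` deduplicates the requested pathspecs by their canonical VFS path before scheduling, keeping the first occurrence. The same first-wins dedup as a stand-alone generator; `unique_by_key` is a hypothetical helper:

```python
def unique_by_key(items, key):
    """Yield (index, item) pairs, skipping items whose key was seen."""
    seen = set()
    for i, item in enumerate(items):
        k = key(item)
        if k in seen:
            continue
        seen.add(k)
        yield i, item

paths = ['/etc/passwd', '/var/log/syslog', '/etc/passwd']
assert [i for i, _ in unique_by_key(paths, key=str)] == [0, 1]
```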
adf2422a92937e83cbb57882e81e84100f25cb812aa91fcd83fb307c47b0af18
def ReceiveFetchedFile(self, stat_entry, unused_hash_obj, request_data=None, is_duplicate=False): 'This method will be called for each new file successfully fetched.' if is_duplicate: status = PathSpecProgress.Status.SKIPPED else: status = PathSpecProgress.Status.COLLECTED self.state.pathspecs_progress[request_data].status = status self.SendReply(stat_entry)
This method will be called for each new file successfully fetched.
grr/server/grr_response_server/flows/general/transfer.py
ReceiveFetchedFile
max-vogler/grr
4,238
python
def ReceiveFetchedFile(self, stat_entry, unused_hash_obj, request_data=None, is_duplicate=False): if is_duplicate: status = PathSpecProgress.Status.SKIPPED else: status = PathSpecProgress.Status.COLLECTED self.state.pathspecs_progress[request_data].status = status self.SendReply(stat_entry)
def ReceiveFetchedFile(self, stat_entry, unused_hash_obj, request_data=None, is_duplicate=False): if is_duplicate: status = PathSpecProgress.Status.SKIPPED else: status = PathSpecProgress.Status.COLLECTED self.state.pathspecs_progress[request_data].status = status self.SendReply(stat_entry)<|docstring|>This method will be called for each new file successfully fetched.<|endoftext|>
6003a8fb64b64782768dcbb4a01d4c1073a7f675ea3970d100acd798a1e044e9
def FileFetchFailed(self, pathspec, request_data=None, status=None): 'This method will be called when stat or hash requests fail.' self.state.pathspecs_progress[request_data].status = PathSpecProgress.Status.FAILED
This method will be called when stat or hash requests fail.
grr/server/grr_response_server/flows/general/transfer.py
FileFetchFailed
max-vogler/grr
4,238
python
def FileFetchFailed(self, pathspec, request_data=None, status=None): self.state.pathspecs_progress[request_data].status = PathSpecProgress.Status.FAILED
def FileFetchFailed(self, pathspec, request_data=None, status=None): self.state.pathspecs_progress[request_data].status = PathSpecProgress.Status.FAILED<|docstring|>This method will be called when stat or hash requests fail.<|endoftext|>