| Column | Type | Range |
|---|---|---|
| `body_hash` | string | length 64–64 |
| `body` | string | length 23–109k |
| `docstring` | string | length 1–57k |
| `path` | string | length 4–198 |
| `name` | string | length 1–115 |
| `repository_name` | string | length 7–111 |
| `repository_stars` | float64 | 0–191k |
| `lang` | string | 1 distinct value (`python`) |
| `body_without_docstring` | string | length 14–108k |
| `unified` | string | length 45–133k |
**`__str__`** · `fpiweb/models.py` · damiencalloway/Food-Pantry-Inventory (★ 1, python)
`body_hash`: `1f87481a7da64c9f606702b1ed62844f0e5a72c4b0eec0e66368571a0c48025f`

```python
def __str__(self):
    ' Default way to display this activity record. '
    if self.date_filled:
        display = f'{self.box_number} ({self.box_type}) {self.prod_name} ({self.prod_cat_name}) {self.quantity} {self.exp_year}({self.exp_month_start}-{self.exp_month_end}){self.date_filled} - {self.date_consumed}({self.duration}) at {self.loc_row} / {self.loc_bin} / {self.loc_tier}'
    else:
        display = f'{self.box_number} ({self.box_type}) - Empty'
    return display
```

**`__str__`** · `fpiweb/models.py` · damiencalloway/Food-Pantry-Inventory (★ 1, python)
`body_hash`: `227fa33c6bacf680f07289f19a52c7979596e8806f0f303bb43f1ffec757139c`

```python
def __str__(self):
    ' Default way to display this constraint record. '
    if self.constraint_type in [self.INT_RANGE, self.CHAR_RANGE]:
        display = f'{self.constraint_name} - {self.constraint_min} to {self.constraint_max} ({self.constraint_type})'
    else:
        display = f'{self.constraint_name} - {self.constraint_list} ({self.constraint_type})'
    if self.constraint_descr:
        display += f' -- {self.constraint_descr[:50]}'
    return display
```

**`__str__`** · `fpiweb/models.py` · damiencalloway/Food-Pantry-Inventory (★ 1, python)
`body_hash`: `0233c36257b5682901d7b6037e6c69276ae35da10882c41d7b53d01ca97583e6`

```python
def __str__(self):
    ' Default way to display this product example '
    display = f'{self.prod_example_name} ({self.prod_id})'
    return display
```
**`shop`** · `bot/hooks/shop.py` · lungdart/discord_rpg_bot (★ 1, python)
`body_hash`: `23e469b52d11fce45529acf7d9c099e82c18cee8d02e3270fced17ec60187de9`

```python
@commands.command()
@log_all
async def shop(self, ctx):
    ' View all items available in the shop '
    stuff = self.api.shop.list_all()
    weapons = '\n'.join(stuff['weapons']) if stuff['weapons'] else 'None'
    armor = '\n'.join(stuff['armor']) if stuff['armor'] else 'None'
    accessories = '\n'.join(stuff['accessories']) if stuff['accessories'] else 'None'
    items = '\n'.join(stuff['items']) if stuff['items'] else 'None'
    spells = '\n'.join(stuff['spells']) if stuff['spells'] else 'None'
    out = self.api.logger.entry()
    out.title('Welcome to the shop!')
    out.desc('Type *!info <ITEM>* to get more information about each item')
    out.field('Weapons', f'{weapons}')
    out.field('Armor', f'{armor}')
    out.field('Accessories', f'{accessories}')
    out.field('Items', f'{items}')
    out.field('Spells', f'{spells}')
    out.buffer(ctx.author)
    await self.api.logger.send_buffer()
    await ctx.message.add_reaction(u'π')
```

**`info`** · `bot/hooks/shop.py` · lungdart/discord_rpg_bot (★ 1, python)
`body_hash`: `4bec3f58deae7ce580194899c2f698c550df26ba8e32f1f4ea20438ffa6c29ae`

```python
@commands.command()
@log_all
async def info(self, ctx, *name):
    ' Get detailed information about an item '
    name = ' '.join(name)
    details = self.api.shop.find_info(name)
    out = self.api.logger.entry()
    out.title(f"{details['name']}")
    out.desc(f"{details['desc']}")
    del details['name']
    del details['desc']
    for key in details:
        out.field(title=f'{key}', desc=f'{details[key]}')
    out.buffer(ctx.author)
    await self.api.logger.send_buffer()
    await ctx.message.add_reaction(u'π')
```

**`buy`** · `bot/hooks/shop.py` · lungdart/discord_rpg_bot (★ 1, python)
`body_hash`: `18c023efa9ac88582e189b6d82c4900802a2f5342b9f199e8e42f931d66ce12e`

```python
@commands.command()
@log_all
async def buy(self, ctx, *args):
    ' Purchase one or more item(s) from the shop '
    try:
        quantity = int(args[-1])
        args = args[:-1]
    except ValueError:
        quantity = 1
    name = ' '.join(args)
    user = self.api.character.get(ctx.author.name)
    self.api.shop.buy(user, name, quantity)
    cstats = self.api.character.stats(ctx.author.name)
    out = self.api.logger.entry()
    out.color('success')
    out.title(f'{quantity}x {name} purchased!')
    out.desc(f"You have {cstats['gold']} gold remaining...")
    out.buffer(ctx.author)
    await self.api.logger.send_buffer()
    await ctx.message.add_reaction(u'π')
```

**`sell`** · `bot/hooks/shop.py` · lungdart/discord_rpg_bot (★ 1, python)
`body_hash`: `27e216d652c4e11f971f884512393c3379b5a02c1c0ab6d7bac5ac84309c49fa`

```python
@commands.command()
@log_all
async def sell(self, ctx, *args):
    ' Sell one or more item(s) from your inventory '
    try:
        quantity = int(args[-1])
        args = args[:-1]
    except ValueError:
        quantity = 1
    name = ' '.join(args)
    user = self.api.character.get(ctx.author.name)
    self.api.shop.sell(user, name, quantity)
    cstats = self.api.character.stats(ctx.author.name)
    out = self.api.logger.entry()
    out.color('success')
    out.title(f'{quantity} {name} sold!')
    out.desc(f"You now have {cstats['gold']} gold!")
    out.buffer(ctx.author)
    await self.api.logger.send_buffer()
    await ctx.message.add_reaction(u'π')
```
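The `buy` and `sell` commands above share one parsing idiom: the final token is treated as a quantity when it parses as an integer, and is otherwise folded back into the item name. A standalone sketch of that idiom (the function name and test values are illustrative, not from the bot itself):

```python
def parse_item_and_quantity(args):
    """Split a command's argument tuple into (item name, quantity)."""
    args = list(args)
    try:
        quantity = int(args[-1])  # trailing token is a count, e.g. ('long', 'sword', '3')
        args = args[:-1]
    except ValueError:
        quantity = 1              # no trailing count given; default to one
    return ' '.join(args), quantity

assert parse_item_and_quantity(('long', 'sword', '3')) == ('long sword', 3)
assert parse_item_and_quantity(('potion',)) == ('potion', 1)
```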
**`load_model`** · `pyannote/audio/applications/base.py` · diego-fustes/pyannote-audio (★ 2, python)
`body_hash`: `030e8cee895c5a30cf92f1b501e97db0d4eaa0778b52124ef772675ca4a5a105`

```python
def load_model(self, epoch, train_dir=None):
    """Load pretrained model

    Parameters
    ----------
    epoch : int
        Which epoch to load.
    train_dir : str, optional
        Path to train directory. Defaults to self.train_dir_.
    """
    if train_dir is None:
        train_dir = self.train_dir_
    import torch
    weights_pt = self.WEIGHTS_PT.format(train_dir=train_dir, epoch=epoch)
    self.model_.load_state_dict(torch.load(weights_pt))
    return self.model_
```

**`get_number_of_epochs`** · `pyannote/audio/applications/base.py` · diego-fustes/pyannote-audio (★ 2, python)
`body_hash`: `5f2cf0d79bd7c261d44c172872acb3fd18d0c7095f65e2ff99482b51d0de1f39`

```python
def get_number_of_epochs(self, train_dir=None, return_first=False):
    """Get information about completed epochs

    Parameters
    ----------
    train_dir : str, optional
        Training directory. Defaults to self.train_dir_
    return_first : bool, optional
        Defaults (False) to return number of epochs.
        Set to True to also return index of first epoch.
    """
    if train_dir is None:
        train_dir = self.train_dir_
    # strip the trailing '0000.pt' so that `directory` is the checkpoint filename prefix
    directory = self.WEIGHTS_PT.format(train_dir=train_dir, epoch=0)[:-7]
    weights = sorted(glob(directory + '*[0-9][0-9][0-9][0-9].pt'))
    if not weights:
        number_of_epochs = 0
        first_epoch = None
    else:
        number_of_epochs = int(basename(weights[-1])[:-3]) + 1
        first_epoch = int(basename(weights[0])[:-3])
    return (number_of_epochs, first_epoch) if return_first else number_of_epochs
```
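Taken together, `load_model` and `get_number_of_epochs` let callers resume from the most recent checkpoint. A minimal sketch, assuming `app` is an instance of this application class with `train_dir_` and `model_` already initialized:

```python
# Load the weights of the most recently completed epoch, if any checkpoints exist.
number_of_epochs = app.get_number_of_epochs()
if number_of_epochs > 0:
    model = app.load_model(number_of_epochs - 1)  # epochs are 0-indexed
else:
    model = app.model_  # no checkpoints yet; fall back to the freshly initialized model
```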
**`validate_iter`** · `pyannote/audio/applications/base.py` · diego-fustes/pyannote-audio (★ 2, python)
`body_hash`: `2686706309ef9b7c161f3774fee07c38523b8da6163a2c8cf5c561846dc94fd1`

```python
def validate_iter(self, start=None, end=None, step=1, sleep=10, in_order=False):
    """Continuously watches `train_dir` for newly completed epochs
    and yields them for validation

    Note that epochs will not necessarily be yielded in order.
    The very last completed epoch will always be first on the list.

    Parameters
    ----------
    start : int, optional
        Start validating after `start` epochs. Defaults to 0.
    end : int, optional
        Stop validating after epoch `end`. Defaults to never stop.
    step : int, optional
        Validate every `step`th epoch. Defaults to 1.
    sleep : int, optional
    in_order : bool, optional
        Force chronological validation.

    Usage
    -----
    >>> for epoch in app.validate_iter():
    ...     app.validate(epoch)
    """
    if end is None:
        end = np.inf
    if start is None:
        start = 0
    validated_epochs = set()
    next_epoch_to_validate_in_order = start

    # Wait until at least one epoch has completed, and make sure we do not
    # start before the first available checkpoint.
    while next_epoch_to_validate_in_order < end:
        _, first_epoch = self.get_number_of_epochs(return_first=True)
        if first_epoch is None:
            print('waiting for first epoch to complete...')
            time.sleep(sleep)
            continue
        if next_epoch_to_validate_in_order < first_epoch:
            next_epoch_to_validate_in_order = first_epoch
        break

    while True:
        last_completed_epoch = self.get_number_of_epochs() - 1
        if (not in_order) and (last_completed_epoch not in validated_epochs):
            next_epoch_to_validate = last_completed_epoch
            time.sleep(5)
        elif next_epoch_to_validate_in_order <= last_completed_epoch:
            next_epoch_to_validate = next_epoch_to_validate_in_order
        else:
            time.sleep(sleep)
            continue
        if next_epoch_to_validate not in validated_epochs:
            yield next_epoch_to_validate
            validated_epochs.add(next_epoch_to_validate)
            if next_epoch_to_validate_in_order == next_epoch_to_validate:
                next_epoch_to_validate_in_order += step
```
**`agentdf_to_dfic`** · `analysis/utils/trajectory.py` · cogtoolslab/projection_block_construction (★ 0, python)
`body_hash`: `0813a038e380ae105104d4ba1dca121ebfac8a6dd6a50a4d27ef147453329a36`

```python
def agentdf_to_dfic(df):
    'Takes in a dataframe generated by experiment_runner in projection_block_construction and returns a dataframe formatted for use with the trajectory graph'
    dfic = pd.DataFrame()
    dfic['gameID'] = df['run_ID']
    dfic['targetName'] = df['world']
    dfic['agent'] = df['agent_attributes'].apply(lambda x: str(x))
    dfic['discreteWorld'] = df['blockmap'].apply(lambda x: (x > 0).astype(int))
    dfic['usableDiscreteWorld'] = dfic['discreteWorld'].apply(lambda a: 1 + (-1) * np.array(a))
    dfic['flatDiscreteWorld'] = dfic['discreteWorld'].apply(lambda a: (1 + (-1) * np.array(a)).flatten())
    analysis_helper.fill_F1(df)
    dfic['rawF1DiscreteScore'] = df['F1']
    return dfic
```

**`bootstrapCI`** · `analysis/utils/trajectory.py` · cogtoolslab/projection_block_construction (★ 0, python)
`body_hash`: `6a6707d6c11bd020ca0b67ec7bf4625437e40fbce0e144d6c6cf8e51f0fad98f`

```python
def bootstrapCI(x, nIter=1000, crit_val=0, ci=95, verbose=False, compareNull=True):
    """
    input: x = an array,
           nIter = number of iterations,
           crit_val = hypothetical null value
           verbose = a flag to set whether to pretty print output
           compareNull = a flag to set whether to compare against crit_val or not

    output: U, lb, ub, p1, p2
        U = mean
        lb = lower bound of 95% CI
        ub = upper bound of 95% CI
        p1 = proportion of bootstrapped iterations less than critical value
        p2 = proportion of bootstrapped iterations greater than critical value
    ci = what kind of confidence interval do you want? 95% by default
    """
    u = []
    for i in np.arange(nIter):
        inds = np.random.RandomState(i).choice(len(x), len(x))
        boot = x[inds]
        u.append(np.mean(boot))
    p1 = (len([i for i in u if i < crit_val]) / len(u)) * 2
    p2 = (len([i for i in u if i > crit_val]) / len(u)) * 2
    U = np.mean(u)
    lb = np.percentile(u, (100 - ci) / 2)
    ub = np.percentile(u, 100 - (100 - ci) / 2)
    if verbose:
        print('Original mean = {}. Bootstrapped mean = {}.'.format(np.mean(x).round(5), U.round(5)))
        print('{}% CI = [{}, {}].'.format(ci, lb.round(5), ub.round(5)))
        if compareNull:
            print('p<{}={} | p>{}={}.'.format(crit_val, np.round(p1, 5), crit_val, np.round(p2, 5)))
    return (U, lb, ub, p1, p2)
```
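A quick usage sketch for `bootstrapCI` with synthetic data (the sample here is illustrative only):

```python
import numpy as np

rng = np.random.RandomState(0)
x = rng.normal(loc=0.2, scale=1.0, size=200)  # sample whose mean we want a CI for

U, lb, ub, p1, p2 = bootstrapCI(x, nIter=1000, crit_val=0, ci=95,
                                verbose=True, compareNull=True)
# U      -> bootstrapped mean (should land near 0.2)
# lb, ub -> bounds of the 95% bootstrap confidence interval
# p1, p2 -> two-sided tail proportions relative to crit_val=0
```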
**`plot_trajectory_graph`** · `analysis/utils/trajectory.py` · cogtoolslab/projection_block_construction (★ 0, python)
`body_hash`: `81a8c330177c6c31453cbdb2da9745a7bfa08a65dd85abd60d07efe7ddbb2f32`

```python
def plot_trajectory_graph(data, target_name, agent, show=True, save=False, out_dir='./plots',
                          extension='', x_lower_bound=0, x_upper_bound=8,
                          edge_width_scale_factor=0.8, node_size_scale_factor=0.8,
                          colors=['#1c373e', '#EF553B']):
    """
    makes F1 world state trajectory graph for specific target structure and phase
    """
    if len(data) == 0:
        raise Exception('No data was passed in! Please pass in data.')
    t = GenericBuildGraph(target_name=target_name)
    a = data[(data.targetName == target_name) & (data.agent == agent)]
    a = a.groupby('gameID')
    a.apply(lambda g: t.add_build_path(g))
    (node_xs, node_ys, edge_xs, edge_ys,
     node_sizes, edge_ws, node_colors, edge_colors) = t.get_coords()
    node_sizes = [i * node_size_scale_factor for i in node_sizes]
    O = {1: 0.1, 2: 0.3, 3: 0.5, 4: 0.7}  # edge-weight -> opacity lookup

    def make_edge(x, y, width):  # note: unused; make_edges below builds the traces
        return go.Scatter(x=x, y=y, line=dict(width=width), hoverinfo='none', mode='lines')

    def make_edges(edge_xs, edge_ys, edge_ws):
        edge_trace = []
        for i in range(0, int(len(edge_xs) / 2)):
            edge_trace.append(go.Scatter(
                x=(edge_xs[i * 2], edge_xs[i * 2 + 1]),
                y=(edge_ys[i * 2], edge_ys[i * 2 + 1]),
                line=dict(width=edge_ws[i] * edge_width_scale_factor,
                          color=list(reversed(colors))[edge_colors[i]]),
                opacity=O[edge_ws[i]] if edge_ws[i] in O.keys() else 0.95,
                hoverinfo='none', mode='lines'))
        return edge_trace

    edge_trace = make_edges(edge_xs, edge_ys, edge_ws)
    node_trace = go.Scatter(x=node_xs, y=node_ys, mode='markers', hoverinfo='text',
                            marker=dict(colorscale=colors, reversescale=True,
                                        size=node_sizes, color=node_colors, opacity=1.0,
                                        line_width=0, line_color='#FFF'))
    fig = go.Figure(data=edge_trace + [node_trace],
                    layout=go.Layout(titlefont_size=22, showlegend=False, hovermode='closest',
                                     margin=dict(b=20, l=5, r=5, t=40),
                                     xaxis={'range': [x_lower_bound, x_upper_bound],
                                            'showgrid': False, 'zeroline': True, 'visible': False},
                                     yaxis={'range': [-0.1, 1.05], 'showgrid': False}))
    fig.update_yaxes(tickfont=dict(family='Helvetica', color='black', size=24),
                     title_text='F1 score',
                     title_font=dict(size=24, family='Helvetica', color='black'))
    fig.update_layout(width=600, height=400, template='simple_white')
    fig.add_shape(type='line', x0=x_lower_bound, y0=1, x1=x_upper_bound, y1=1,
                  line={'color': 'black', 'width': 1, 'dash': 'dot'})
    fig.add_shape(type='line', x0=x_lower_bound, y0=0, x1=x_upper_bound, y1=0,
                  line={'color': 'black', 'width': 1, 'dash': 'dot'})
    if show:
        fig.show()
    if save:
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        if not os.path.exists(os.path.join(out_dir, 'state_trajectory')):
            os.mkdir(os.path.join(out_dir, 'state_trajectory'))
        plot_path = os.path.join(out_dir, 'state_trajectory',
                                 target_name + '_' + agent + '_' + extension + '.pdf')
        fig.write_image(plot_path)
    img = fig.to_image()
    img = np.frombuffer(img, dtype='uint8')  # np.fromstring is deprecated for binary data
    img = cv2.imdecode(img, cv2.IMREAD_UNCHANGED)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img
```

**`convert_to_prob_dist`** · `analysis/utils/trajectory.py` · cogtoolslab/projection_block_construction (★ 0, python)
`body_hash`: `4c9c61c1467e389c0f9082afbce8004aa6dff10f6c13a380465551c308a77347`

```python
def convert_to_prob_dist(DICT):
    """
    convert the count values in a dictionary DICT to probabilities
    """
    pdist = np.array(list(DICT.values())) / np.sum(list(DICT.values()))
    try:
        assert np.isclose(np.sum(pdist), 1.0)
    except:
        print('Sum of pdist = {}, but should be 1.'.format(np.sum(pdist)))
    P = dict(zip(list(DICT.keys()), list(pdist)))
    return P
```
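For example, visit counts over three states normalize to a distribution whose entropy can be computed directly (a small illustration; the counts are made up, not experiment data):

```python
from scipy.stats import entropy

counts = {'state_a': 6, 'state_b': 3, 'state_c': 1}
P = convert_to_prob_dist(counts)
# P -> {'state_a': 0.6, 'state_b': 0.3, 'state_c': 0.1}
H = entropy(list(P.values()))  # ~0.898 nats for this distribution
```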
**`get_sparsity_over_states`** · `analysis/utils/trajectory.py` · cogtoolslab/projection_block_construction (★ 0, python)
`body_hash`: `328f74d4fe8d9ea618e3bdd69f77118ea03e183208460ae9c09ca217dbbd1d0d`

```python
def get_sparsity_over_states(data=[], target='hand_selected_004', phase='pre', metric='entropy'):
    """
    calculate sparsity over states visited for particular target and phase
    metric: ['entropy', 'mean']
    """
    if len(data) == 0:
        raise Exception('No data was passed in! Please pass in data.')
    # string form of an empty (all-zero) 18x13 block map
    empty_world = '[0 0 0 0 0 0 0 0 0 0 0 0 0]' * 18
    t = GenericBuildGraph(target_name=target)
    a = data[(data.targetName == target) & (data.phase_extended == phase)]
    a = a.groupby('gameID')
    a.apply(lambda g: t.add_build_path(g))
    W = dict()
    for i, (k1, layer) in enumerate(t.world_layers.items()):
        for j, (k2, node) in enumerate(layer.nodes.items()):
            if k2 != empty_world:
                W[k2] = node.visits
    _ = W.pop(empty_world, None)
    P = convert_to_prob_dist(W)
    if metric == 'entropy':
        stat = entropy(list(P.values()))
    elif metric == 'mean':
        stat = np.mean(list(W.values()))
    elif metric == 'gini':
        stat = gini_coefficient(list(W.values()))
    return (stat, P, W)
```

**`get_sparsity_over_edges`** · `analysis/utils/trajectory.py` · cogtoolslab/projection_block_construction (★ 0, python)
`body_hash`: `57190b8241eab6abe22cb7de457fe120152cb9ca429569ab91215a6748fb9b98`

```python
def get_sparsity_over_edges(data=[], target='hand_selected_004', phase='pre', metric='entropy'):
    """
    calculate entropy over states visited for particular target and phase
    """
    if len(data) == 0:
        raise Exception('No data was passed in! Please pass in data.')
    t = GenericBuildGraph(target_name=target)
    a = data[(data.targetName == target) & (data.phase_extended == phase)]
    a = a.groupby('gameID')
    a.apply(lambda g: t.add_build_path(g))
    W = dict()
    parent_xs = []
    parent_ys = []
    child_xs = []
    child_ys = []
    edge_ws = []
    for i, (k1, layer) in enumerate(t.world_layers.items()):
        for j, (k2, node) in enumerate(layer.nodes.items()):
            parent_x = node.x
            parent_y = node.y
            for _, e in enumerate(node.out_edges):
                child_y = e.target.y
                child_x = e.target.x
                parent_xs.append(parent_x)
                child_xs.append(child_x)
                parent_ys.append(parent_y)
                child_ys.append(child_y)
                edge_ws.append(e.visits)
    E = pd.DataFrame([parent_xs, parent_ys, child_xs, child_ys, edge_ws]).transpose()
    E.columns = ['parent_x', 'parent_y', 'child_x', 'child_y', 'edge_weight']
    E['edge_freq'] = E['edge_weight'] / E['edge_weight'].sum()
    assert np.isclose(E['edge_freq'].sum(), 1)
    if metric == 'entropy':
        stat = entropy(E['edge_freq'])
    elif metric == 'mean':
        stat = np.mean(E['edge_weight'])
    elif metric == 'gini':
        stat = gini_coefficient(E['edge_weight'])
    return (stat, E)
```
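Both sparsity functions can also report a Gini coefficient, but `gini_coefficient` itself does not appear among these rows. A sketch of the standard definition it presumably implements (not the repository's actual code):

```python
import numpy as np

def gini_coefficient(values):
    """Gini coefficient of a 1-D collection of non-negative counts/weights."""
    x = np.sort(np.asarray(values, dtype=float))  # ascending order
    n = len(x)
    if n == 0 or x.sum() == 0:
        return 0.0
    index = np.arange(1, n + 1)
    # Closed form over sorted values: 0 = perfectly even, near 1 = concentrated.
    return (2 * np.sum(index * x)) / (n * x.sum()) - (n + 1) / n
```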
**`get_world_node`** · `analysis/utils/trajectory.py` · cogtoolslab/projection_block_construction (★ 0, python)
`body_hash`: `1082bbb4eea943f8e304e2098b47d1d43b6c6b9121d7aafa42502d90a526054d`

```python
def get_world_node(self, discrete_world, f1_score, target_name):
    """
    add world state into layer
    if already exists, then returns the existing node.
    otherwise, creates new node and returns it
    """
    world_str = convert_to_str(discrete_world)
    if world_str in self.nodes:
        existing_node = self.nodes[world_str]
        existing_node.visit()
        assert existing_node.f1_score == f1_score
        return existing_node
    else:
        new_node = GenericNode(discrete_world, f1_score, target_name)
        self.nodes[world_str] = new_node
        return new_node
```

**`reset_prev_node`** · `analysis/utils/trajectory.py` · cogtoolslab/projection_block_construction (★ 0, python)
`body_hash`: `16b305c4540ceabec2ee5f02fcb0f12ff96f85358aa959a20cd8c38a70927e3e`

```python
def reset_prev_node(self):
    """
    Reset pointer to previous node
    """
    self.prev_node = self.root
```

**`add_build_path`** · `analysis/utils/trajectory.py` · cogtoolslab/projection_block_construction (★ 0, python)
`body_hash`: `7f4505ef27a4e9ad8433d96dc203514fb1bb4b457fc506c986347914d5b5d29c`

```python
def add_build_path(self, df):
    """
    adds series of block placements from dataframe (for one build sequence) to the tree
    """
    self.root.visit()
    df.apply(lambda row: self.add_block_placement(row), axis=1)
    self.reset_prev_node()
```
599b0284385ea65c2d0299b62988f516d984e5b2b6af45c0088891406e1ac994 | def crop_square(im, r, c, sz):
'\n crop image into a square of size sz,\n '
return im[(r:(r + sz), c:(c + sz))] | crop image into a square of size sz, | vel/api/data/image_ops.py | crop_square | cclauss/vel | 273 | python | def crop_square(im, r, c, sz):
'\n \n '
return im[(r:(r + sz), c:(c + sz))] | def crop_square(im, r, c, sz):
'\n \n '
return im[(r:(r + sz), c:(c + sz))]<|docstring|>crop image into a square of size sz,<|endoftext|> |
267fc6fcec6ab0d284974ff39b6b8e05b366358c7db33157c35195aa74ba3184 | def crop(im, r, c, sz_h, sz_w):
'\n crop image into a square of size sz,\n '
return im[(r:(r + sz_h), c:(c + sz_w))] | crop image into a square of size sz, | vel/api/data/image_ops.py | crop | cclauss/vel | 273 | python | def crop(im, r, c, sz_h, sz_w):
'\n \n '
return im[(r:(r + sz_h), c:(c + sz_w))] | def crop(im, r, c, sz_h, sz_w):
'\n \n '
return im[(r:(r + sz_h), c:(c + sz_w))]<|docstring|>crop image into a square of size sz,<|endoftext|> |
a8a48a8c0220290d67d8c578c0792d21e0b391c3cefa08991af8c84ede277c39 | def center_crop(im, min_sz=None):
' Returns a center crop of an image'
(r, c, *_) = im.shape
if (min_sz is None):
min_sz = min(r, c)
start_r = math.ceil(((r - min_sz) / 2))
start_c = math.ceil(((c - min_sz) / 2))
return crop_square(im, start_r, start_c, min_sz) | Returns a center crop of an image | vel/api/data/image_ops.py | center_crop | cclauss/vel | 273 | python | def center_crop(im, min_sz=None):
' '
(r, c, *_) = im.shape
if (min_sz is None):
min_sz = min(r, c)
start_r = math.ceil(((r - min_sz) / 2))
start_c = math.ceil(((c - min_sz) / 2))
return crop_square(im, start_r, start_c, min_sz) | def center_crop(im, min_sz=None):
' '
(r, c, *_) = im.shape
if (min_sz is None):
min_sz = min(r, c)
start_r = math.ceil(((r - min_sz) / 2))
start_c = math.ceil(((c - min_sz) / 2))
return crop_square(im, start_r, start_c, min_sz)<|docstring|>Returns a center crop of an image<|endoftext|> |
39ccd591f4d52803bedf2e94e87afefaefe6982d0370634954d37461abdb7edc | def scale_to(x, ratio, targ):
'Calculate dimension of an image during scaling with aspect ratio'
return max(math.floor((x * ratio)), targ) | Calculate dimension of an image during scaling with aspect ratio | vel/api/data/image_ops.py | scale_to | cclauss/vel | 273 | python | def scale_to(x, ratio, targ):
return max(math.floor((x * ratio)), targ) | def scale_to(x, ratio, targ):
return max(math.floor((x * ratio)), targ)<|docstring|>Calculate dimension of an image during scaling with aspect ratio<|endoftext|> |
93b2d008edaf3c10b6f1b543c59d4ab6b64ddc57393a573afb9c76779804c99b | def scale_min(im, targ, interpolation=cv2.INTER_AREA):
' Scales the image so that the smallest axis is of size targ.\n\n Arguments:\n im (array): image\n targ (int): target size\n '
(r, c, *_) = im.shape
ratio = (targ / min(r, c))
sz = (scale_to(c, ratio, targ), scale_to(r, ratio, targ))
return cv2.resize(im, sz, interpolation=interpolation) | Scales the image so that the smallest axis is of size targ.
Arguments:
im (array): image
targ (int): target size | vel/api/data/image_ops.py | scale_min | cclauss/vel | 273 | python | def scale_min(im, targ, interpolation=cv2.INTER_AREA):
' Scales the image so that the smallest axis is of size targ.\n\n Arguments:\n im (array): image\n targ (int): target size\n '
(r, c, *_) = im.shape
ratio = (targ / min(r, c))
sz = (scale_to(c, ratio, targ), scale_to(r, ratio, targ))
return cv2.resize(im, sz, interpolation=interpolation) | def scale_min(im, targ, interpolation=cv2.INTER_AREA):
' Scales the image so that the smallest axis is of size targ.\n\n Arguments:\n im (array): image\n targ (int): target size\n '
(r, c, *_) = im.shape
ratio = (targ / min(r, c))
sz = (scale_to(c, ratio, targ), scale_to(r, ratio, targ))
return cv2.resize(im, sz, interpolation=interpolation)<|docstring|>Scales the image so that the smallest axis is of size targ.
Arguments:
im (array): image
targ (int): target size<|endoftext|> |
6652eee3f443333a7f6ff878b69a07f63e924cc2b24b4c786a9da0eb19cebffb | def rotate_img(im, deg, mode=cv2.BORDER_CONSTANT, interpolation=cv2.INTER_AREA):
' Rotates an image by deg degrees\n\n Arguments:\n deg (float): degree to rotate.\n '
(r, c, *_) = im.shape
M = cv2.getRotationMatrix2D(((c // 2), (r // 2)), deg, 1)
return cv2.warpAffine(im, M, (c, r), borderMode=mode, flags=(cv2.WARP_FILL_OUTLIERS + interpolation)) | Rotates an image by deg degrees
Arguments:
deg (float): degree to rotate. | vel/api/data/image_ops.py | rotate_img | cclauss/vel | 273 | python | def rotate_img(im, deg, mode=cv2.BORDER_CONSTANT, interpolation=cv2.INTER_AREA):
' Rotates an image by deg degrees\n\n Arguments:\n deg (float): degree to rotate.\n '
(r, c, *_) = im.shape
M = cv2.getRotationMatrix2D(((c // 2), (r // 2)), deg, 1)
return cv2.warpAffine(im, M, (c, r), borderMode=mode, flags=(cv2.WARP_FILL_OUTLIERS + interpolation)) | def rotate_img(im, deg, mode=cv2.BORDER_CONSTANT, interpolation=cv2.INTER_AREA):
' Rotates an image by deg degrees\n\n Arguments:\n deg (float): degree to rotate.\n '
(r, c, *_) = im.shape
M = cv2.getRotationMatrix2D(((c // 2), (r // 2)), deg, 1)
return cv2.warpAffine(im, M, (c, r), borderMode=mode, flags=(cv2.WARP_FILL_OUTLIERS + interpolation))<|docstring|>Rotates an image by deg degrees
Arguments:
deg (float): degree to rotate.<|endoftext|> |
a174e5857c7730d5fc7a8750f7fe8a4739f985f013159bfcec8a80e36c88d1b6 | def lighting(im, b, c):
" adjusts image's balance and contrast"
if ((b == 0) and (c == 1)):
return im
mu = np.average(im)
return np.clip(((((im - mu) * c) + mu) + b), 0.0, 1.0).astype(np.float32) | adjusts image's balance and contrast | vel/api/data/image_ops.py | lighting | cclauss/vel | 273 | python | def lighting(im, b, c):
" "
if ((b == 0) and (c == 1)):
return im
mu = np.average(im)
return np.clip(((((im - mu) * c) + mu) + b), 0.0, 1.0).astype(np.float32) | def lighting(im, b, c):
" "
if ((b == 0) and (c == 1)):
return im
mu = np.average(im)
return np.clip(((((im - mu) * c) + mu) + b), 0.0, 1.0).astype(np.float32)<|docstring|>adjusts image's balance and contrast<|endoftext|> |
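Taken together, the vel image helpers above compose into a small augmentation pipeline. A minimal usage sketch, assuming cv2 and numpy are installed, the functions above are in scope, and 'sample.jpg' is a hypothetical input file:

import cv2
import numpy as np

im = cv2.imread('sample.jpg').astype(np.float32) / 255.0  # HWC image scaled to [0, 1]
im = scale_min(im, 224)       # resize so the shortest side is 224 px
im = rotate_img(im, 15)       # rotate 15 degrees about the image center
im = lighting(im, 0.05, 1.2)  # brightness +0.05, contrast x1.2
im = center_crop(im, 224)     # final 224 x 224 square via crop_square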
8f93c46df02dce59d69f0ec030767abc40113bcf450200c130d1a2bdfaa11e66 | def say_hello(to):
'Say hello'
return f'Hello {to}' | Say hello | nbdevtutorial/core.py | say_hello | VedAustin/nbdevtutorial | 0 | python | def say_hello(to):
return f'Hello {to}' | def say_hello(to):
return f'Hello {to}'<|docstring|>Say hello<|endoftext|> |
4eb8633e5f993fb0d7565996d7cbc75456b4a83ef40172fb61ccaa11bf7d6f64 | def say(self):
'Say something'
return say_hello(self.to) | Say something | nbdevtutorial/core.py | say | VedAustin/nbdevtutorial | 0 | python | def say(self):
return say_hello(self.to) | def say(self):
return say_hello(self.to)<|docstring|>Say something<|endoftext|> |
86e4afb745300fe253661c5f50cfd237d2ee9079334befff2ba67a775985cd89 | def get_fact(self):
'Get factorial'
return factorial(self.val) | Get factorial | nbdevtutorial/core.py | get_fact | VedAustin/nbdevtutorial | 0 | python | def get_fact(self):
return factorial(self.val) | def get_fact(self):
return factorial(self.val)<|docstring|>Get factorial<|endoftext|> |
c36cb816ed37ae65e9b448327cb4d70925240902f7fee313c5bb57386aff095a | def generate_wrapper(name, module, original_docstring, num_threads):
'Generate a wrapper function.\n\n Parameters\n ----------\n name : string\n Name of the function wrapped.\n module : string\n Name of the module the wrapped function is part of.\n original_docstring : string\n Docstring of the wrapped function.\n num_threads : int\n Number of threads to use.\n\n Returns\n -------\n wrapper_code : string\n A string that contains the code to the wrapper function.\n '
add_keyword_atoms = ('additional arguments docs', 'additional argument docs')
if any([(keyword in original_docstring) for keyword in add_keyword_atoms]):
additional_arg_string = ('Arguments automatically added on call are "threads=%s".\n' % num_threads)
additional_arg_code = ('kwargs["threads"] = %s' % num_threads)
else:
additional_arg_string = 'This wrapper does nothing besides calling the pyfftw function.\n'
additional_arg_code = ''
wrapper_string = ('\ndef %(name)s(*args, **kwargs):\n """A thin wrapper around pyfftw.interfaces.%(module)s.%(name)s.\n\n %(additional_arg_string)s\n Docstring of original pyfftw function:\n --------------------------------------\n %(original_docstring)s\n\n """\n %(additional_arg_code)s\n\n return _%(name)s(*args, **kwargs)\n' % locals())
return wrapper_string | Generate a wrapper function.
Parameters
----------
name : string
Name of the function wrapped.
module : string
Name of the module the wrapped function is part of.
original_docstring : string
Docstring of the wrapped function.
num_threads : int
Number of threads to use.
Returns
-------
wrapper_code : string
A string that contains the code to the wrapper function. | transparent_pyfftw/generate_wrappers.py | generate_wrapper | aeberspaecher/transparent_pyfftw | 0 | python | def generate_wrapper(name, module, original_docstring, num_threads):
'Generate a wrapper function.\n\n Parameters\n ----------\n name : string\n Name of the function wrapped.\n module : string\n Name of the module the wrapped function is part of.\n original_docstring : string\n Docstring of the wrapped function.\n num_threads : int\n Number of threads to use.\n\n Returns\n -------\n wrapper_code : string\n A string that contains the code to the wrapper function.\n '
add_keyword_atoms = ('additional arguments docs', 'additional argument docs')
if any([(keyword in original_docstring) for keyword in add_keyword_atoms]):
additional_arg_string = ('Arguments automatically added on call are "threads=%s".\n' % num_threads)
additional_arg_code = ('kwargs["threads"] = %s' % num_threads)
else:
additional_arg_string = 'This wrapper does nothing besides calling the pyfftw function.\n'
additional_arg_code = ''
wrapper_string = ('\ndef %(name)s(*args, **kwargs):\n """A thin wrapper around pyfftw.interfaces.%(module)s.%(name)s.\n\n %(additional_arg_string)s\n Docstring of original pyfftw function:\n --------------------------------------\n %(original_docstring)s\n\n """\n %(additional_arg_code)s\n\n return _%(name)s(*args, **kwargs)\n' % locals())
return wrapper_string | def generate_wrapper(name, module, original_docstring, num_threads):
'Generate a wrapper function.\n\n Parameters\n ----------\n name : string\n Name of the function wrapped.\n module : string\n Name of the module the wrapped function is part of.\n original_docstring : string\n Docstring of the wrapped function.\n num_threads : int\n Number of threads to use.\n\n Returns\n -------\n wrapper_code : string\n A string that contains the code to the wrapper function.\n '
add_keyword_atoms = ('additional arguments docs', 'additional argument docs')
if any([(keyword in original_docstring) for keyword in add_keyword_atoms]):
additional_arg_string = ('Arguments automatically added on call are "threads=%s".\n' % num_threads)
additional_arg_code = ('kwargs["threads"] = %s' % num_threads)
else:
additional_arg_string = 'This wrapper does nothing besides calling the pyfftw function.\n'
additional_arg_code = ''
wrapper_string = ('\ndef %(name)s(*args, **kwargs):\n """A thin wrapper around pyfftw.interfaces.%(module)s.%(name)s.\n\n %(additional_arg_string)s\n Docstring of original pyfftw function:\n --------------------------------------\n %(original_docstring)s\n\n """\n %(additional_arg_code)s\n\n return _%(name)s(*args, **kwargs)\n' % locals())
return wrapper_string<|docstring|>Generate a wrapper function.
Parameters
----------
name : string
Name of the function wrapped.
module : string
Name of the module the wrapped function is part of.
original_docstring : string
Docstring of the wrapped function.
num_threads : int
Number of threads to use.
Returns
-------
wrapper_code : string
A string that contains the code to the wrapper function.<|endoftext|> |
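The string returned above is meant to be exec'd with the wrapped function bound under a leading underscore. A self-contained sketch, where the stand-in _fft is illustrative rather than pyfftw's real function:

def _fft(*args, **kwargs):  # stand-in for pyfftw.interfaces.numpy_fft.fft
    return kwargs.get('threads')

code = generate_wrapper('fft', 'numpy_fft', 'see the additional argument docs', 4)
namespace = {'_fft': _fft}
exec(code, namespace)
assert namespace['fft']() == 4  # the wrapper injected threads=4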
39a837a682516a49cb1db589163ad53931ae3865190eb7243d843d036c6ed4ae | def test_driver_lifecycle(self, proc_output, driver_node):
'Test pendulum_driver lifecycle.'
proc_output.assertWaitFor('Configuring', process=driver_node, timeout=5)
proc_output.assertWaitFor('Activating', process=driver_node, timeout=5)
proc_output.assertWaitFor('Deactivating', process=driver_node, timeout=10)
pattern = re.compile('Cart position = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Cart velocity = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Pole angular velocity = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Controller force command = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Disturbance force = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
proc_output.assertWaitFor('Cleaning up', process=driver_node, timeout=5)
proc_output.assertWaitFor('Shutting down', process=driver_node, timeout=5) | Test pendulum_driver lifecycle. | model/src/pendulum/pendulum_driver/test/pendulum_driver_lifecycle.test.py | test_driver_lifecycle | swgu931/ros2-pendulum-caas | 81 | python | def test_driver_lifecycle(self, proc_output, driver_node):
proc_output.assertWaitFor('Configuring', process=driver_node, timeout=5)
proc_output.assertWaitFor('Activating', process=driver_node, timeout=5)
proc_output.assertWaitFor('Deactivating', process=driver_node, timeout=10)
pattern = re.compile('Cart position = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Cart velocity = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Pole angular velocity = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Controller force command = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Disturbance force = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
proc_output.assertWaitFor('Cleaning up', process=driver_node, timeout=5)
proc_output.assertWaitFor('Shutting down', process=driver_node, timeout=5) | def test_driver_lifecycle(self, proc_output, driver_node):
proc_output.assertWaitFor('Configuring', process=driver_node, timeout=5)
proc_output.assertWaitFor('Activating', process=driver_node, timeout=5)
proc_output.assertWaitFor('Deactivating', process=driver_node, timeout=10)
pattern = re.compile('Cart position = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Cart velocity = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Pole angular velocity = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Controller force command = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
pattern = re.compile('Disturbance force = [+-]?\\d+(?:\\.\\d+)?')
proc_output.assertWaitFor(expected_output=pattern, process=driver_node, timeout=5)
proc_output.assertWaitFor('Cleaning up', process=driver_node, timeout=5)
proc_output.assertWaitFor('Shutting down', process=driver_node, timeout=5)<|docstring|>Test pendulum_driver lifecycle.<|endoftext|> |
5f87da1b09ab47612913c6099a6568627f859b6c2c21feadb9299ad699bea574 | def test_node_graceful_shutdown(self, proc_info, driver_node):
'Test controller_node graceful shutdown.'
launch_testing.asserts.assertExitCodes(proc_info, process=driver_node) | Test controller_node graceful shutdown. | model/src/pendulum/pendulum_driver/test/pendulum_driver_lifecycle.test.py | test_node_graceful_shutdown | swgu931/ros2-pendulum-caas | 81 | python | def test_node_graceful_shutdown(self, proc_info, driver_node):
launch_testing.asserts.assertExitCodes(proc_info, process=driver_node) | def test_node_graceful_shutdown(self, proc_info, driver_node):
launch_testing.asserts.assertExitCodes(proc_info, process=driver_node)<|docstring|>Test controller_node graceful shutdown.<|endoftext|> |
ec3ffd64f83e0191c2f3e11b01e03d1e98476a770d21e5cd4aaad3974e48e2af | def UI(app_in=None, executor=None, **kwargs):
'\n :param environment: The\n :class:`~stalker.models.env.EnvironmentBase` can be None to let the UI to\n work in "environmentless" mode in which it only creates data in database\n and copies the resultant version file path to clipboard.\n\n :param mode: Runs the UI either in Read-Write (0) mode or in Read-Only (1)\n mode.\n\n :param app_in: A Qt Application instance, which you can pass to let the UI\n be attached to the given applications event process.\n\n :param executor: Instead of calling app.exec_ the UI will call this given\n function. It also passes the created app instance to this executor.\n\n '
from anima.ui.base import ui_caller
return ui_caller(app_in, executor, ToolboxDialog, **kwargs) | :param environment: The
:class:`~stalker.models.env.EnvironmentBase` can be None to let the UI to
work in "environmentless" mode in which it only creates data in database
and copies the resultant version file path to clipboard.
:param mode: Runs the UI either in Read-Write (0) mode or in Read-Only (1)
mode.
:param app_in: A Qt Application instance, which you can pass to let the UI
be attached to the given applications event process.
:param executor: Instead of calling app.exec_ the UI will call this given
function. It also passes the created app instance to this executor. | anima/env/fusion/toolbox.py | UI | tws0002/anima | 0 | python | def UI(app_in=None, executor=None, **kwargs):
'\n :param environment: The\n :class:`~stalker.models.env.EnvironmentBase` can be None to let the UI to\n work in "environmentless" mode in which it only creates data in database\n and copies the resultant version file path to clipboard.\n\n :param mode: Runs the UI either in Read-Write (0) mode or in Read-Only (1)\n mode.\n\n :param app_in: A Qt Application instance, which you can pass to let the UI\n be attached to the given applications event process.\n\n :param executor: Instead of calling app.exec_ the UI will call this given\n function. It also passes the created app instance to this executor.\n\n '
from anima.ui.base import ui_caller
return ui_caller(app_in, executor, ToolboxDialog, **kwargs) | def UI(app_in=None, executor=None, **kwargs):
'\n :param environment: The\n :class:`~stalker.models.env.EnvironmentBase` can be None to let the UI to\n work in "environmentless" mode in which it only creates data in database\n and copies the resultant version file path to clipboard.\n\n :param mode: Runs the UI either in Read-Write (0) mode or in Read-Only (1)\n mode.\n\n :param app_in: A Qt Application instance, which you can pass to let the UI\n be attached to the given applications event process.\n\n :param executor: Instead of calling app.exec_ the UI will call this given\n function. It also passes the created app instance to this executor.\n\n '
from anima.ui.base import ui_caller
return ui_caller(app_in, executor, ToolboxDialog, **kwargs)<|docstring|>:param environment: The
:class:`~stalker.models.env.EnvironmentBase` can be None to let the UI to
work in "environmentless" mode in which it only creates data in database
and copies the resultant version file path to clipboard.
:param mode: Runs the UI either in Read-Write (0) mode or in Read-Only (1)
mode.
:param app_in: A Qt Application instance, which you can pass to let the UI
be attached to the given applications event process.
:param executor: Instead of calling app.exec_ the UI will call this given
function. It also passes the created app instance to this executor.<|endoftext|> |
34107f75d448d11b28a6d22d211eadc23b10bb7551a96bfae573e09671829e2f | def add_button(label, layout, callback, tooltip=''):
'A wrapper for button creation\n\n :param label: The label of the button\n :param layout: The layout that the button is going to be placed under.\n :param callback: The callable that will be called when the button is\n clicked.\n :param str tooltip: Optional tooltip for the button\n :return:\n '
button = QtWidgets.QPushButton(layout.parentWidget())
button.setText(label)
layout.addWidget(button)
button.setToolTip(tooltip)
QtCore.QObject.connect(button, QtCore.SIGNAL('clicked()'), callback)
return button | A wrapper for button creation
:param label: The label of the button
:param layout: The layout that the button is going to be placed under.
:param callback: The callable that will be called when the button is
clicked.
:param str tooltip: Optional tooltip for the button
:return: | anima/env/fusion/toolbox.py | add_button | tws0002/anima | 0 | python | def add_button(label, layout, callback, tooltip=):
'A wrapper for button creation\n\n :param label: The label of the button\n :param layout: The layout that the button is going to be placed under.\n :param callback: The callable that will be called when the button is\n clicked.\n :param str tooltip: Optional tooltip for the button\n :return:\n '
button = QtWidgets.QPushButton(layout.parentWidget())
button.setText(label)
layout.addWidget(button)
button.setToolTip(tooltip)
QtCore.QObject.connect(button, QtCore.SIGNAL('clicked()'), callback)
return button | def add_button(label, layout, callback, tooltip=):
'A wrapper for button creation\n\n :param label: The label of the button\n :param layout: The layout that the button is going to be placed under.\n :param callback: The callable that will be called when the button is\n clicked.\n :param str tooltip: Optional tooltip for the button\n :return:\n '
button = QtWidgets.QPushButton(layout.parentWidget())
button.setText(label)
layout.addWidget(button)
button.setToolTip(tooltip)
QtCore.QObject.connect(button, QtCore.SIGNAL('clicked()'), callback)
return button<|docstring|>A wrapper for button creation
:param label: The label of the button
:param layout: The layout that the button is going to be placed under.
:param callback: The callable that will be called when the button is
clicked.
:param str tooltip: Optional tooltip for the button
:return:<|endoftext|> |
743dfcddbefc497d18fce7b372d10a1ecdeb4a051e6c33096f5ea5a8af357ea3 | def setup_ui(self):
'add tools\n '
main_tab_widget = QtWidgets.QTabWidget(self.widget())
self.addWidget(main_tab_widget)
general_tab_widget = QtWidgets.QWidget(self.widget())
general_tab_vertical_layout = QtWidgets.QVBoxLayout()
general_tab_vertical_layout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
general_tab_widget.setLayout(general_tab_vertical_layout)
main_tab_widget.addTab(general_tab_widget, 'Generic')
add_button('Version Creator', general_tab_vertical_layout, GenericTools.version_creator, GenericTools.version_creator.__doc__)
add_button('Loader Report', general_tab_vertical_layout, GenericTools.loader_report, GenericTools.loader_report.__doc__)
general_tab_vertical_layout.addStretch() | add tools | anima/env/fusion/toolbox.py | setup_ui | tws0002/anima | 0 | python | def setup_ui(self):
'\n '
main_tab_widget = QtWidgets.QTabWidget(self.widget())
self.addWidget(main_tab_widget)
general_tab_widget = QtWidgets.QWidget(self.widget())
general_tab_vertical_layout = QtWidgets.QVBoxLayout()
general_tab_vertical_layout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
general_tab_widget.setLayout(general_tab_vertical_layout)
main_tab_widget.addTab(general_tab_widget, 'Generic')
add_button('Version Creator', general_tab_vertical_layout, GenericTools.version_creator, GenericTools.version_creator.__doc__)
add_button('Loader Report', general_tab_vertical_layout, GenericTools.loader_report, GenericTools.loader_report.__doc__)
general_tab_vertical_layout.addStretch() | def setup_ui(self):
'\n '
main_tab_widget = QtWidgets.QTabWidget(self.widget())
self.addWidget(main_tab_widget)
general_tab_widget = QtWidgets.QWidget(self.widget())
general_tab_vertical_layout = QtWidgets.QVBoxLayout()
general_tab_vertical_layout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
general_tab_widget.setLayout(general_tab_vertical_layout)
main_tab_widget.addTab(general_tab_widget, 'Generic')
add_button('Version Creator', general_tab_vertical_layout, GenericTools.version_creator, GenericTools.version_creator.__doc__)
add_button('Loader Report', general_tab_vertical_layout, GenericTools.loader_report, GenericTools.loader_report.__doc__)
general_tab_vertical_layout.addStretch()<|docstring|>add tools<|endoftext|> |
cbb8d5b983ef488550ecbaea4c5798e1ee92ea2d181aa9f454a2a2284203088d | @classmethod
def version_creator(cls):
'version creator\n '
from anima.ui.scripts import fusion
fusion.version_creator() | version creator | anima/env/fusion/toolbox.py | version_creator | tws0002/anima | 0 | python | @classmethod
def version_creator(cls):
'\n '
from anima.ui.scripts import fusion
fusion.version_creator() | @classmethod
def version_creator(cls):
'\n '
from anima.ui.scripts import fusion
fusion.version_creator()<|docstring|>version creator<|endoftext|> |
a13be6836a1af081eb4df8f66d013b7daf4034c56da7b023d48730bab80cde48 | @classmethod
def loader_report(cls):
'returns the loaders in this comp\n '
from anima.env import fusion
fs = fusion.Fusion()
comp = fs.comp
paths = []
all_loader_nodes = comp.GetToolList(False, 'Loader').values()
for loader_node in all_loader_nodes:
node_input_list = loader_node.GetInputList()
for input_entry_key in node_input_list.keys():
input_entry = node_input_list[input_entry_key]
input_id = input_entry.GetAttrs()['INPS_ID']
if (input_id == 'Clip'):
value = input_entry[0]
if ((value != '') or (value is not None)):
paths.append(value)
for path in sorted(paths):
print(path) | returns the loaders in this comp | anima/env/fusion/toolbox.py | loader_report | tws0002/anima | 0 | python | @classmethod
def loader_report(cls):
'\n '
from anima.env import fusion
fs = fusion.Fusion()
comp = fs.comp
paths = []
all_loader_nodes = comp.GetToolList(False, 'Loader').values()
for loader_node in all_loader_nodes:
node_input_list = loader_node.GetInputList()
for input_entry_key in node_input_list.keys():
input_entry = node_input_list[input_entry_key]
input_id = input_entry.GetAttrs()['INPS_ID']
if (input_id == 'Clip'):
value = input_entry[0]
if ((value != '') or (value is not None)):
paths.append(value)
for path in sorted(paths):
print(path) | @classmethod
def loader_report(cls):
'\n '
from anima.env import fusion
fs = fusion.Fusion()
comp = fs.comp
paths = []
all_loader_nodes = comp.GetToolList(False, 'Loader').values()
for loader_node in all_loader_nodes:
node_input_list = loader_node.GetInputList()
for input_entry_key in node_input_list.keys():
input_entry = node_input_list[input_entry_key]
input_id = input_entry.GetAttrs()['INPS_ID']
if (input_id == 'Clip'):
value = input_entry[0]
if ((value != '') or (value is not None)):
paths.append(value)
for path in sorted(paths):
print(path)<|docstring|>returns the loaders in this comp<|endoftext|> |
cab27ee4fff6470853f2912353efe995255da8f1fdb5289d4007a0f3aa20a389 | def forward(self, hidden):
'\n [2, B, enc_hidden_size // 2]\n => [B, 2, enc_hidden_size // 2] (transpose)\n => [B, enc_hidden_size] (view)\n => [B, dec_hidden_size] (linear)\n '
if (self.rnn_type == 'GRU'):
h = hidden
B = h.size(1)
h = h.transpose(0, 1).contiguous().view(B, self.enc_hidden_size)
return self.act(self.linear(h))
elif (self.rnn_type == 'LSTM'):
(h, c) = hidden
B = h.size(1)
h = h.transpose(0, 1).contiguous().view(B, self.enc_hidden_size)
c = c.transpose(0, 1).contiguous().view(B, self.enc_hidden_size)
h = self.act(self.linear_h(h))
c = self.act(self.linear_c(c))
h = (h, c)
return h | [2, B, enc_hidden_size // 2]
=> [B, 2, enc_hidden_size // 2] (transpose)
=> [B, enc_hidden_size] (view)
=> [B, dec_hidden_size] (linear) | layers/bridge.py | forward | nrjkhtr1/FocusSeq2Seq | 112 | python | def forward(self, hidden):
'\n [2, B, enc_hidden_size // 2]\n => [B, 2, enc_hidden_size // 2] (transpose)\n => [B, enc_hidden_size] (view)\n => [B, dec_hidden_size] (linear)\n '
if (self.rnn_type == 'GRU'):
h = hidden
B = h.size(1)
h = h.transpose(0, 1).contiguous().view(B, self.enc_hidden_size)
return self.act(self.linear(h))
elif (self.rnn_type == 'LSTM'):
(h, c) = hidden
B = h.size(1)
h = h.transpose(0, 1).contiguous().view(B, self.enc_hidden_size)
c = c.transpose(0, 1).contiguous().view(B, self.enc_hidden_size)
h = self.act(self.linear_h(h))
c = self.act(self.linear_c(c))
h = (h, c)
return h | def forward(self, hidden):
'\n [2, B, enc_hidden_size // 2]\n => [B, 2, enc_hidden_size // 2] (transpose)\n => [B, enc_hidden_size] (view)\n => [B, dec_hidden_size] (linear)\n '
if (self.rnn_type == 'GRU'):
h = hidden
B = h.size(1)
h = h.transpose(0, 1).contiguous().view(B, self.enc_hidden_size)
return self.act(self.linear(h))
elif (self.rnn_type == 'LSTM'):
(h, c) = hidden
B = h.size(1)
h = h.transpose(0, 1).contiguous().view(B, self.enc_hidden_size)
c = c.transpose(0, 1).contiguous().view(B, self.enc_hidden_size)
h = self.act(self.linear_h(h))
c = self.act(self.linear_c(c))
h = (h, c)
return h<|docstring|>[2, B, enc_hidden_size // 2]
=> [B, 2, enc_hidden_size // 2] (transpose)
=> [B, enc_hidden_size] (view)
=> [B, dec_hidden_size] (linear)<|endoftext|> |
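A shape walkthrough of the GRU branch above, with torch assumed available, illustrative sizes, and tanh standing in for the module's activation:

import torch
import torch.nn as nn

B, enc, dec = 3, 8, 10
hidden = torch.randn(2, B, enc // 2)                  # bidirectional GRU state: [2, B, enc/2]
h = hidden.transpose(0, 1).contiguous().view(B, enc)  # -> [B, enc]
h = torch.tanh(nn.Linear(enc, dec)(h))                # -> [B, dec] initial decoder state
print(h.shape)                                        # torch.Size([3, 10])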
e3b8c49c103504bc28fe738bd164e9ef7b58744b5c12ee74211043342d733484 | def convert_time_stamp(timestamp: str) -> str:
' Function to help convert timestamps from s to H:M:S '
delta = datetime.timedelta(seconds=float(timestamp))
seconds = (delta - datetime.timedelta(microseconds=delta.microseconds))
return str(seconds) | Function to help convert timestamps from s to H:M:S | tscribe/__init__.py | convert_time_stamp | banterco/aws_transcribe_to_docx | 0 | python | def convert_time_stamp(timestamp: str) -> str:
' '
delta = datetime.timedelta(seconds=float(timestamp))
seconds = (delta - datetime.timedelta(microseconds=delta.microseconds))
return str(seconds) | def convert_time_stamp(timestamp: str) -> str:
' '
delta = datetime.timedelta(seconds=float(timestamp))
seconds = (delta - datetime.timedelta(microseconds=delta.microseconds))
return str(seconds)<|docstring|>Function to help convert timestamps from s to H:M:S<|endoftext|> |
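For example, the subtraction drops fractional seconds and str() renders the rest as H:MM:SS:

print(convert_time_stamp('93.45'))   # 0:01:33
print(convert_time_stamp('3675.0'))  # 1:01:15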
9cd8771db7de2f29330b29e7e9f749b7b2ead137ad4ce50141fc6e9bcacb4543 | def load_json_as_dict(filepath: str) -> dict:
'Load in JSON file and return as dict'
logging.info('Loading json')
json_filepath = Path(filepath)
assert json_filepath.is_file(), 'JSON file does not exist'
data = json.load(open(json_filepath.absolute(), 'r', encoding='utf-8'))
assert ('jobName' in data)
assert ('results' in data)
assert ('status' in data)
assert (data['status'] == 'COMPLETED'), 'JSON file not shown as completed.'
logging.debug('json checks passed')
return data | Load in JSON file and return as dict | tscribe/__init__.py | load_json_as_dict | banterco/aws_transcribe_to_docx | 0 | python | def load_json_as_dict(filepath: str) -> dict:
logging.info('Loading json')
json_filepath = Path(filepath)
assert json_filepath.is_file(), 'JSON file does not exist'
data = json.load(open(json_filepath.absolute(), 'r', encoding='utf-8'))
assert ('jobName' in data)
assert ('results' in data)
assert ('status' in data)
assert (data['status'] == 'COMPLETED'), 'JSON file not shown as completed.'
logging.debug('json checks passed')
return data | def load_json_as_dict(filepath: str) -> dict:
logging.info('Loading json')
json_filepath = Path(filepath)
assert json_filepath.is_file(), 'JSON file does not exist'
data = json.load(open(json_filepath.absolute(), 'r', encoding='utf-8'))
assert ('jobName' in data)
assert ('results' in data)
assert ('status' in data)
assert (data['status'] == 'COMPLETED'), 'JSON file not shown as completed.'
logging.debug('json checks passed')
return data<|docstring|>Load in JSON file and return as dict<|endoftext|> |
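A minimal document that satisfies every assertion above (illustrative only; a real Amazon Transcribe result also carries transcripts and timing data):

import json

minimal = {'jobName': 'demo-job', 'status': 'COMPLETED', 'results': {'items': []}}
with open('demo.json', 'w', encoding='utf-8') as file:
    json.dump(minimal, file)
data = load_json_as_dict('demo.json')  # passes the jobName/results/status checks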
d44c172287de8bae2721a40e52de584e8275292e60af7c8a411e14ae0ccffe3d | def decode_transcript_to_dataframe(data: str):
'Decode the transcript into a pandas dataframe'
logging.info('Decoding transcript')
decoded_data = {'start_time': [], 'end_time': [], 'speaker': [], 'comment': []}
if ('speaker_labels' in data['results'].keys()):
logging.debug('Transcript has speaker_labels')
for segment in data['results']['speaker_labels']['segments']:
if (len(segment['items']) > 0):
decoded_data['start_time'].append(convert_time_stamp(segment['start_time']))
decoded_data['end_time'].append(convert_time_stamp(segment['end_time']))
decoded_data['speaker'].append(segment['speaker_label'])
decoded_data['comment'].append('')
for word in segment['items']:
pronunciations = list(filter((lambda x: (x['type'] == 'pronunciation')), data['results']['items']))
word_result = list(filter((lambda x: ((x['start_time'] == word['start_time']) and (x['end_time'] == word['end_time']))), pronunciations))
result = sorted(word_result[(- 1)]['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'][(- 1)] += (' ' + result['content'])
try:
word_result_index = data['results']['items'].index(word_result[0])
next_item = data['results']['items'][(word_result_index + 1)]
if (next_item['type'] == 'punctuation'):
decoded_data['comment'][(- 1)] += next_item['alternatives'][0]['content']
except IndexError:
pass
elif ('channel_labels' in data['results'].keys()):
logging.debug('Transcript has channel_labels')
for word in data['results']['items']:
if ('start_time' not in word.keys()):
continue
channel = list(filter((lambda x: (word in x['items'])), data['results']['channel_labels']['channels']))[0]['channel_label']
if ((channel in decoded_data['speaker']) and (decoded_data['speaker'][(- 1)] == channel)):
current_word = sorted(word['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'][(- 1)] += (' ' + current_word['content'])
else:
decoded_data['start_time'].append(convert_time_stamp(word['start_time']))
decoded_data['end_time'].append(convert_time_stamp(word['end_time']))
decoded_data['speaker'].append(channel)
current_word = sorted(word['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'].append(current_word['content'])
try:
word_result_index = data['results']['items'].index(word)
next_item = data['results']['items'][(word_result_index + 1)]
if (next_item['type'] == 'punctuation'):
decoded_data['comment'][(- 1)] += next_item['alternatives'][0]['content']
except IndexError:
pass
else:
logging.debug('No speaker_labels or channel_labels')
decoded_data['start_time'] = convert_time_stamp(list(filter((lambda x: (x['type'] == 'pronunciation')), data['results']['items']))[0]['start_time'])
decoded_data['end_time'] = convert_time_stamp(list(filter((lambda x: (x['type'] == 'pronunciation')), data['results']['items']))[(- 1)]['end_time'])
decoded_data['speaker'].append('')
decoded_data['comment'].append('')
for word in data['results']['items']:
result = sorted(word['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'][(- 1)] += (' ' + result['content'])
try:
word_result_index = data['results']['items'].index(word)
next_item = data['results']['items'][(word_result_index + 1)]
if (next_item['type'] == 'punctuation'):
decoded_data['comment'][(- 1)] += next_item['alternatives'][0]['content']
except IndexError:
pass
dataframe = pandas.DataFrame(decoded_data, columns=['start_time', 'end_time', 'speaker', 'comment'])
dataframe['comment'] = dataframe['comment'].str.lstrip()
return dataframe | Decode the transcript into a pandas dataframe | tscribe/__init__.py | decode_transcript_to_dataframe | banterco/aws_transcribe_to_docx | 0 | python | def decode_transcript_to_dataframe(data: str):
logging.info('Decoding transcript')
decoded_data = {'start_time': [], 'end_time': [], 'speaker': [], 'comment': []}
if ('speaker_labels' in data['results'].keys()):
logging.debug('Transcript has speaker_labels')
for segment in data['results']['speaker_labels']['segments']:
if (len(segment['items']) > 0):
decoded_data['start_time'].append(convert_time_stamp(segment['start_time']))
decoded_data['end_time'].append(convert_time_stamp(segment['end_time']))
decoded_data['speaker'].append(segment['speaker_label'])
decoded_data['comment'].append('')
for word in segment['items']:
pronunciations = list(filter((lambda x: (x['type'] == 'pronunciation')), data['results']['items']))
word_result = list(filter((lambda x: ((x['start_time'] == word['start_time']) and (x['end_time'] == word['end_time']))), pronunciations))
result = sorted(word_result[(- 1)]['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'][(- 1)] += (' ' + result['content'])
try:
word_result_index = data['results']['items'].index(word_result[0])
next_item = data['results']['items'][(word_result_index + 1)]
if (next_item['type'] == 'punctuation'):
decoded_data['comment'][(- 1)] += next_item['alternatives'][0]['content']
except IndexError:
pass
elif ('channel_labels' in data['results'].keys()):
logging.debug('Transcript has channel_labels')
for word in data['results']['items']:
if ('start_time' not in word.keys()):
continue
channel = list(filter((lambda x: (word in x['items'])), data['results']['channel_labels']['channels']))[0]['channel_label']
if ((channel in decoded_data['speaker']) and (decoded_data['speaker'][(- 1)] == channel)):
current_word = sorted(word['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'][(- 1)] += (' ' + current_word['content'])
else:
decoded_data['start_time'].append(convert_time_stamp(word['start_time']))
decoded_data['end_time'].append(convert_time_stamp(word['end_time']))
decoded_data['speaker'].append(channel)
current_word = sorted(word['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'].append(current_word['content'])
try:
word_result_index = data['results']['items'].index(word)
next_item = data['results']['items'][(word_result_index + 1)]
if (next_item['type'] == 'punctuation'):
decoded_data['comment'][(- 1)] += next_item['alternatives'][0]['content']
except IndexError:
pass
else:
logging.debug('No speaker_labels or channel_labels')
decoded_data['start_time'] = convert_time_stamp(list(filter((lambda x: (x['type'] == 'pronunciation')), data['results']['items']))[0]['start_time'])
decoded_data['end_time'] = convert_time_stamp(list(filter((lambda x: (x['type'] == 'pronunciation')), data['results']['items']))[(- 1)]['end_time'])
decoded_data['speaker'].append('')
decoded_data['comment'].append('')
for word in data['results']['items']:
result = sorted(word['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'][(- 1)] += (' ' + result['content'])
try:
word_result_index = data['results']['items'].index(word)
next_item = data['results']['items'][(word_result_index + 1)]
if (next_item['type'] == 'punctuation'):
decoded_data['comment'][(- 1)] += next_item['alternatives'][0]['content']
except IndexError:
pass
dataframe = pandas.DataFrame(decoded_data, columns=['start_time', 'end_time', 'speaker', 'comment'])
dataframe['comment'] = dataframe['comment'].str.lstrip()
return dataframe | def decode_transcript_to_dataframe(data: str):
logging.info('Decoding transcript')
decoded_data = {'start_time': [], 'end_time': [], 'speaker': [], 'comment': []}
if ('speaker_labels' in data['results'].keys()):
logging.debug('Transcript has speaker_labels')
for segment in data['results']['speaker_labels']['segments']:
if (len(segment['items']) > 0):
decoded_data['start_time'].append(convert_time_stamp(segment['start_time']))
decoded_data['end_time'].append(convert_time_stamp(segment['end_time']))
decoded_data['speaker'].append(segment['speaker_label'])
decoded_data['comment'].append('')
for word in segment['items']:
pronunciations = list(filter((lambda x: (x['type'] == 'pronunciation')), data['results']['items']))
word_result = list(filter((lambda x: ((x['start_time'] == word['start_time']) and (x['end_time'] == word['end_time']))), pronunciations))
result = sorted(word_result[(- 1)]['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'][(- 1)] += (' ' + result['content'])
try:
word_result_index = data['results']['items'].index(word_result[0])
next_item = data['results']['items'][(word_result_index + 1)]
if (next_item['type'] == 'punctuation'):
decoded_data['comment'][(- 1)] += next_item['alternatives'][0]['content']
except IndexError:
pass
elif ('channel_labels' in data['results'].keys()):
logging.debug('Transcript has channel_labels')
for word in data['results']['items']:
if ('start_time' not in word.keys()):
continue
channel = list(filter((lambda x: (word in x['items'])), data['results']['channel_labels']['channels']))[0]['channel_label']
if ((channel in decoded_data['speaker']) and (decoded_data['speaker'][(- 1)] == channel)):
current_word = sorted(word['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'][(- 1)] += (' ' + current_word['content'])
else:
decoded_data['start_time'].append(convert_time_stamp(word['start_time']))
decoded_data['end_time'].append(convert_time_stamp(word['end_time']))
decoded_data['speaker'].append(channel)
current_word = sorted(word['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'].append(current_word['content'])
try:
word_result_index = data['results']['items'].index(word)
next_item = data['results']['items'][(word_result_index + 1)]
if (next_item['type'] == 'punctuation'):
decoded_data['comment'][(- 1)] += next_item['alternatives'][0]['content']
except IndexError:
pass
else:
logging.debug('No speaker_labels or channel_labels')
decoded_data['start_time'] = convert_time_stamp(list(filter((lambda x: (x['type'] == 'pronunciation')), data['results']['items']))[0]['start_time'])
decoded_data['end_time'] = convert_time_stamp(list(filter((lambda x: (x['type'] == 'pronunciation')), data['results']['items']))[(- 1)]['end_time'])
decoded_data['speaker'].append('')
decoded_data['comment'].append('')
for word in data['results']['items']:
result = sorted(word['alternatives'], key=(lambda x: x['confidence']))[(- 1)]
decoded_data['comment'][(- 1)] += (' ' + result['content'])
try:
word_result_index = data['results']['items'].index(word)
next_item = data['results']['items'][(word_result_index + 1)]
if (next_item['type'] == 'punctuation'):
decoded_data['comment'][(- 1)] += next_item['alternatives'][0]['content']
except IndexError:
pass
dataframe = pandas.DataFrame(decoded_data, columns=['start_time', 'end_time', 'speaker', 'comment'])
dataframe['comment'] = dataframe['comment'].str.lstrip()
return dataframe<|docstring|>Decode the transcript into a pandas dataframe<|endoftext|> |
2c777887608653db3f921ce2933c589a03da339863448c568c379162c81f40f2 | def write_vtt(dataframe, filename):
'Output to VTT format'
logging.info('Writing VTT')
vtt = webvtt.WebVTT()
for (_, row) in dataframe.iterrows():
if (len(row['comment']) <= 80):
caption = webvtt.Caption(start=(row['start_time'] + '.000'), end=(row['end_time'] + '.000'), text=row['comment'])
else:
lines = []
text = row['comment']
while (len(text) > 80):
text = text.lstrip()
last_space = text[:80].rindex(' ')
lines.append(text[:last_space])
text = text[last_space:]
caption = webvtt.Caption((row['start_time'] + '.000'), (row['end_time'] + '.000'), lines)
if row['speaker']:
caption.identifier = row['speaker']
vtt.captions.append(caption)
vtt.save(filename)
logging.info('VTT saved to %s', filename) | Output to VTT format | tscribe/__init__.py | write_vtt | banterco/aws_transcribe_to_docx | 0 | python | def write_vtt(dataframe, filename):
logging.info('Writing VTT')
vtt = webvtt.WebVTT()
for (_, row) in dataframe.iterrows():
if (len(row['comment']) <= 80):
caption = webvtt.Caption(start=(row['start_time'] + '.000'), end=(row['end_time'] + '.000'), text=row['comment'])
else:
lines = []
text = row['comment']
while (len(text) > 80):
text = text.lstrip()
last_space = text[:80].rindex(' ')
lines.append(text[:last_space])
text = text[last_space:]
caption = webvtt.Caption((row['start_time'] + '.000'), (row['end_time'] + '.000'), lines)
if row['speaker']:
caption.identifier = row['speaker']
vtt.captions.append(caption)
vtt.save(filename)
logging.info('VTT saved to %s', filename) | def write_vtt(dataframe, filename):
logging.info('Writing VTT')
vtt = webvtt.WebVTT()
for (_, row) in dataframe.iterrows():
if (len(row['comment']) <= 80):
caption = webvtt.Caption(start=(row['start_time'] + '.000'), end=(row['end_time'] + '.000'), text=row['comment'])
else:
lines = []
text = row['comment']
while (len(text) > 80):
text = text.lstrip()
last_space = text[:80].rindex(' ')
lines.append(text[:last_space])
text = text[last_space:]
caption = webvtt.Caption((row['start_time'] + '.000'), (row['end_time'] + '.000'), lines)
if row['speaker']:
caption.identifier = row['speaker']
vtt.captions.append(caption)
vtt.save(filename)
logging.info('VTT saved to %s', filename)<|docstring|>Output to VTT format<|endoftext|> |
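A minimal sketch feeding write_vtt a hand-built frame, assuming pandas and webvtt-py are installed and using the H:MM:SS timestamps that convert_time_stamp produces:

import pandas

frame = pandas.DataFrame({
    'start_time': ['0:00:01', '0:00:05'],
    'end_time': ['0:00:04', '0:00:09'],
    'speaker': ['spk_0', 'spk_1'],
    'comment': ['Hello there.', 'Hi, how are you?'],
})
write_vtt(frame, 'demo.vtt')  # two captions, each tagged with its speaker label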
581601fec09e62416c225014b1f61fcc380999ac5ab71eae95bd5918813e1abc | def write(transcript_filepath, **kwargs):
'Main function, write transcript file from json'
start = perf_counter()
logging.info(('=' * 32))
logging.debug('Started at %s', start)
logging.info('Source file: %s', transcript_filepath)
logging.debug('kwargs = %s', str(kwargs))
data = load_json_as_dict(transcript_filepath)
dataframe = decode_transcript_to_dataframe(data)
output_format = kwargs.get('format', 'vtt')
if kwargs.get('tmp_dir'):
logging.warning('tmp_dir in kwargs')
raise Exception('tmp_dir has been deprecated, use save_as instead')
if (output_format == 'vtt'):
output_filepath = kwargs.get('save_as', Path(transcript_filepath).with_suffix('.vtt'))
write_vtt(dataframe, output_filepath)
else:
raise Exception("Output format should be 'docx', 'csv', 'sqlite' or 'vtt'")
finish = perf_counter()
logging.debug('Finished at %s', finish)
duration = round((finish - start), 2)
print(f'{output_filepath} written in {duration} seconds.')
logging.info('%s written in %s seconds.', output_filepath, duration) | Main function, write transcript file from json | tscribe/__init__.py | write | banterco/aws_transcribe_to_docx | 0 | python | def write(transcript_filepath, **kwargs):
start = perf_counter()
logging.info(('=' * 32))
logging.debug('Started at %s', start)
logging.info('Source file: %s', transcript_filepath)
logging.debug('kwargs = %s', str(kwargs))
data = load_json_as_dict(transcript_filepath)
dataframe = decode_transcript_to_dataframe(data)
output_format = kwargs.get('format', 'vtt')
if kwargs.get('tmp_dir'):
logging.warning('tmp_dir in kwargs')
raise Exception('tmp_dir has been deprecated, use save_as instead')
if (output_format == 'vtt'):
output_filepath = kwargs.get('save_as', Path(transcript_filepath).with_suffix('.vtt'))
write_vtt(dataframe, output_filepath)
else:
raise Exception("Output format should be 'docx', 'csv', 'sqlite' or 'vtt'")
finish = perf_counter()
logging.debug('Finished at %s', finish)
duration = round((finish - start), 2)
print(f'{output_filepath} written in {duration} seconds.')
logging.info('%s written in %s seconds.', output_filepath, duration) | def write(transcript_filepath, **kwargs):
start = perf_counter()
logging.info(('=' * 32))
logging.debug('Started at %s', start)
logging.info('Source file: %s', transcript_filepath)
logging.debug('kwargs = %s', str(kwargs))
data = load_json_as_dict(transcript_filepath)
dataframe = decode_transcript_to_dataframe(data)
output_format = kwargs.get('format', 'vtt')
if kwargs.get('tmp_dir'):
logging.warning('tmp_dir in kwargs')
raise Exception('tmp_dir has been deprecated, use save_as instead')
if (output_format == 'vtt'):
output_filepath = kwargs.get('save_as', Path(transcript_filepath).with_suffix('.vtt'))
write_vtt(dataframe, output_filepath)
else:
raise Exception("Output format should be 'docx', 'csv', 'sqlite' or 'vtt'")
finish = perf_counter()
logging.debug('Finished at %s', finish)
duration = round((finish - start), 2)
print(f'{output_filepath} written in {duration} seconds.')
logging.info('%s written in %s seconds.', output_filepath, duration)<|docstring|>Main function, write transcript file from json<|endoftext|> |
fa544badabedb1511c46d029bd1b250e2aa67eb79ae424dff6cd59b7f8e017c1 | @pytest.fixture()
def start_proc():
' helper function for spinning up a websocket participant '
def _start_proc(participant, kwargs):
def target():
server = participant(**kwargs)
server.start()
p = Process(target=target)
p.start()
return p
return _start_proc | helper function for spinning up a websocket participant | test/conftest.py | start_proc | AKSingh-Udacity/PySyft | 1 | python | @pytest.fixture()
def start_proc():
' '
def _start_proc(participant, kwargs):
def target():
server = participant(**kwargs)
server.start()
p = Process(target=target)
p.start()
return p
return _start_proc | @pytest.fixture()
def start_proc():
' '
def _start_proc(participant, kwargs):
def target():
server = participant(**kwargs)
server.start()
p = Process(target=target)
p.start()
return p
return _start_proc<|docstring|>helper function for spinning up a websocket participant<|endoftext|> |
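A sketch of consuming the fixture with an illustrative participant class; a fork-based multiprocessing start method is assumed, since the closure target is not picklable under spawn:

class DummyServer:  # illustrative participant: anything exposing start()
    def __init__(self, port):
        self.port = port

    def start(self):
        print(f'serving on port {self.port}')

def test_spawn_dummy(start_proc):
    proc = start_proc(DummyServer, {'port': 8765})
    proc.join(timeout=2)  # the dummy prints once and exits
    assert proc.exitcode == 0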
0f79ca43614913b1330a5bc624702062b6a2fde4618ee58e01e9b57351f6c7cf | def __init__(self, url_path: str, raw_params: dict, Model: AbstractApiModel, config: MagellanConfig, limit: int=None, **kwargs) -> ConstantMagellanResponse:
'Generates a new ConstantMagellanResponse\n\n Args:\n url_path (str): The base path to start with (See MagellanResponse)\n raw_params (dict): The raw params passed to `query`\n Model (AbstractApiModel): A MagellanModel\n config (MagellanConfig): A MagellanConfig associated with the model\n limit (int, optional): The limit for the number of responses. Defaults to None.\n\n Returns:\n ConstantMagellanResponse: [description]\n '
self.raw_params = raw_params
super().__init__(url_path, Model, config, limit, **kwargs) | Generates a new ConstantMagellanResponse
Args:
url_path (str): The base path to start with (See MagellanResponse)
raw_params (dict): The raw params passed to `query`
Model (AbstractApiModel): A MagellanModel
config (MagellanConfig): A MagellanConfig associated with the model
limit (int, optional): The limit for the number of responses. Defaults to None.
Returns:
ConstantMagellanResponse: [description] | magellan_models/interface/constant_magellan_response.py | __init__ | 3mcloud/magellan-models | 2 | python | def __init__(self, url_path: str, raw_params: dict, Model: AbstractApiModel, config: MagellanConfig, limit: int=None, **kwargs) -> ConstantMagellanResponse:
'Generates a new ConstantMagellanResponse\n\n Args:\n url_path (str): The base path to start with (See MagellanResponse)\n raw_params (dict): The raw params passed to `query`\n Model (AbstractApiModel): A MagellanModel\n config (MagellanConfig): A MagellanConfig associated with the model\n limit (int, optional): The limit for the number of responses. Defaults to None.\n\n Returns:\n ConstantMagellanResponse: [description]\n '
self.raw_params = raw_params
super().__init__(url_path, Model, config, limit, **kwargs) | def __init__(self, url_path: str, raw_params: dict, Model: AbstractApiModel, config: MagellanConfig, limit: int=None, **kwargs) -> ConstantMagellanResponse:
'Generates a new ConstantMagellanResponse\n\n Args:\n url_path (str): The base path to start with (See MagellanResponse)\n raw_params (dict): The raw params passed to `query`\n Model (AbstractApiModel): A MagellanModel\n config (MagellanConfig): A MagellanConfig associated with the model\n limit (int, optional): The limit for the number of responses. Defaults to None.\n\n Returns:\n ConstantMagellanResponse: [description]\n '
self.raw_params = raw_params
super().__init__(url_path, Model, config, limit, **kwargs)<|docstring|>Generates a new ConstantMagellanResponse
Args:
url_path (str): The base path to start with (See MagellanResponse)
raw_params (dict): The raw params passed to `query`
Model (AbstractApiModel): A MagellanModel
config (MagellanConfig): A MagellanConfig associated with the model
limit (int, optional): The limit for the number of responses. Defaults to None.
Returns:
ConstantMagellanResponse: [description]<|endoftext|> |
a899705e5abd7741fff247f4c88015dfa15145d5b202815453a41825c39a702c | def process_next_page_of_results(self):
'Processes the next page of results using the internal parameters'
if ((self.next_url is None) or self.iteration_is_complete()):
return
header_and_args = self.__config__.create_header(**self.kwargs)
header = header_and_args[0]
if (len(self) == 0):
route = self.next_url
resp = self.get_request(route, self.raw_params, header)
self.iterate_through_response(resp)
else:
resp = self.get_request(url=self.next_url, headers=header)
self.iterate_through_response(resp)
self.next_url = self.__config__.get_next_link_from_resp(resp)
self.__meta_data__ = self.__config__.get_meta_data_from_resp(resp)
return | Processes the next page of results using the internal parameters | magellan_models/interface/constant_magellan_response.py | process_next_page_of_results | 3mcloud/magellan-models | 2 | python | def process_next_page_of_results(self):
if ((self.next_url is None) or self.iteration_is_complete()):
return
header_and_args = self.__config__.create_header(**self.kwargs)
header = header_and_args[0]
if (len(self) == 0):
route = self.next_url
resp = self.get_request(route, self.raw_params, header)
self.iterate_through_response(resp)
else:
resp = self.get_request(url=self.next_url, headers=header)
self.iterate_through_response(resp)
self.next_url = self.__config__.get_next_link_from_resp(resp)
self.__meta_data__ = self.__config__.get_meta_data_from_resp(resp)
return | def process_next_page_of_results(self):
if ((self.next_url is None) or self.iteration_is_complete()):
return
header_and_args = self.__config__.create_header(**self.kwargs)
header = header_and_args[0]
if (len(self) == 0):
route = self.next_url
resp = self.get_request(route, self.raw_params, header)
self.iterate_through_response(resp)
else:
resp = self.get_request(url=self.next_url, headers=header)
self.iterate_through_response(resp)
self.next_url = self.__config__.get_next_link_from_resp(resp)
self.__meta_data__ = self.__config__.get_meta_data_from_resp(resp)
return<|docstring|>Processes the next page of results using the internal parameters<|endoftext|> |
3c3f7a6a7f7e34444a46f98ba6119ca1c78e0baa23d3ab797fc2d7be3f911f70 | def run(results: list):
'\n Scan the above specified networks with Nmap and process the results.\n '
global CREATED_FILES, LOGGER
LOGGER = logging.getLogger(__name__)
LOGGER.info('Setting up Nmap scan')
nmap_call = create_nmap_call()
call_nmap(nmap_call, TEXT_NMAP_OUTPUT_PATH)
LOGGER.info('Parsing Nmap XML output')
result = parse_output_file(XML_NMAP_OUTPUT_PATH)
with open(TEXT_NMAP_OUTPUT_PATH) as file:
text = ' '.join(file.readlines())
if ('looks like an IPv6 target specification -- you have to use the -6 option.' in text):
LOGGER.info("IPv6 addresses were specified, running Nmap again with '-6' option.")
(text_name, text_ext) = os.path.splitext(TEXT_NMAP_OUTPUT_PATH)
textout = ((text_name + '_ipv6') + text_ext)
(xml_name, xml_ext) = os.path.splitext(XML_NMAP_OUTPUT_PATH)
xmlout = ((xml_name + '_ipv6') + xml_ext)
CREATED_FILES += [textout, xmlout]
nmap_call[(nmap_call.index('-oX') + 1)] = xmlout
nmap_call.append('-6')
call_nmap(nmap_call, textout)
LOGGER.info('Parsing Nmap XML output')
result_ipv6 = parse_output_file(xmlout)
for (ip, host) in result_ipv6.items():
result[ip] = host
with open(POT_OSS_PATH, 'w') as file:
file.write(json.dumps(DETECTED_OSS, ensure_ascii=False, indent=3))
CREATED_FILES.append(POT_OSS_PATH)
with open(OS_SELECTION_SCORES_PATH, 'w') as file:
file.write(json.dumps(OS_SIM_SCORES, ensure_ascii=False, indent=3))
CREATED_FILES.append(OS_SELECTION_SCORES_PATH)
LOGGER.info('Done')
results.append((ResultType.SCAN, result)) | Scan the above specified networks with Nmap and process the results. | modules/nmap/avain_nmap.py | run | RE4CT10N/avain | 51 | python | def run(results: list):
'\n \n '
global CREATED_FILES, LOGGER
LOGGER = logging.getLogger(__name__)
LOGGER.info('Setting up Nmap scan')
nmap_call = create_nmap_call()
call_nmap(nmap_call, TEXT_NMAP_OUTPUT_PATH)
LOGGER.info('Parsing Nmap XML output')
result = parse_output_file(XML_NMAP_OUTPUT_PATH)
with open(TEXT_NMAP_OUTPUT_PATH) as file:
text = ' '.join(file.readlines())
if ('looks like an IPv6 target specification -- you have to use the -6 option.' in text):
LOGGER.info("IPv6 addresses were specified, running Nmap again with '-6' option.")
(text_name, text_ext) = os.path.splitext(TEXT_NMAP_OUTPUT_PATH)
textout = ((text_name + '_ipv6') + text_ext)
(xml_name, xml_ext) = os.path.splitext(XML_NMAP_OUTPUT_PATH)
xmlout = ((xml_name + '_ipv6') + xml_ext)
CREATED_FILES += [textout, xmlout]
nmap_call[(nmap_call.index('-oX') + 1)] = xmlout
nmap_call.append('-6')
call_nmap(nmap_call, textout)
LOGGER.info('Parsing Nmap XML output')
result_ipv6 = parse_output_file(xmlout)
for (ip, host) in result_ipv6.items():
result[ip] = host
with open(POT_OSS_PATH, 'w') as file:
file.write(json.dumps(DETECTED_OSS, ensure_ascii=False, indent=3))
CREATED_FILES.append(POT_OSS_PATH)
with open(OS_SELECTION_SCORES_PATH, 'w') as file:
file.write(json.dumps(OS_SIM_SCORES, ensure_ascii=False, indent=3))
CREATED_FILES.append(OS_SELECTION_SCORES_PATH)
LOGGER.info('Done')
results.append((ResultType.SCAN, result)) | def run(results: list):
'\n \n '
global CREATED_FILES, LOGGER
LOGGER = logging.getLogger(__name__)
LOGGER.info('Setting up Nmap scan')
nmap_call = create_nmap_call()
call_nmap(nmap_call, TEXT_NMAP_OUTPUT_PATH)
LOGGER.info('Parsing Nmap XML output')
result = parse_output_file(XML_NMAP_OUTPUT_PATH)
with open(TEXT_NMAP_OUTPUT_PATH) as file:
text = ' '.join(file.readlines())
if ('looks like an IPv6 target specification -- you have to use the -6 option.' in text):
LOGGER.info("IPv6 addresses were specified, running Nmap again with '-6' option.")
(text_name, text_ext) = os.path.splitext(TEXT_NMAP_OUTPUT_PATH)
textout = ((text_name + '_ipv6') + text_ext)
(xml_name, xml_ext) = os.path.splitext(XML_NMAP_OUTPUT_PATH)
xmlout = ((xml_name + '_ipv6') + xml_ext)
CREATED_FILES += [textout, xmlout]
nmap_call[(nmap_call.index('-oX') + 1)] = xmlout
nmap_call.append('-6')
call_nmap(nmap_call, textout)
LOGGER.info('Parsing Nmap XML output')
result_ipv6 = parse_output_file(xmlout)
for (ip, host) in result_ipv6.items():
result[ip] = host
with open(POT_OSS_PATH, 'w') as file:
file.write(json.dumps(DETECTED_OSS, ensure_ascii=False, indent=3))
CREATED_FILES.append(POT_OSS_PATH)
with open(OS_SELECTION_SCORES_PATH, 'w') as file:
file.write(json.dumps(OS_SIM_SCORES, ensure_ascii=False, indent=3))
CREATED_FILES.append(OS_SELECTION_SCORES_PATH)
LOGGER.info('Done')
results.append((ResultType.SCAN, result))<|docstring|>Scan the above specified networks with Nmap and process the results.<|endoftext|> |
0fc1609c769a7b8977dda7d474be74d5e7a00014b43b0304d20d3722c951d626 | def create_nmap_call():
'\n Create the concrete Nmap call to use\n '
def check_sufficient_privs():
nonlocal scan_type
if ((os.geteuid() != 0) and (('S' in scan_type) or ('U' in scan_type))):
util.printit('Configured scan type requires root privileges!', color=util.RED)
util.printit('Either run as root or change the config file.', color=util.RED)
return False
return True
global CREATED_FILES
LOGGER.info("Writing networks to scan into '%s'", NETWORKS_PATH)
with open(NETWORKS_PATH, 'w') as file:
for net in NETWORKS:
file.write((net + '\n'))
if (CONFIG.get('fast_scan', 'false').lower() == 'false'):
nmap_call = ['nmap', '-Pn', '-n', '-A', '--osscan-guess', '-oX', XML_NMAP_OUTPUT_PATH, '-iL', NETWORKS_PATH]
elif PORTS:
nmap_call = ['nmap', '-Pn', '-n', '-A', '--osscan-guess', '-oX', XML_NMAP_OUTPUT_PATH, '-iL', NETWORKS_PATH]
else:
nmap_call = ['nmap', '-Pn', '-n', '-A', '--osscan-guess', '-F', '-oX', XML_NMAP_OUTPUT_PATH, '-iL', NETWORKS_PATH]
scan_type = CONFIG.get('scan_type', 'S')
nmap_call.append(('-s' + scan_type))
check_sufficient_privs()
timing_template = CONFIG.get('timing_template', '3')
if (not timing_template.startswith('T')):
timing_template = ('T' + timing_template)
nmap_call.append(('-' + timing_template))
if ('add_scripts' in CONFIG):
nmap_call.append(('--script=%s' % CONFIG['add_scripts'].replace(' ', '')))
if PORTS:
nmap_call.append(('-p%s' % PORTS))
if VERBOSE:
nmap_call.append('-v')
if OMIT_NETWORKS:
LOGGER.info("Writing networks to omit into '%s'", NETWORKS_OMIT_PATH)
with open(NETWORKS_OMIT_PATH, 'w') as file:
for net in OMIT_NETWORKS:
file.write((net + '\n'))
nmap_call.append('--excludefile')
nmap_call.append(NETWORKS_OMIT_PATH)
CREATED_FILES += [TEXT_NMAP_OUTPUT_PATH, XML_NMAP_OUTPUT_PATH, NETWORKS_PATH]
if OMIT_NETWORKS:
CREATED_FILES.append(NETWORKS_OMIT_PATH)
if ('add_nmap_params' in CONFIG):
nmap_call += CONFIG['add_nmap_params'].split(' ')
return nmap_call | Create the concrete Nmap call to use | modules/nmap/avain_nmap.py | create_nmap_call | RE4CT10N/avain | 51 | python | def create_nmap_call():
'\n \n '
def check_sufficient_privs():
nonlocal scan_type
if ((os.geteuid() != 0) and (('S' in scan_type) or ('U' in scan_type))):
util.printit('Configured scan type requires root privileges!', color=util.RED)
util.printit('Either run as root or change the config file.', color=util.RED)
return False
return True
global CREATED_FILES
LOGGER.info("Writing networks to scan into '%s'", NETWORKS_PATH)
with open(NETWORKS_PATH, 'w') as file:
for net in NETWORKS:
file.write((net + '\n'))
if (CONFIG.get('fast_scan', 'false').lower() == 'false'):
nmap_call = ['nmap', '-Pn', '-n', '-A', '--osscan-guess', '-oX', XML_NMAP_OUTPUT_PATH, '-iL', NETWORKS_PATH]
elif PORTS:
nmap_call = ['nmap', '-Pn', '-n', '-A', '--osscan-guess', '-oX', XML_NMAP_OUTPUT_PATH, '-iL', NETWORKS_PATH]
else:
nmap_call = ['nmap', '-Pn', '-n', '-A', '--osscan-guess', '-F', '-oX', XML_NMAP_OUTPUT_PATH, '-iL', NETWORKS_PATH]
scan_type = CONFIG.get('scan_type', 'S')
nmap_call.append(('-s' + scan_type))
check_sufficient_privs()
timing_template = CONFIG.get('timing_template', '3')
if (not timing_template.startswith('T')):
timing_template = ('T' + timing_template)
nmap_call.append(('-' + timing_template))
if ('add_scripts' in CONFIG):
        nmap_call.append(('--script=%s' % CONFIG['add_scripts'].replace(' ', '')))
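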
if PORTS:
nmap_call.append(('-p%s' % PORTS))
if VERBOSE:
nmap_call.append('-v')
if OMIT_NETWORKS:
LOGGER.info("Writing networks to omit into '%s'", NETWORKS_OMIT_PATH)
with open(NETWORKS_OMIT_PATH, 'w') as file:
for net in OMIT_NETWORKS:
file.write((net + '\n'))
nmap_call.append('--excludefile')
nmap_call.append(NETWORKS_OMIT_PATH)
CREATED_FILES += [TEXT_NMAP_OUTPUT_PATH, XML_NMAP_OUTPUT_PATH, NETWORKS_PATH]
if OMIT_NETWORKS:
CREATED_FILES.append(NETWORKS_OMIT_PATH)
if ('add_nmap_params' in CONFIG):
nmap_call += CONFIG['add_nmap_params'].split(' ')
return nmap_call | def create_nmap_call():
'\n \n '
def check_sufficient_privs():
nonlocal scan_type
if ((os.geteuid() != 0) and (('S' in scan_type) or ('U' in scan_type))):
util.printit('Configured scan type requires root privileges!', color=util.RED)
util.printit('Either run as root or change the config file.', color=util.RED)
return False
return True
global CREATED_FILES
LOGGER.info("Writing networks to scan into '%s'", NETWORKS_PATH)
with open(NETWORKS_PATH, 'w') as file:
for net in NETWORKS:
file.write((net + '\n'))
if (CONFIG.get('fast_scan', 'false').lower() == 'false'):
nmap_call = ['nmap', '-Pn', '-n', '-A', '--osscan-guess', '-oX', XML_NMAP_OUTPUT_PATH, '-iL', NETWORKS_PATH]
elif PORTS:
nmap_call = ['nmap', '-Pn', '-n', '-A', '--osscan-guess', '-oX', XML_NMAP_OUTPUT_PATH, '-iL', NETWORKS_PATH]
else:
nmap_call = ['nmap', '-Pn', '-n', '-A', '--osscan-guess', '-F', '-oX', XML_NMAP_OUTPUT_PATH, '-iL', NETWORKS_PATH]
scan_type = CONFIG.get('scan_type', 'S')
nmap_call.append(('-s' + scan_type))
check_sufficient_privs()
timing_template = CONFIG.get('timing_template', '3')
if (not timing_template.startswith('T')):
timing_template = ('T' + timing_template)
nmap_call.append(('-' + timing_template))
if ('add_scripts' in CONFIG):
        nmap_call.append(('--script=%s' % CONFIG['add_scripts'].replace(' ', '')))
if PORTS:
nmap_call.append(('-p%s' % PORTS))
if VERBOSE:
nmap_call.append('-v')
if OMIT_NETWORKS:
LOGGER.info("Writing networks to omit into '%s'", NETWORKS_OMIT_PATH)
with open(NETWORKS_OMIT_PATH, 'w') as file:
for net in OMIT_NETWORKS:
file.write((net + '\n'))
nmap_call.append('--excludefile')
nmap_call.append(NETWORKS_OMIT_PATH)
CREATED_FILES += [TEXT_NMAP_OUTPUT_PATH, XML_NMAP_OUTPUT_PATH, NETWORKS_PATH]
if OMIT_NETWORKS:
CREATED_FILES.append(NETWORKS_OMIT_PATH)
if ('add_nmap_params' in CONFIG):
nmap_call += CONFIG['add_nmap_params'].split(' ')
return nmap_call<|docstring|>Create the concrete Nmap call to use<|endoftext|> |
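A sketch of the argument vector create_nmap_call() would assemble for one hypothetical configuration (fast_scan off, SYN scan, timing template 4, no extra scripts, no port list, no omit list); the file names stand in for XML_NMAP_OUTPUT_PATH and NETWORKS_PATH, whose values are not shown in this record:

sample_call = ['nmap', '-Pn', '-n', '-A', '--osscan-guess',
               '-oX', 'nmap_scan_res.xml', '-iL', 'networks.list',
               '-sS', '-T4']
print(' '.join(sample_call))
# nmap -Pn -n -A --osscan-guess -oX nmap_scan_res.xml -iL networks.list -sS -T4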
566a258c3faaed97e01d957af990abab0fecf34bab259f29f994b026bd5f3b78 | def call_nmap(nmap_call: list, redr_filepath: str):
'\n Call Nmap using the given arguments and output redirection file\n '
LOGGER.info("Executing Nmap call '%s'", ' '.join(nmap_call))
redr_file = open(redr_filepath, 'w')
if VERBOSE:
with subprocess.Popen(nmap_call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as proc:
for line in proc.stdout:
util.printit(line, end='')
redr_file.write(line)
else:
subprocess.call(nmap_call, stdout=redr_file, stderr=subprocess.STDOUT)
redr_file.close()
LOGGER.info("Nmap scan done. Stdout and Stderr have been written to '%s'. %s '%s'", redr_filepath, 'The XML output has been written to', nmap_call[(nmap_call.index('-oX') + 1)]) | Call Nmap using the given arguments and output redirection file | modules/nmap/avain_nmap.py | call_nmap | RE4CT10N/avain | 51 | python | def call_nmap(nmap_call: list, redr_filepath: str):
'\n \n '
LOGGER.info("Executing Nmap call '%s'", ' '.join(nmap_call))
redr_file = open(redr_filepath, 'w')
if VERBOSE:
with subprocess.Popen(nmap_call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as proc:
for line in proc.stdout:
                util.printit(line, end='')
redr_file.write(line)
else:
subprocess.call(nmap_call, stdout=redr_file, stderr=subprocess.STDOUT)
redr_file.close()
LOGGER.info("Nmap scan done. Stdout and Stderr have been written to '%s'. %s '%s'", redr_filepath, 'The XML output has been written to', nmap_call[(nmap_call.index('-oX') + 1)]) | def call_nmap(nmap_call: list, redr_filepath: str):
'\n \n '
LOGGER.info("Executing Nmap call '%s'", ' '.join(nmap_call))
redr_file = open(redr_filepath, 'w')
if VERBOSE:
with subprocess.Popen(nmap_call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as proc:
for line in proc.stdout:
                util.printit(line, end='')
redr_file.write(line)
else:
subprocess.call(nmap_call, stdout=redr_file, stderr=subprocess.STDOUT)
redr_file.close()
LOGGER.info("Nmap scan done. Stdout and Stderr have been written to '%s'. %s '%s'", redr_filepath, 'The XML output has been written to', nmap_call[(nmap_call.index('-oX') + 1)])<|docstring|>Call Nmap using the given arguments and output redirection file<|endoftext|> |
605fe46be731623013d71254a6ea72291682beb66b292483f18d09ab53512fe8 | def cleanup():
'\n Delete the nmap input-list and exclude-file\n '
def remove_file(path: str):
try:
os.remove(path)
except FileNotFoundError:
pass
remove_file(NETWORKS_PATH)
remove_file(NETWORKS_OMIT_PATH) | Delete the nmap input-list and exclude-file | modules/nmap/avain_nmap.py | cleanup | RE4CT10N/avain | 51 | python | def cleanup():
'\n \n '
def remove_file(path: str):
try:
os.remove(path)
except FileNotFoundError:
pass
remove_file(NETWORKS_PATH)
remove_file(NETWORKS_OMIT_PATH) | def cleanup():
'\n \n '
def remove_file(path: str):
try:
os.remove(path)
except FileNotFoundError:
pass
remove_file(NETWORKS_PATH)
remove_file(NETWORKS_OMIT_PATH)<|docstring|>Delete the nmap input-list and exclude-file<|endoftext|> |
76a6fb6ec931c29bbe184683c4720916c81d4334920789197051b1882fe62b08 | def parse_output_file(filepath: str):
'\n Parse the nmap xml output file into the common dict format for AVAIN scan results.\n\n :param filepath: the location of the output Nmap XML file\n :return: a dict having host IPs as keys and their scan results as values\n '
parsed_hosts = parse_nmap_xml(filepath)
final_hosts = {}
for parsed_host in parsed_hosts:
host = transform_to_avain_scan_format(parsed_host)
final_hosts[host['ip']['addr']] = host
return final_hosts | Parse the nmap xml output file into the common dict format for AVAIN scan results.
:param filepath: the location of the output Nmap XML file
:return: a dict having host IPs as keys and their scan results as values | modules/nmap/avain_nmap.py | parse_output_file | RE4CT10N/avain | 51 | python | def parse_output_file(filepath: str):
'\n Parse the nmap xml output file into the common dict format for AVAIN scan results.\n\n :param filepath: the location of the output Nmap XML file\n :return: a dict having host IPs as keys and their scan results as values\n '
parsed_hosts = parse_nmap_xml(filepath)
final_hosts = {}
for parsed_host in parsed_hosts:
host = transform_to_avain_scan_format(parsed_host)
final_hosts[host['ip']['addr']] = host
return final_hosts | def parse_output_file(filepath: str):
'\n Parse the nmap xml output file into the common dict format for AVAIN scan results.\n\n :param filepath: the location of the output Nmap XML file\n :return: a dict having host IPs as keys and their scan results as values\n '
parsed_hosts = parse_nmap_xml(filepath)
final_hosts = {}
for parsed_host in parsed_hosts:
host = transform_to_avain_scan_format(parsed_host)
final_hosts[host['ip']['addr']] = host
return final_hosts<|docstring|>Parse the nmap xml output file into the common dict format for AVAIN scan results.
:param filepath: the location of the output Nmap XML file
:return: a dict having host IPs as keys and their scan results as values<|endoftext|> |
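A hedged usage sketch for parse_output_file() above; the report path is an assumption, and the 'os'/'tcp' keys follow the AVAIN host format produced by the functions further below:

hosts = parse_output_file('nmap_scan_res.xml')
for ip, host in hosts.items():
    open_tcp = ', '.join(host.get('tcp', {}))  # port ids are dict keys
    print('%s (%s): tcp %s' % (ip, host['os'].get('name', '?'), open_tcp))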
52c608c0654ee307d1b37c5e10e1e9bd6ae36ac77d9a266aaeedf524ee126fa7 | def parse_nmap_xml(filepath: str):
'\n Parse the XML output file created by Nmap into a python dict for easier processing.\n\n :param filepath: The filepath to the Nmap XML file\n :return: a dict containing the relevant information of the nmap XML\n '
def parse_addresses():
'\n Parse the <address> tags containing IP and MAC information.\n '
nonlocal host, host_elem
(ip, ip_type) = ('', '')
(mac, vendor) = ('', '')
for addr in host_elem.findall('address'):
if ((addr.attrib['addrtype'] == 'ipv4') or (addr.attrib['addrtype'] == 'ipv6')):
ip = addr.attrib['addr']
ip_type = addr.attrib['addrtype']
elif (addr.attrib['addrtype'] == 'mac'):
mac = addr.attrib['addr']
vendor = addr.attrib.get('vendor', '')
host['ip'] = {'addr': ip, 'type': ip_type}
host['mac'] = {'addr': mac, 'vendor': vendor}
def parse_osmatches():
'\n Parse all <osmatch> and their <osclass> tags to gather OS information.\n '
nonlocal host, host_elem
osmatches = []
os_elem = host_elem.find('os')
if (os_elem is not None):
for osmatch_elem in os_elem.findall('osmatch'):
osmatch = {}
for (key, value) in osmatch_elem.attrib.items():
if (key != 'line'):
osmatch[key] = value
osclasses = []
for osclass_elem in osmatch_elem.findall('osclass'):
osclass = {}
for (key, value) in osclass_elem.attrib.items():
osclass[key] = value
cpe_elems = osclass_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpes.append(cpe_elem.text)
osclass['cpes'] = cpes
osclasses.append(osclass)
osmatch['osclasses'] = osclasses
osmatches.append(osmatch)
host['osmatches'] = osmatches
def parse_port_information():
'\n Parse all <port> tags contained in <ports> to gather port information.\n '
nonlocal host, host_elem
(tcp_ports, udp_ports) = ({}, {})
port_elem = host_elem.find('ports')
if (port_elem is not None):
for port_elem in port_elem.findall('port'):
port = {}
port['portid'] = port_elem.attrib['portid']
port['protocol'] = port_elem.attrib['protocol']
port['state'] = port_elem.find('state').attrib['state']
if (('closed' in port['state']) or ('filtered' in port['state'])):
continue
new_os_si_added = False
(service_elem, service_version) = (port_elem.find('service'), '')
if (service_elem is not None):
for (key, value) in service_elem.attrib.items():
if (key not in ('conf', 'method', 'ostype', 'devicetype')):
port[key] = value
elif (key == 'ostype'):
if ('os_si' not in host):
host['os_si'] = []
if (not any(((os_si['name'] == value) for os_si in host['os_si']))):
host['os_si'].append({'name': value})
if ('devicetype' in service_elem.attrib):
host['os_si'][(- 1)]['type'] = service_elem.attrib['devicetype']
new_os_si_added = True
cpe_elems = service_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpe_text = cpe_elem.text
if cpe_text.startswith('cpe:/a:'):
cpes.append(cpe_text)
elif (cpe_text.startswith('cpe:/o:') and new_os_si_added):
if (not ('cpes' in host['os_si'][(- 1)])):
host['os_si'][(- 1)]['cpes'] = []
host['os_si'][(- 1)]['cpes'].append(cpe_text)
if cpes:
port['cpes'] = cpes
if (port['protocol'] == 'tcp'):
tcp_ports[port['portid']] = port
elif (port['protocol'] == 'udp'):
udp_ports[port['portid']] = port
host['tcp'] = tcp_ports
host['udp'] = udp_ports
def parse_smb_script_information():
'\n Parse the SMB script tag to get SMB information about the OS if possible.\n '
nonlocal host, smb_elem
(os_name, cpe, samba_version) = ('', '', '')
cpe_elem = smb_elem.find(".//elem[@key='cpe']")
if (cpe_elem is not None):
cpe = cpe_elem.text
output = smb_elem.attrib['output'].strip()
for stmt in output.split('\n'):
stmt = stmt.strip()
(key, value) = stmt.split(':', 1)
if (key == 'OS'):
os_name = value.split('(', 1)[0].strip()
match = re.match('.*\\(Samba (\\d+\\.\\d+\\.\\d+[a-z]?).*\\)', value.strip())
if match:
samba_version = match.group(1)
if os_name:
host['os_smb_discv'] = [{'name': os_name}]
if cpe:
host['os_smb_discv'][0]['cpe'] = cpe
if samba_version:
for proto in ('tcp', 'udp'):
for (portid, port_node) in host.get(proto, {}).items():
for (i, cpe) in enumerate(port_node.get('cpes', [])):
if (cpe == 'cpe:/a:samba:samba'):
cpe += (':' + samba_version)
port_node['cpes'][i] = cpe
try:
nm_xml_tree = ET.parse(filepath)
except ET.ParseError:
print('Could not parse file created by Nmap scan. Skipping Nmap scan ...', file=sys.stderr)
return {}
nmaprun_elem = nm_xml_tree.getroot()
hosts = []
for host_elem in nmaprun_elem.findall('host'):
host = {}
status_elem = host_elem.find('status')
if (status_elem is not None):
if ('state' in status_elem.attrib):
if (status_elem.attrib['state'] == 'down'):
continue
parse_addresses()
parse_osmatches()
parse_port_information()
hostscript_elem = host_elem.find('hostscript')
if hostscript_elem:
smb_elem = hostscript_elem.find(".//script[@id='smb-os-discovery']")
if smb_elem:
parse_smb_script_information()
hosts.append(host)
return hosts | Parse the XML output file created by Nmap into a python dict for easier processing.
:param filepath: The filepath to the Nmap XML file
:return: a dict containing the relevant information of the nmap XML | modules/nmap/avain_nmap.py | parse_nmap_xml | RE4CT10N/avain | 51 | python | def parse_nmap_xml(filepath: str):
'\n Parse the XML output file created by Nmap into a python dict for easier processing.\n\n :param filepath: The filepath to the Nmap XML file\n :return: a dict containing the relevant information of the nmap XML\n '
def parse_addresses():
'\n Parse the <address> tags containing IP and MAC information.\n '
nonlocal host, host_elem
        (ip, ip_type) = ('', '')
        (mac, vendor) = ('', '')
for addr in host_elem.findall('address'):
if ((addr.attrib['addrtype'] == 'ipv4') or (addr.attrib['addrtype'] == 'ipv6')):
ip = addr.attrib['addr']
ip_type = addr.attrib['addrtype']
elif (addr.attrib['addrtype'] == 'mac'):
mac = addr.attrib['addr']
                vendor = addr.attrib.get('vendor', '')
host['ip'] = {'addr': ip, 'type': ip_type}
host['mac'] = {'addr': mac, 'vendor': vendor}
def parse_osmatches():
'\n Parse all <osmatch> and their <osclass> tags to gather OS information.\n '
nonlocal host, host_elem
osmatches = []
os_elem = host_elem.find('os')
if (os_elem is not None):
for osmatch_elem in os_elem.findall('osmatch'):
osmatch = {}
for (key, value) in osmatch_elem.attrib.items():
if (key != 'line'):
osmatch[key] = value
osclasses = []
for osclass_elem in osmatch_elem.findall('osclass'):
osclass = {}
for (key, value) in osclass_elem.attrib.items():
osclass[key] = value
cpe_elems = osclass_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpes.append(cpe_elem.text)
osclass['cpes'] = cpes
osclasses.append(osclass)
osmatch['osclasses'] = osclasses
osmatches.append(osmatch)
host['osmatches'] = osmatches
def parse_port_information():
'\n Parse all <port> tags contained in <ports> to gather port information.\n '
nonlocal host, host_elem
(tcp_ports, udp_ports) = ({}, {})
port_elem = host_elem.find('ports')
if (port_elem is not None):
for port_elem in port_elem.findall('port'):
port = {}
port['portid'] = port_elem.attrib['portid']
port['protocol'] = port_elem.attrib['protocol']
port['state'] = port_elem.find('state').attrib['state']
if (('closed' in port['state']) or ('filtered' in port['state'])):
continue
new_os_si_added = False
                (service_elem, service_version) = (port_elem.find('service'), '')
if (service_elem is not None):
for (key, value) in service_elem.attrib.items():
if (key not in ('conf', 'method', 'ostype', 'devicetype')):
port[key] = value
elif (key == 'ostype'):
if ('os_si' not in host):
host['os_si'] = []
if (not any(((os_si['name'] == value) for os_si in host['os_si']))):
host['os_si'].append({'name': value})
if ('devicetype' in service_elem.attrib):
host['os_si'][(- 1)]['type'] = service_elem.attrib['devicetype']
new_os_si_added = True
cpe_elems = service_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpe_text = cpe_elem.text
if cpe_text.startswith('cpe:/a:'):
cpes.append(cpe_text)
elif (cpe_text.startswith('cpe:/o:') and new_os_si_added):
if (not ('cpes' in host['os_si'][(- 1)])):
host['os_si'][(- 1)]['cpes'] = []
host['os_si'][(- 1)]['cpes'].append(cpe_text)
if cpes:
port['cpes'] = cpes
if (port['protocol'] == 'tcp'):
tcp_ports[port['portid']] = port
elif (port['protocol'] == 'udp'):
udp_ports[port['portid']] = port
host['tcp'] = tcp_ports
host['udp'] = udp_ports
def parse_smb_script_information():
'\n Parse the SMB script tag to get SMB information about the OS if possible.\n '
nonlocal host, smb_elem
        (os_name, cpe, samba_version) = ('', '', '')
cpe_elem = smb_elem.find(".//elem[@key='cpe']")
if (cpe_elem is not None):
cpe = cpe_elem.text
output = smb_elem.attrib['output'].strip()
for stmt in output.split('\n'):
stmt = stmt.strip()
(key, value) = stmt.split(':', 1)
if (key == 'OS'):
os_name = value.split('(', 1)[0].strip()
match = re.match('.*\\(Samba (\\d+\\.\\d+\\.\\d+[a-z]?).*\\)', value.strip())
if match:
samba_version = match.group(1)
if os_name:
host['os_smb_discv'] = [{'name': os_name}]
if cpe:
host['os_smb_discv'][0]['cpe'] = cpe
if samba_version:
for proto in ('tcp', 'udp'):
for (portid, port_node) in host.get(proto, {}).items():
for (i, cpe) in enumerate(port_node.get('cpes', [])):
if (cpe == 'cpe:/a:samba:samba'):
cpe += (':' + samba_version)
port_node['cpes'][i] = cpe
try:
nm_xml_tree = ET.parse(filepath)
except ET.ParseError:
print('Could not parse file created by Nmap scan. Skipping Nmap scan ...', file=sys.stderr)
return {}
nmaprun_elem = nm_xml_tree.getroot()
hosts = []
for host_elem in nmaprun_elem.findall('host'):
host = {}
status_elem = host_elem.find('status')
if (status_elem is not None):
if ('state' in status_elem.attrib):
if (status_elem.attrib['state'] == 'down'):
continue
parse_addresses()
parse_osmatches()
parse_port_information()
hostscript_elem = host_elem.find('hostscript')
if hostscript_elem:
smb_elem = hostscript_elem.find(".//script[@id='smb-os-discovery']")
if smb_elem:
parse_smb_script_information()
hosts.append(host)
return hosts | def parse_nmap_xml(filepath: str):
'\n Parse the XML output file created by Nmap into a python dict for easier processing.\n\n :param filepath: The filepath to the Nmap XML file\n :return: a dict containing the relevant information of the nmap XML\n '
def parse_addresses():
'\n Parse the <address> tags containing IP and MAC information.\n '
nonlocal host, host_elem
        (ip, ip_type) = ('', '')
        (mac, vendor) = ('', '')
for addr in host_elem.findall('address'):
if ((addr.attrib['addrtype'] == 'ipv4') or (addr.attrib['addrtype'] == 'ipv6')):
ip = addr.attrib['addr']
ip_type = addr.attrib['addrtype']
elif (addr.attrib['addrtype'] == 'mac'):
mac = addr.attrib['addr']
                vendor = addr.attrib.get('vendor', '')
host['ip'] = {'addr': ip, 'type': ip_type}
host['mac'] = {'addr': mac, 'vendor': vendor}
def parse_osmatches():
'\n Parse all <osmatch> and their <osclass> tags to gather OS information.\n '
nonlocal host, host_elem
osmatches = []
os_elem = host_elem.find('os')
if (os_elem is not None):
for osmatch_elem in os_elem.findall('osmatch'):
osmatch = {}
for (key, value) in osmatch_elem.attrib.items():
if (key != 'line'):
osmatch[key] = value
osclasses = []
for osclass_elem in osmatch_elem.findall('osclass'):
osclass = {}
for (key, value) in osclass_elem.attrib.items():
osclass[key] = value
cpe_elems = osclass_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpes.append(cpe_elem.text)
osclass['cpes'] = cpes
osclasses.append(osclass)
osmatch['osclasses'] = osclasses
osmatches.append(osmatch)
host['osmatches'] = osmatches
def parse_port_information():
'\n Parse all <port> tags contained in <ports> to gather port information.\n '
nonlocal host, host_elem
(tcp_ports, udp_ports) = ({}, {})
port_elem = host_elem.find('ports')
if (port_elem is not None):
for port_elem in port_elem.findall('port'):
port = {}
port['portid'] = port_elem.attrib['portid']
port['protocol'] = port_elem.attrib['protocol']
port['state'] = port_elem.find('state').attrib['state']
if (('closed' in port['state']) or ('filtered' in port['state'])):
continue
new_os_si_added = False
                (service_elem, service_version) = (port_elem.find('service'), '')
if (service_elem is not None):
for (key, value) in service_elem.attrib.items():
if (key not in ('conf', 'method', 'ostype', 'devicetype')):
port[key] = value
elif (key == 'ostype'):
if ('os_si' not in host):
host['os_si'] = []
if (not any(((os_si['name'] == value) for os_si in host['os_si']))):
host['os_si'].append({'name': value})
if ('devicetype' in service_elem.attrib):
host['os_si'][(- 1)]['type'] = service_elem.attrib['devicetype']
new_os_si_added = True
cpe_elems = service_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpe_text = cpe_elem.text
if cpe_text.startswith('cpe:/a:'):
cpes.append(cpe_text)
elif (cpe_text.startswith('cpe:/o:') and new_os_si_added):
if (not ('cpes' in host['os_si'][(- 1)])):
host['os_si'][(- 1)]['cpes'] = []
host['os_si'][(- 1)]['cpes'].append(cpe_text)
if cpes:
port['cpes'] = cpes
if (port['protocol'] == 'tcp'):
tcp_ports[port['portid']] = port
elif (port['protocol'] == 'udp'):
udp_ports[port['portid']] = port
host['tcp'] = tcp_ports
host['udp'] = udp_ports
def parse_smb_script_information():
'\n Parse the SMB script tag to get SMB information about the OS if possible.\n '
nonlocal host, smb_elem
        (os_name, cpe, samba_version) = ('', '', '')
cpe_elem = smb_elem.find(".//elem[@key='cpe']")
if (cpe_elem is not None):
cpe = cpe_elem.text
output = smb_elem.attrib['output'].strip()
for stmt in output.split('\n'):
stmt = stmt.strip()
(key, value) = stmt.split(':', 1)
if (key == 'OS'):
os_name = value.split('(', 1)[0].strip()
match = re.match('.*\\(Samba (\\d+\\.\\d+\\.\\d+[a-z]?).*\\)', value.strip())
if match:
samba_version = match.group(1)
if os_name:
host['os_smb_discv'] = [{'name': os_name}]
if cpe:
host['os_smb_discv'][0]['cpe'] = cpe
if samba_version:
for proto in ('tcp', 'udp'):
for (portid, port_node) in host.get(proto, {}).items():
for (i, cpe) in enumerate(port_node.get('cpes', [])):
if (cpe == 'cpe:/a:samba:samba'):
cpe += (':' + samba_version)
port_node['cpes'][i] = cpe
try:
nm_xml_tree = ET.parse(filepath)
except ET.ParseError:
print('Could not parse file created by Nmap scan. Skipping Nmap scan ...', file=sys.stderr)
return {}
nmaprun_elem = nm_xml_tree.getroot()
hosts = []
for host_elem in nmaprun_elem.findall('host'):
host = {}
status_elem = host_elem.find('status')
if (status_elem is not None):
if ('state' in status_elem.attrib):
if (status_elem.attrib['state'] == 'down'):
continue
parse_addresses()
parse_osmatches()
parse_port_information()
hostscript_elem = host_elem.find('hostscript')
if hostscript_elem:
smb_elem = hostscript_elem.find(".//script[@id='smb-os-discovery']")
if smb_elem:
parse_smb_script_information()
hosts.append(host)
return hosts<|docstring|>Parse the XML output file created by Nmap into a python dict for easier processing.
:param filepath: The filepath to the Nmap XML file
:return: a dict containing the relevant information of the nmap XML<|endoftext|> |
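A runnable sketch of the minimal <nmaprun> structure parse_nmap_xml() walks; the XML below is a hand-written stand-in for real Nmap output, not generated by Nmap:

import xml.etree.ElementTree as ET

xml_snippet = '''<nmaprun>
  <host><status state="up"/>
    <address addr="192.168.0.5" addrtype="ipv4"/>
    <ports><port protocol="tcp" portid="22">
      <state state="open"/>
      <service name="ssh" product="OpenSSH"><cpe>cpe:/a:openbsd:openssh</cpe></service>
    </port></ports>
  </host>
</nmaprun>'''
root = ET.fromstring(xml_snippet)
for host_elem in root.findall('host'):
    print(host_elem.find('address').attrib['addr'])  # 192.168.0.5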
a0dec6e388a9539c5ff1d87009db093af1a3c7b4112248b0e6281561ed02a39b | def transform_to_avain_scan_format(parsed_host: dict):
'\n Transform the Nmap scan results to the AVAIN scan result format.\n '
host = {}
host['os'] = select_os(parsed_host)
host['ip'] = parsed_host['ip']
host['mac'] = parsed_host['mac']
host['tcp'] = parsed_host['tcp']
host['udp'] = parsed_host['udp']
adjust_port_info_keys(host)
return host | Transform the Nmap scan results to the AVAIN scan result format. | modules/nmap/avain_nmap.py | transform_to_avain_scan_format | RE4CT10N/avain | 51 | python | def transform_to_avain_scan_format(parsed_host: dict):
'\n \n '
host = {}
host['os'] = select_os(parsed_host)
host['ip'] = parsed_host['ip']
host['mac'] = parsed_host['mac']
host['tcp'] = parsed_host['tcp']
host['udp'] = parsed_host['udp']
adjust_port_info_keys(host)
return host | def transform_to_avain_scan_format(parsed_host: dict):
'\n \n '
host = {}
host['os'] = select_os(parsed_host)
host['ip'] = parsed_host['ip']
host['mac'] = parsed_host['mac']
host['tcp'] = parsed_host['tcp']
host['udp'] = parsed_host['udp']
adjust_port_info_keys(host)
return host<|docstring|>Transform the Nmap scan results to the AVAIN scan result format.<|endoftext|> |
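An illustrative example of the AVAIN host dict the function above returns; all field values are made up:

avain_host = {
    'os': {'name': 'Linux 4.15', 'cpes': ['cpe:/o:linux:linux_kernel:4.15'],
           'accuracy': '95', 'type': ''},
    'ip': {'addr': '192.168.0.5', 'type': 'ipv4'},
    'mac': {'addr': '00:11:22:33:44:55', 'vendor': 'ExampleVendor'},
    'tcp': {'22': {'portid': '22', 'protocol': 'tcp', 'state': 'open',
                   'name': 'OpenSSH 7.6', 'service': 'ssh',
                   'cpes': ['cpe:/a:openbsd:openssh:7.6']}},
    'udp': {},
}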
48c55abeaebb4d8da8ac168695fc453618641034a4b3ce76e6722c4a8cf7b98f | def select_os(parsed_host: dict):
'\n Out of all suggested OSs for the given host from Nmap, select the most\n likely one using cosine similarity string matching. First a string of\n relevant information is created, second the OS whose information is the\n most similar to the matching string is returned.\n '
global CREATED_FILES, DETECTED_OSS, OS_SIM_SCORES
matching_string = create_sim_matching_string(parsed_host)
with open(MATCH_STR_PATH, 'w') as file:
file.write(matching_string)
CREATED_FILES.append(MATCH_STR_PATH)
potential_oss = extract_oss(parsed_host)
for pot_os in potential_oss:
if ('cpes' in pot_os):
cpes = pot_os['cpes']
vendor = ''
if cpes:
cpe_vend = cpes[0][7:]
vendor = cpe_vend[:cpe_vend.find(':')]
if (vendor and (not pot_os['name'].lower().startswith(vendor))):
pot_os['name'] = (((vendor[0].upper() + vendor[1:]) + ' ') + pot_os['name'])
DETECTED_OSS[parsed_host['ip']['addr']] = potential_oss
os_sim_scores = []
(is_os, highest_sim) = (None, (- 1))
for pot_os in potential_oss:
(cur_name, sim_sum) = ('', (- 1))
for word in pot_os['name'].split(' '):
cur_name += word.lower()
cur_sim = util.compute_cosine_similarity(matching_string, cur_name)
sim_sum += cur_sim
cur_name += ' '
sim = (sim_sum / len(pot_os['name'].split(' ')))
if pot_os.get('cpes', []):
avg_cpe_sim = (sum((util.compute_cosine_similarity(matching_string, cpe[7:].lower()) for cpe in pot_os['cpes'])) / len(pot_os['cpes']))
sim = ((sim + avg_cpe_sim) / 2)
sim *= (float(pot_os['accuracy']) / 100)
os_sim_scores.append((pot_os, sim))
if (sim > highest_sim):
highest_sim = sim
is_os = pot_os
OS_SIM_SCORES[parsed_host['ip']['addr']] = os_sim_scores
if is_os:
return is_os
return {'name': '', 'cpes': []} | Out of all suggested OSs for the given host from Nmap, select the most
likely one using cosine similarity string matching. First a string of
relevant information is created, second the OS whose information is the
most similar to the matching string is returned. | modules/nmap/avain_nmap.py | select_os | RE4CT10N/avain | 51 | python | def select_os(parsed_host: dict):
'\n Out of all suggested OSs for the given host from Nmap, select the most\n likely one using cosine similarity string matching. First a string of\n relevant information is created, second the OS whose information is the\n most similar to the matching string is returned.\n '
global CREATED_FILES, DETECTED_OSS, OS_SIM_SCORES
matching_string = create_sim_matching_string(parsed_host)
with open(MATCH_STR_PATH, 'w') as file:
file.write(matching_string)
CREATED_FILES.append(MATCH_STR_PATH)
potential_oss = extract_oss(parsed_host)
for pot_os in potential_oss:
if ('cpes' in pot_os):
cpes = pot_os['cpes']
            vendor = ''
if cpes:
cpe_vend = cpes[0][7:]
vendor = cpe_vend[:cpe_vend.find(':')]
if (vendor and (not pot_os['name'].lower().startswith(vendor))):
pot_os['name'] = (((vendor[0].upper() + vendor[1:]) + ' ') + pot_os['name'])
DETECTED_OSS[parsed_host['ip']['addr']] = potential_oss
os_sim_scores = []
(is_os, highest_sim) = (None, (- 1))
for pot_os in potential_oss:
        (cur_name, sim_sum) = ('', (- 1))
for word in pot_os['name'].split(' '):
cur_name += word.lower()
cur_sim = util.compute_cosine_similarity(matching_string, cur_name)
sim_sum += cur_sim
cur_name += ' '
sim = (sim_sum / len(pot_os['name'].split(' ')))
if pot_os.get('cpes', []):
avg_cpe_sim = (sum((util.compute_cosine_similarity(matching_string, cpe[7:].lower()) for cpe in pot_os['cpes'])) / len(pot_os['cpes']))
sim = ((sim + avg_cpe_sim) / 2)
sim *= (float(pot_os['accuracy']) / 100)
os_sim_scores.append((pot_os, sim))
if (sim > highest_sim):
highest_sim = sim
is_os = pot_os
OS_SIM_SCORES[parsed_host['ip']['addr']] = os_sim_scores
if is_os:
return is_os
    return {'name': '', 'cpes': []}
'\n Out of all suggested OSs for the given host from Nmap, select the most\n likely one using cosine similarity string matching. First a string of\n relevant information is created, second the OS whose information is the\n most similar to the matching string is returned.\n '
global CREATED_FILES, DETECTED_OSS, OS_SIM_SCORES
matching_string = create_sim_matching_string(parsed_host)
with open(MATCH_STR_PATH, 'w') as file:
file.write(matching_string)
CREATED_FILES.append(MATCH_STR_PATH)
potential_oss = extract_oss(parsed_host)
for pot_os in potential_oss:
if ('cpes' in pot_os):
cpes = pot_os['cpes']
            vendor = ''
if cpes:
cpe_vend = cpes[0][7:]
vendor = cpe_vend[:cpe_vend.find(':')]
if (vendor and (not pot_os['name'].lower().startswith(vendor))):
pot_os['name'] = (((vendor[0].upper() + vendor[1:]) + ' ') + pot_os['name'])
DETECTED_OSS[parsed_host['ip']['addr']] = potential_oss
os_sim_scores = []
(is_os, highest_sim) = (None, (- 1))
for pot_os in potential_oss:
(cur_name, sim_sum) = (, (- 1))
for word in pot_os['name'].split(' '):
cur_name += word.lower()
cur_sim = util.compute_cosine_similarity(matching_string, cur_name)
sim_sum += cur_sim
cur_name += ' '
sim = (sim_sum / len(pot_os['name'].split(' ')))
if pot_os.get('cpes', []):
avg_cpe_sim = (sum((util.compute_cosine_similarity(matching_string, cpe[7:].lower()) for cpe in pot_os['cpes'])) / len(pot_os['cpes']))
sim = ((sim + avg_cpe_sim) / 2)
sim *= (float(pot_os['accuracy']) / 100)
os_sim_scores.append((pot_os, sim))
if (sim > highest_sim):
highest_sim = sim
is_os = pot_os
OS_SIM_SCORES[parsed_host['ip']['addr']] = os_sim_scores
if is_os:
return is_os
    return {'name': '', 'cpes': []}
likely one using cosine similarity string matching. First a string of
relevant information is created, second the OS whose information is the
most similar to the matching string is returned.<|endoftext|> |
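util.compute_cosine_similarity is not shown in this record; the sketch below is a hedged stand-in that scores two strings by cosine similarity over character-bigram counts, one plausible reading of the matching step described above:

import collections
import math

def cosine_similarity(text1: str, text2: str) -> float:
    def bigrams(text):
        return collections.Counter(text[i:i + 2] for i in range(len(text) - 1))
    v1, v2 = bigrams(text1), bigrams(text2)
    dot = sum(v1[g] * v2[g] for g in v1)
    norm = (math.sqrt(sum(c * c for c in v1.values()))
            * math.sqrt(sum(c * c for c in v2.values())))
    return dot / norm if norm else 0.0

print(cosine_similarity('linux linux_kernel 3.2', 'microsoft windows 10'))  # low
print(cosine_similarity('linux linux_kernel 3.2', 'linux kernel'))          # higher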
c38f8a9f055197c7c672c8e3e67700b507e5c6f671181aa0874b94b1e19dd765 | def create_sim_matching_string(parsed_host: dict):
'\n Fill the matching string with all text that contains information about the OS.\n '
def add_if_exists(obj: dict, field: str):
'\n Add a dict value to the matching string if its key exists in the dict.\n '
nonlocal matching_string
if (field in obj):
matching_string += (obj[field].lower() + ' ')
def add_ports_to_matching_string(protocol: str):
'\n Add the name and product information from the service information to the matching string.\n '
nonlocal parsed_host
if (protocol in parsed_host):
for (_, portinfo) in parsed_host[protocol].items():
add_if_exists(portinfo, 'product')
add_if_exists(portinfo, 'name')
matching_string = ''
if ('osmatches' in parsed_host):
for osmatch in parsed_host['osmatches']:
add_if_exists(osmatch, 'name')
if ('osclasses' in osmatch):
for osclass in osmatch['osclasses']:
if ('cpes' in osclass):
for cpe in osclass['cpes']:
matching_string += (cpe.lower() + ' ')
add_if_exists(osclass, 'osfamily')
add_if_exists(osclass, 'osgen')
add_if_exists(osclass, 'vendor')
if ('os_si' in parsed_host):
for os_si in parsed_host['os_si']:
if ('cpes' in os_si):
for cpe in os_si['cpes']:
matching_string += (cpe.lower() + ' ')
add_if_exists(os_si, 'name')
if ('os_smb_discv' in parsed_host):
for os_smb_discv in parsed_host['os_smb_discv']:
if ('cpe' in os_smb_discv):
matching_string += (os_smb_discv['cpe'].lower() + ' ')
add_if_exists(os_smb_discv, 'name')
add_ports_to_matching_string('tcp')
add_ports_to_matching_string('udp')
if ('mac' in parsed_host):
add_if_exists(parsed_host['mac'], 'vendor')
return matching_string | Fill the matching string with all text that contains information about the OS. | modules/nmap/avain_nmap.py | create_sim_matching_string | RE4CT10N/avain | 51 | python | def create_sim_matching_string(parsed_host: dict):
'\n \n '
def add_if_exists(obj: dict, field: str):
'\n Add a dict value to the matching string if its key exists in the dict.\n '
nonlocal matching_string
if (field in obj):
matching_string += (obj[field].lower() + ' ')
def add_ports_to_matching_string(protocol: str):
'\n Add the name and product information from the service information to the matching string.\n '
nonlocal parsed_host
if (protocol in parsed_host):
for (_, portinfo) in parsed_host[protocol].items():
add_if_exists(portinfo, 'product')
add_if_exists(portinfo, 'name')
    matching_string = ''
if ('osmatches' in parsed_host):
for osmatch in parsed_host['osmatches']:
add_if_exists(osmatch, 'name')
if ('osclasses' in osmatch):
for osclass in osmatch['osclasses']:
if ('cpes' in osclass):
for cpe in osclass['cpes']:
matching_string += (cpe.lower() + ' ')
add_if_exists(osclass, 'osfamily')
add_if_exists(osclass, 'osgen')
add_if_exists(osclass, 'vendor')
if ('os_si' in parsed_host):
for os_si in parsed_host['os_si']:
if ('cpes' in os_si):
for cpe in os_si['cpes']:
matching_string += (cpe.lower() + ' ')
add_if_exists(os_si, 'name')
if ('os_smb_discv' in parsed_host):
for os_smb_discv in parsed_host['os_smb_discv']:
if ('cpe' in os_smb_discv):
matching_string += (os_smb_discv['cpe'].lower() + ' ')
add_if_exists(os_smb_discv, 'name')
add_ports_to_matching_string('tcp')
add_ports_to_matching_string('udp')
if ('mac' in parsed_host):
add_if_exists(parsed_host['mac'], 'vendor')
return matching_string | def create_sim_matching_string(parsed_host: dict):
'\n \n '
def add_if_exists(obj: dict, field: str):
'\n Add a dict value to the matching string if its key exists in the dict.\n '
nonlocal matching_string
if (field in obj):
matching_string += (obj[field].lower() + ' ')
def add_ports_to_matching_string(protocol: str):
'\n Add the name and product information from the service information to the matching string.\n '
nonlocal parsed_host
if (protocol in parsed_host):
for (_, portinfo) in parsed_host[protocol].items():
add_if_exists(portinfo, 'product')
add_if_exists(portinfo, 'name')
    matching_string = ''
if ('osmatches' in parsed_host):
for osmatch in parsed_host['osmatches']:
add_if_exists(osmatch, 'name')
if ('osclasses' in osmatch):
for osclass in osmatch['osclasses']:
if ('cpes' in osclass):
for cpe in osclass['cpes']:
matching_string += (cpe.lower() + ' ')
add_if_exists(osclass, 'osfamily')
add_if_exists(osclass, 'osgen')
add_if_exists(osclass, 'vendor')
if ('os_si' in parsed_host):
for os_si in parsed_host['os_si']:
if ('cpes' in os_si):
for cpe in os_si['cpes']:
matching_string += (cpe.lower() + ' ')
add_if_exists(os_si, 'name')
if ('os_smb_discv' in parsed_host):
for os_smb_discv in parsed_host['os_smb_discv']:
if ('cpe' in os_smb_discv):
matching_string += (os_smb_discv['cpe'].lower() + ' ')
add_if_exists(os_smb_discv, 'name')
add_ports_to_matching_string('tcp')
add_ports_to_matching_string('udp')
if ('mac' in parsed_host):
add_if_exists(parsed_host['mac'], 'vendor')
return matching_string<|docstring|>Fill the matching string with all text that contains information about the OS.<|endoftext|> |
2bde6b77fe4981cfc2266556b40c6508f886f9f2ddadcfc8b781307785542898 | def extract_oss(parsed_host: dict):
'\n Return a list of potential OSs for the given host. More broad OSs are replaced\n by more concrete ones. E.g. within potential_oss, Windows is replaced by Windows 10.\n '
def add_direct_oss():
        'Add the OSs found in the osmatches to potential_oss'
nonlocal parsed_host, potential_oss
if ('osmatches' in parsed_host):
for osmatch in parsed_host['osmatches']:
if ('osclasses' in osmatch):
for osclass in osmatch['osclasses']:
name = ''
if ('vendor' in osclass):
name += (osclass['vendor'] + ' ')
if ('osfamily' in osclass):
name += (osclass['osfamily'] + ' ')
if ('osgen' in osclass):
name += osclass['osgen']
name = name.strip()
if osclass.get('cpes', []):
for cpe in osclass['cpes']:
store_os = True
replace_accuracy = 0
if potential_oss:
for (i, pot_os) in enumerate(potential_oss):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
store_os = False
if any((((pot_cpe in cpe) and (not (cpe == pot_cpe))) for pot_cpe in pot_os['cpes'])):
store_os = True
if (int(pot_os['accuracy']) > int(replace_accuracy)):
replace_accuracy = pot_os['accuracy']
del potential_oss[i]
if store_os:
accuracy = str(max([int(osclass['accuracy']), int(replace_accuracy)]))
potential_oss.append({'name': name, 'cpes': osclass['cpes'], 'accuracy': accuracy, 'type': osclass.get('type', '')})
break
elif (not any(((name in pot_os['name']) for pot_os in potential_oss))):
potential_oss.append({'name': name, 'cpes': [], 'accuracy': osclass['accuracy'], 'type': osclass.get('type', '')})
def add_potential_oss_from_service(dict_key: str):
'\n Evaluate nmap Service Info OS information and append result to potential OSs\n\n :param dict_key: the key that identifies the service field within a host dict\n '
nonlocal parsed_host, potential_oss
added_in_service = set()
if (dict_key in parsed_host):
for service_elem in parsed_host[dict_key]:
found_supstring = False
if ('cpe' in service_elem):
service_elem['cpes'] = [service_elem['cpe']]
if ('cpes' in service_elem):
for cpe in service_elem['cpes']:
for pot_os in potential_oss:
if ('cpes' in pot_os):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
found_supstring = True
break
if found_supstring:
break
replaced_os = False
potential_os_cpy = copy.deepcopy(potential_oss)
for (i, pot_os) in enumerate(potential_os_cpy):
pot_os_cmp = pot_os['name'].replace(' ', '').lower()
service_os_cmp = service_elem['name'].replace(' ', '').lower()
if ((pot_os_cmp != service_os_cmp) and (pot_os_cmp in service_os_cmp)):
del potential_oss[i]
new_pot_os = {'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if ('cpes' in service_elem):
new_pot_os['cpes'] = service_elem['cpes']
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
elif (('cpes' in service_elem) and ('cpes' in pot_os)):
for cpe in service_elem['cpes']:
if any(((pot_cpe in cpe) for pot_cpe in pot_os['cpes'])):
del potential_oss[i]
new_pot_os = {'name': service_elem['name'], 'cpes': service_elem['cpes'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
if ((not found_supstring) and (not replaced_os) and (not (service_elem['name'] in added_in_service))):
potential_oss.append({'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')})
added_in_service.add(service_elem['name'])
if ('cpes' in service_elem):
potential_oss[(- 1)]['cpes'] = service_elem['cpes']
potential_oss = []
add_direct_oss()
add_potential_oss_from_service('os_si')
add_potential_oss_from_service('os_smb_discv')
return potential_oss | Return a list of potential OSs for the given host. More broad OSs are replaced
by more concrete ones. E.g. within potential_oss, Windows is replaced by Windows 10. | modules/nmap/avain_nmap.py | extract_oss | RE4CT10N/avain | 51 | python | def extract_oss(parsed_host: dict):
'\n Return a list of potential OSs for the given host. More broad OSs are replaced\n by more concrete ones. E.g. within potential_oss, Windows is replaced by Windows 10.\n '
def add_direct_oss():
        'Add the OSs found in the osmatches to potential_oss'
nonlocal parsed_host, potential_oss
if ('osmatches' in parsed_host):
for osmatch in parsed_host['osmatches']:
if ('osclasses' in osmatch):
for osclass in osmatch['osclasses']:
                        name = ''
if ('vendor' in osclass):
name += (osclass['vendor'] + ' ')
if ('osfamily' in osclass):
name += (osclass['osfamily'] + ' ')
if ('osgen' in osclass):
name += osclass['osgen']
name = name.strip()
if osclass.get('cpes', []):
for cpe in osclass['cpes']:
store_os = True
replace_accuracy = 0
if potential_oss:
for (i, pot_os) in enumerate(potential_oss):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
store_os = False
if any((((pot_cpe in cpe) and (not (cpe == pot_cpe))) for pot_cpe in pot_os['cpes'])):
store_os = True
if (int(pot_os['accuracy']) > int(replace_accuracy)):
replace_accuracy = pot_os['accuracy']
del potential_oss[i]
if store_os:
accuracy = str(max([int(osclass['accuracy']), int(replace_accuracy)]))
                                    potential_oss.append({'name': name, 'cpes': osclass['cpes'], 'accuracy': accuracy, 'type': osclass.get('type', '')})
break
elif (not any(((name in pot_os['name']) for pot_os in potential_oss))):
                            potential_oss.append({'name': name, 'cpes': [], 'accuracy': osclass['accuracy'], 'type': osclass.get('type', '')})
def add_potential_oss_from_service(dict_key: str):
'\n Evaluate nmap Service Info OS information and append result to potential OSs\n\n :param dict_key: the key that identifies the service field within a host dict\n '
nonlocal parsed_host, potential_oss
added_in_service = set()
if (dict_key in parsed_host):
for service_elem in parsed_host[dict_key]:
found_supstring = False
if ('cpe' in service_elem):
service_elem['cpes'] = [service_elem['cpe']]
if ('cpes' in service_elem):
for cpe in service_elem['cpes']:
for pot_os in potential_oss:
if ('cpes' in pot_os):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
found_supstring = True
break
if found_supstring:
break
replaced_os = False
potential_os_cpy = copy.deepcopy(potential_oss)
for (i, pot_os) in enumerate(potential_os_cpy):
                    pot_os_cmp = pot_os['name'].replace(' ', '').lower()
                    service_os_cmp = service_elem['name'].replace(' ', '').lower()
if ((pot_os_cmp != service_os_cmp) and (pot_os_cmp in service_os_cmp)):
del potential_oss[i]
                        new_pot_os = {'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if ('cpes' in service_elem):
new_pot_os['cpes'] = service_elem['cpes']
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
elif (('cpes' in service_elem) and ('cpes' in pot_os)):
for cpe in service_elem['cpes']:
if any(((pot_cpe in cpe) for pot_cpe in pot_os['cpes'])):
del potential_oss[i]
                                new_pot_os = {'name': service_elem['name'], 'cpes': service_elem['cpes'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
if ((not found_supstring) and (not replaced_os) and (not (service_elem['name'] in added_in_service))):
                    potential_oss.append({'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')})
added_in_service.add(service_elem['name'])
if ('cpes' in service_elem):
potential_oss[(- 1)]['cpes'] = service_elem['cpes']
potential_oss = []
add_direct_oss()
add_potential_oss_from_service('os_si')
add_potential_oss_from_service('os_smb_discv')
return potential_oss | def extract_oss(parsed_host: dict):
'\n Return a list of potential OSs for the given host. More broad OSs are replaced\n by more concrete ones. E.g. within potential_oss, Windows is replaced by Windows 10.\n '
def add_direct_oss():
        'Add the OSs found in the osmatches to potential_oss'
nonlocal parsed_host, potential_oss
if ('osmatches' in parsed_host):
for osmatch in parsed_host['osmatches']:
if ('osclasses' in osmatch):
for osclass in osmatch['osclasses']:
                        name = ''
if ('vendor' in osclass):
name += (osclass['vendor'] + ' ')
if ('osfamily' in osclass):
name += (osclass['osfamily'] + ' ')
if ('osgen' in osclass):
name += osclass['osgen']
name = name.strip()
if osclass.get('cpes', []):
for cpe in osclass['cpes']:
store_os = True
replace_accuracy = 0
if potential_oss:
for (i, pot_os) in enumerate(potential_oss):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
store_os = False
if any((((pot_cpe in cpe) and (not (cpe == pot_cpe))) for pot_cpe in pot_os['cpes'])):
store_os = True
if (int(pot_os['accuracy']) > int(replace_accuracy)):
replace_accuracy = pot_os['accuracy']
del potential_oss[i]
if store_os:
accuracy = str(max([int(osclass['accuracy']), int(replace_accuracy)]))
                                    potential_oss.append({'name': name, 'cpes': osclass['cpes'], 'accuracy': accuracy, 'type': osclass.get('type', '')})
break
elif (not any(((name in pot_os['name']) for pot_os in potential_oss))):
                            potential_oss.append({'name': name, 'cpes': [], 'accuracy': osclass['accuracy'], 'type': osclass.get('type', '')})
def add_potential_oss_from_service(dict_key: str):
'\n Evaluate nmap Service Info OS information and append result to potential OSs\n\n :param dict_key: the key that identifies the service field within a host dict\n '
nonlocal parsed_host, potential_oss
added_in_service = set()
if (dict_key in parsed_host):
for service_elem in parsed_host[dict_key]:
found_supstring = False
if ('cpe' in service_elem):
service_elem['cpes'] = [service_elem['cpe']]
if ('cpes' in service_elem):
for cpe in service_elem['cpes']:
for pot_os in potential_oss:
if ('cpes' in pot_os):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
found_supstring = True
break
if found_supstring:
break
replaced_os = False
potential_os_cpy = copy.deepcopy(potential_oss)
for (i, pot_os) in enumerate(potential_os_cpy):
                    pot_os_cmp = pot_os['name'].replace(' ', '').lower()
                    service_os_cmp = service_elem['name'].replace(' ', '').lower()
if ((pot_os_cmp != service_os_cmp) and (pot_os_cmp in service_os_cmp)):
del potential_oss[i]
                        new_pot_os = {'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if ('cpes' in service_elem):
new_pot_os['cpes'] = service_elem['cpes']
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
elif (('cpes' in service_elem) and ('cpes' in pot_os)):
for cpe in service_elem['cpes']:
if any(((pot_cpe in cpe) for pot_cpe in pot_os['cpes'])):
del potential_oss[i]
                                new_pot_os = {'name': service_elem['name'], 'cpes': service_elem['cpes'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
if ((not found_supstring) and (not replaced_os) and (not (service_elem['name'] in added_in_service))):
                    potential_oss.append({'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')})
added_in_service.add(service_elem['name'])
if ('cpes' in service_elem):
potential_oss[(- 1)]['cpes'] = service_elem['cpes']
potential_oss = []
add_direct_oss()
add_potential_oss_from_service('os_si')
add_potential_oss_from_service('os_smb_discv')
return potential_oss<|docstring|>Return a list of potential OSs for the given host. More broad OSs are replaced
by more concrete ones. E.g. within potential_oss, Windows is replaced by Windows 10.<|endoftext|> |
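A deliberately simplified illustration of the CPE substring rule extract_oss() applies: a strictly more specific CPE replaces a broader one already stored, while a broader duplicate of an existing entry is dropped; the real function also tracks accuracy values and OS names:

pot = ['cpe:/o:microsoft:windows']
candidate = 'cpe:/o:microsoft:windows_10'
if any((p in candidate) and (p != candidate) for p in pot):
    pot = [c for c in pot if c not in candidate] + [candidate]
print(pot)  # ['cpe:/o:microsoft:windows_10']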
917dca8849dc689cf6b238d2b45f7f2883a72a8c4baf6213348517a40dce4987 | def adjust_port_info_keys(host: dict):
'\n Deletes the name, version and product keys of the port dicts of the host and instead adds\n a name describing the likely running software and a service key naming the offered service.\n\n :param host: the host to change the service keys of\n '
def adjust_ports(protocol: str):
'Adjust the information of all ports using the specified transport protocol'
nonlocal host
for (_, port) in host[protocol].items():
(product, name) = (port.get('product', ''), port.get('name', ''))
version = port.get('version', '')
keys = {'product', 'name', 'version'}
for k in keys:
if (k in port):
del port[k]
new_name = product
if version:
version = version.strip()
new_name += (' ' + version)
if re.match('^(\\d+\\.)*\\d+:?\\w*$', version):
cpes = port.get('cpes', [])
if ((len(cpes) == 1) and (len(cpes[0].split(':')) < 5)):
port['cpes'] = [((cpes[0] + ':') + version)]
port['name'] = new_name
port['service'] = name
if ('tcp' in host):
adjust_ports('tcp')
if ('udp' in host):
adjust_ports('udp') | Deletes the name, version and product keys of the port dicts of the host and instead adds
a name describing the likely running software and a service key naming the offered service.
:param host: the host to change the service keys of | modules/nmap/avain_nmap.py | adjust_port_info_keys | RE4CT10N/avain | 51 | python | def adjust_port_info_keys(host: dict):
'\n Deletes the name, version and product keys of the port dicts of the host and instead adds\n a name describing the likely running software and a service key naming the offered service.\n\n :param host: the host to change the service keys of\n '
def adjust_ports(protocol: str):
'Adjust the information of all ports using the specified transport protocol'
nonlocal host
for (_, port) in host[protocol].items():
            (product, name) = (port.get('product', ''), port.get('name', ''))
            version = port.get('version', '')
keys = {'product', 'name', 'version'}
for k in keys:
if (k in port):
del port[k]
new_name = product
if version:
version = version.strip()
new_name += (' ' + version)
if re.match('^(\\d+\\.)*\\d+:?\\w*$', version):
cpes = port.get('cpes', [])
if ((len(cpes) == 1) and (len(cpes[0].split(':')) < 5)):
port['cpes'] = [((cpes[0] + ':') + version)]
port['name'] = new_name
port['service'] = name
if ('tcp' in host):
adjust_ports('tcp')
if ('udp' in host):
adjust_ports('udp') | def adjust_port_info_keys(host: dict):
'\n Deletes the name, version and product keys of the port dicts of the host and instead adds\n a name describing the likely running software and a service key naming the offered service.\n\n :param host: the host to change the service keys of\n '
def adjust_ports(protocol: str):
'Adjust the information of all ports using the specified transport protocol'
nonlocal host
for (_, port) in host[protocol].items():
            (product, name) = (port.get('product', ''), port.get('name', ''))
            version = port.get('version', '')
keys = {'product', 'name', 'version'}
for k in keys:
if (k in port):
del port[k]
new_name = product
if version:
version = version.strip()
new_name += (' ' + version)
if re.match('^(\\d+\\.)*\\d+:?\\w*$', version):
cpes = port.get('cpes', [])
if ((len(cpes) == 1) and (len(cpes[0].split(':')) < 5)):
port['cpes'] = [((cpes[0] + ':') + version)]
port['name'] = new_name
port['service'] = name
if ('tcp' in host):
adjust_ports('tcp')
if ('udp' in host):
adjust_ports('udp')<|docstring|>Deletes the name, version and product keys of the port dicts of the host and instead adds
a name describing the likely running software and a service key naming the offered service.
:param host: the host to change the service keys of<|endoftext|> |
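A focused sketch of the version-handling step above: a bare numeric version string is appended to a versionless CPE, while anything containing extra tokens is left alone; the helper name is invented for illustration:

import re

def augment_cpe(cpe: str, version: str) -> str:
    # append the version only if it is purely numeric-ish and the CPE has none yet
    if re.match(r'^(\d+\.)*\d+:?\w*$', version) and len(cpe.split(':')) < 5:
        return cpe + ':' + version
    return cpe

print(augment_cpe('cpe:/a:openbsd:openssh', '7.6'))           # cpe:/a:openbsd:openssh:7.6
print(augment_cpe('cpe:/a:openbsd:openssh', '7.6p1 Ubuntu'))  # unchanged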
4923986bdeea13b4327e531797bfda11ea276e975be16ac5417a8113eee61ed8 | def parse_addresses():
'\n Parse the <address> tags containing IP and MAC information.\n '
nonlocal host, host_elem
(ip, ip_type) = ('', '')
(mac, vendor) = ('', '')
for addr in host_elem.findall('address'):
if ((addr.attrib['addrtype'] == 'ipv4') or (addr.attrib['addrtype'] == 'ipv6')):
ip = addr.attrib['addr']
ip_type = addr.attrib['addrtype']
elif (addr.attrib['addrtype'] == 'mac'):
mac = addr.attrib['addr']
vendor = addr.attrib.get('vendor', '')
host['ip'] = {'addr': ip, 'type': ip_type}
host['mac'] = {'addr': mac, 'vendor': vendor} | Parse the <address> tags containing IP and MAC information. | modules/nmap/avain_nmap.py | parse_addresses | RE4CT10N/avain | 51 | python | def parse_addresses():
'\n \n '
nonlocal host, host_elem
    (ip, ip_type) = ('', '')
    (mac, vendor) = ('', '')
for addr in host_elem.findall('address'):
if ((addr.attrib['addrtype'] == 'ipv4') or (addr.attrib['addrtype'] == 'ipv6')):
ip = addr.attrib['addr']
ip_type = addr.attrib['addrtype']
elif (addr.attrib['addrtype'] == 'mac'):
mac = addr.attrib['addr']
            vendor = addr.attrib.get('vendor', '')
host['ip'] = {'addr': ip, 'type': ip_type}
host['mac'] = {'addr': mac, 'vendor': vendor} | def parse_addresses():
'\n \n '
nonlocal host, host_elem
    (ip, ip_type) = ('', '')
    (mac, vendor) = ('', '')
for addr in host_elem.findall('address'):
if ((addr.attrib['addrtype'] == 'ipv4') or (addr.attrib['addrtype'] == 'ipv6')):
ip = addr.attrib['addr']
ip_type = addr.attrib['addrtype']
elif (addr.attrib['addrtype'] == 'mac'):
mac = addr.attrib['addr']
            vendor = addr.attrib.get('vendor', '')
host['ip'] = {'addr': ip, 'type': ip_type}
host['mac'] = {'addr': mac, 'vendor': vendor}<|docstring|>Parse the <address> tags containing IP and MAC information.<|endoftext|> |
f2c69634aacd19211781fb1a341a7d69c396f09c9cb1a439f761a2393c42ce3c | def parse_osmatches():
'\n Parse all <osmatch> and their <osclass> tags to gather OS information.\n '
nonlocal host, host_elem
osmatches = []
os_elem = host_elem.find('os')
if (os_elem is not None):
for osmatch_elem in os_elem.findall('osmatch'):
osmatch = {}
for (key, value) in osmatch_elem.attrib.items():
if (key != 'line'):
osmatch[key] = value
osclasses = []
for osclass_elem in osmatch_elem.findall('osclass'):
osclass = {}
for (key, value) in osclass_elem.attrib.items():
osclass[key] = value
cpe_elems = osclass_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpes.append(cpe_elem.text)
osclass['cpes'] = cpes
osclasses.append(osclass)
osmatch['osclasses'] = osclasses
osmatches.append(osmatch)
host['osmatches'] = osmatches | Parse all <osmatch> and their <osclass> tags to gather OS information. | modules/nmap/avain_nmap.py | parse_osmatches | RE4CT10N/avain | 51 | python | def parse_osmatches():
'\n \n '
nonlocal host, host_elem
osmatches = []
os_elem = host_elem.find('os')
if (os_elem is not None):
for osmatch_elem in os_elem.findall('osmatch'):
osmatch = {}
for (key, value) in osmatch_elem.attrib.items():
if (key != 'line'):
osmatch[key] = value
osclasses = []
for osclass_elem in osmatch_elem.findall('osclass'):
osclass = {}
for (key, value) in osclass_elem.attrib.items():
osclass[key] = value
cpe_elems = osclass_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpes.append(cpe_elem.text)
osclass['cpes'] = cpes
osclasses.append(osclass)
osmatch['osclasses'] = osclasses
osmatches.append(osmatch)
host['osmatches'] = osmatches | def parse_osmatches():
'\n \n '
nonlocal host, host_elem
osmatches = []
os_elem = host_elem.find('os')
if (os_elem is not None):
for osmatch_elem in os_elem.findall('osmatch'):
osmatch = {}
for (key, value) in osmatch_elem.attrib.items():
if (key != 'line'):
osmatch[key] = value
osclasses = []
for osclass_elem in osmatch_elem.findall('osclass'):
osclass = {}
for (key, value) in osclass_elem.attrib.items():
osclass[key] = value
cpe_elems = osclass_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpes.append(cpe_elem.text)
osclass['cpes'] = cpes
osclasses.append(osclass)
osmatch['osclasses'] = osclasses
osmatches.append(osmatch)
host['osmatches'] = osmatches<|docstring|>Parse all <osmatch> and their <osclass> tags to gather OS information.<|endoftext|> |
725992c97147ba29c12a4078263709907155c9d7ab8f2d344a4b6b3e2b5eb5a3 | def parse_port_information():
'\n Parse all <port> tags contained in <ports> to gather port information.\n '
nonlocal host, host_elem
(tcp_ports, udp_ports) = ({}, {})
port_elem = host_elem.find('ports')
if (port_elem is not None):
for port_elem in port_elem.findall('port'):
port = {}
port['portid'] = port_elem.attrib['portid']
port['protocol'] = port_elem.attrib['protocol']
port['state'] = port_elem.find('state').attrib['state']
if (('closed' in port['state']) or ('filtered' in port['state'])):
continue
new_os_si_added = False
(service_elem, service_version) = (port_elem.find('service'), '')
if (service_elem is not None):
for (key, value) in service_elem.attrib.items():
if (key not in ('conf', 'method', 'ostype', 'devicetype')):
port[key] = value
elif (key == 'ostype'):
if ('os_si' not in host):
host['os_si'] = []
if (not any(((os_si['name'] == value) for os_si in host['os_si']))):
host['os_si'].append({'name': value})
if ('devicetype' in service_elem.attrib):
host['os_si'][(- 1)]['type'] = service_elem.attrib['devicetype']
new_os_si_added = True
cpe_elems = service_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpe_text = cpe_elem.text
if cpe_text.startswith('cpe:/a:'):
cpes.append(cpe_text)
elif (cpe_text.startswith('cpe:/o:') and new_os_si_added):
if (not ('cpes' in host['os_si'][(- 1)])):
host['os_si'][(- 1)]['cpes'] = []
host['os_si'][(- 1)]['cpes'].append(cpe_text)
if cpes:
port['cpes'] = cpes
if (port['protocol'] == 'tcp'):
tcp_ports[port['portid']] = port
elif (port['protocol'] == 'udp'):
udp_ports[port['portid']] = port
host['tcp'] = tcp_ports
host['udp'] = udp_ports | Parse all <port> tags contained in <ports> to gather port information. | modules/nmap/avain_nmap.py | parse_port_information | RE4CT10N/avain | 51 | python | def parse_port_information():
'\n \n '
nonlocal host, host_elem
(tcp_ports, udp_ports) = ({}, {})
port_elem = host_elem.find('ports')
if (port_elem is not None):
for port_elem in port_elem.findall('port'):
port = {}
port['portid'] = port_elem.attrib['portid']
port['protocol'] = port_elem.attrib['protocol']
port['state'] = port_elem.find('state').attrib['state']
if (('closed' in port['state']) or ('filtered' in port['state'])):
continue
new_os_si_added = False
(service_elem, service_version) = (port_elem.find('service'), '')
if (service_elem is not None):
for (key, value) in service_elem.attrib.items():
if (key not in ('conf', 'method', 'ostype', 'devicetype')):
port[key] = value
elif (key == 'ostype'):
if ('os_si' not in host):
host['os_si'] = []
if (not any(((os_si['name'] == value) for os_si in host['os_si']))):
host['os_si'].append({'name': value})
if ('devicetype' in service_elem.attrib):
host['os_si'][(- 1)]['type'] = service_elem.attrib['devicetype']
new_os_si_added = True
cpe_elems = service_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpe_text = cpe_elem.text
if cpe_text.startswith('cpe:/a:'):
cpes.append(cpe_text)
elif (cpe_text.startswith('cpe:/o:') and new_os_si_added):
if (not ('cpes' in host['os_si'][(- 1)])):
host['os_si'][(- 1)]['cpes'] = []
host['os_si'][(- 1)]['cpes'].append(cpe_text)
if cpes:
port['cpes'] = cpes
if (port['protocol'] == 'tcp'):
tcp_ports[port['portid']] = port
elif (port['protocol'] == 'udp'):
udp_ports[port['portid']] = port
host['tcp'] = tcp_ports
host['udp'] = udp_ports | def parse_port_information():
'\n \n '
nonlocal host, host_elem
(tcp_ports, udp_ports) = ({}, {})
port_elem = host_elem.find('ports')
if (port_elem is not None):
for port_elem in port_elem.findall('port'):
port = {}
port['portid'] = port_elem.attrib['portid']
port['protocol'] = port_elem.attrib['protocol']
port['state'] = port_elem.find('state').attrib['state']
if (('closed' in port['state']) or ('filtered' in port['state'])):
continue
new_os_si_added = False
(service_elem, service_version) = (port_elem.find('service'), '')
if (service_elem is not None):
for (key, value) in service_elem.attrib.items():
if (key not in ('conf', 'method', 'ostype', 'devicetype')):
port[key] = value
elif (key == 'ostype'):
if ('os_si' not in host):
host['os_si'] = []
if (not any(((os_si['name'] == value) for os_si in host['os_si']))):
host['os_si'].append({'name': value})
if ('devicetype' in service_elem.attrib):
host['os_si'][(- 1)]['type'] = service_elem.attrib['devicetype']
new_os_si_added = True
cpe_elems = service_elem.findall('cpe')
cpes = []
for cpe_elem in cpe_elems:
cpe_text = cpe_elem.text
if cpe_text.startswith('cpe:/a:'):
cpes.append(cpe_text)
elif (cpe_text.startswith('cpe:/o:') and new_os_si_added):
if (not ('cpes' in host['os_si'][(- 1)])):
host['os_si'][(- 1)]['cpes'] = []
host['os_si'][(- 1)]['cpes'].append(cpe_text)
if cpes:
port['cpes'] = cpes
if (port['protocol'] == 'tcp'):
tcp_ports[port['portid']] = port
elif (port['protocol'] == 'udp'):
udp_ports[port['portid']] = port
host['tcp'] = tcp_ports
host['udp'] = udp_ports<|docstring|>Parse all <port> tags contained in <ports> to gather port information.<|endoftext|> |
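For orientation, a hedged sketch (all values invented) of the per-port dict this parser stores under host['tcp'] for an open port:

example_port = {
    'portid': '80', 'protocol': 'tcp', 'state': 'open',
    'name': 'http', 'product': 'nginx', 'version': '1.18.0',
    'cpes': ['cpe:/a:nginx:nginx:1.18.0'],
}
host = {'tcp': {'80': example_port}, 'udp': {}}  # shape only, not real scan data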
c0014c81484ce5ff24fb23ece21fa6638f00bf7ac209dd5d38721551347945ca | def parse_smb_script_information():
'\n Parse the SMB script tag to get SMB information about the OS if possible.\n '
nonlocal host, smb_elem
(os_name, cpe, samba_version) = ('', '', '')
cpe_elem = smb_elem.find(".//elem[@key='cpe']")
if (cpe_elem is not None):
cpe = cpe_elem.text
output = smb_elem.attrib['output'].strip()
for stmt in output.split('\n'):
stmt = stmt.strip()
(key, value) = stmt.split(':', 1)
if (key == 'OS'):
os_name = value.split('(', 1)[0].strip()
match = re.match('.*\\(Samba (\\d+\\.\\d+\\.\\d+[a-z]?).*\\)', value.strip())
if match:
samba_version = match.group(1)
if os_name:
host['os_smb_discv'] = [{'name': os_name}]
if cpe:
host['os_smb_discv'][0]['cpe'] = cpe
if samba_version:
for proto in ('tcp', 'udp'):
for (portid, port_node) in host.get(proto, {}).items():
for (i, cpe) in enumerate(port_node.get('cpes', [])):
if (cpe == 'cpe:/a:samba:samba'):
cpe += (':' + samba_version)
port_node['cpes'][i] = cpe | Parse the SMB script tag to get SMB information about the OS if possible. | modules/nmap/avain_nmap.py | parse_smb_script_information | RE4CT10N/avain | 51 | python | def parse_smb_script_information():
'\n \n '
nonlocal host, smb_elem
(os_name, cpe, samba_version) = ('', '', '')
cpe_elem = smb_elem.find(".//elem[@key='cpe']")
if (cpe_elem is not None):
cpe = cpe_elem.text
output = smb_elem.attrib['output'].strip()
for stmt in output.split('\n'):
stmt = stmt.strip()
(key, value) = stmt.split(':', 1)
if (key == 'OS'):
os_name = value.split('(', 1)[0].strip()
match = re.match('.*\\(Samba (\\d+\\.\\d+\\.\\d+[a-z]?).*\\)', value.strip())
if match:
samba_version = match.group(1)
if os_name:
host['os_smb_discv'] = [{'name': os_name}]
if cpe:
host['os_smb_discv'][0]['cpe'] = cpe
if samba_version:
for proto in ('tcp', 'udp'):
for (portid, port_node) in host.get(proto, {}).items():
for (i, cpe) in enumerate(port_node.get('cpes', [])):
if (cpe == 'cpe:/a:samba:samba'):
cpe += (':' + samba_version)
port_node['cpes'][i] = cpe | def parse_smb_script_information():
'\n \n '
nonlocal host, smb_elem
(os_name, cpe, samba_version) = ('', '', '')
cpe_elem = smb_elem.find(".//elem[@key='cpe']")
if (cpe_elem is not None):
cpe = cpe_elem.text
output = smb_elem.attrib['output'].strip()
for stmt in output.split('\n'):
stmt = stmt.strip()
(key, value) = stmt.split(':', 1)
if (key == 'OS'):
os_name = value.split('(', 1)[0].strip()
match = re.match('.*\\(Samba (\\d+\\.\\d+\\.\\d+[a-z]?).*\\)', value.strip())
if match:
samba_version = match.group(1)
if os_name:
host['os_smb_discv'] = [{'name': os_name}]
if cpe:
host['os_smb_discv'][0]['cpe'] = cpe
if samba_version:
for proto in ('tcp', 'udp'):
for (portid, port_node) in host.get(proto, {}).items():
for (i, cpe) in enumerate(port_node.get('cpes', [])):
if (cpe == 'cpe:/a:samba:samba'):
cpe += (':' + samba_version)
port_node['cpes'][i] = cpe<|docstring|>Parse the SMB script tag to get SMB information about the OS if possible.<|endoftext|> |
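The Samba version extraction above hinges on one regex; a standalone check against an invented banner string:

import re

value = ' Windows 6.1 (Samba 4.3.11-Ubuntu)'  # assumed sample banner
match = re.match('.*\\(Samba (\\d+\\.\\d+\\.\\d+[a-z]?).*\\)', value.strip())
print(match.group(1) if match else None)  # -> 4.3.11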
6b0ebf9bc7468e50f4bb3f456ebbf43450ace677ab4ec076d3130f736c25b3c7 | def add_if_exists(obj: dict, field: str):
'\n Add a dict value to the matching string if its key exists in the dict.\n '
nonlocal matching_string
if (field in obj):
matching_string += (obj[field].lower() + ' ') | Add a dict value to the matching string if its key exists in the dict. | modules/nmap/avain_nmap.py | add_if_exists | RE4CT10N/avain | 51 | python | def add_if_exists(obj: dict, field: str):
'\n \n '
nonlocal matching_string
if (field in obj):
matching_string += (obj[field].lower() + ' ') | def add_if_exists(obj: dict, field: str):
'\n \n '
nonlocal matching_string
if (field in obj):
matching_string += (obj[field].lower() + ' ')<|docstring|>Add a dict value to the matching string if its key exists in the dict.<|endoftext|> |
467ba391f58f19f0e721780ce36bc2b0b75f2a14659523beefba94cb9ac2e080 | def add_ports_to_matching_string(protocol: str):
'\n Add the name and product information from the service information to the matching string.\n '
nonlocal parsed_host
if (protocol in parsed_host):
for (_, portinfo) in parsed_host[protocol].items():
add_if_exists(portinfo, 'product')
add_if_exists(portinfo, 'name') | Add the name and product information from the service information to the matching string. | modules/nmap/avain_nmap.py | add_ports_to_matching_string | RE4CT10N/avain | 51 | python | def add_ports_to_matching_string(protocol: str):
'\n \n '
nonlocal parsed_host
if (protocol in parsed_host):
for (_, portinfo) in parsed_host[protocol].items():
add_if_exists(portinfo, 'product')
add_if_exists(portinfo, 'name') | def add_ports_to_matching_string(protocol: str):
'\n \n '
nonlocal parsed_host
if (protocol in parsed_host):
for (_, portinfo) in parsed_host[protocol].items():
add_if_exists(portinfo, 'product')
add_if_exists(portinfo, 'name')<|docstring|>Add the name and product information from the service information to the matching string.<|endoftext|> |
433521a1e6c7161a831facf445a559927f189f107627e1c561eed42d1bd9dc17 | def add_direct_oss():
'Add the OSs found in the osmatches to potential_oss'
nonlocal parsed_host, potential_oss
if ('osmatches' in parsed_host):
for osmatch in parsed_host['osmatches']:
if ('osclasses' in osmatch):
for osclass in osmatch['osclasses']:
name = ''
if ('vendor' in osclass):
name += (osclass['vendor'] + ' ')
if ('osfamily' in osclass):
name += (osclass['osfamily'] + ' ')
if ('osgen' in osclass):
name += osclass['osgen']
name = name.strip()
if osclass.get('cpes', []):
for cpe in osclass['cpes']:
store_os = True
replace_accuracy = 0
if potential_oss:
for (i, pot_os) in enumerate(potential_oss):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
store_os = False
if any((((pot_cpe in cpe) and (not (cpe == pot_cpe))) for pot_cpe in pot_os['cpes'])):
store_os = True
if (int(pot_os['accuracy']) > int(replace_accuracy)):
replace_accuracy = pot_os['accuracy']
del potential_oss[i]
if store_os:
accuracy = str(max([int(osclass['accuracy']), int(replace_accuracy)]))
potential_oss.append({'name': name, 'cpes': osclass['cpes'], 'accuracy': accuracy, 'type': osclass.get('type', '')})
break
elif (not any(((name in pot_os['name']) for pot_os in potential_oss))):
potential_oss.append({'name': name, 'cpes': [], 'accuracy': osclass['accuracy'], 'type': osclass.get('type', '')}) | Add the OSs found in the osmatches to potential_oss | modules/nmap/avain_nmap.py | add_direct_oss | RE4CT10N/avain | 51 | python | def add_direct_oss():
nonlocal parsed_host, potential_oss
if ('osmatches' in parsed_host):
for osmatch in parsed_host['osmatches']:
if ('osclasses' in osmatch):
for osclass in osmatch['osclasses']:
name = ''
if ('vendor' in osclass):
name += (osclass['vendor'] + ' ')
if ('osfamily' in osclass):
name += (osclass['osfamily'] + ' ')
if ('osgen' in osclass):
name += osclass['osgen']
name = name.strip()
if osclass.get('cpes', []):
for cpe in osclass['cpes']:
store_os = True
replace_accuracy = 0
if potential_oss:
for (i, pot_os) in enumerate(potential_oss):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
store_os = False
if any((((pot_cpe in cpe) and (not (cpe == pot_cpe))) for pot_cpe in pot_os['cpes'])):
store_os = True
if (int(pot_os['accuracy']) > int(replace_accuracy)):
replace_accuracy = pot_os['accuracy']
del potential_oss[i]
if store_os:
accuracy = str(max([int(osclass['accuracy']), int(replace_accuracy)]))
potential_oss.append({'name': name, 'cpes': osclass['cpes'], 'accuracy': accuracy, 'type': osclass.get('type', '')})
break
elif (not any(((name in pot_os['name']) for pot_os in potential_oss))):
potential_oss.append({'name': name, 'cpes': [], 'accuracy': osclass['accuracy'], 'type': osclass.get('type', '')})
nonlocal parsed_host, potential_oss
if ('osmatches' in parsed_host):
for osmatch in parsed_host['osmatches']:
if ('osclasses' in osmatch):
for osclass in osmatch['osclasses']:
name = ''
if ('vendor' in osclass):
name += (osclass['vendor'] + ' ')
if ('osfamily' in osclass):
name += (osclass['osfamily'] + ' ')
if ('osgen' in osclass):
name += osclass['osgen']
name = name.strip()
if osclass.get('cpes', []):
for cpe in osclass['cpes']:
store_os = True
replace_accuracy = 0
if potential_oss:
for (i, pot_os) in enumerate(potential_oss):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
store_os = False
if any((((pot_cpe in cpe) and (not (cpe == pot_cpe))) for pot_cpe in pot_os['cpes'])):
store_os = True
if (int(pot_os['accuracy']) > int(replace_accuracy)):
replace_accuracy = pot_os['accuracy']
del potential_oss[i]
if store_os:
accuracy = str(max([int(osclass['accuracy']), int(replace_accuracy)]))
potential_oss.append({'name': name, 'cpes': osclass['cpes'], 'accuracy': accuracy, 'type': osclass.get('type', '')})
break
elif (not any(((name in pot_os['name']) for pot_os in potential_oss))):
potential_oss.append({'name': name, 'cpes': [], 'accuracy': osclass['accuracy'], 'type': osclass.get('type', '')})<|docstring|>Add the OSs found in the osmatches to potential_oss<|endoftext|>
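The replace-the-vaguer-entry rule above reduces to a substring test on CPEs; a toy check (CPE strings invented):

old_cpe = 'cpe:/o:linux:linux_kernel'        # already queued, vaguer
new_cpe = 'cpe:/o:linux:linux_kernel:4.15'   # newly matched, more specific
print((old_cpe in new_cpe) and (old_cpe != new_cpe))  # True -> old entry replaced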
b4d85063b7ef7e693182a10803aeeef95d0a0ae42bfd70b21438284af680e4eb | def add_potential_oss_from_service(dict_key: str):
'\n Evaluate nmap Service Info OS information and append result to potential OSs\n\n :param dict_key: the key that identifies the service field within a host dict\n '
nonlocal parsed_host, potential_oss
added_in_service = set()
if (dict_key in parsed_host):
for service_elem in parsed_host[dict_key]:
found_supstring = False
if ('cpe' in service_elem):
service_elem['cpes'] = [service_elem['cpe']]
if ('cpes' in service_elem):
for cpe in service_elem['cpes']:
for pot_os in potential_oss:
if ('cpes' in pot_os):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
found_supstring = True
break
if found_supstring:
break
replaced_os = False
potential_os_cpy = copy.deepcopy(potential_oss)
for (i, pot_os) in enumerate(potential_os_cpy):
pot_os_cmp = pot_os['name'].replace(' ', '').lower()
service_os_cmp = service_elem['name'].replace(' ', '').lower()
if ((pot_os_cmp != service_os_cmp) and (pot_os_cmp in service_os_cmp)):
del potential_oss[i]
new_pot_os = {'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if ('cpes' in service_elem):
new_pot_os['cpes'] = service_elem['cpes']
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
elif (('cpes' in service_elem) and ('cpes' in pot_os)):
for cpe in service_elem['cpes']:
if any(((pot_cpe in cpe) for pot_cpe in pot_os['cpes'])):
del potential_oss[i]
new_pot_os = {'name': service_elem['name'], 'cpes': service_elem['cpes'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
if ((not found_supstring) and (not replaced_os) and (not (service_elem['name'] in added_in_service))):
potential_oss.append({'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')})
added_in_service.add(service_elem['name'])
if ('cpes' in service_elem):
potential_oss[(- 1)]['cpes'] = service_elem['cpes'] | Evaluate nmap Service Info OS information and append result to potential OSs
:param dict_key: the key that identifies the service field within a host dict | modules/nmap/avain_nmap.py | add_potential_oss_from_service | RE4CT10N/avain | 51 | python | def add_potential_oss_from_service(dict_key: str):
'\n Evaluate nmap Service Info OS information and append result to potential OSs\n\n :param dict_key: the key that identifies the service field within a host dict\n '
nonlocal parsed_host, potential_oss
added_in_service = set()
if (dict_key in parsed_host):
for service_elem in parsed_host[dict_key]:
found_supstring = False
if ('cpe' in service_elem):
service_elem['cpes'] = [service_elem['cpe']]
if ('cpes' in service_elem):
for cpe in service_elem['cpes']:
for pot_os in potential_oss:
if ('cpes' in pot_os):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
found_supstring = True
break
if found_supstring:
break
replaced_os = False
potential_os_cpy = copy.deepcopy(potential_oss)
for (i, pot_os) in enumerate(potential_os_cpy):
pot_os_cmp = pot_os['name'].replace(' ', '').lower()
service_os_cmp = service_elem['name'].replace(' ', '').lower()
if ((pot_os_cmp != service_os_cmp) and (pot_os_cmp in service_os_cmp)):
del potential_oss[i]
new_pot_os = {'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if ('cpes' in service_elem):
new_pot_os['cpes'] = service_elem['cpes']
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
elif (('cpes' in service_elem) and ('cpes' in pot_os)):
for cpe in service_elem['cpes']:
if any(((pot_cpe in cpe) for pot_cpe in pot_os['cpes'])):
del potential_oss[i]
new_pot_os = {'name': service_elem['name'], 'cpes': service_elem['cpes'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
if ((not found_supstring) and (not replaced_os) and (not (service_elem['name'] in added_in_service))):
potential_oss.append({'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')})
added_in_service.add(service_elem['name'])
if ('cpes' in service_elem):
potential_oss[(- 1)]['cpes'] = service_elem['cpes'] | def add_potential_oss_from_service(dict_key: str):
'\n Evaluate nmap Service Info OS information and append result to potential OSs\n\n :param dict_key: the key that identifies the service field within a host dict\n '
nonlocal parsed_host, potential_oss
added_in_service = set()
if (dict_key in parsed_host):
for service_elem in parsed_host[dict_key]:
found_supstring = False
if ('cpe' in service_elem):
service_elem['cpes'] = [service_elem['cpe']]
if ('cpes' in service_elem):
for cpe in service_elem['cpes']:
for pot_os in potential_oss:
if ('cpes' in pot_os):
if any(((cpe in pot_cpe) for pot_cpe in pot_os['cpes'])):
found_supstring = True
break
if found_supstring:
break
replaced_os = False
potential_os_cpy = copy.deepcopy(potential_oss)
for (i, pot_os) in enumerate(potential_os_cpy):
pot_os_cmp = pot_os['name'].replace(' ', '').lower()
service_os_cmp = service_elem['name'].replace(' ', '').lower()
if ((pot_os_cmp != service_os_cmp) and (pot_os_cmp in service_os_cmp)):
del potential_oss[i]
new_pot_os = {'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if ('cpes' in service_elem):
new_pot_os['cpes'] = service_elem['cpes']
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
elif (('cpes' in service_elem) and ('cpes' in pot_os)):
for cpe in service_elem['cpes']:
if any(((pot_cpe in cpe) for pot_cpe in pot_os['cpes'])):
del potential_oss[i]
new_pot_os = {'name': service_elem['name'], 'cpes': service_elem['cpes'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')}
if (not replaced_os):
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
if ((not found_supstring) and (not replaced_os) and (not (service_elem['name'] in added_in_service))):
potential_oss.append({'name': service_elem['name'], 'accuracy': '100', 'type': service_elem.get('devicetype', '')})
added_in_service.add(service_elem['name'])
if ('cpes' in service_elem):
potential_oss[(- 1)]['cpes'] = service_elem['cpes']<|docstring|>Evaluate nmap Service Info OS information and append result to potential OSs
:param dict_key: the key that identifies the service field within a host dict<|endoftext|> |
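The name-comparison branch above normalizes both strings before the substring test; a toy check with invented names:

pot_os_cmp = 'Linux'.replace(' ', '').lower()
service_os_cmp = 'Linux 4.15'.replace(' ', '').lower()
# unequal, and the queued name is a substring of the service name -> replace it
print(pot_os_cmp != service_os_cmp and pot_os_cmp in service_os_cmp)  # True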
0e95d25480a474cdcae6aec6f04d8eadc4a30f4520665ba265d361d0d8015957 | def adjust_ports(protocol: str):
'Adjust the information of all ports using the specified transport protocol'
nonlocal host
for (_, port) in host[protocol].items():
(product, name) = (port.get('product', ''), port.get('name', ''))
version = port.get('version', '')
keys = {'product', 'name', 'version'}
for k in keys:
if (k in port):
del port[k]
new_name = product
if version:
version = version.strip()
new_name += (' ' + version)
if re.match('^(\\d+\\.)*\\d+:?\\w*$', version):
cpes = port.get('cpes', [])
if ((len(cpes) == 1) and (len(cpes[0].split(':')) < 5)):
port['cpes'] = [((cpes[0] + ':') + version)]
port['name'] = new_name
port['service'] = name | Adjust the information of all ports using the specified transport protocol | modules/nmap/avain_nmap.py | adjust_ports | RE4CT10N/avain | 51 | python | def adjust_ports(protocol: str):
nonlocal host
for (_, port) in host[protocol].items():
(product, name) = (port.get('product', ''), port.get('name', ''))
version = port.get('version', '')
keys = {'product', 'name', 'version'}
for k in keys:
if (k in port):
del port[k]
new_name = product
if version:
version = version.strip()
new_name += (' ' + version)
if re.match('^(\\d+\\.)*\\d+:?\\w*$', version):
cpes = port.get('cpes', [])
if ((len(cpes) == 1) and (len(cpes[0].split(':')) < 5)):
port['cpes'] = [((cpes[0] + ':') + version)]
port['name'] = new_name
port['service'] = name | def adjust_ports(protocol: str):
nonlocal host
for (_, port) in host[protocol].items():
(product, name) = (port.get('product', ''), port.get('name', ''))
version = port.get('version', '')
keys = {'product', 'name', 'version'}
for k in keys:
if (k in port):
del port[k]
new_name = product
if version:
version = version.strip()
new_name += (' ' + version)
if re.match('^(\\d+\\.)*\\d+:?\\w*$', version):
cpes = port.get('cpes', [])
if ((len(cpes) == 1) and (len(cpes[0].split(':')) < 5)):
port['cpes'] = [((cpes[0] + ':') + version)]
port['name'] = new_name
port['service'] = name<|docstring|>Adjust the information of all ports using the specified transport protocol<|endoftext|> |
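A hedged before/after sketch of adjust_ports on a single port entry (values invented):

import re

port = {'product': 'OpenSSH', 'name': 'ssh', 'version': '7.6p1',
        'cpes': ['cpe:/a:openbsd:openssh']}
print(re.match('^(\\d+\\.)*\\d+:?\\w*$', port['version']) is not None)  # True
# adjust_ports would rewrite this entry to:
# {'cpes': ['cpe:/a:openbsd:openssh:7.6p1'], 'name': 'OpenSSH 7.6p1', 'service': 'ssh'}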
7f66190bab0aaa2bb75ee31ca48a04facf3556232e841bbb20bd347099a12cbf | def parseCmd():
'Parse command line arguments\n\n Returns:\n dictionary: Dictionary with arguments\n '
parser = argparse.ArgumentParser(description='')
parser.add_argument('--anno', type=str, help='')
parser.add_argument('--pseudo', type=str, help='')
parser.add_argument('--pred', type=str, help='')
parser.add_argument('--mode', type=str, help='')
return parser.parse_args() | Parse command line arguments
Returns:
dictionary: Dictionary with arguments | bin/dev/f1_score.py | parseCmd | LarsGab/PrEvCo | 0 | python | def parseCmd():
'Parse command line arguments\n\n Returns:\n dictionary: Dictionary with arguments\n '
parser = argparse.ArgumentParser(description='')
parser.add_argument('--anno', type=str, help='')
parser.add_argument('--pseudo', type=str, help='')
parser.add_argument('--pred', type=str, help='')
parser.add_argument('--mode', type=str, help='')
return parser.parse_args() | def parseCmd():
'Parse command line arguments\n\n Returns:\n dictionary: Dictionary with arguments\n '
parser = argparse.ArgumentParser(description='')
parser.add_argument('--anno', type=str, help='')
parser.add_argument('--pseudo', type=str, help='')
parser.add_argument('--pred', type=str, help='')
parser.add_argument('--mode', type=str, help='')
return parser.parse_args()<|docstring|>Parse command line arguments
Returns:
dictionary: Dictionary with arguments<|endoftext|> |
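A standalone usage sketch of the same parser; the file names below are placeholders, not taken from the repo:

import argparse

parser = argparse.ArgumentParser(description='')
parser.add_argument('--anno', type=str, help='')
parser.add_argument('--mode', type=str, help='')
args = parser.parse_args(['--anno', 'annotation.gtf', '--mode', 'gene'])
print(args.anno, args.mode)  # annotation.gtf gene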
4067245ecd996adba274f28b8beb90fcefc40e01e65abeb7962fb26728ba2bd7 | def get(self):
'List all layers'
layers = layerManager.getLayers()
defs = {'name': globalconfig['name'], 'version': globalconfig['version'], 'layers': layers}
return jsonify(defs) | List all layers | src/layers.py | get | c-martinez/regis-backend | 0 | python | def get(self):
layers = layerManager.getLayers()
defs = {'name': globalconfig['name'], 'version': globalconfig['version'], 'layers': layers}
return jsonify(defs) | def get(self):
layers = layerManager.getLayers()
defs = {'name': globalconfig['name'], 'version': globalconfig['version'], 'layers': layers}
return jsonify(defs)<|docstring|>List all layers<|endoftext|> |
d6c25418c75b7dd321595cc31226f9bf57543dd8c5d09067e9541e92058155ce | @ns.expect(parser, validate=True)
def post(self):
'Add new layer'
args = parser.parse_args()
myFile = args['file']
del args['file']
layer = args
if myFile:
layer['url'] = davHandler.uploadFile(myFile)
savedLayer = layerManager.saveLayer(layer)
defs = {'status': 'ok', 'layer': savedLayer}
return jsonify(defs) | Add new layer | src/layers.py | post | c-martinez/regis-backend | 0 | python | @ns.expect(parser, validate=True)
def post(self):
args = parser.parse_args()
myFile = args['file']
del args['file']
layer = args
if myFile:
layer['url'] = davHandler.uploadFile(myFile)
savedLayer = layerManager.saveLayer(layer)
defs = {'status': 'ok', 'layer': savedLayer}
return jsonify(defs) | @ns.expect(parser, validate=True)
def post(self):
args = parser.parse_args()
myFile = args['file']
del args['file']
layer = args
if myFile:
layer['url'] = davHandler.uploadFile(myFile)
savedLayer = layerManager.saveLayer(layer)
defs = {'status': 'ok', 'layer': savedLayer}
return jsonify(defs)<|docstring|>Add new layer<|endoftext|> |
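A hypothetical client call against this resource; the URL, route, and field names below are assumptions, not taken from the repo:

import requests

resp = requests.post('http://localhost:5000/layers',          # assumed route
                     data={'name': 'parcels', 'type': 'geojson'},
                     files={'file': open('parcels.geojson', 'rb')})
print(resp.json())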
f13a078604eaf95341a06f520b66da22326cf3c0fdfc5ca5862550630ba40f34 | @ns.expect(infoParser)
def get(self):
'List columns for geojson file'
args = infoParser.parse_args()
id = args['id']
layer = layerManager.getLayer(id)
if layer:
if (layer['type'] == 'geojson'):
url = layer['url']
geojson = requests.get(url).json()
propSet = [set(feature['properties'].keys()) for feature in geojson['features']]
properties = list(set(chain.from_iterable(propSet)))
response = {'status': 'ok', 'columns': properties}
else:
response = {'status': 'error', 'message': 'No information retrievable from this type of layer'}
else:
response = {'status': 'error', 'message': 'Layer not found'}
return jsonify(response) | List columns for geojson file | src/layers.py | get | c-martinez/regis-backend | 0 | python | @ns.expect(infoParser)
def get(self):
args = infoParser.parse_args()
id = args['id']
layer = layerManager.getLayer(id)
if layer:
if (layer['type'] == 'geojson'):
url = layer['url']
geojson = requests.get(url).json()
propSet = [set(feature['properties'].keys()) for feature in geojson['features']]
properties = list(set(chain.from_iterable(propSet)))
response = {'status': 'ok', 'columns': properties}
else:
response = {'status': 'error', 'message': 'No information retrievable from this type of layer'}
else:
response = {'status': 'error', 'message': 'Layer not found'}
return jsonify(response) | @ns.expect(infoParser)
def get(self):
args = infoParser.parse_args()
id = args['id']
layer = layerManager.getLayer(id)
if layer:
if (layer['type'] == 'geojson'):
url = layer['url']
geojson = requests.get(url).json()
propSet = [set(feature['properties'].keys()) for feature in geojson['features']]
properties = list(set(chain.from_iterable(propSet)))
response = {'status': 'ok', 'columns': properties}
else:
response = {'status': 'error', 'message': 'No information retrievable from this type of layer'}
else:
response = {'status': 'error', 'message': 'Layer not found'}
return jsonify(response)<|docstring|>List columns for geojson file<|endoftext|> |
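The column-union idiom above is worth isolating; a runnable check on a made-up GeoJSON payload:

from itertools import chain

geojson = {'features': [{'properties': {'id': 1, 'name': 'A'}},
                        {'properties': {'id': 2, 'area': 3.5}}]}
prop_sets = [set(f['properties'].keys()) for f in geojson['features']]
print(sorted(set(chain.from_iterable(prop_sets))))  # ['area', 'id', 'name']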
dda7b38eef432a09183c92063cd9d9551f87a26a3ba38ad3373f603b32ccffc7 | def levelOrder(self, root):
'\n :type root: Node\n :rtype: List[List[int]]\n '
if root:
self.visit([root], 1)
return self.result | :type root: Node
:rtype: List[List[int]] | Week 2/id_165/LeetCode_429_165.py | levelOrder | larryRishi/algorithm004-05 | 1 | python | def levelOrder(self, root):
'\n :type root: Node\n :rtype: List[List[int]]\n '
if root:
self.visit([root], 1)
return self.result | def levelOrder(self, root):
'\n :type root: Node\n :rtype: List[List[int]]\n '
if root:
self.visit([root], 1)
return self.result<|docstring|>:type root: Node
:rtype: List[List[int]]<|endoftext|> |
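The record omits the visit helper and the result attribute it relies on; a plausible reconstruction (an assumption, not the author's code) for the N-ary tree level-order problem, assuming the LeetCode Node type with .val and .children:

class Solution:
    def __init__(self):
        self.result = []

    def levelOrder(self, root):
        if root:
            self.visit([root], 1)
        return self.result

    def visit(self, nodes, level):
        # record one level of node values, then recurse on the next layer
        if not nodes:
            return
        self.result.append([n.val for n in nodes])
        children = [c for n in nodes for c in (n.children or [])]
        self.visit(children, level + 1)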
539bcd4425f4f7ae43daa9fa157d7a280f2a93f130c734d59e88086593cfe7ef | def translate_sequence(rna_sequence, genetic_code):
"Translates a sequence of RNA into a sequence of amino acids.\n\n Translates `rna_sequence` into string of amino acids, according to the\n `genetic_code` given as a dict. Translation begins at the first position of\n the `rna_sequence` and continues until the first stop codon is encountered\n or the end of `rna_sequence` is reached.\n\n If `rna_sequence` is less than 3 bases long, or starts with a stop codon,\n an empty string is returned.\n\n Parameters\n ----------\n rna_sequence : str\n A string representing an RNA sequence (upper or lower-case).\n\n genetic_code : dict\n A dictionary mapping all 64 codons (strings of three RNA bases) to\n amino acids (string of single-letter amino acid abbreviation). Stop\n codons should be represented with asterisks ('*').\n\n Returns\n -------\n str\n A string of the translated amino acids.\n "
RNA_list = rna_sequence.upper()
while True:
if (len(RNA_list) > 2):
protein_seq = ''
list = (len(RNA_list) % 3)
for i in range(0, (len(RNA_list) - list), 3):
protein = RNA_list[i:(i + 3)]
codon = genetic_code[protein]
if (codon == '*'):
break
protein_seq += codon
return protein_seq
else:
return ''
print('Base length is less than 3') | Translates a sequence of RNA into a sequence of amino acids.
Translates `rna_sequence` into a string of amino acids, according to the
`genetic_code` given as a dict. Translation begins at the first position of
the `rna_sequence` and continues until the first stop codon is encountered
or the end of `rna_sequence` is reached.
If `rna_sequence` is less than 3 bases long, or starts with a stop codon,
an empty string is returned.
Parameters
----------
rna_sequence : str
A string representing an RNA sequence (upper or lower-case).
genetic_code : dict
A dictionary mapping all 64 codons (strings of three RNA bases) to
amino acids (string of single-letter amino acid abbreviation). Stop
codons should be represented with asterisks ('*').
Returns
-------
str
A string of the translated amino acids. | translate.py | translate_sequence | emw0083/python-translation-project | 0 | python | def translate_sequence(rna_sequence, genetic_code):
"Translates a sequence of RNA into a sequence of amino acids.\n\n Translates `rna_sequence` into string of amino acids, according to the\n `genetic_code` given as a dict. Translation begins at the first position of\n the `rna_sequence` and continues until the first stop codon is encountered\n or the end of `rna_sequence` is reached.\n\n If `rna_sequence` is less than 3 bases long, or starts with a stop codon,\n an empty string is returned.\n\n Parameters\n ----------\n rna_sequence : str\n A string representing an RNA sequence (upper or lower-case).\n\n genetic_code : dict\n A dictionary mapping all 64 codons (strings of three RNA bases) to\n amino acids (string of single-letter amino acid abbreviation). Stop\n codons should be represented with asterisks ('*').\n\n Returns\n -------\n str\n A string of the translated amino acids.\n "
RNA_list = rna_sequence.upper()
while True:
if (len(RNA_list) > 2):
protein_seq = ''
list = (len(RNA_list) % 3)
for i in range(0, (len(RNA_list) - list), 3):
protein = RNA_list[i:(i + 3)]
codon = genetic_code[protein]
if (codon == '*'):
break
protein_seq += codon
return protein_seq
else:
return ''
print('Base length is less than 3') | def translate_sequence(rna_sequence, genetic_code):
"Translates a sequence of RNA into a sequence of amino acids.\n\n Translates `rna_sequence` into string of amino acids, according to the\n `genetic_code` given as a dict. Translation begins at the first position of\n the `rna_sequence` and continues until the first stop codon is encountered\n or the end of `rna_sequence` is reached.\n\n If `rna_sequence` is less than 3 bases long, or starts with a stop codon,\n an empty string is returned.\n\n Parameters\n ----------\n rna_sequence : str\n A string representing an RNA sequence (upper or lower-case).\n\n genetic_code : dict\n A dictionary mapping all 64 codons (strings of three RNA bases) to\n amino acids (string of single-letter amino acid abbreviation). Stop\n codons should be represented with asterisks ('*').\n\n Returns\n -------\n str\n A string of the translated amino acids.\n "
RNA_list = rna_sequence.upper()
while True:
if (len(RNA_list) > 2):
protein_seq = ''
list = (len(RNA_list) % 3)
for i in range(0, (len(RNA_list) - list), 3):
protein = RNA_list[i:(i + 3)]
codon = genetic_code[protein]
if (codon == '*'):
break
protein_seq += codon
return protein_seq
else:
return ''
print('Base length is less than 3')<|docstring|>Translates a sequence of RNA into a sequence of amino acids.
Translates `rna_sequence` into a string of amino acids, according to the
`genetic_code` given as a dict. Translation begins at the first position of
the `rna_sequence` and continues until the first stop codon is encountered
or the end of `rna_sequence` is reached.
If `rna_sequence` is less than 3 bases long, or starts with a stop codon,
an empty string is returned.
Parameters
----------
rna_sequence : str
A string representing an RNA sequence (upper or lower-case).
genetic_code : dict
A dictionary mapping all 64 codons (strings of three RNA bases) to
amino acids (string of single-letter amino acid abbreviation). Stop
codons should be represented with asterisks ('*').
Returns
-------
str
A string of the translated amino acids.<|endoftext|> |
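A quick call with a three-codon toy code (a tiny illustrative subset, not the full 64-codon table):

toy_code = {'AUG': 'M', 'UUU': 'F', 'UAA': '*'}
print(translate_sequence('AUGUUUUAA', toy_code))  # MF (stops at UAA)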
42441b60c547a46952059c61cd82d6415820ded445ee6d9532a1131dcc49aa87 | def get_all_translations(rna_sequence, genetic_code):
"Get a list of all amino acid sequences encoded by an RNA sequence.\n\n All three reading frames of `rna_sequence` are scanned from 'left' to\n 'right', and the generation of a sequence of amino acids is started\n whenever the start codon 'AUG' is found. The `rna_sequence` is assumed to\n be in the correct orientation (i.e., no reverse and/or complement of the\n sequence is explored).\n\n The function returns a list of all possible amino acid sequences that\n are encoded by `rna_sequence`.\n\n If no amino acids can be translated from `rna_sequence`, an empty list is\n returned.\n\n Parameters\n ----------\n rna_sequence : str\n A string representing an RNA sequence (upper or lower-case).\n\n genetic_code : dict\n A dictionary mapping all 64 codons (strings of three RNA bases) to\n amino acids (string of single-letter amino acid abbreviation). Stop\n codons should be represented with asterisks ('*').\n\n Returns\n -------\n list\n A list of strings; each string is an sequence of amino acids encoded by\n `rna_sequence`.\n "
RNA_list = rna_sequence.upper()
total_bases = len(RNA_list)
index = (total_bases - 3)
protein_list = []
for i in range((index + 1)):
end = (i + 3)
seq = RNA_list[i:end]
if (seq == 'AUG'):
protein = translate_sequence(RNA_list[i:], genetic_code)
protein_list.append(protein)
return protein_list | Get a list of all amino acid sequences encoded by an RNA sequence.
All three reading frames of `rna_sequence` are scanned from 'left' to
'right', and the generation of a sequence of amino acids is started
whenever the start codon 'AUG' is found. The `rna_sequence` is assumed to
be in the correct orientation (i.e., no reverse and/or complement of the
sequence is explored).
The function returns a list of all possible amino acid sequences that
are encoded by `rna_sequence`.
If no amino acids can be translated from `rna_sequence`, an empty list is
returned.
Parameters
----------
rna_sequence : str
A string representing an RNA sequence (upper or lower-case).
genetic_code : dict
A dictionary mapping all 64 codons (strings of three RNA bases) to
amino acids (string of single-letter amino acid abbreviation). Stop
codons should be represented with asterisks ('*').
Returns
-------
list
A list of strings; each string is a sequence of amino acids encoded by
`rna_sequence`. | translate.py | get_all_translations | emw0083/python-translation-project | 0 | python | def get_all_translations(rna_sequence, genetic_code):
"Get a list of all amino acid sequences encoded by an RNA sequence.\n\n All three reading frames of `rna_sequence` are scanned from 'left' to\n 'right', and the generation of a sequence of amino acids is started\n whenever the start codon 'AUG' is found. The `rna_sequence` is assumed to\n be in the correct orientation (i.e., no reverse and/or complement of the\n sequence is explored).\n\n The function returns a list of all possible amino acid sequences that\n are encoded by `rna_sequence`.\n\n If no amino acids can be translated from `rna_sequence`, an empty list is\n returned.\n\n Parameters\n ----------\n rna_sequence : str\n A string representing an RNA sequence (upper or lower-case).\n\n genetic_code : dict\n A dictionary mapping all 64 codons (strings of three RNA bases) to\n amino acids (string of single-letter amino acid abbreviation). Stop\n codons should be represented with asterisks ('*').\n\n Returns\n -------\n list\n A list of strings; each string is an sequence of amino acids encoded by\n `rna_sequence`.\n "
RNA_list = rna_sequence.upper()
total_bases = len(RNA_list)
index = (total_bases - 3)
protein_list = []
for i in range((index + 1)):
end = (i + 3)
seq = RNA_list[i:end]
if (seq == 'AUG'):
protein = translate_sequence(RNA_list[i:], genetic_code)
protein_list.append(protein)
return protein_list | def get_all_translations(rna_sequence, genetic_code):
"Get a list of all amino acid sequences encoded by an RNA sequence.\n\n All three reading frames of `rna_sequence` are scanned from 'left' to\n 'right', and the generation of a sequence of amino acids is started\n whenever the start codon 'AUG' is found. The `rna_sequence` is assumed to\n be in the correct orientation (i.e., no reverse and/or complement of the\n sequence is explored).\n\n The function returns a list of all possible amino acid sequences that\n are encoded by `rna_sequence`.\n\n If no amino acids can be translated from `rna_sequence`, an empty list is\n returned.\n\n Parameters\n ----------\n rna_sequence : str\n A string representing an RNA sequence (upper or lower-case).\n\n genetic_code : dict\n A dictionary mapping all 64 codons (strings of three RNA bases) to\n amino acids (string of single-letter amino acid abbreviation). Stop\n codons should be represented with asterisks ('*').\n\n Returns\n -------\n list\n A list of strings; each string is an sequence of amino acids encoded by\n `rna_sequence`.\n "
RNA_list = rna_sequence.upper()
total_bases = len(RNA_list)
index = (total_bases - 3)
protein_list = []
for i in range((index + 1)):
end = (i + 3)
seq = RNA_list[i:end]
if (seq == 'AUG'):
protein = translate_sequence(RNA_list[i:], genetic_code)
protein_list.append(protein)
return protein_list<|docstring|>Get a list of all amino acid sequences encoded by an RNA sequence.
All three reading frames of `rna_sequence` are scanned from 'left' to
'right', and the generation of a sequence of amino acids is started
whenever the start codon 'AUG' is found. The `rna_sequence` is assumed to
be in the correct orientation (i.e., no reverse and/or complement of the
sequence is explored).
The function returns a list of all possible amino acid sequences that
are encoded by `rna_sequence`.
If no amino acids can be translated from `rna_sequence`, an empty list is
returned.
Parameters
----------
rna_sequence : str
A string representing an RNA sequence (upper or lower-case).
genetic_code : dict
A dictionary mapping all 64 codons (strings of three RNA bases) to
amino acids (string of single-letter amino acid abbreviation). Stop
codons should be represented with asterisks ('*').
Returns
-------
list
A list of strings; each string is a sequence of amino acids encoded by
`rna_sequence`.<|endoftext|> |
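Each 'AUG' position opens a new translation; with the same illustrative toy code subset:

toy_code = {'AUG': 'M', 'UUU': 'F', 'UAA': '*'}
print(get_all_translations('AUGUUUUAA', toy_code))  # ['MF']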
5c840d98368ddb0c3d14e8b157654499d2df63b896380788b25ae1cdabe502cf | def get_reverse(sequence):
"Reverse orientation of `sequence`.\n\n Returns a string with `sequence` in the reverse order.\n\n If `sequence` is empty, an empty string is returned.\n\n Examples\n --------\n >>> get_reverse('AUGC')\n 'CGUA'\n "
reverse = sequence.upper()
return reverse[::(- 1)] | Reverse orientation of `sequence`.
Returns a string with `sequence` in the reverse order.
If `sequence` is empty, an empty string is returned.
Examples
--------
>>> get_reverse('AUGC')
'CGUA' | translate.py | get_reverse | emw0083/python-translation-project | 0 | python | def get_reverse(sequence):
"Reverse orientation of `sequence`.\n\n Returns a string with `sequence` in the reverse order.\n\n If `sequence` is empty, an empty string is returned.\n\n Examples\n --------\n >>> get_reverse('AUGC')\n 'CGUA'\n "
reverse = sequence.upper()
return reverse[::(- 1)] | def get_reverse(sequence):
"Reverse orientation of `sequence`.\n\n Returns a string with `sequence` in the reverse order.\n\n If `sequence` is empty, an empty string is returned.\n\n Examples\n --------\n >>> get_reverse('AUGC')\n 'CGUA'\n "
reverse = sequence.upper()
return reverse[::(- 1)]<|docstring|>Reverse orientation of `sequence`.
Returns a string with `sequence` in the reverse order.
If `sequence` is empty, an empty string is returned.
Examples
--------
>>> get_reverse('AUGC')
'CGUA'<|endoftext|> |
a27b9552573a5bc3659ae7534d2dcb67a3a077ec812f407e4c0d4c3cf66f891f | def get_complement(sequence):
"Get the complement of a `sequence` of nucleotides.\n\n Returns a string with the complementary sequence of `sequence`.\n\n If `sequence` is empty, an empty string is returned.\n\n Examples\n --------\n >>> get_complement('AUGC')\n 'UACG'\n "
sequence_upper = sequence.upper()
comp = {'A': 'U', 'C': 'G', 'G': 'C', 'T': 'A', 'U': 'A'}
compSEQ = ''
for i in sequence_upper:
compSEQ += comp[i]
return compSEQ | Get the complement of a `sequence` of nucleotides.
Returns a string with the complementary sequence of `sequence`.
If `sequence` is empty, an empty string is returned.
Examples
--------
>>> get_complement('AUGC')
'UACG' | translate.py | get_complement | emw0083/python-translation-project | 0 | python | def get_complement(sequence):
"Get the complement of a `sequence` of nucleotides.\n\n Returns a string with the complementary sequence of `sequence`.\n\n If `sequence` is empty, an empty string is returned.\n\n Examples\n --------\n >>> get_complement('AUGC')\n 'UACG'\n "
sequence_upper = sequence.upper()
comp = {'A': 'U', 'C': 'G', 'G': 'C', 'T': 'A', 'U': 'A'}
compSEQ = ''
for i in sequence_upper:
compSEQ += comp[i]
return compSEQ | def get_complement(sequence):
"Get the complement of a `sequence` of nucleotides.\n\n Returns a string with the complementary sequence of `sequence`.\n\n If `sequence` is empty, an empty string is returned.\n\n Examples\n --------\n >>> get_complement('AUGC')\n 'UACG'\n "
sequence_upper = sequence.upper()
comp = {'A': 'U', 'C': 'G', 'G': 'C', 'T': 'A', 'U': 'A'}
compSEQ = ''
for i in sequence_upper:
compSEQ += comp[i]
return compSEQ<|docstring|>Get the complement of a `sequence` of nucleotides.
Returns a string with the complementary sequence of `sequence`.
If `sequence` is empty, an empty string is returned.
Examples
--------
>>> get_complement('AUGC')
'UACG'<|endoftext|> |
0c82599df3b640f842f23a415ce50ec3321af27ac0658e1c31e2c9f1b6498bc8 | def reverse_and_complement(sequence):
"Get the reversed and complemented form of a `sequence` of nucleotides.\n\n Returns a string that is the reversed and complemented sequence\n of `sequence`.\n\n If `sequence` is empty, an empty string is returned.\n\n Examples\n --------\n >>> reverse_and_complement('AUGC')\n 'GCAU'\n "
compSEQ = get_complement(sequence)
rev_comp_seq = get_reverse(compSEQ)
return rev_comp_seq | Get the reversed and complemented form of a `sequence` of nucleotides.
Returns a string that is the reversed and complemented sequence
of `sequence`.
If `sequence` is empty, an empty string is returned.
Examples
--------
>>> reverse_and_complement('AUGC')
'GCAU' | translate.py | reverse_and_complement | emw0083/python-translation-project | 0 | python | def reverse_and_complement(sequence):
"Get the reversed and complemented form of a `sequence` of nucleotides.\n\n Returns a string that is the reversed and complemented sequence\n of `sequence`.\n\n If `sequence` is empty, an empty string is returned.\n\n Examples\n --------\n >>> reverse_and_complement('AUGC')\n 'GCAU'\n "
compSEQ = get_complement(sequence)
rev_comp_seq = get_reverse(compSEQ)
return rev_comp_seq | def reverse_and_complement(sequence):
"Get the reversed and complemented form of a `sequence` of nucleotides.\n\n Returns a string that is the reversed and complemented sequence\n of `sequence`.\n\n If `sequence` is empty, an empty string is returned.\n\n Examples\n --------\n >>> reverse_and_complement('AUGC')\n 'GCAU'\n "
compSEQ = get_complement(sequence)
rev_comp_seq = get_reverse(compSEQ)
return rev_comp_seq<|docstring|>Get the reversed and complemented form of a `sequence` of nucleotides.
Returns a string that is the reversed and complemented sequence
of `sequence`.
If `sequence` is empty, an empty string is returned.
Examples
--------
>>> reverse_and_complement('AUGC')
'GCAU'<|endoftext|> |
6702cce4c0dda516176c479b424490154d0c6f70aff4528bc8716970b9a43e79 | def get_longest_peptide(rna_sequence, genetic_code):
"Get the longest peptide encoded by an RNA sequence.\n\n Explore six reading frames of `rna_sequence` (the three reading frames of\n `rna_sequence`, and the three reading frames of the reverse and complement\n of `rna_sequence`) and return (as a string) the longest sequence of amino\n acids that it encodes, according to the `genetic_code`.\n\n If no amino acids can be translated from `rna_sequence` nor its reverse and\n complement, an empty string is returned.\n\n Parameters\n ----------\n rna_sequence : str\n A string representing an RNA sequence (upper or lower-case).\n\n genetic_code : dict\n A dictionary mapping all 64 codons (strings of three RNA bases) to\n amino acids (string of single-letter amino acid abbreviation). Stop\n codons should be represented with asterisks ('*').\n\n Returns\n -------\n str\n A string of the longest sequence of amino acids encoded by\n `rna_sequence`.\n "
rna_sequence = rna_sequence.upper()
rna_sequence = rna_sequence.strip()
genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}
peptide = get_all_translations(rna_sequence=rna_sequence, genetic_code=genetic_code)
rev_comp = reverse_and_complement(sequence=rna_sequence)
rev_comp_peptide = get_all_translations(rna_sequence=rev_comp, genetic_code=genetic_code)
peptide += rev_comp_peptide
if (len(peptide) < 1):
return ''
max_len = (- 1)
for i in peptide:
if (len(i) > max_len):
max_len = len(i)
longest = str(i)
return longest | Get the longest peptide encoded by an RNA sequence.
Explore six reading frames of `rna_sequence` (the three reading frames of
`rna_sequence`, and the three reading frames of the reverse and complement
of `rna_sequence`) and return (as a string) the longest sequence of amino
acids that it encodes, according to the `genetic_code`.
If no amino acids can be translated from `rna_sequence` nor its reverse and
complement, an empty string is returned.
Parameters
----------
rna_sequence : str
A string representing an RNA sequence (upper or lower-case).
genetic_code : dict
A dictionary mapping all 64 codons (strings of three RNA bases) to
amino acids (string of single-letter amino acid abbreviation). Stop
codons should be represented with asterisks ('*').
Returns
-------
str
A string of the longest sequence of amino acids encoded by
`rna_sequence`. | translate.py | get_longest_peptide | emw0083/python-translation-project | 0 | python | def get_longest_peptide(rna_sequence, genetic_code):
"Get the longest peptide encoded by an RNA sequence.\n\n Explore six reading frames of `rna_sequence` (the three reading frames of\n `rna_sequence`, and the three reading frames of the reverse and complement\n of `rna_sequence`) and return (as a string) the longest sequence of amino\n acids that it encodes, according to the `genetic_code`.\n\n If no amino acids can be translated from `rna_sequence` nor its reverse and\n complement, an empty string is returned.\n\n Parameters\n ----------\n rna_sequence : str\n A string representing an RNA sequence (upper or lower-case).\n\n genetic_code : dict\n A dictionary mapping all 64 codons (strings of three RNA bases) to\n amino acids (string of single-letter amino acid abbreviation). Stop\n codons should be represented with asterisks ('*').\n\n Returns\n -------\n str\n A string of the longest sequence of amino acids encoded by\n `rna_sequence`.\n "
rna_sequence = rna_sequence.upper()
rna_sequence = rna_sequence.strip()
genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}
peptide = get_all_translations(rna_sequence=rna_sequence, genetic_code=genetic_code)
rev_comp = reverse_and_complement(sequence=rna_sequence)
rev_comp_peptide = get_all_translations(rna_sequence=rev_comp, genetic_code=genetic_code)
peptide += rev_comp_peptide
if (len(peptide) < 1):
return ''
max_len = (- 1)
for i in peptide:
if (len(i) > max_len):
max_len = len(i)
longest = str(i)
return longest | def get_longest_peptide(rna_sequence, genetic_code):
"Get the longest peptide encoded by an RNA sequence.\n\n Explore six reading frames of `rna_sequence` (the three reading frames of\n `rna_sequence`, and the three reading frames of the reverse and complement\n of `rna_sequence`) and return (as a string) the longest sequence of amino\n acids that it encodes, according to the `genetic_code`.\n\n If no amino acids can be translated from `rna_sequence` nor its reverse and\n complement, an empty string is returned.\n\n Parameters\n ----------\n rna_sequence : str\n A string representing an RNA sequence (upper or lower-case).\n\n genetic_code : dict\n A dictionary mapping all 64 codons (strings of three RNA bases) to\n amino acids (string of single-letter amino acid abbreviation). Stop\n codons should be represented with asterisks ('*').\n\n Returns\n -------\n str\n A string of the longest sequence of amino acids encoded by\n `rna_sequence`.\n "
rna_sequence = rna_sequence.upper()
rna_sequence = rna_sequence.strip()
genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}
peptide = get_all_translations(rna_sequence=rna_sequence, genetic_code=genetic_code)
rev_comp = reverse_and_complement(sequence=rna_sequence)
rev_comp_peptide = get_all_translations(rna_sequence=rev_comp, genetic_code=genetic_code)
peptide += rev_comp_peptide
if (len(peptide) < 1):
return ''
max_len = (- 1)
for i in peptide:
if (len(i) > max_len):
max_len = len(i)
longest = str(i)
return longest<|docstring|>Get the longest peptide encoded by an RNA sequence.
Explore six reading frames of `rna_sequence` (the three reading frames of
`rna_sequence`, and the three reading frames of the reverse and complement
of `rna_sequence`) and return (as a string) the longest sequence of amino
acids that it encodes, according to the `genetic_code`.
If no amino acids can be translated from `rna_sequence` nor its reverse and
complement, an empty string is returned.
Parameters
----------
rna_sequence : str
A string representing an RNA sequence (upper or lower-case).
genetic_code : dict
A dictionary mapping all 64 codons (strings of three RNA bases) to
amino acids (string of single-letter amino acid abbreviation). Stop
codons should be represented with asterisks ('*').
Returns
-------
str
A string of the longest sequence of amino acids encoded by
`rna_sequence`.<|endoftext|> |
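Note that the function body overwrites its genetic_code argument with a hard-coded table, so the parameter is effectively ignored; a usage sketch that relies on exactly that behavior:

rna = 'AUGUUUUAA'
print(get_longest_peptide(rna, genetic_code={}))  # MF -- internal table used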
4db797ee3188143184475145cb6a205a60e347eb3fc29361c396897e91dfe19f | def msv(sentences, original_indices, sent_representations):
'Reproduction of Yogatama et al. (2015).'
ranking = []
indices = []
bases = []
cluster_centroid = np.mean(sent_representations, axis=0)[(None, :)]
reprd_sentences = [sentences[i] for i in original_indices]
distances = cdist(sent_representations, cluster_centroid, metric='cosine')
index = np.argmax(distances)
sentence = reprd_sentences[index]
indices.append(index)
ranking.append((index, sentence))
base_vector = normalize(sent_representations[index][(:, np.newaxis)], axis=0).ravel()
bases.append(base_vector)
for i in range((len(reprd_sentences) - 1)):
if (i == 50):
break
print('Starting iteration {}'.format(i))
distances = np.array([distance_from_subspace(s, bases) for s in sent_representations])
distances[indices] = np.nan
index = np.nanargmax(distances)
sentence = reprd_sentences[index]
indices.append(index)
ranking.append((index, sentence))
base_vector = normalize(sent_representations[index][(:, np.newaxis)], axis=0).ravel()
bases.append(base_vector)
ranking = [(original_indices[i], s) for (i, s) in ranking]
return ranking | Reproduction of Yogatama et al. (2015). | sent_selectors/msv.py | msv | blendle/summblogcode | 8 | python | def msv(sentences, original_indices, sent_representations):
ranking = []
indices = []
bases = []
cluster_centroid = np.mean(sent_representations, axis=0)[(None, :)]
reprd_sentences = [sentences[i] for i in original_indices]
distances = cdist(sent_representations, cluster_centroid, metric='cosine')
index = np.argmax(distances)
sentence = reprd_sentences[index]
indices.append(index)
ranking.append((index, sentence))
base_vector = normalize(sent_representations[index][(:, np.newaxis)], axis=0).ravel()
bases.append(base_vector)
for i in range((len(reprd_sentences) - 1)):
if (i == 50):
break
print('Starting iteration {}'.format(i))
distances = np.array([distance_from_subspace(s, bases) for s in sent_representations])
distances[indices] = np.nan
index = np.nanargmax(distances)
sentence = reprd_sentences[index]
indices.append(index)
ranking.append((index, sentence))
base_vector = normalize(sent_representations[index][(:, np.newaxis)], axis=0).ravel()
bases.append(base_vector)
ranking = [(original_indices[i], s) for (i, s) in ranking]
return ranking | def msv(sentences, original_indices, sent_representations):
ranking = []
indices = []
bases = []
cluster_centroid = np.mean(sent_representations, axis=0)[(None, :)]
reprd_sentences = [sentences[i] for i in original_indices]
distances = cdist(sent_representations, cluster_centroid, metric='cosine')
index = np.argmax(distances)
sentence = reprd_sentences[index]
indices.append(index)
ranking.append((index, sentence))
base_vector = normalize(sent_representations[index][(:, np.newaxis)], axis=0).ravel()
bases.append(base_vector)
for i in range((len(reprd_sentences) - 1)):
if (i == 50):
break
print('Starting iteration {}'.format(i))
distances = np.array([distance_from_subspace(s, bases) for s in sent_representations])
distances[indices] = np.nan
index = np.nanargmax(distances)
sentence = reprd_sentences[index]
indices.append(index)
ranking.append((index, sentence))
base_vector = normalize(sent_representations[index][(:, np.newaxis)], axis=0).ravel()
bases.append(base_vector)
ranking = [(original_indices[i], s) for (i, s) in ranking]
return ranking<|docstring|>Reproduction of Yogatama et al. (2015).<|endoftext|> |
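The first pick is simply the vector with the largest cosine distance from the centroid; a toy check (vectors invented):

import numpy as np
from scipy.spatial.distance import cdist

reps = np.array([[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]])
centroid = reps.mean(axis=0)[None, :]
print(int(np.argmax(cdist(reps, centroid, metric='cosine'))))  # 0 (tie -> first index)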
9f46cc2c7342498b0bc3f66bff2e18c48369944e751eca01e05ee5661bf258aa | def distance_from_subspace(sentence_repr, bases):
'Computation of distance from subspace as described in Yogatama et al. (2015).'
summed_projections = np.sum([projection_onto_base(sentence_repr, base) for base in bases], axis=0)
distance = np.linalg.norm((sentence_repr - summed_projections))
return distance | Computation of distance from subspace as described in Yogatama et al. (2015). | sent_selectors/msv.py | distance_from_subspace | blendle/summblogcode | 8 | python | def distance_from_subspace(sentence_repr, bases):
summed_projections = np.sum([projection_onto_base(sentence_repr, base) for base in bases], axis=0)
distance = np.linalg.norm((sentence_repr - summed_projections))
return distance | def distance_from_subspace(sentence_repr, bases):
summed_projections = np.sum([projection_onto_base(sentence_repr, base) for base in bases], axis=0)
distance = np.linalg.norm((sentence_repr - summed_projections))
return distance<|docstring|>Computation of distance from subspace as described in Yogatama et al. (2015).<|endoftext|> |
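distance_from_subspace calls a projection_onto_base helper that is not included in these records. One plausible definition, assuming each base vector has already been unit-normalized (which msv guarantees by calling normalize before appending to bases):

import numpy as np

def projection_onto_base(sentence_repr, base):
    # Orthogonal projection of the sentence vector onto a unit-length base:
    # proj_b(s) = (s . b) * b, valid because ||b|| == 1 after normalization.
    return np.dot(sentence_repr, base) * base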
cb91b59c27a2662d0fb51da2562c488149a3124944d166cbdf4fd097ba3babff | def get_app_sign_v2(self, bucket, fileid, expired=0, userid='0'):
" GET V2 SIGN USE FILEID \n copy and del operation must have fileid and set expired=0\n\n Args:\n bucket: user bucket\n fileid: user defined fileid, not urlencoded\n expired: expire time\n userid: user id, pls ignore or set to '0'\n "
if isinstance(fileid, unicode):
fileid = fileid.encode('utf-8')
if ((not self._secret_id) or (not self._secret_key)):
return self.AUTH_SECRET_ID_KEY_ERROR
app_info = conf.get_app_info()
appid = app_info['appid']
puserid = ''
if (userid != ''):
if (len(userid) > 64):
return self.AUTH_URL_FORMAT_ERROR
puserid = userid
now = int(time.time())
rdm = random.randint(0, 999999999)
plain_text = ((((((((((((((('a=' + appid) + '&b=') + bucket) + '&k=') + self._secret_id) + '&e=') + str(expired)) + '&t=') + str(now)) + '&r=') + str(rdm)) + '&u=') + puserid) + '&f=') + fileid)
bin = hmac.new(self._secret_key, plain_text, hashlib.sha1)
s = bin.hexdigest()
s = binascii.unhexlify(s)
s = (s + plain_text)
signature = base64.b64encode(s).rstrip()
return signature | GET V2 SIGN USE FILEID
copy and del operation must have fileid and set expired=0
Args:
bucket: user bucket
fileid: user defined fileid, not urlencoded
expired: expire time
userid: user id, pls ignore or set to '0' | base/site-packages/tencentyun/auth.py | get_app_sign_v2 | edisonlz/fastor | 285 | python | def get_app_sign_v2(self, bucket, fileid, expired=0, userid='0'):
" GET V2 SIGN USE FILEID \n copy and del operation must have fileid and set expired=0\n\n Args:\n bucket: user bucket\n fileid: user defined fileid, not urlencoded\n expired: expire time\n userid: user id, pls ignore or set to '0'\n "
if isinstance(fileid, unicode):
fileid = fileid.encode('utf-8')
if ((not self._secret_id) or (not self._secret_key)):
return self.AUTH_SECRET_ID_KEY_ERROR
app_info = conf.get_app_info()
appid = app_info['appid']
puserid = ''
if (userid != ''):
if (len(userid) > 64):
return self.AUTH_URL_FORMAT_ERROR
puserid = userid
now = int(time.time())
rdm = random.randint(0, 999999999)
plain_text = ((((((((((((((('a=' + appid) + '&b=') + bucket) + '&k=') + self._secret_id) + '&e=') + str(expired)) + '&t=') + str(now)) + '&r=') + str(rdm)) + '&u=') + puserid) + '&f=') + fileid)
bin = hmac.new(self._secret_key, plain_text, hashlib.sha1)
s = bin.hexdigest()
s = binascii.unhexlify(s)
s = (s + plain_text)
signature = base64.b64encode(s).rstrip()
return signature | def get_app_sign_v2(self, bucket, fileid, expired=0, userid='0'):
" GET V2 SIGN USE FILEID \n copy and del operation must have fileid and set expired=0\n\n Args:\n bucket: user bucket\n fileid: user defined fileid, not urlencoded\n expired: expire time\n userid: user id, pls ignore or set to '0'\n "
if isinstance(fileid, unicode):
fileid = fileid.encode('utf-8')
if ((not self._secret_id) or (not self._secret_key)):
return self.AUTH_SECRET_ID_KEY_ERROR
app_info = conf.get_app_info()
appid = app_info['appid']
puserid = ''
if (userid != ''):
if (len(userid) > 64):
return self.AUTH_URL_FORMAT_ERROR
puserid = userid
now = int(time.time())
rdm = random.randint(0, 999999999)
plain_text = ((((((((((((((('a=' + appid) + '&b=') + bucket) + '&k=') + self._secret_id) + '&e=') + str(expired)) + '&t=') + str(now)) + '&r=') + str(rdm)) + '&u=') + puserid) + '&f=') + fileid)
bin = hmac.new(self._secret_key, plain_text, hashlib.sha1)
s = bin.hexdigest()
s = binascii.unhexlify(s)
s = (s + plain_text)
signature = base64.b64encode(s).rstrip()
return signature<|docstring|>GET V2 SIGN USE FILEID
copy and del operation must have fileid and set expired=0
Args:
bucket: user bucket
fileid: user defined fileid, not urlencoded
expired: expire time
userid: user id, pls ignore or set to '0'<|endoftext|> |
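For readers on Python 3, a hedged restatement of the signing scheme above as a standalone function; hexdigest-then-unhexlify in the original is equivalent to taking the raw HMAC digest directly, and every credential value here is a placeholder, not a real Tencent key:

import base64, hashlib, hmac, random, time

def sign_v2(appid, bucket, secret_id, secret_key, fileid, expired=0, userid=''):
    now, rdm = int(time.time()), random.randint(0, 999999999)
    plain = (f"a={appid}&b={bucket}&k={secret_id}&e={expired}"
             f"&t={now}&r={rdm}&u={userid}&f={fileid}")
    digest = hmac.new(secret_key.encode(), plain.encode(), hashlib.sha1).digest()
    # Signature = base64(raw_hmac_sha1 || plaintext), matching get_app_sign_v2.
    return base64.b64encode(digest + plain.encode()).decode().rstrip()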
8c08d32b2d9f530a2b3609f0f58f4b1aa090d2757052811b906a1066ae12775e | def cleanText(thisText, cleanDict):
'cleanDict={"searchFor":"replaceWith"}'
for searchFor in cleanDict.keys():
replaceWith = cleanDict[searchFor]
thisText = thisText.replace(searchFor, replaceWith)
return thisText | cleanDict={"searchFor":"replaceWith"} | Kerning/Steal kerning from InDesign.py | cleanText | jpt/Glyphs-Scripts | 283 | python | def cleanText(thisText, cleanDict):
for searchFor in cleanDict.keys():
replaceWith = cleanDict[searchFor]
thisText = thisText.replace(searchFor, replaceWith)
return thisText | def cleanText(thisText, cleanDict):
for searchFor in cleanDict.keys():
replaceWith = cleanDict[searchFor]
thisText = thisText.replace(searchFor, replaceWith)
return thisText<|docstring|>cleanDict={"searchFor":"replaceWith"}<|endoftext|> |
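A quick usage illustration with made-up strings:

cleaned = cleanText("AV AT AW", {" ": "\n"})
# cleaned == "AV\nAT\nAW" -- each search key is replaced everywhere it occurs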
a8e764c75fdeaea3d14d5c334dd1dd53db27bf4f4b17d852c98e528ab161b782 | def execute(self):
'Call when the command is executed.'
arg = self.rest(1)
if arg:
if (not os.path.isfile(arg)):
self.fm.notify('{} is not a file.'.format(arg))
return
file = File(arg)
else:
file = self.fm.thisfile
if (not file.is_file):
self.fm.notify('{} is not a file.'.format(file.relative_path))
return
relative_path = file.relative_path
self.fm.notify(f'File: {relative_path}')
upload_command = ['python', '/home/lovinator/Repository/screenshot-helper/main.py', '--upload', arg]
subprocess.check_call(upload_command)
self.fm.notify('Uploaded!') | Call when the command is executed. | ranger.py | execute | TheLovinator1/screenshot-helper | 1 | python | def execute(self):
arg = self.rest(1)
if arg:
if (not os.path.isfile(arg)):
self.fm.notify('{} is not a file.'.format(arg))
return
file = File(arg)
else:
file = self.fm.thisfile
if (not file.is_file):
self.fm.notify('{} is not a file.'.format(file.relative_path))
return
relative_path = file.relative_path
self.fm.notify(f'File: {relative_path}')
upload_command = ['python', '/home/lovinator/Repository/screenshot-helper/main.py', '--upload', arg]
subprocess.check_call(upload_command)
self.fm.notify('Uploaded!') | def execute(self):
arg = self.rest(1)
if arg:
if (not os.path.isfile(arg)):
self.fm.notify('{} is not a file.'.format(arg))
return
file = File(arg)
else:
file = self.fm.thisfile
if (not file.is_file):
self.fm.notify('{} is not a file.'.format(file.relative_path))
return
relative_path = file.relative_path
self.fm.notify(f'File: {relative_path}')
upload_command = ['python', '/home/lovinator/Repository/screenshot-helper/main.py', '--upload', arg]
subprocess.check_call(upload_command)
self.fm.notify('Uploaded!')<|docstring|>Call when the command is executed.<|endoftext|> |
036abad41273b9d7249f87f8903d681a113fe81f9755530af6b70925fe6c5f62 | def tab(self):
'Call when <TAB> is pressed. This will tab through files in directory.'
return self._tab_directory_content() | Call when <TAB> is pressed. This will tab through files in directory. | ranger.py | tab | TheLovinator1/screenshot-helper | 1 | python | def tab(self):
return self._tab_directory_content() | def tab(self):
return self._tab_directory_content()<|docstring|>Call when <TAB> is pressed. This will tab through files in directory.<|endoftext|> |
a03908424804dd5022c108abe5af3e6dae083cad8151fbe24f25f2e8e1e3d039 | @property
def chars(self) -> str:
"\n The characters that define this alphabet.\n\n Example usage:\n\n >>> alphabet.base16.chars\n '0123456789ABCDEF'\n\n "
return self._chars | The characters that define this alphabet.
Example usage:
>>> alphabet.base16.chars
'0123456789ABCDEF' | bases/alphabet/string_alphabet.py | chars | hashberg-io/bases | 1 | python | @property
def chars(self) -> str:
"\n The characters that define this alphabet.\n\n Example usage:\n\n >>> alphabet.base16.chars\n '0123456789ABCDEF'\n\n "
return self._chars | @property
def chars(self) -> str:
"\n The characters that define this alphabet.\n\n Example usage:\n\n >>> alphabet.base16.chars\n '0123456789ABCDEF'\n\n "
return self._chars<|docstring|>The characters that define this alphabet.
Example usage:
>>> alphabet.base16.chars
'0123456789ABCDEF'<|endoftext|> |
a35e7d38bf22da7ab9de65e5037c5dc4a6a1f26b13cdbd14a83c8807779d30c9 | @pytest.fixture
def response():
'Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n ' | Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html | tests/test_schrodingerseqn1d.py | response | raickhr/schrodingerseqn1d | 0 | python | @pytest.fixture
def response():
'Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n ' | @pytest.fixture
def response():
'Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n '<|docstring|>Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html<|endoftext|> |
57d1077697496419ef3e17274ea12080618184034e5c5256c29cff02162972f7 | def test_content(response):
'Sample pytest test function with the pytest fixture as an argument.' | Sample pytest test function with the pytest fixture as an argument. | tests/test_schrodingerseqn1d.py | test_content | raickhr/schrodingerseqn1d | 0 | python | def test_content(response):
| def test_content(response):
<|docstring|>Sample pytest test function with the pytest fixture as an argument.<|endoftext|> |
c1db64a07aa1ca518273142e4105a048e7439b571d93afaaf3ae3c28391345e3 | def test_command_line_interface():
'Test the CLI.'
runner = CliRunner()
result = runner.invoke(cli.main)
assert (result.exit_code == 0)
assert ('schrodingerseqn1d.cli.main' in result.output)
help_result = runner.invoke(cli.main, ['--help'])
assert (help_result.exit_code == 0)
assert ('--help Show this message and exit.' in help_result.output) | Test the CLI. | tests/test_schrodingerseqn1d.py | test_command_line_interface | raickhr/schrodingerseqn1d | 0 | python | def test_command_line_interface():
runner = CliRunner()
result = runner.invoke(cli.main)
assert (result.exit_code == 0)
assert ('schrodingerseqn1d.cli.main' in result.output)
help_result = runner.invoke(cli.main, ['--help'])
assert (help_result.exit_code == 0)
assert ('--help Show this message and exit.' in help_result.output) | def test_command_line_interface():
runner = CliRunner()
result = runner.invoke(cli.main)
assert (result.exit_code == 0)
assert ('schrodingerseqn1d.cli.main' in result.output)
help_result = runner.invoke(cli.main, ['--help'])
assert (help_result.exit_code == 0)
assert ('--help Show this message and exit.' in help_result.output)<|docstring|>Test the CLI.<|endoftext|> |
9af5d49b10a14fae5cdaba2fcbf90ce1ace6bc5826c85ae7ab8998f02d2f15ed | def process_data(d, p):
'Processing the data (d) and parameters (p)'
rounds = list(range(1, (p['rounds'] + 1)))
positions = (set(p['scoring positions']) | set(p['substitute positions']))
return {'players': set(d.keys()), 'positions': positions, 'scoring positions': set(p['scoring positions']), 'substitute positions': set(p['substitute positions']), 'players eligible in position q': {q_: set((player for (player, x) in d.items() if (q in x['positions']))) for q in p['scoring positions'] for q_ in [q, ('SUB ' + q)]}, 'positions eligible to player p': {player: (set(x['positions']) | set([('SUB ' + y) for y in x['positions']])) for (player, x) in d.items()}, 'rounds': rounds, 'trade limit per season': p['trades']['total'], 'trade limit in round r': defaultdict((lambda : p['trades']['default']), {int(key): int(value) for (key, value) in p['trades']['exceptions'].items()}), 'players required in position q': p['capacities'], 'scoring positions in round r': defaultdict((lambda : p['number of scoring positions']['default']), {int(key): int(value) for (key, value) in p['number of scoring positions']['exceptions'].items()}), 'starting budget': p['starting budget'], 'points scored by player p in round r': {(player, r): x['points'][str(r)] for (player, x) in d.items() for r in rounds}, 'value of player p in round r': {(player, r): x['price'][str(r)] for (player, x) in d.items() for r in rounds}, 'score to beat': p.get('score to beat', None), 'slots of position q': {q: range(p['capacities'][q]) for q in p['substitute positions']}, 'emergencies limit': p['emergencies']} | Processing the data (d) and parameters (p) | optimal_fantasy/notation.py | process_data | stevedwards/optimal-fantasy | 1 | python | def process_data(d, p):
rounds = list(range(1, (p['rounds'] + 1)))
positions = (set(p['scoring positions']) | set(p['substitute positions']))
return {'players': set(d.keys()), 'positions': positions, 'scoring positions': set(p['scoring positions']), 'substitute positions': set(p['substitute positions']), 'players eligible in position q': {q_: set((player for (player, x) in d.items() if (q in x['positions']))) for q in p['scoring positions'] for q_ in [q, ('SUB ' + q)]}, 'positions eligible to player p': {player: (set(x['positions']) | set([('SUB ' + y) for y in x['positions']])) for (player, x) in d.items()}, 'rounds': rounds, 'trade limit per season': p['trades']['total'], 'trade limit in round r': defaultdict((lambda : p['trades']['default']), {int(key): int(value) for (key, value) in p['trades']['exceptions'].items()}), 'players required in position q': p['capacities'], 'scoring positions in round r': defaultdict((lambda : p['number of scoring positions']['default']), {int(key): int(value) for (key, value) in p['number of scoring positions']['exceptions'].items()}), 'starting budget': p['starting budget'], 'points scored by player p in round r': {(player, r): x['points'][str(r)] for (player, x) in d.items() for r in rounds}, 'value of player p in round r': {(player, r): x['price'][str(r)] for (player, x) in d.items() for r in rounds}, 'score to beat': p.get('score to beat', None), 'slots of position q': {q: range(p['capacities'][q]) for q in p['substitute positions']}, 'emergencies limit': p['emergencies']} | def process_data(d, p):
rounds = list(range(1, (p['rounds'] + 1)))
positions = (set(p['scoring positions']) | set(p['substitute positions']))
return {'players': set(d.keys()), 'positions': positions, 'scoring positions': set(p['scoring positions']), 'substitute positions': set(p['substitute positions']), 'players eligible in position q': {q_: set((player for (player, x) in d.items() if (q in x['positions']))) for q in p['scoring positions'] for q_ in [q, ('SUB ' + q)]}, 'positions eligible to player p': {player: (set(x['positions']) | set([('SUB ' + y) for y in x['positions']])) for (player, x) in d.items()}, 'rounds': rounds, 'trade limit per season': p['trades']['total'], 'trade limit in round r': defaultdict((lambda : p['trades']['default']), {int(key): int(value) for (key, value) in p['trades']['exceptions'].items()}), 'players required in position q': p['capacities'], 'scoring positions in round r': defaultdict((lambda : p['number of scoring positions']['default']), {int(key): int(value) for (key, value) in p['number of scoring positions']['exceptions'].items()}), 'starting budget': p['starting budget'], 'points scored by player p in round r': {(player, r): x['points'][str(r)] for (player, x) in d.items() for r in rounds}, 'value of player p in round r': {(player, r): x['price'][str(r)] for (player, x) in d.items() for r in rounds}, 'score to beat': p.get('score to beat', None), 'slots of position q': {q: range(p['capacities'][q]) for q in p['substitute positions']}, 'emergencies limit': p['emergencies']}<|docstring|>Processing the data (d) and parameters (p)<|endoftext|> |
54e872c28d02d2cc39535bddf56b6ca5e568cd5f7970f296e7a4adc3ce1bdadb | def upload(self, address, username, password):
"\n This method uploads the GitHub release into a selected Exasol bucket.\n :param address: address in the format 'http://<host>:<port>/<bucket name>'\n :param username: bucket writing username\n :param password: bucket writing password\n "
download_url = self.__extract_download_url()
r_download = requests.get(download_url, stream=True)
upload_url = self.__build_upload_url(address, username, password)
requests.put(upload_url, data=r_download.iter_content((10 * 1024))) | This method uploads the GitHub release into a selected Exasol bucket.
:param address: address in the format 'http://<host>:<port>/<bucket name>'
:param username: bucket writing username
:param password: bucket writing password | utilities/extension_downloading/GithubReleaseFileBucketFSUploader.py | upload | stmandl/exa-toolbox | 7 | python | def upload(self, address, username, password):
"\n This method uploads the GitHub release into a selected Exasol bucket.\n :param address: address in the format 'http://<host>:<port>/<bucket name>'\n :param username: bucket writing username\n :param password: bucket writing password\n "
download_url = self.__extract_download_url()
r_download = requests.get(download_url, stream=True)
upload_url = self.__build_upload_url(address, username, password)
requests.put(upload_url, data=r_download.iter_content((10 * 1024))) | def upload(self, address, username, password):
"\n This method uploads the GitHub release into a selected Exasol bucket.\n :param address: address in the format 'http://<host>:<port>/<bucket name>'\n :param username: bucket writing username\n :param password: bucket writing password\n "
download_url = self.__extract_download_url()
r_download = requests.get(download_url, stream=True)
upload_url = self.__build_upload_url(address, username, password)
requests.put(upload_url, data=r_download.iter_content((10 * 1024)))<|docstring|>This method uploads the GitHub release into a selected Exasol bucket.
:param address: address in the format 'http://<host>:<port>/<bucket name>'
:param username: bucket writing username
:param password: bucket writing password<|endoftext|> |
c4e54a91622a785748ce85e2dbe753032cf6392ce871e0dcfe7f0e2329332f51 | @admin_blueprint.route('/')
@login_required
@admin_required
def user_list():
'\n **Get The List of User**\n\n Lists all users in the database.\n\n '
context = base_context()
context['users'] = User.query.all()
return render_template('admin/index.html', **context) | **Get The List of User**
Lists all users in the database. | shopyo/modules/admin/view.py | user_list | FullteaOfEEIC/shopyo | 0 | python | @admin_blueprint.route('/')
@login_required
@admin_required
def user_list():
'\n **Get The List of User**\n\n Lists all users in the database.\n\n '
context = base_context()
context['users'] = User.query.all()
return render_template('admin/index.html', **context) | @admin_blueprint.route('/')
@login_required
@admin_required
def user_list():
'\n **Get The List of User**\n\n Lists all users in the database.\n\n '
context = base_context()
context['users'] = User.query.all()
return render_template('admin/index.html', **context)<|docstring|>**Get The List of User**
Lists all users in the database.<|endoftext|> |
b11fc700fc56de2de83197d79dd6c5840b9c4e7e6f75e09c702394718cfcd098 | @admin_blueprint.route('/add', methods=['GET', 'POST'])
@login_required
@admin_required
def user_add():
'\n **Adds a User**\n\n adds a user to the database.\n\n '
context = base_context()
if (request.method == 'POST'):
username = request.form['name']
password = request.form['password']
admin_user = request.form.get('admin_user')
if (admin_user == 'True'):
admin_user = True
else:
admin_user = False
has_user = db.session.query(exists().where((User.username == username))).scalar()
if (has_user is False):
new_user = User()
new_user.username = username
new_user.admin_user = admin_user
new_user.set_hash(password)
for key in request.form:
if key.startswith('role_'):
roleid = key.split('_')[1]
role = Role.query.get(roleid)
new_user.roles.append(role)
new_user.insert()
return redirect(url_for('admin.user_add'))
context['roles'] = Role.query.all()
return render_template('admin/add.html', **context) | **Adds a User**
adds a user to the database. | shopyo/modules/admin/view.py | user_add | FullteaOfEEIC/shopyo | 0 | python | @admin_blueprint.route('/add', methods=['GET', 'POST'])
@login_required
@admin_required
def user_add():
'\n **Adds a User**\n\n adds a user to the database.\n\n '
context = base_context()
if (request.method == 'POST'):
username = request.form['name']
password = request.form['password']
admin_user = request.form.get('admin_user')
if (admin_user == 'True'):
admin_user = True
else:
admin_user = False
has_user = db.session.query(exists().where((User.username == username))).scalar()
if (has_user is False):
new_user = User()
new_user.username = username
new_user.admin_user = admin_user
new_user.set_hash(password)
for key in request.form:
if key.startswith('role_'):
roleid = key.split('_')[1]
role = Role.query.get(roleid)
new_user.roles.append(role)
new_user.insert()
return redirect(url_for('admin.user_add'))
context['roles'] = Role.query.all()
return render_template('admin/add.html', **context) | @admin_blueprint.route('/add', methods=['GET', 'POST'])
@login_required
@admin_required
def user_add():
'\n **Adds a User**\n\n adds a user to the database.\n\n '
context = base_context()
if (request.method == 'POST'):
username = request.form['name']
password = request.form['password']
admin_user = request.form.get('admin_user')
if (admin_user == 'True'):
admin_user = True
else:
admin_user = False
has_user = db.session.query(exists().where((User.username == username))).scalar()
if (has_user is False):
new_user = User()
new_user.username = username
new_user.admin_user = admin_user
new_user.set_hash(password)
for key in request.form:
if key.startswith('role_'):
roleid = key.split('_')[1]
role = Role.query.get(roleid)
new_user.roles.append(role)
new_user.insert()
return redirect(url_for('admin.user_add'))
context['roles'] = Role.query.all()
return render_template('admin/add.html', **context)<|docstring|>**Adds a User**
adds a user to the database.<|endoftext|> |
4a1fce69ebb04eac768a1f3b5bcb6a194ad22715027c8023805fdce6434d1432 | @admin_blueprint.route('/delete/<id>', methods=['GET'])
@login_required
@admin_required
def admin_delete(id):
'\n **Delete a User**\n\n :param id: id of the user\n :type id: int\n\n '
user = User.query.get(id)
user.delete()
return redirect('/admin') | **Delete a User**
:param id: id of the user
:type id: int | shopyo/modules/admin/view.py | admin_delete | FullteaOfEEIC/shopyo | 0 | python | @admin_blueprint.route('/delete/<id>', methods=['GET'])
@login_required
@admin_required
def admin_delete(id):
'\n **Delete a User**\n\n :param id: id of the user\n :type id: int\n\n '
user = User.query.get(id)
user.delete()
return redirect('/admin') | @admin_blueprint.route('/delete/<id>', methods=['GET'])
@login_required
@admin_required
def admin_delete(id):
'\n **Delete a User**\n\n :param id: id of the user\n :type id: int\n\n '
user = User.query.get(id)
user.delete()
return redirect('/admin')<|docstring|>**Delete a User**
:param id: id of the user
:type id: int<|endoftext|> |
f1d461fe67b301688366826635c2e4e1c8a9fc4907774862d5bc64840251df5e | @admin_blueprint.route('/edit/<id>', methods=['GET'])
@login_required
@admin_required
def admin_edit(id):
'\n **Update information for a User**\n\n :param id: id of the user\n :type id: int\n\n '
context = base_context()
user = User.query.get(id)
context['user'] = user
context['user_roles'] = [r.name for r in user.roles]
context['roles'] = Role.query.all()
return render_template('admin/edit.html', **context) | **Update information for a User**
:param id: id of the user
:type id: int | shopyo/modules/admin/view.py | admin_edit | FullteaOfEEIC/shopyo | 0 | python | @admin_blueprint.route('/edit/<id>', methods=['GET'])
@login_required
@admin_required
def admin_edit(id):
'\n **Update information for a User**\n\n :param id: id of the user\n :type id: int\n\n '
context = base_context()
user = User.query.get(id)
context['user'] = user
context['user_roles'] = [r.name for r in user.roles]
context['roles'] = Role.query.all()
return render_template('admin/edit.html', **context) | @admin_blueprint.route('/edit/<id>', methods=['GET'])
@login_required
@admin_required
def admin_edit(id):
'\n **Update information for a User**\n\n :param id: id of the user\n :type id: int\n\n '
context = base_context()
user = User.query.get(id)
context['user'] = user
context['user_roles'] = [r.name for r in user.roles]
context['roles'] = Role.query.all()
return render_template('admin/edit.html', **context)<|docstring|>**Update information for a User**
:param id: id of the user
:type id: int<|endoftext|> |
949058ef4ff8ca48fb8b14e2e90d9603f3c7af0282c293eb1460a470ab392f10 | @admin_blueprint.route('/update', methods=['POST'])
@login_required
@admin_required
def admin_update():
'\n **Update a User record**\n\n '
id = request.form['id']
password = request.form['password']
username = request.form['username']
admin_user = request.form.get('admin_user')
if (admin_user == 'True'):
admin_user = True
else:
admin_user = False
user = User.query.get(id)
user.admin_user = admin_user
user.username = username
if password.strip():
user.set_hash(password)
user.roles[:] = []
for key in request.form:
if key.startswith('role_'):
roleid = key.split('_')[1]
role = Role.query.get(roleid)
user.roles.append(role)
user.update()
return redirect('/admin') | **Update a User record** | shopyo/modules/admin/view.py | admin_update | FullteaOfEEIC/shopyo | 0 | python | @admin_blueprint.route('/update', methods=['POST'])
@login_required
@admin_required
def admin_update():
'\n \n\n '
id = request.form['id']
password = request.form['password']
username = request.form['username']
admin_user = request.form.get('admin_user')
if (admin_user == 'True'):
admin_user = True
else:
admin_user = False
user = User.query.get(id)
user.admin_user = admin_user
user.username = username
if password.strip():
user.set_hash(password)
user.roles[:] = []
for key in request.form:
if key.startswith('role_'):
roleid = key.split('_')[1]
role = Role.query.get(roleid)
user.roles.append(role)
user.update()
return redirect('/admin') | @admin_blueprint.route('/update', methods=['POST'])
@login_required
@admin_required
def admin_update():
'\n \n\n '
id = request.form['id']
password = request.form['password']
username = request.form['username']
admin_user = request.form.get('admin_user')
if (admin_user == 'True'):
admin_user = True
else:
admin_user = False
user = User.query.get(id)
user.set_hash(password)
user.admin_user = admin_user
user.username = username
if password.strip():
user.set_hash(password)
user.roles[:] = []
for key in request.form:
if key.startswith('role_'):
roleid = key.split('_')[1]
role = Role.query.get(roleid)
user.roles.append(role)
user.update()
return redirect('/admin')<|docstring|>**Update a User record**<|endoftext|> |
bd294cbba441a8e2efb6716c1d1ff68c78938b47fd5ceed26697b51d4b37c6fc | def header_property(header_name):
"Create a read-only header property.\n\n Args:\n wsgi_name (str): Case-sensitive name of the header as it would\n appear in the WSGI environ ``dict`` (i.e., 'HTTP_*')\n\n Returns:\n A property instance than can be assigned to a class variable.\n\n "
header_name = header_name.lower().encode()
def fget(self):
try:
return (self._asgi_headers[header_name].decode('latin1') or None)
except KeyError:
return None
return property(fget) | Create a read-only header property.
Args:
wsgi_name (str): Case-sensitive name of the header as it would
appear in the WSGI environ ``dict`` (i.e., 'HTTP_*')
Returns:
A property instance that can be assigned to a class variable. | falcon/asgi/_request_helpers.py | header_property | andriyor/falcon | 8,217 | python | def header_property(header_name):
"Create a read-only header property.\n\n Args:\n wsgi_name (str): Case-sensitive name of the header as it would\n appear in the WSGI environ ``dict`` (i.e., 'HTTP_*')\n\n Returns:\n A property instance than can be assigned to a class variable.\n\n "
header_name = header_name.lower().encode()
def fget(self):
try:
return (self._asgi_headers[header_name].decode('latin1') or None)
except KeyError:
return None
return property(fget) | def header_property(header_name):
"Create a read-only header property.\n\n Args:\n wsgi_name (str): Case-sensitive name of the header as it would\n appear in the WSGI environ ``dict`` (i.e., 'HTTP_*')\n\n Returns:\n A property instance than can be assigned to a class variable.\n\n "
header_name = header_name.lower().encode()
def fget(self):
try:
return (self._asgi_headers[header_name].decode('latin1') or None)
except KeyError:
return None
return property(fget)<|docstring|>Create a read-only header property.
Args:
wsgi_name (str): Case-sensitive name of the header as it would
appear in the WSGI environ ``dict`` (i.e., 'HTTP_*')
Returns:
A property instance that can be assigned to a class variable.<|endoftext|> |
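A hypothetical sketch of how this factory is meant to be used; the class name, attribute names, and header dict below are illustrative, not taken from the falcon codebase:

class DemoRequest:
    # Each attribute becomes a read-only view over the ASGI header mapping.
    user_agent = header_property('User-Agent')
    content_type = header_property('Content-Type')

    def __init__(self, asgi_headers):
        # ASGI header names arrive lower-cased as bytes, values as bytes.
        self._asgi_headers = asgi_headers

req = DemoRequest({b'user-agent': b'curl/8.0'})
print(req.user_agent)    # 'curl/8.0'
print(req.content_type)  # None (header missing)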
d16eeb9b22d37964c1e4aec69f67a5b682377e56d41d7deeb152a209aa15038f | def inject_year_week_sentiment_analysis(db_connector: Connector, google_client: LanguageServiceClient, redis_manager: RedisManager, list_week_year: list, company_id: str) -> dict:
'\n Inject the week/year to surveys replies manager\n :param db_connector: connector\n :param google_client: google client\n :param redis_manager: redis manager\n :param list_week_year: list of weeks years\n :param company_id: company target\n :return:\n '
(week_s, year_s, week_e, year_e) = extract_first_last_weeks(list_week_year)
period = {'start_year': year_s, 'start_week': week_s, 'end_year': year_e, 'end_week': week_e}
survey_replies_manager = SurveyRepliesManager(api_manager=APISourcesFetcher(db_connector=db_connector), google_client=google_client, redis_manager=redis_manager, period=period, company_ids=[company_id])
survey_replies_manager.fetch_data()
survey_replies_manager.process_replies(process_scores_only=True)
return survey_replies_manager.get_results() | Inject the week/year to surveys replies manager
:param db_connector: connector
:param google_client: google client
:param redis_manager: redis manager
:param list_week_year: list of weeks years
:param company_id: company target
:return: | engage-analytics/sentiment_analysis/src/extract_from_db_insert_weeks_to_redis.py | inject_year_week_sentiment_analysis | oliveriopt/mood-analytics | 0 | python | def inject_year_week_sentiment_analysis(db_connector: Connector, google_client: LanguageServiceClient, redis_manager: RedisManager, list_week_year: list, company_id: str) -> dict:
'\n Inject the week/year to surveys replies manager\n :param db_connector: connector\n :param google_client: google client\n :param redis_manager: redis manager\n :param list_week_year: list of weeks years\n :param company_id: company target\n :return:\n '
(week_s, year_s, week_e, year_e) = extract_first_last_weeks(list_week_year)
period = {'start_year': year_s, 'start_week': week_s, 'end_year': year_e, 'end_week': week_e}
survey_replies_manager = SurveyRepliesManager(api_manager=APISourcesFetcher(db_connector=db_connector), google_client=google_client, redis_manager=redis_manager, period=period, company_ids=[company_id])
survey_replies_manager.fetch_data()
survey_replies_manager.process_replies(process_scores_only=True)
return survey_replies_manager.get_results() | def inject_year_week_sentiment_analysis(db_connector: Connector, google_client: LanguageServiceClient, redis_manager: RedisManager, list_week_year: list, company_id: str) -> dict:
'\n Inject the week/year to surveys replies manager\n :param db_connector: connector\n :param google_client: google client\n :param redis_manager: redis manager\n :param list_week_year: list of weeks years\n :param company_id: company target\n :return:\n '
(week_s, year_s, week_e, year_e) = extract_first_last_weeks(list_week_year)
period = {'start_year': year_s, 'start_week': week_s, 'end_year': year_e, 'end_week': week_e}
survey_replies_manager = SurveyRepliesManager(api_manager=APISourcesFetcher(db_connector=db_connector), google_client=google_client, redis_manager=redis_manager, period=period, company_ids=[company_id])
survey_replies_manager.fetch_data()
survey_replies_manager.process_replies(process_scores_only=True)
return survey_replies_manager.get_results()<|docstring|>Inject the week/year to surveys replies manager
:param db_connector: connector
:param google_client: google client
:param redis_manager: redis manager
:param list_week_year: list of weeks years
:param company_id: company target
:return:<|endoftext|> |
281b2fbfad3923bddb14191489896881c42cfbd5e080e4469b7a88354d26d025 | def persist_result_redis(company_id: str, processing_result: dict, redis_manager) -> None:
'\n Write the dictionary into redis\n :param company_id: str of company id\n :param redis_manager: redis manager\n :param processing_result: dict to persist into redis\n :return:\n '
redis_score_field = 'score'
if (not processing_result):
logger.warning(msg='No data to be persisted in Redis.')
return
company_redis_data = redis_manager.retrieve(key=company_id, field=redis_score_field)
processed_data = nested_lookup(redis_score_field, processing_result[company_id])
scores = (company_redis_data + processed_data)
redis_manager.persist(key=company_id, field=redis_score_field, data=scores) | Write the dictionary into redis
:param company_id: str of company id
:param redis_manager: redis manager
:param processing_result: dict to persist into redis
:return: | engage-analytics/sentiment_analysis/src/extract_from_db_insert_weeks_to_redis.py | persist_result_redis | oliveriopt/mood-analytics | 0 | python | def persist_result_redis(company_id: str, processing_result: dict, redis_manager) -> None:
'\n Write the dictionary into redis\n :param company_id: str of company id\n :param redis_manager: redis manager\n :param processing_result: dict to persist into redis\n :return:\n '
redis_score_field = 'score'
if (not processing_result):
logger.warning(msg='No data to be persisted in Redis.')
return
company_redis_data = redis_manager.retrieve(key=company_id, field=redis_score_field)
processed_data = nested_lookup(redis_score_field, processing_result[company_id])
scores = (company_redis_data + processed_data)
redis_manager.persist(key=company_id, field=redis_score_field, data=scores) | def persist_result_redis(company_id: str, processing_result: dict, redis_manager) -> None:
'\n Write the dictionary into redis\n :param company_id: str of company id\n :param redis_manager: redis manager\n :param processing_result: dict to persist into redis\n :return:\n '
redis_score_field = 'score'
if (not processing_result):
logger.warning(msg='No data to be persisted in Redis.')
return
company_redis_data = redis_manager.retrieve(key=company_id, field=redis_score_field)
processed_data = nested_lookup(redis_score_field, processing_result[company_id])
scores = (company_redis_data + processed_data)
redis_manager.persist(key=company_id, field=redis_score_field, data=scores)<|docstring|>Write the dictionary into redis
:param company_id: str of company id
:param redis_manager: redis manager
:param processing_result: dict to persist into redis
:return:<|endoftext|> |
13f6d23735508e594af878f369c34d0c7f911fcd590c03bd86982e250c878da4 | def add_cite(self, cite):
'Add a citation to a node, adjusting weights as necessary'
self.weight += 1
if (len(cite) == 0):
return
section = cite[0]
subsections = cite[1:]
for child in self.children:
if (child.label == section):
child.add_cite(subsections)
break
else:
new_child = Node(section, rebuild_cite(self.cite, section))
self.children.append(new_child)
new_child.add_cite(subsections) | Add a citation to a node, adjusting weights as necessary | tree.py | add_cite | wecassidy/dp-cite | 0 | python | def add_cite(self, cite):
self.weight += 1
if (len(cite) == 0):
return
section = cite[0]
subsections = cite[1:]
for child in self.children:
if (child.label == section):
child.add_cite(subsections)
break
else:
new_child = Node(section, rebuild_cite(self.cite, section))
self.children.append(new_child)
new_child.add_cite(subsections) | def add_cite(self, cite):
self.weight += 1
if (len(cite) == 0):
return
section = cite[0]
subsections = cite[1:]
for child in self.children:
if (child.label == section):
child.add_cite(subsections)
break
else:
new_child = Node(section, rebuild_cite(self.cite, section))
self.children.append(new_child)
new_child.add_cite(subsections)<|docstring|>Add a citation to a node, adjusting weights as necessary<|endoftext|> |
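Node's constructor and the rebuild_cite helper are not shown in these records; assuming a constructor Node(label, cite) that starts with weight 0 and an empty children list, and a rebuild_cite that appends the section label to the parent citation, a toy run could look like:

root = Node('', '')           # hypothetical empty root
root.add_cite(['s2', 'a'])    # cite s2(a)
root.add_cite(['s2', 'a'])    # cite it again
root.add_cite(['s3'])         # cite s3 directly
# root.weight == 3; the 's2' child has weight 2, its 'a' child weight 2,
# and the 's3' child weight 1.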
67623f5a6f002808edba88e52c1fba6613b4482ce7c608d5a958bf06be2de66e | def individual_weight(self):
'\n Find number of direct citations of a node. This is the weight\n of a node minus the weight of its children.\n '
return (self.weight - sum((c.weight for c in self.children))) | Find number of direct citations of a node. This is the weight
of a node minus the weight of its children. | tree.py | individual_weight | wecassidy/dp-cite | 0 | python | def individual_weight(self):
'\n Find number of direct citations of a node. This is the weight\n of a node minus the weight of its children.\n '
return (self.weight - sum((c.weight for c in self.children))) | def individual_weight(self):
'\n Find number of direct citations of a node. This is the weight\n of a node minus the weight of its children.\n '
return (self.weight - sum((c.weight for c in self.children)))<|docstring|>Find number of direct citations of a node. This is the weight
of a node minus the weight of its children.<|endoftext|> |
aa91914fd455605a73d0e3cc84a69881b125a6da07f9059cb3c45976cc61e028 | def __str__(self, level=0):
'Pretty-print a tree'
out = '{indent}{label}: {weight}'.format(indent=(' ' * level), label=self.cite, weight=self.weight)
for child in self.children:
out += ('\n' + child.__str__((level + 1)))
return out | Pretty-print a tree | tree.py | __str__ | wecassidy/dp-cite | 0 | python | def __str__(self, level=0):
out = '{indent}{label}: {weight}'.format(indent=(' ' * level), label=self.cite, weight=self.weight)
for child in self.children:
out += ('\n' + child.__str__((level + 1)))
return out | def __str__(self, level=0):
out = '{indent}{label}: {weight}'.format(indent=(' ' * level), label=self.cite, weight=self.weight)
for child in self.children:
out += ('\n' + child.__str__((level + 1)))
return out<|docstring|>Pretty-print a tree<|endoftext|> |