code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
---|---|---|---|---|---|---|---|
def rpc_campaign_landing_page_new(handler, session, campaign_id, hostname, page):
	"""
	Add a landing page for the specified campaign. Landing pages refer
	to resources that when visited by a user should cause the visit
	counter to be incremented.

	:param int campaign_id: The ID of the campaign.
	:param str hostname: The hostname which will be used to serve the request.
	:param str page: The request resource.
	"""
	# normalize the inputs: drop any port from the hostname and the leading slash from the page
	hostname, _, _ = hostname.partition(':')
	page = page.lstrip('/')
	existing = session.query(db_models.LandingPage).filter_by(
		campaign_id=campaign_id,
		hostname=hostname,
		page=page
	)
	# nothing to do when an identical landing page is already registered
	if existing.count():
		return
	new_landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page)
	new_landing_page.assert_session_has_permissions('c', handler.rpc_session)
	session.add(new_landing_page)
	session.commit()
|
Add a landing page for the specified campaign. Landing pages refer
to resources that when visited by a user should cause the visit
counter to be incremented.
:param int campaign_id: The ID of the campaign.
:param str hostname: The hostname which will be used to serve the request.
:param str page: The request resource.
|
rpc_campaign_landing_page_new
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_campaign_message_new(handler, session, campaign_id, email_id, target_email, first_name, last_name, department_name=None):
	"""
	Record a message that has been sent as part of a campaign. These details can
	be retrieved later for value substitution in template pages.

	:param int campaign_id: The ID of the campaign.
	:param str email_id: The message id of the sent email.
	:param str target_email: The email address that the message was sent to.
	:param str first_name: The first name of the message's recipient.
	:param str last_name: The last name of the message's recipient.
	:param str department_name: The name of the company department that the message's recipient belongs to.
	"""
	new_message = _message_new(
		handler,
		session,
		campaign_id,
		email_id,
		target_email,
		first_name,
		last_name,
		department_name=department_name
	)
	# verify the session may create the record before persisting it
	new_message.assert_session_has_permissions('c', handler.rpc_session)
	session.add(new_message)
	session.commit()
|
Record a message that has been sent as part of a campaign. These details can
be retrieved later for value substitution in template pages.
:param int campaign_id: The ID of the campaign.
:param str email_id: The message id of the sent email.
:param str target_email: The email address that the message was sent to.
:param str first_name: The first name of the message's recipient.
:param str last_name: The last name of the message's recipient.
:param str department_name: The name of the company department that the message's recipient belongs to.
|
rpc_campaign_message_new
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_campaign_message_new(handler, session, campaign_id, email_id, target_email, first_name, last_name, department_name=None):
	"""
	Record a message that has been sent as part of a campaign. These details can
	be retrieved later for value substitution in template pages.

	:param int campaign_id: The ID of the campaign.
	:param str email_id: The message id of the sent email.
	:param str target_email: The email address that the message was sent to.
	:param str first_name: The first name of the message's recipient.
	:param str last_name: The last name of the message's recipient.
	:param str department_name: The name of the company department that the message's recipient belongs to.
	"""
	new_message = _message_new(
		handler,
		session,
		campaign_id,
		email_id,
		target_email,
		first_name,
		last_name,
		department_name=department_name
	)
	# explicitly store SQL NULL for the sent timestamp
	new_message.sent = db_models.sql_null()
	new_message.assert_session_has_permissions('c', handler.rpc_session)
	session.add(new_message)
	session.commit()
|
Record a message that has been sent as part of a campaign. These details can
be retrieved later for value substitution in template pages.
:param int campaign_id: The ID of the campaign.
:param str email_id: The message id of the sent email.
:param str target_email: The email address that the message was sent to.
:param str first_name: The first name of the message's recipient.
:param str last_name: The last name of the message's recipient.
:param str department_name: The name of the company department that the message's recipient belongs to.
|
rpc_campaign_message_new
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_campaign_stats(handler, session, campaign_id):
	"""
	Generate statistics regarding the specified campaign and return them in a
	dictionary. The dictionary will contain the keys credentials,
	credentials-unique, messages, messages-trained, visits, visits-unique.
	Values with unique in the key are counted unique by the message id for
	which they are associated.

	:param campaign_id: The unique ID of the campaign to generate statistics for.
	:return: The statistics for the specified campaign.
	:rtype: dict
	"""
	def count(model, unique_by=None, **extra_filters):
		# build a per-campaign count query, optionally distinct on a single column
		query = session.query(model).filter_by(campaign_id=campaign_id, **extra_filters)
		if unique_by is not None:
			query = query.distinct(unique_by)
		return query.count()
	return {
		'credentials': count(db_models.Credential),
		'credentials-unique': count(db_models.Credential, unique_by=db_models.Credential.message_id),
		'messages': count(db_models.Message),
		'messages-trained': count(db_models.Message, trained=True),
		'visits': count(db_models.Visit),
		'visits-unique': count(db_models.Visit, unique_by=db_models.Visit.message_id)
	}
|
Generate statistics regarding the specified campaign and return them in a
dictionary. The dictionary will contain the keys credentials,
credentials-unique, messages, messages-trained, visits, visits-unique.
Values with unique in the key are counted unique by the message id for
which they are associated.
:param campaign_id: The unique ID of the campaign to generate statistics for.
:return: The statistics for the specified campaign.
:rtype: dict
|
rpc_campaign_stats
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_database_count_rows(handler, session, table_name, query_filter=None):
	"""
	Get a count of the rows in the specified table where the search
	criteria matches.

	:param str table_name: The name of the database table to query.
	:param dict query_filter: A dictionary mapping optional search criteria for matching the query.
	:return: The number of matching rows.
	:rtype: int
	"""
	metatable = database_tables.get(table_name)
	if not metatable:
		raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
	query_filter = query_filter or {}
	# reject any filter key that is not a real column of the target table
	invalid_columns = [column for column in query_filter if column not in metatable.column_names]
	if invalid_columns:
		raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(invalid_columns[0], table_name))
	return session.query(metatable.model).filter_by(**query_filter).count()
|
Get a count of the rows in the specified table where the search
criteria matches.
:param str table_name: The name of the database table to query.
:param dict query_filter: A dictionary mapping optional search criteria for matching the query.
:return: The number of matching rows.
:rtype: int
|
rpc_database_count_rows
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_database_view_rows(handler, session, table_name, page=0, query_filter=None):
	"""
	Retrieve the rows from the specified table where the search
	criteria matches.

	:param str table_name: The name of the database table to query.
	:param int page: The page number to retrieve results for.
	:param dict query_filter: A dictionary mapping optional search criteria for matching the query.
	:return: A dictionary with columns and rows keys.
	:rtype: dict
	"""
	metatable = handler.server.tables_api.get(table_name)
	if not metatable:
		raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
	query_filter = query_filter or {}
	for column in query_filter.keys():
		if column not in metatable.column_names:
			raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(column, table_name))
	query = session.query(metatable.model).filter_by(**query_filter)
	total_rows = query.count()
	# it's critical that the columns are in the order that the client is expecting
	visible_rows = []
	# walk rows starting at the page offset, keeping only those the session may
	# read, until a full page of results has been collected
	for row in query[page * VIEW_ROW_COUNT:]:
		if len(visible_rows) == VIEW_ROW_COUNT:
			break
		if not row.session_has_permissions('r', handler.rpc_session):
			continue
		visible_rows.append([getattr(row, column) for column in metatable.column_names])
	if not visible_rows:
		return None
	return {'columns': metatable.column_names, 'rows': visible_rows, 'total_rows': total_rows, 'page_size': VIEW_ROW_COUNT}
|
Retrieve the rows from the specified table where the search
criteria matches.
:param str table_name: The name of the database table to query.
:param int page: The page number to retrieve results for.
:param dict query_filter: A dictionary mapping optional search criteria for matching the query.
:return: A dictionary with columns and rows keys.
:rtype: dict
|
rpc_database_view_rows
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_database_delete_row_by_id(handler, session, table_name, row_id):
	"""
	Delete the row from the table with the specified value in the id column.
	If the row does not exist, no error is raised.

	:param str table_name: The name of the database table to delete a row from.
	:param row_id: The id value.
	"""
	metatable = database_tables.get(table_name)
	if not metatable:
		raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
	row = db_manager.get_row_by_id(session, metatable.model, row_id)
	if row is None:
		# a missing row is not an error, just note it for debugging purposes
		logging.getLogger('KingPhisher.Server.API.RPC').debug(
			"received delete request for non existing row with id {0} from table {1}".format(row_id, table_name)
		)
		return
	row.assert_session_has_permissions('d', handler.rpc_session)
	session.delete(row)
	session.commit()
|
Delete the row from the table with the specified value in the id column.
If the row does not exist, no error is raised.
:param str table_name: The name of the database table to delete a row from.
:param row_id: The id value.
|
rpc_database_delete_row_by_id
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_database_delete_rows_by_id(handler, session, table_name, row_ids):
	"""
	Delete multiple rows from a table with the specified values in the id
	column. If a row id specified in *row_ids* does not exist, then it will
	be skipped and no error will be thrown.

	:param str table_name: The name of the database table to delete rows from.
	:param list row_ids: The row ids to delete.
	:return: The row ids that were deleted.
	:rtype: list
	"""
	metatable = database_tables.get(table_name)
	if not metatable:
		raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
	deleted = []
	for row_id in row_ids:
		row = db_manager.get_row_by_id(session, metatable.model, row_id)
		# silently skip rows that are missing or that the session may not delete
		if row and row.session_has_permissions('d', handler.rpc_session):
			session.delete(row)
			deleted.append(row_id)
	session.commit()
	return deleted
|
Delete multiple rows from a table with the specified values in the id
column. If a row id specified in *row_ids* does not exist, then it will
be skipped and no error will be thrown.
:param str table_name: The name of the database table to delete rows from.
:param list row_ids: The row ids to delete.
:return: The row ids that were deleted.
:rtype: list
|
rpc_database_delete_rows_by_id
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_database_get_row_by_id(handler, session, table_name, row_id):
	"""
	Retrieve a row from a given table with the specified value in the
	id column.

	:param str table_name: The name of the database table to retrieve a row from.
	:param row_id: The id value.
	:return: The specified row data.
	:rtype: dict
	"""
	metatable = handler.server.tables_api.get(table_name)
	if not metatable:
		raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
	row = db_manager.get_row_by_id(session, metatable.model, row_id)
	if not row:
		# a lookup miss on a private table raises a permission error instead of returning None
		if metatable.model.is_private:
			raise errors.KingPhisherPermissionError()
		return row
	row.assert_session_has_permissions('r', handler.rpc_session)
	return {column: getattr(row, column) for column in metatable.column_names}
|
Retrieve a row from a given table with the specified value in the
id column.
:param str table_name: The name of the database table to retrieve a row from.
:param row_id: The id value.
:return: The specified row data.
:rtype: dict
|
rpc_database_get_row_by_id
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_database_insert_row(handler, session, table_name, keys, values):
	"""
	Insert a new row into the specified table.

	:param str table_name: The name of the database table to insert a new row into.
	:param list keys: The column names of *values*.
	:param list values: The values to be inserted in the row.
	:return: The id of the new row that has been added.
	"""
	_log_rpc_call(handler, 'rpc_database_insert_row', table_name, keys, _REDACTED)
	# normalize scalar arguments into one-element tuples
	keys = keys if isinstance(keys, (list, tuple)) else (keys,)
	values = values if isinstance(values, (list, tuple)) else (values,)
	if len(keys) != len(values):
		raise errors.KingPhisherAPIError('the number of keys does not match the number of values')
	metatable = database_tables.get(table_name)
	if not metatable:
		raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
	for key in keys:
		if key not in metatable.column_names:
			raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(key, table_name))
	row = metatable.model()
	for key, value in zip(keys, values):
		setattr(row, key, value)
	row.assert_session_has_permissions('c', handler.rpc_session)
	session.add(row)
	session.commit()
	return row.id
|
Insert a new row into the specified table.
:param str table_name: The name of the database table to insert a new row into.
:param list keys: The column names of *values*.
:param list values: The values to be inserted in the row.
:return: The id of the new row that has been added.
|
rpc_database_insert_row
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_database_insert_row_multi(handler, session, table_name, keys, rows, deconflict_ids=False):
	"""
	Insert multiple new rows into the specified table. If *deconflict_ids* is
	true, new id values will be assigned as necessary to merge the data into
	the database. This function will fail if constraints for the table are
	not met.

	:param str table_name: The name of the database table to insert data into.
	:param list keys: The column names of the values in *rows*.
	:param list rows: A list of rows, each row is a list of values ordered and identified by *keys* to be inserted.
	:param bool deconflict_ids: Whether to replace conflicting id values with new ones instead of raising an error.
	:return: List of ids of the newly inserted rows.
	:rtype: list
	"""
	_log_rpc_call(handler, 'rpc_database_insert_row_multi', table_name, keys, _REDACTED, deconflict_ids=deconflict_ids)
	inserted_rows = collections.deque()
	if not isinstance(keys, list):
		keys = list(keys)
	if not isinstance(rows, list):
		rows = list(rows)
	metatable = database_tables.get(table_name)
	if not metatable:
		raise errors.KingPhisherAPIError('failed to get table object for: {0}'.format(table_name))
	for key in keys:
		if key not in metatable.column_names:
			# bug fix: report the single offending column name instead of the entire key list
			raise errors.KingPhisherAPIError('column {0} is invalid for table {1}'.format(key, table_name))
	for row in rows:
		if len(row) != len(keys):
			raise errors.KingPhisherAPIError('row is not the same length as the number of values defined')
		row = dict(zip(keys, row))
		if 'id' in row and db_manager.get_row_by_id(session, metatable.model, row['id']) is not None:
			if deconflict_ids:
				# clear the conflicting id so a fresh one is assigned on insert
				row['id'] = None
			else:
				raise errors.KingPhisherAPIError('row id conflicts with an existing value')
		table_row = metatable.model(**row)
		table_row.assert_session_has_permissions('c', handler.rpc_session)
		session.add(table_row)
		inserted_rows.append(table_row)
	session.commit()
	return [row.id for row in inserted_rows]
|
Insert multiple new rows into the specified table. If *deconflict_ids* is
true, new id values will be assigned as necessary to merge the data into
the database. This function will fail if constraints for the table are
not met.
:param str table_name: The name of the database table to insert data into.
:param list keys: The column names of the values in *rows*.
:param list rows: A list of rows, each row is a list of values ordered and identified by *keys* to be inserted.
:return: List of ids of the newly inserted rows.
:rtype: list
|
rpc_database_insert_row_multi
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_database_set_row_value(handler, session, table_name, row_id, keys, values):
	"""
	Set values for a row in the specified table with an id of *row_id*.

	:param str table_name: The name of the database table to set the values of the specified row.
	:param row_id: The id value of the row to modify.
	:param tuple keys: The column names of *values*.
	:param tuple values: The values to be updated in the row.
	"""
	# bug fix: log the correct RPC method name (was 'rpc_database_rpc_row_value')
	_log_rpc_call(handler, 'rpc_database_set_row_value', table_name, row_id, keys, _REDACTED)
	if not isinstance(keys, (list, tuple)):
		keys = (keys,)
	if not isinstance(values, (list, tuple)):
		values = (values,)
	if len(keys) != len(values):
		raise errors.KingPhisherAPIError('the number of keys does not match the number of values')
	metatable = database_tables.get(table_name)
	if not metatable:
		raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
	for key, value in zip(keys, values):
		if key not in metatable.column_names:
			raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(key, table_name))
	row = db_manager.get_row_by_id(session, metatable.model, row_id)
	if not row:
		raise errors.KingPhisherAPIError("failed to get row id: {0} from table: {1}".format(row_id, table_name))
	# the permission check is performed both before and after applying the new
	# values so the session must be authorized to update the original row and
	# the resulting row
	row.assert_session_has_permissions('u', handler.rpc_session)
	for key, value in zip(keys, values):
		setattr(row, key, value)
	row.assert_session_has_permissions('u', handler.rpc_session)
	session.commit()
|
Set values for a row in the specified table with an id of *row_id*.
:param str table_name: The name of the database table to set the values of the specified row.
:param tuple keys: The column names of *values*.
:param tuple values: The values to be updated in the row.
|
rpc_database_set_row_value
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_events_is_subscribed(handler, event_id, event_type):
	"""
	Check if the client is currently subscribed to the specified server event.

	:param str event_id: The identifier of the event to check.
	:param str event_type: A sub-type for the corresponding event.
	:return: Whether or not the client is subscribed to the event.
	:rtype: bool
	"""
	if not isinstance(event_id, str):
		raise errors.KingPhisherAPIError('a valid event id must be specified')
	if not isinstance(event_type, str):
		raise errors.KingPhisherAPIError('a valid event type must be specified')
	# the client must have an open event socket to hold subscriptions
	subscription_socket = handler.rpc_session.event_socket
	if subscription_socket is None:
		raise errors.KingPhisherAPIError('the event socket is not open for this session')
	return subscription_socket.is_subscribed(event_id, event_type)
|
Check if the client is currently subscribed to the specified server event.
:param str event_id: The identifier of the event to subscribe to.
:param str event_type: A sub-type for the corresponding event.
:return: Whether or not the client is subscribed to the event.
:rtype: bool
|
rpc_events_is_subscribed
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_events_subscribe(handler, event_id, event_types=None, attributes=None):
	"""
	Subscribe the client to the specified event published by the server.
	When the event is published the specified *attributes* of it and it's
	corresponding id and type information will be sent to the client.

	:param str event_id: The identifier of the event to subscribe to.
	:param list event_types: A list of sub-types for the corresponding event.
	:param list attributes: A list of attributes of the event object to be sent to the client.
	"""
	if not isinstance(event_id, str):
		raise errors.KingPhisherAPIError('a valid event id must be specified')
	event_socket = handler.rpc_session.event_socket
	if event_socket is None:
		raise errors.KingPhisherAPIError('the event socket is not open for this session')
	if not event_id.startswith('db-'):
		# db-<table name> events are the only ones that are valid right now
		raise errors.KingPhisherAPIError('invalid event_id: ' + event_id)
	table_name = event_id[3:]
	table_name = table_name.replace('-', '_')
	metatable = database_tables.get(table_name)
	if metatable is None:
		raise errors.KingPhisherAPIError("invalid table object: {0}".format(table_name))
	# bug fix: event_types and attributes default to None, so guard the
	# validation loops instead of iterating None and raising a TypeError
	for event_type in (event_types or ()):
		if event_type not in ('deleted', 'inserted', 'updated'):
			raise errors.KingPhisherAPIError("event type {0} is invalid for db-* events".format(event_type))
	for column in (attributes or ()):
		if column not in metatable.column_names:
			raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(column, table_name))
	return event_socket.subscribe(event_id, event_types=event_types, attributes=attributes)
|
Subscribe the client to the specified event published by the server.
When the event is published the specified *attributes* of it and it's
corresponding id and type information will be sent to the client.
:param str event_id: The identifier of the event to subscribe to.
:param list event_types: A list of sub-types for the corresponding event.
:param list attributes: A list of attributes of the event object to be sent to the client.
|
rpc_events_subscribe
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_events_unsubscribe(handler, event_id, event_types=None, attributes=None):
	"""
	Unsubscribe from an event published by the server that the client
	previously subscribed to.

	:param str event_id: The identifier of the event to unsubscribe from.
	:param list event_types: A list of sub-types for the corresponding event.
	:param list attributes: A list of attributes of the event object to be sent to the client.
	"""
	if not isinstance(event_id, str):
		raise errors.KingPhisherAPIError('a valid event id must be specified')
	# the client must have an open event socket to hold subscriptions
	subscription_socket = handler.rpc_session.event_socket
	if subscription_socket is None:
		raise errors.KingPhisherAPIError('the event socket is not open for this session')
	return subscription_socket.unsubscribe(event_id, event_types=event_types, attributes=attributes)
|
Unsubscribe from an event published by the server that the client
previously subscribed to.
:param str event_id: The identifier of the event to subscribe to.
:param list event_types: A list of sub-types for the corresponding event.
:param list attributes: A list of attributes of the event object to be sent to the client.
|
rpc_events_unsubscribe
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_geoip_lookup(handler, ip, lang=None):
	"""
	Look up an IP address in the servers GeoIP database. If the IP address
	can not be found in the database, None will be returned.

	:param str ip: The IP address to look up.
	:param str lang: The language to prefer for regional names.
	:return: The geographic information for the specified IP address.
	:rtype: dict
	"""
	try:
		return geoip.lookup(ip, lang=lang)
	except geoip.AddressNotFoundError:
		# unknown addresses are not an error, they simply have no information
		return None
|
Look up an IP address in the servers GeoIP database. If the IP address
can not be found in the database, None will be returned.
:param str ip: The IP address to look up.
:param str lang: The language to prefer for regional names.
:return: The geographic information for the specified IP address.
:rtype: dict
|
rpc_geoip_lookup
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_geoip_lookup_multi(handler, ips, lang=None):
	"""
	Look up multiple IP addresses in the servers GeoIP database. Each IP
	address that can not be found in the database will have its result set
	to None.

	:param list ips: The list of IP addresses to look up.
	:param str lang: The language to prefer for regional names.
	:return: A dictionary containing the results keyed by the specified IP
		addresses.
	:rtype: dict
	"""
	def _lookup_one(ip):
		# a missing address simply maps to None rather than raising
		try:
			return geoip.lookup(ip, lang=lang)
		except geoip.AddressNotFoundError:
			return None
	return {ip: _lookup_one(ip) for ip in ips}
|
Look up multiple IP addresses in the servers GeoIP database. Each IP
address that can not be found in the database will have its result set
to None.
:param list ips: The list of IP addresses to look up.
:param str lang: The language to prefer for regional names.
:return: A dictionary containing the results keyed by the specified IP
addresses.
:rtype: dict
|
rpc_geoip_lookup_multi
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_hostnames_add(handler, hostname):
	"""
	Add a hostname to the list of values that are configured for use with this
	server. At this time, these changes (like other config changes) are not
	persisted in the server so they will be lost when the server reboots.

	.. versionadded:: 1.13.0

	:param str hostname: The hostname to add.
	"""
	configured = handler.config.get_if_exists('server.hostnames', [])
	if hostname in configured:
		return
	configured.append(hostname)
	handler.config.set('server.hostnames', configured)
	# don't return a value indicating whether it was added or not because it could have been a vhost directory
|
Add a hostname to the list of values that are configured for use with this
server. At this time, these changes (like other config changes) are not
persisted in the server so they will be lost when the server reboots.
.. versionadded:: 1.13.0
:param str hostname: The hostname to add.
|
rpc_hostnames_add
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_plugins_list(handler):
	"""
	Return information regarding enabled plugins in the server.

	:return: A dictionary representing enabled plugins and their meta-data.
	:rtype: dict
	"""
	details = {}
	for _, plugin in handler.server.plugin_manager:
		# expose only this fixed set of meta-data attributes for each plugin
		details[plugin.name] = {
			attribute: getattr(plugin, attribute) for attribute in (
				'authors', 'classifiers', 'description', 'homepage',
				'name', 'reference_urls', 'title', 'version'
			)
		}
	return details
|
Return information regarding enabled plugins in the server.
:return: A dictionary representing enabled plugins and their meta-data.
:rtype: dict
|
rpc_plugins_list
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_graphql(handler, session, query, query_vars=None):
	"""
	Execute a GraphQL query and return the results. If the query fails to
	execute the errors returned are populated in the **errors** key of the
	results dictionary. If the query executes successfully the returned data
	is available in the **data** key of the results dictionary.

	:param str query: The GraphQL query to execute.
	:param dict query_vars: Any variables needed by the *query*.
	:return: The results of the query as a dictionary.
	:rtype: dict
	"""
	context = {
		'plugin_manager': handler.server.plugin_manager,
		'rpc_session': handler.rpc_session,
		'server_config': handler.config,
		'session': session
	}
	result = graphql_schema.execute(query, context_value=context, variable_values=(query_vars or {}))
	if not result.errors:
		return {'data': result.data, 'errors': None}
	messages = []
	for error in result.errors:
		# prefer the human readable message, then the first exception argument
		if hasattr(error, 'message'):
			messages.append(error.message)
		elif getattr(error, 'args', None):
			messages.append(str(error.args[0]))
		else:
			messages.append(repr(error))
	return {'data': result.data, 'errors': messages}
|
Execute a GraphQL query and return the results. If the query fails to
execute the errors returned are populated in the **errors** key of the
results dictionary. If the query executes successfully the returned data
is available in the **data** key of the results dictionary.
:param str query: The GraphQL query to execute.
:param dict query_vars: Any variables needed by the *query*.
:return: The results of the query as a dictionary.
:rtype: dict
|
rpc_graphql
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_ssl_letsencrypt_certbot_version(handler):
	"""
	Find the certbot binary and retrieve it's version information. If the
	certbot binary could not be found, ``None`` is returned.

	.. versionadded:: 1.14.0

	:return: The version of certbot.
	:rtype: str
	"""
	bin_path = letsencrypt.get_certbot_bin_path(handler.config)
	if bin_path is None:
		return None
	version_output = startup.run_process((bin_path, '--version'))
	match = re.match(r'^certbot (?P<version>\d+\.\d+\.\d+)$', version_output.stdout)
	# an unrecognized --version output format also results in None
	return match.group('version') if match is not None else None
|
Find the certbot binary and retrieve it's version information. If the
certbot binary could not be found, ``None`` is returned.
.. versionadded:: 1.14.0
:return: The version of certbot.
:rtype: str
|
rpc_ssl_letsencrypt_certbot_version
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_ssl_letsencrypt_issue(handler, hostname, load=True):
    """
    Issue a certificate with Let's Encrypt. This operation can fail for a wide
    variety of reasons, check the ``message`` key of the returned dictionary for
    a string description of what occurred. Successful operation requires that
    the certbot utility be installed, and the server's Let's Encrypt data path
    is configured.

    .. versionadded:: 1.14.0

    :param str hostname: The hostname of the certificate to issue.
    :param bool load: Whether or not to load the certificate once it has been issued.
    :return: A dictionary containing the results of the operation.
    :rtype: dict
    """
    config = handler.config
    result = {'success': False}
    letsencrypt_config = config.get_if_exists('server.letsencrypt', {})
    # step 1: ensure that a letsencrypt configuration is available
    data_path = letsencrypt_config.get('data_path')
    if not data_path:
        result['message'] = 'Let\'s Encrypt is not configured for use.'
        return result
    if not os.path.isdir(data_path):
        rpc_logger.info('creating the letsencrypt data directory')
        os.mkdir(data_path)
    # step 2: ensure that SSL is enabled already
    if not _ssl_is_enabled(handler):
        result['message'] = 'Can not issue certificates when SSL is not in use.'
        return result
    if not advancedhttpserver.g_ssl_has_server_sni:
        result['message'] = 'Can not issue certificates when SNI is not available.'
        return result
    # step 3: ensure that the certbot utility is available
    bin_path = letsencrypt_config.get('certbot_path') or startup.which('certbot')
    if not bin_path:
        result['message'] = 'Can not issue certificates without the certbot utility.'
        return result
    # step 4: ensure the hostname looks legit (TM) and hasn't already been issued
    if re.match(r'^[a-z0-9][a-z0-9-]*(\.[a-z0-9-]+)+$', hostname, flags=re.IGNORECASE) is None:
        result['message'] = 'Can not issue certificates for invalid hostnames.'
        return result
    if letsencrypt.get_sni_hostname_config(hostname, config):
        result['message'] = 'The specified hostname already has the necessary files.'
        return result
    # step 5: determine the web_root path for this hostname and create it if necessary
    web_root = config.get('server.web_root')
    if config.get('server.vhost_directories'):
        web_root = os.path.join(web_root, hostname)
        if not os.path.isdir(web_root):
            rpc_logger.info('vhost directory does not exist for hostname: ' + hostname)
            os.mkdir(web_root)
    # step 6: issue the certificate with certbot, this starts the subprocess and may take a few seconds
    with _lend_semaphore(handler):
        status = letsencrypt.certbot_issue(web_root, hostname, bin_path=bin_path, unified_directory=data_path)
    if status != os.EX_OK:
        result['message'] = 'Failed to issue the certificate.'
        return result
    # step 7: ensure the necessary files were created
    sni_config = letsencrypt.get_sni_hostname_config(hostname, config)
    if sni_config is None:
        result['message'] = 'The certificate files were not generated.'
        return result
    # step 8: store the data in the database so it can be loaded next time the server starts
    if load:
        handler.server.add_sni_cert(hostname, ssl_certfile=sni_config.certfile, ssl_keyfile=sni_config.keyfile)
    else:
        # fix: pass the key file as the third argument (the cert file was
        # previously passed twice, losing the key file path)
        letsencrypt.set_sni_hostname(hostname, sni_config.certfile, sni_config.keyfile, enabled=False)
    result['success'] = True
    result['message'] = 'The operation completed successfully.'
    return result
|
Issue a certificate with Let's Encrypt. This operation can fail for a wide
variety of reasons, check the ``message`` key of the returned dictionary for
a string description of what occurred. Successful operation requires that
the certbot utility be installed, and the server's Let's Encrypt data path
is configured.
.. versionadded:: 1.14.0
:param str hostname: The hostname of the certificate to issue.
:param bool load: Whether or not to load the certificate once it has been issued.
:return: A dictionary containing the results of the operation.
:rtype: dict
|
rpc_ssl_letsencrypt_issue
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_ssl_sni_hostnames_get(handler):
    """
    Get the hostnames that have available Server Name Indicator (SNI)
    configurations for use with SSL.

    .. versionadded:: 1.14.0

    :return: A dictionary keyed by hostnames with values of dictionaries containing additional metadata.
    :rtype: dict
    """
    if not advancedhttpserver.g_ssl_has_server_sni:
        rpc_logger.warning('can not enumerate SNI hostnames when SNI is not available')
        return
    return dict(
        (hostname, {'enabled': sni_config.enabled})
        for hostname, sni_config in letsencrypt.get_sni_hostnames(handler.config).items()
    )
|
Get the hostnames that have available Server Name Indicator (SNI)
configurations for use with SSL.
.. versionadded:: 1.14.0
:return: A dictionary keyed by hostnames with values of dictionaries containing additional metadata.
:rtype: dict
|
rpc_ssl_sni_hostnames_get
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_ssl_sni_hostnames_load(handler, hostname):
    """
    Load the SNI configuration for the specified *hostname*, effectively
    enabling it. If SSL is not enabled, SNI is not available, or the necessary
    data files are not available, this function returns ``False``.

    .. versionadded:: 1.14.0

    :param str hostname: The hostname to configure SSL for.
    :return: Returns ``True`` only if the SNI configuration for *hostname* was
        either able to be loaded or was already loaded.
    :rtype: bool
    """
    if not _ssl_is_enabled(handler):
        rpc_logger.warning('can not add an SNI hostname when SSL is not in use')
        return False
    if not advancedhttpserver.g_ssl_has_server_sni:
        rpc_logger.warning('can not add an SNI hostname when SNI is not available')
        return False
    # an already-loaded configuration is treated as success
    if any(sni_cert.hostname == hostname for sni_cert in handler.server.get_sni_certs()):
        rpc_logger.info('ignoring directive to add an SNI hostname that already exists')
        return True
    sni_config = letsencrypt.get_sni_hostname_config(hostname, handler.config)
    if not sni_config:
        rpc_logger.warning('can not add an SNI hostname without the necessary files')
        return False
    handler.server.add_sni_cert(hostname, sni_config.certfile, sni_config.keyfile)
    return True
|
Load the SNI configuration for the specified *hostname*, effectively
enabling it. If SSL is not enabled, SNI is not available, or the necessary
data files are not available, this function returns ``False``.
.. versionadded:: 1.14.0
:param str hostname: The hostname to configure SSL for.
:return: Returns ``True`` only if the SNI configuration for *hostname* was
either able to be loaded or was already loaded.
:rtype: bool
|
rpc_ssl_sni_hostnames_load
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_ssl_sni_hostnames_unload(handler, hostname):
    """
    Unload the SNI configuration for the specified *hostname*, effectively
    disabling it. If SNI is not available, or the specified configuration was
    not already loaded, this function returns ``False``.

    .. versionadded:: 1.14.0

    :param str hostname: The hostname to configure SSL for.
    :return: Returns ``True`` only if the SNI configuration for *hostname* was unloaded.
    :rtype: bool
    """
    if not advancedhttpserver.g_ssl_has_server_sni:
        rpc_logger.warning('can not remove an SNI hostname when SNI is not available')
        return False
    sni_cert = next((cert for cert in handler.server.get_sni_certs() if cert.hostname == hostname), None)
    if sni_cert is None:
        rpc_logger.warning('can not remove an SNI hostname that does not exist')
        return False
    handler.server.remove_sni_cert(sni_cert.hostname)
    return True
|
Unload the SNI configuration for the specified *hostname*, effectively
disabling it. If SNI is not available, or the specified configuration was
not already loaded, this function returns ``False``.
.. versionadded:: 1.14.0
:param str hostname: The hostname to configure SSL for.
:return: Returns ``True`` only if the SNI configuration for *hostname* was unloaded.
:rtype: bool
|
rpc_ssl_sni_hostnames_unload
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_ssl_status(handler):
    """
    Get information regarding the status of SSL on the server. This method
    returns a dictionary with keys describing whether or not SSL is enabled on
    one or more interfaces, and whether or not the server possesses SNI
    support. For details regarding which addresses are using SSL, see the
    :py:func:`~rpc_config_get` method.

    .. versionadded:: 1.14.0

    :return: A dictionary with SSL status information.
    :rtype: dict
    """
    status = {'enabled': _ssl_is_enabled(handler)}
    status['has-letsencrypt'] = letsencrypt.get_certbot_bin_path(handler.config) is not None
    status['has-sni'] = advancedhttpserver.g_ssl_has_server_sni
    return status
|
Get information regarding the status of SSL on the server. This method
returns a dictionary with keys describing whether or not SSL is enabled on
one or more interfaces, and whether or not the server possesses SNI
support. For details regarding which addresses are using SSL, see the
:py:func:`~rpc_config_get` method.
.. versionadded:: 1.14.0
:return: A dictionary with SSL status information.
:rtype: dict
|
rpc_ssl_status
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def embed_youtube_video(video_id, autoplay=True, enable_js=False, start=0, end=None):
    """
    A Jinja function to embed a video into a web page using YouTube's
    `iframe API <https://developers.google.com/youtube/iframe_api_reference>`_.
    In order to enable a training button after the video has ended the
    youtube.js file needs to be included and *enable_js* must be set to True.
    If *start* or *end* are specified as strings, they must be in a format
    suitable to be parsed by :py:func:`~smoke_zephyr.utilities.parse_timespan`.

    :param str video_id: The id of the YouTube video to embed.
    :param bool autoplay: Start playing the video as soon as the page loads.
    :param bool enable_js: Enable the Javascript API.
    :param start: The time offset at which the video should begin playing.
    :type start: int, str
    :param end: The time offset at which the video should stop playing.
    :type end: int, str
    """
    parameters = [
        "autoplay={0}".format(int(autoplay)),
        'modestbranding=1',
        'rel=0',
        'showinfo=0'
    ]
    if enable_js:
        parameters.append('enablejsapi=1')
    if start:
        if isinstance(start, str):
            start = smoke_zephyr.utilities.parse_timespan(start)
        parameters.append("start={0}".format(start))
    if end:
        if isinstance(end, str):
            end = smoke_zephyr.utilities.parse_timespan(end)
        parameters.append("end={0}".format(end))
    yt_url = "https://www.youtube.com/embed/{0}?{1}".format(video_id, '&'.join(parameters))
    iframe_tag = "<iframe id=\"ytplayer\" type=\"text/html\" width=\"720\" height=\"405\" src=\"{0}\" frameborder=\"0\" allowfullscreen></iframe>".format(yt_url)
    return markupsafe.Markup(iframe_tag)
|
A Jinja function to embed a video into a web page using YouTube's
`iframe API <https://developers.google.com/youtube/iframe_api_reference>`_.
In order to enable a training button after the video has ended the
youtube.js file needs to be included and *enable_js* must be set to True. If
*start* or *end* are specified as strings, they must be in a format suitable
to be parsed by :py:func:`~smoke_zephyr.utilities.parse_timespan`.
:param str video_id: The id of the YouTube video to embed.
:param bool autoplay: Start playing the video as soon as the page loads.
:param bool enable_js: Enable the Javascript API.
:param start: The time offset at which the video should begin playing.
:type start: int, str
:param end: The time offset at which the video should stop playing.
:type end: int, str
|
embed_youtube_video
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/template_extras.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/template_extras.py
|
BSD-3-Clause
|
def make_redirect_page(url, title='Automatic Redirect'):
    """
    A Jinja function which will create an HTML page that will automatically
    redirect the viewer to a different url.

    :param str url: The URL to redirect the user to.
    :param str title: The title to use in the resulting HTML page.
    """
    # escape both values so the generated markup stays well formed
    title = html.escape(title, quote=True)
    url = html.escape(url, quote=True)
    lines = (
        '<!DOCTYPE html>',
        '<html lang="en-US">',
        ' <head>',
        " <title>{0}</title>".format(title),
        " <meta http-equiv=\"refresh\" content=\"0;url={0}\" />".format(url),
        ' </head>',
        ' <body>',
        " <p>The content you are looking for has been moved. If you are not redirected automatically then <a href=\"{0}\">click here</a> to proceed.</p>".format(url),
        ' </body>',
        '</html>'
    )
    return markupsafe.Markup('\n'.join(lines))
|
A Jinja function which will create an HTML page that will automatically
redirect the viewer to a different url.
:param str url: The URL to redirect the user to.
:param str title: The title to use in the resulting HTML page.
|
make_redirect_page
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/template_extras.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/template_extras.py
|
BSD-3-Clause
|
def __init__(self, handler, manager):
    """
    :param handler: The request handler that should be used by this socket.
    :type handler: :py:class:`advancedhttpserver.RequestHandler`
    :param manager: The manager that this event socket should register with.
    :type manager: :py:class:`.WebSocketsManager`
    """
    # disable the socket timeout so the long-lived connection is not dropped
    handler.connection.settimeout(None)
    # per-event-id subscriptions requested by this client
    self._subscriptions = {}
    self.rpc_session = handler.rpc_session
    # each RPC session may have at most one event socket; replace any old one
    if self.rpc_session.event_socket is not None:
        self.rpc_session.event_socket.close()
    self.rpc_session.event_socket = self
    manager.add(self)
    # hold the manager weakly so this socket does not keep it alive
    self._manager_ref = weakref.ref(manager)
    super(EventSocket, self).__init__(handler)
|
:param handler: The request handler that should be used by this socket.
:type handler: :py:class:`advancedhttpserver.RequestHandler`
:param manager: The manager that this event socket should register with.
:type manager: :py:class:`.WebSocketsManager`
|
__init__
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def is_subscribed(self, event_id, event_type):
    """
    Check if the client is currently subscribed to the specified server event.

    :param str event_id: The identifier of the event to subscribe to.
    :param str event_type: A sub-type for the corresponding event.
    :return: Whether or not the client is subscribed to the event.
    :rtype: bool
    """
    subscription = self._subscriptions.get(event_id)
    if subscription is None:
        return False
    return event_type in subscription.event_types
|
Check if the client is currently subscribed to the specified server event.
:param str event_id: The identifier of the event to subscribe to.
:param str event_type: A sub-type for the corresponding event.
:return: Whether or not the client is subscribed to the event.
:rtype: bool
|
is_subscribed
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def publish(self, event):
    """
    Publish the event by sending the relevant information to the client.
    If the client has not requested to receive the information through a
    subscription, then no data will be sent.

    :param event: The object representing the data to be published.
    :type event: :py:class:`.Event`
    """
    subscription = self._subscriptions.get(event.event_id)
    if subscription is None or event.event_type not in subscription.event_types:
        return
    summaries = []
    for source in event.sources:
        # skip database objects that this session may not read
        if isinstance(source, db_models.Base) and not source.session_has_permissions('r', self.rpc_session):
            continue
        summaries.append({attribute: getattr(source, attribute, None) for attribute in subscription.attributes})
    if not summaries:
        return
    message = {
        'event': {
            'id': event.event_id,
            'type': event.event_type,
            'objects': summaries
        }
    }
    self.logger.debug("publishing event {0} (type: {1}) with {2} objects".format(event.event_id, event.event_type, len(summaries)))
    self.send_message_text(serializers.JSON.dumps(message, pretty=False))
|
Publish the event by sending the relevant information to the client.
If the client has not requested to receive the information through a
subscription, then no data will be sent.
:param event: The object representing the data to be published.
:type event: :py:class:`.Event`
|
publish
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def subscribe(self, event_id, event_types=None, attributes=None):
    """
    Subscribe the client to the specified event published by the server.
    When the event is published the specified *attributes* of it and its
    corresponding id and type information will be sent to the client.

    :param str event_id: The identifier of the event to subscribe to.
    :param list event_types: A list of sub-types for the corresponding event.
    :param list attributes: A list of attributes of the event object to be sent to the client.
    """
    utilities.assert_arg_type(event_id, str, arg_pos=1)
    utilities.assert_arg_type(event_types, (type(None), list, set, tuple), arg_pos=2)
    # fix: validate *attributes* here (the original re-validated event_types)
    utilities.assert_arg_type(attributes, (type(None), list, set, tuple), arg_pos=3)
    subscription = self._subscriptions.get(event_id)
    if subscription is None:
        subscription = EventSubscription(attributes=set(), event_types=set())
    if event_types is not None:
        subscription.event_types.update(event_types)
    if attributes is not None:
        subscription.attributes.update(attributes)
    self._subscriptions[event_id] = subscription
|
Subscribe the client to the specified event published by the server.
When the event is published the specified *attributes* of it and its
corresponding id and type information will be sent to the client.
:param str event_id: The identifier of the event to subscribe to.
:param list event_types: A list of sub-types for the corresponding event.
:param list attributes: A list of attributes of the event object to be sent to the client.
|
subscribe
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def unsubscribe(self, event_id, event_types=None, attributes=None):
    """
    Unsubscribe from an event published by the server that the client
    previously subscribed to.

    :param str event_id: The identifier of the event to subscribe to.
    :param list event_types: A list of sub-types for the corresponding event.
    :param list attributes: A list of attributes of the event object to be sent to the client.
    """
    utilities.assert_arg_type(event_id, str, arg_pos=1)
    utilities.assert_arg_type(event_types, (type(None), list, set, tuple), arg_pos=2)
    # fix: validate *attributes* here (the original re-validated event_types)
    utilities.assert_arg_type(attributes, (type(None), list, set, tuple), arg_pos=3)
    subscription = self._subscriptions.get(event_id)
    if subscription is None:
        return
    if event_types is not None:
        for event_type in event_types:
            subscription.event_types.discard(event_type)
    if attributes is not None:
        for attribute in attributes:
            subscription.attributes.discard(attribute)
    # drop the subscription entirely once nothing remains of it
    if not subscription.event_types and not subscription.attributes:
        del self._subscriptions[event_id]
|
Unsubscribe from an event published by the server that the client
previously subscribed to.
:param str event_id: The identifier of the event to subscribe to.
:param list event_types: A list of sub-types for the corresponding event.
:param list attributes: A list of attributes of the event object to be sent to the client.
|
unsubscribe
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def __init__(self, config, job_manager):
    """
    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :param job_manager: A job manager instance that can be used to schedule tasks.
    :type job_manager: :py:class:`smoke_zephyr.job.JobManager`
    """
    self.config = config
    # the currently connected event sockets managed by this instance
    self.web_sockets = []
    self.job_manager = job_manager
    # periodically ping connected sockets so idle connections stay alive
    self._ping_job = job_manager.job_add(self.ping_all, seconds=30)
    # database events are queued and dispatched from a dedicated worker thread
    self._work_queue = queue.Queue()
    self._worker_thread = threading.Thread(target=self._worker_routine)
    self._worker_thread.start()
    # forward database session events to the connected clients
    signals.db_session_deleted.connect(self._sig_db_deleted)
    signals.db_session_inserted.connect(self._sig_db_inserted)
    signals.db_session_updated.connect(self._sig_db_updated)
|
:param config: Configuration to retrieve settings from.
:type config: :py:class:`smoke_zephyr.configuration.Configuration`
:param job_manager: A job manager instance that can be used to schedule tasks.
:type job_manager: :py:class:`smoke_zephyr.job.JobManager`
|
__init__
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def dispatch(self, handler):
    """
    A method that is suitable for use as a
    :py:attr:`~advancedhttpserver.RequestHandler.web_socket_handler`.

    :param handler: The current request handler instance.
    :type handler: :py:class:`~king_phisher.server.server.KingPhisherRequestHandler`
    """
    # only loopback clients may open an event socket
    if not ipaddress.ip_address(handler.client_address[0]).is_loopback:
        return
    prefix = '/'
    if self.config.get('server.vhost_directories'):
        prefix += handler.vhost + '/'
    request_path = handler.path
    if request_path.startswith(prefix):
        request_path = request_path[len(prefix):]
    if request_path != '_/ws/events/json':
        handler.respond_not_found()
        return
    EventSocket(handler, self)
|
A method that is suitable for use as a
:py:attr:`~advancedhttpserver.RequestHandler.web_socket_handler`.
:param handler: The current request handler instance.
:type handler: :py:class:`~king_phisher.server.server.KingPhisherRequestHandler`
|
dispatch
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def ping_all(self):
    """
    Ping all of the connected web sockets to ensure they stay alive. This
    method is automatically executed periodically through a job added when
    the manager is initialized.
    """
    stale_sockets = []
    for web_socket in self.web_sockets:
        if not web_socket.connected:
            stale_sockets.append(web_socket)
            continue
        try:
            web_socket.ping()
        except Exception:
            self.logger.error('error occurred while pinging the web socket, closing it', exc_info=True)
            web_socket.close()
            stale_sockets.append(web_socket)
    # prune sockets that were disconnected or failed to respond
    for web_socket in stale_sockets:
        self.logger.debug('closing a disconnected web socket')
        self.web_sockets.remove(web_socket)
|
Ping all of the connected web sockets to ensure they stay alive. This
method is automatically executed periodically through a job added when
the manager is initialized.
|
ping_all
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def stop(self):
    """
    Shutdown the manager and clean up the resources it has allocated.
    """
    self.job_manager.job_delete(self._ping_job)
    for web_socket in self.web_sockets:
        if not web_socket.connected:
            continue
        web_socket.close()
    self.web_sockets = []
    # a None entry is the sentinel that stops the worker thread
    self._work_queue.put(None)
    self._worker_thread.join()
|
Shutdown the manager and clean up the resources it has allocated.
|
stop
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def get_hostnames(config):
    """
    List the hostnames that are configured for this server instance. This list
    is generated by first checking the server's configuration for the
    ``hostnames`` option. Then if ``vhost_directories`` is enabled, the webroot
    is checked for additional values.

    .. note::
        This function makes no attempt to validate these values, they are
        strictly what have been configured for use.

    .. versionadded:: 1.13.0

    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :return: A tuple of the enumerated hostnames.
    :rtype: tuple
    """
    # fix: copy the configured value so the extend calls below can not mutate
    # a list object held by the configuration instance
    hostnames = list(config.get_if_exists('server.hostnames', []))
    hostnames.extend(get_vhost_directories(config) or ())
    hostnames.extend(letsencrypt.get_sni_hostnames(config).keys())
    hostnames = smoke_zephyr.utilities.unique(hostnames)
    return tuple(sorted(hostnames))
|
List the hostnames that are configured for this server instance. This list
is generated by first checking the server's configuration for the
``hostnames`` option. Then if ``vhost_directories`` is enabled, the webroot
is checked for additional values.
.. note::
This function makes no attempt to validate these values, they are
strictly what have been configured for use.
.. versionadded:: 1.13.0
:param config: Configuration to retrieve settings from.
:type config: :py:class:`smoke_zephyr.configuration.Configuration`
:return: A tuple of the enumerated hostnames.
:rtype: tuple
|
get_hostnames
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_tools.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_tools.py
|
BSD-3-Clause
|
def get_vhost_directories(config):
    """
    List the hostnames that are configured through the Virtual Host directories.
    If the server option ``vhost_directories`` is disabled, this function
    returns ``None``.

    .. versionadded:: 1.13.0

    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :return: A tuple of the enumerated virtual hostname directories.
    :rtype: tuple
    """
    if not config.get('server.vhost_directories'):
        return None
    web_root = config.get('server.web_root')
    # each directory directly under the web root is treated as a hostname
    hostnames = sorted(
        entry for entry in os.listdir(web_root)
        if os.path.isdir(os.path.join(web_root, entry))
    )
    return tuple(hostnames)
|
List the hostnames that are configured through the Virtual Host directories.
If the server option ``vhost_directories`` is disabled, this function
returns ``None``.
.. versionadded:: 1.13.0
:param config: Configuration to retrieve settings from.
:type config: :py:class:`smoke_zephyr.configuration.Configuration`
:return: A tuple of the enumerated virtual hostname directories.
:rtype: tuple
|
get_vhost_directories
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_tools.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_tools.py
|
BSD-3-Clause
|
def _ex_config_logging(arguments, config, console_handler):
    """
    Configure file and console logging from the server configuration. If a
    setting is configured improperly, this will terminate execution via
    :py:func:`sys.exit`.

    :param arguments: The parsed command line arguments (supplies *loglvl*).
    :param config: The server configuration to read logging settings from.
    :param console_handler: The console log handler to adjust.
    :return: The path to a log file if one is in use.
    :rtype: str
    """
    # use the most verbose of the command line and configured log levels
    default_log_level = min(
        getattr(logging, (arguments.loglvl or constants.DEFAULT_LOG_LEVEL)),
        getattr(logging, config.get_if_exists('logging.level', 'critical').upper())
    )
    log_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'FATAL')
    file_path = None
    if config.has_option('logging.file'):
        options = config.get('logging.file')
        for _ in range(1):  # breakable block, not a real loop
            default_format = '%(asctime)s %(name)-50s %(levelname)-8s %(message)s'
            if isinstance(options, dict):  # new style
                if not options.get('enabled', True):
                    break
                # 'path' and 'level' are mandatory in the new dict-based style
                if 'path' not in options:
                    color.print_error('logging.file is missing required key \'path\'')
                    sys.exit(os.EX_CONFIG)
                if 'level' not in options:
                    color.print_error('logging.file is missing required key \'level\'')
                    sys.exit(os.EX_CONFIG)
                file_path = options['path']
                formatter = logging.Formatter(options.get('format', default_format))
                if not options['level'].upper() in log_levels:
                    color.print_error('logging.file.level is invalid, must be one of: ' + ', '.join(log_levels))
                    sys.exit(os.EX_CONFIG)
                log_level = getattr(logging, options['level'].upper())
                root = options.get('root', '')
            elif isinstance(options, str):  # old style
                # the old style is just a file path with default settings
                file_path = options
                formatter = logging.Formatter(default_format)
                log_level = default_log_level
                root = ''
            else:
                break
            file_handler = logging.FileHandler(file_path)
            file_handler.setFormatter(formatter)
            logging.getLogger(root).addHandler(file_handler)
            file_handler.setLevel(log_level)
    if config.has_option('logging.console'):
        options = config.get('logging.console')
        for _ in range(1):  # breakable block, not a real loop
            if isinstance(options, dict):  # new style
                if not options.get('enabled', True):
                    break
                if 'format' in options:
                    console_handler.setFormatter(color.ColoredLogFormatter(options['format']))
                # a command line specified level always takes precedence
                if arguments.loglvl is None and 'level' in options:
                    log_level = str(options.get('level', '')).upper()
                    if log_level not in log_levels:
                        color.print_error('logging.console.level is invalid, must be one of: ' + ', '.join(log_levels))
                        sys.exit(os.EX_CONFIG)
                    console_handler.setLevel(getattr(logging, log_level))
            elif isinstance(options, str):  # old style
                console_handler.setLevel(default_log_level)
    return file_path
|
If a setting is configured improperly, this will terminate execution via
:py:func:`sys.exit`.
:return: The path to a log file if one is in use.
:rtype: str
|
_ex_config_logging
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/__main__.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/__main__.py
|
BSD-3-Clause
|
def clear_database():
    """
    Delete all data from all tables in the connected database. The database
    schema will remain unaffected.

    .. warning::
        This action can not be reversed and there is no confirmation before it
        takes place.
    """
    db_engine = Session.connection().engine
    with contextlib.closing(db_engine.connect()) as db_connection:
        active_transaction = db_connection.begin()
        # delete children before parents to satisfy foreign key constraints
        for table in reversed(models.metadata.sorted_tables):
            db_connection.execute(table.delete())
        active_transaction.commit()
|
Delete all data from all tables in the connected database. The database
schema will remain unaffected.
.. warning::
This action can not be reversed and there is no confirmation before it
takes place.
|
clear_database
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def export_database(target_file):
    """
    Export the contents of the database using SQLAlchemy's serialization. This
    creates an archive file containing all of the tables and their data. The
    resulting export can be imported into another supported database so long
    as the :py:data:`~king_phisher.server.database.models.SCHEMA_VERSION` is the
    same.

    :param str target_file: The file to write the export to.
    """
    session = Session()
    kpdb = archive.ArchiveFile(target_file, 'w')
    # record the schema version so imports can verify compatibility
    kpdb.metadata['database-schema'] = models.SCHEMA_VERSION
    for table in models.metadata.sorted_tables:
        model = models.database_table[table.name].model
        table_rows = session.query(model).all()
        kpdb.add_data('tables/' + table.name, sqlalchemy.ext.serializer.dumps(table_rows))
    kpdb.close()
|
Export the contents of the database using SQLAlchemy's serialization. This
creates an archive file containing all of the tables and their data. The
resulting export can be imported into another supported database so long
as the :py:data:`~king_phisher.server.database.models.SCHEMA_VERSION` is the
same.
:param str target_file: The file to write the export to.
|
export_database
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def import_database(target_file, clear=True):
	"""
	Import the contents of a serialized database from an archive previously
	created with the :py:func:`.export_database` function. The current
	:py:data:`~king_phisher.server.database.models.SCHEMA_VERSION` must be the
	same as the exported archive.

	.. warning::
		This will by default delete the contents of the current database in
		accordance with the *clear* parameter. If *clear* is not specified and
		objects in the database and import share an ID, they will be merged.

	:param str target_file: The database archive file to import from.
	:param bool clear: Whether or not to delete the contents of the existing
		database before importing the new data.
	"""
	kpdb = archive.ArchiveFile(target_file, 'r')
	archived_schema = kpdb.metadata['database-schema']
	if archived_schema != models.SCHEMA_VERSION:
		raise errors.KingPhisherDatabaseError("incompatible database schema versions ({0} vs {1})".format(archived_schema, models.SCHEMA_VERSION))
	if clear:
		clear_database()
	session = Session()
	for table in models.metadata.sorted_tables:
		serialized_rows = kpdb.get_data('tables/' + table.name)
		for row in sqlalchemy.ext.serializer.loads(serialized_rows):
			# merge (rather than add) so pre-existing rows sharing an ID are updated
			session.merge(row)
	session.commit()
	kpdb.close()
|
Import the contents of a serialized database from an archive previously
created with the :py:func:`.export_database` function. The current
:py:data:`~king_phisher.server.database.models.SCHEMA_VERSION` must be the
same as the exported archive.
.. warning::
This will by default delete the contents of the current database in
accordance with the *clear* parameter. If *clear* is not
specified and objects in the database and import share an ID, they will
be merged.
:param str target_file: The database archive file to import from.
:param bool clear: Whether or not to delete the contents of the
existing database before importing the new data.
|
import_database
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def get_metadata(key, session=None):
	"""
	Retrieve a previously stored piece of metadata regarding the King Phisher
	database.

	:param str key: The name of the data to retrieve.
	:param session: The optional session to use to retrieve the value. When
		omitted a new session is created and closed before returning.
	:return: The deserialized value that was stored under *key*.
	:raises KeyError: If no metadata is stored under *key*.
	"""
	if not isinstance(key, str):
		raise TypeError('key must be a str instance')
	# remember whether this function owns the session so it can clean it up
	close_session = session is None
	session = (session or Session())
	obj = session.query(models.StorageData).filter_by(namespace=_metadata_namespace, key=key).first()
	if obj is None:
		raise KeyError(key)
	value = obj.value
	if value is not None:
		# stored values are serialized blobs; decode before returning
		value = _metadata_serializer.loads(value)
	if close_session:
		session.commit()
		session.close()
	return value
|
Store a piece of metadata regarding the King Phisher database.
:param str key: The name of the data.
:param value: The value to store.
:type value: int, str
:param session: The session to use to store the value.
|
get_metadata
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def get_row_by_id(session, table, row_id):
	"""
	Retrieve a database row from the specified table by its unique id.

	:param session: The database session to use for the query.
	:type session: `.Session`
	:param table: The table object or the name of the database table where the row resides.
	:param row_id: The id of the row to retrieve.
	:return: The object representing the specified row or None if it does not exist.
	"""
	# resolve table names to their model class; calling issubclass directly on a
	# str instance would raise TypeError, so first ensure *table* is a class
	if not (isinstance(table, type) and issubclass(table, models.Base)):
		table = models.database_tables[table].model
	query = session.query(table)
	query = query.filter_by(id=row_id)
	result = query.first()
	return result
|
Retrieve a database row from the specified table by it's unique id.
:param session: The database session to use for the query.
:type session: `.Session`
:param table: The table object or the name of the database table where the row resides.
:param row_id: The id of the row to retrieve.
:return: The object representing the specified row or None if it does not exist.
|
get_row_by_id
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def set_metadata(key, value, session=None):
	"""
	Store a piece of metadata regarding the King Phisher database.

	:param str key: The name of the data.
	:param value: The value to store.
	:type value: int, str
	:param session: The optional session to use to store the value. When
		omitted a new session is created, committed and closed.
	"""
	if not isinstance(key, str):
		raise TypeError('key must be a str instance')
	# remember whether this function owns the session so it can clean it up
	close_session = session is None
	session = (session or Session())
	if value is not None:
		value = _metadata_serializer.dumps(value)
	obj = session.query(models.StorageData).filter_by(namespace=_metadata_namespace, key=key).first()
	if obj is None:
		obj = models.StorageData(namespace=_metadata_namespace, key=key)
	elif obj.value != value:
		# only bump the modification timestamp when the stored value actually changes
		obj.modified = datetime.datetime.utcnow()
	obj.value = value
	session.add(obj)
	if close_session:
		session.commit()
		session.close()
	return
|
Store a piece of metadata regarding the King Phisher database.
:param str key: The name of the data.
:param value: The value to store.
:type value: int, str
:param session: The session to use to store the value.
|
set_metadata
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def normalize_connection_url(connection_url):
	"""
	Normalize a connection url by performing any conversions necessary for it to
	be used with the database API.

	:param str connection_url: The connection url to normalize.
	:return: The normalized connection url.
	:rtype: str
	"""
	# the special ':memory:' token maps to an in-memory sqlite database
	if connection_url == ':memory:':
		return 'sqlite://'
	# bare filesystem paths (existing files, or paths in existing directories)
	# are treated as sqlite database files
	if os.path.isfile(connection_url) or os.path.isdir(os.path.dirname(connection_url)):
		return 'sqlite:///' + os.path.abspath(connection_url)
	return connection_url
|
Normalize a connection url by performing any conversions necessary for it to
be used with the database API.
:param str connection_url: The connection url to normalize.
:return: The normalized connection url.
:rtype: str
|
normalize_connection_url
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def init_alembic(engine, schema_version):
	"""
	Creates the alembic_version table and sets the value of the table according
	to the specified schema version.

	:param engine: The engine used to connect to the database.
	:type engine: :py:class:`sqlalchemy.engine.Engine`
	:param int schema_version: The MetaData schema_version to set the alembic version to.
	"""
	alembic_directory = find.data_directory('alembic')
	if not alembic_directory:
		raise errors.KingPhisherDatabaseError('cannot find the alembic data directory')
	# locate the revision file for this schema version in the versions directory
	revision_pattern = re.compile(r'[a-f0-9]{10,16}_schema_v\d+\.py')
	revision_suffix = '_schema_v' + str(schema_version) + '.py'
	alembic_revision = None
	for filename in os.listdir(os.path.join(alembic_directory, 'versions')):
		if revision_pattern.match(filename) and filename.endswith(revision_suffix):
			# the revision identifier is the leading hex component of the file name
			alembic_revision = filename.split('_', 1)[0]
			break
	if not alembic_revision:
		raise errors.KingPhisherDatabaseError("cannot find current alembic version for schema version {0}".format(schema_version))
	alembic_metadata = sqlalchemy.MetaData(engine)
	alembic_table = sqlalchemy.Table(
		'alembic_version',
		alembic_metadata,
		sqlalchemy.Column(
			'version_num',
			sqlalchemy.String,
			primary_key=True,
			nullable=False
		)
	)
	alembic_metadata.create_all()
	engine.connect().execute(alembic_table.insert().values(version_num=alembic_revision))
	logger.info("alembic_version table initialized to {0}".format(alembic_revision))
|
Creates the alembic_version table and sets the value of the table according
to the specified schema version.
:param engine: The engine used to connect to the database.
:type engine: :py:class:`sqlalchemy.engine.Engine`
:param int schema_version: The MetaData schema_version to set the alembic version to.
|
init_alembic
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def init_database(connection_url, extra_init=False):
	"""
	Create and initialize the database engine. This must be done before the
	session object can be used. This will also attempt to perform any updates to
	the database schema if the backend supports such operations.

	:param str connection_url: The url for the database connection.
	:param bool extra_init: Run optional extra dbms-specific initialization logic.
	:return: The initialized database engine.
	"""
	connection_url = normalize_connection_url(connection_url)
	connection_url = sqlalchemy.engine.url.make_url(connection_url)
	logger.info("initializing database connection with driver {0}".format(connection_url.drivername))
	if connection_url.drivername == 'sqlite':
		engine = sqlalchemy.create_engine(connection_url, connect_args={'check_same_thread': False}, poolclass=sqlalchemy.pool.StaticPool)
		sqlalchemy.event.listens_for(engine, 'begin')(lambda conn: conn.execute('BEGIN'))
	elif connection_url.drivername == 'postgresql':
		if extra_init:
			init_database_postgresql(connection_url)
		engine = sqlalchemy.create_engine(connection_url, connect_args={'client_encoding': 'utf8'})
	else:
		raise errors.KingPhisherDatabaseError('only sqlite and postgresql database drivers are supported')
	try:
		Session.remove()
		Session.configure(bind=engine)
		inspector = sqlalchemy.inspect(engine)
	except sqlalchemy.exc.OperationalError as error:
		# detect postgresql authentication failures and raise a more specific error
		if error.args:
			match = re.match(r'\(psycopg2\.OperationalError\) FATAL:\s+\w+ authentication failed for user \"(?P<username>\w+)\"$', error.args[0])
			if match:
				raise errors.KingPhisherDatabaseAuthenticationError('database initialization failed', username=match.group('username')) from None
		logger.debug('encountered a sqlalchemy OperationalError while initializing the database', exc_info=True)
		raise errors.KingPhisherDatabaseError('database initialization failed') from error
	if 'campaigns' not in inspector.get_table_names():
		logger.debug('campaigns table not found, creating all new tables')
		try:
			models.Base.metadata.create_all(engine)
		except sqlalchemy.exc.SQLAlchemyError as error:
			# use str(error) -- SQLAlchemyError has no .message attribute on Python 3,
			# so referencing it would raise AttributeError and mask the real error
			error_lines = (line.strip() for line in str(error).split('\n'))
			raise errors.KingPhisherDatabaseError('SQLAlchemyError: ' + ' '.join(error_lines).strip()) from error
	schema_version = get_schema_version(engine)
	logger.debug("current database schema version: {0} ({1})".format(schema_version, ('latest' if schema_version == models.SCHEMA_VERSION else 'obsolete')))
	if 'alembic_version' not in inspector.get_table_names():
		logger.debug('alembic version table not found, attempting to create and set version')
		init_alembic(engine, schema_version)
	if schema_version > models.SCHEMA_VERSION:
		raise errors.KingPhisherDatabaseError('the database schema is for a newer version, automatic downgrades are not supported')
	elif schema_version < models.SCHEMA_VERSION:
		# the schema is obsolete, run the alembic migrations to bring it up to date
		alembic_config_file = find.data_file('alembic.ini')
		if not alembic_config_file:
			raise errors.KingPhisherDatabaseError('cannot find the alembic.ini configuration file')
		alembic_directory = find.data_directory('alembic')
		if not alembic_directory:
			raise errors.KingPhisherDatabaseError('cannot find the alembic data directory')
		config = alembic.config.Config(alembic_config_file)
		config.config_file_name = alembic_config_file
		config.set_main_option('script_location', alembic_directory)
		config.set_main_option('skip_logger_config', 'True')
		config.set_main_option('sqlalchemy.url', str(connection_url))
		logger.warning("automatically updating the database schema from version {0} to {1}".format(schema_version, models.SCHEMA_VERSION))
		try:
			alembic.command.upgrade(config, 'head')
		except Exception as error:
			logger.critical("database schema upgrade failed with exception: {0}.{1} {2}".format(error.__class__.__module__, error.__class__.__name__, getattr(error, 'message', '')).rstrip(), exc_info=True)
			raise errors.KingPhisherDatabaseError('failed to upgrade to the latest database schema')
		logger.info("successfully updated the database schema from version {0} to {1}".format(schema_version, models.SCHEMA_VERSION))
		# reset it because it may have been altered by alembic
		Session.remove()
		Session.configure(bind=engine)
	set_metadata('database_driver', connection_url.drivername)
	set_metadata('last_started', datetime.datetime.utcnow())
	set_metadata('schema_version', models.SCHEMA_VERSION)
	logger.debug("connected to {0} database: {1}".format(connection_url.drivername, connection_url.database))
	signals.db_initialized.send(connection_url)
	return engine
|
Create and initialize the database engine. This must be done before the
session object can be used. This will also attempt to perform any updates to
the database schema if the backend supports such operations.
:param str connection_url: The url for the database connection.
:param bool extra_init: Run optional extra dbms-specific initialization logic.
:return: The initialized database engine.
|
init_database
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def init_database_postgresql(connection_url):
	"""
	Perform additional initialization checks and operations for a PostgreSQL
	database. If the database is hosted locally this will ensure that the
	service is currently running and start it if it is not. Additionally if the
	specified database or user do not exist, they will be created.

	:param connection_url: The url for the PostgreSQL database connection.
	:type connection_url: :py:class:`sqlalchemy.engine.url.URL`
	"""
	# only manage the service, role and database when the server is hosted locally
	if not ipaddress.is_loopback(connection_url.host):
		return
	# identifiers are interpolated into SQL statements below, so restrict them to
	# word characters to avoid injection through the connection url
	# NOTE(review): connection_url.password may be None when omitted from the
	# url, which would make is_sanitary raise TypeError -- confirm callers
	# always supply a password
	is_sanitary = lambda s: re.match(r'^[a-zA-Z0-9_]+$', s) is not None
	systemctl_bin = smoke_zephyr.utilities.which('systemctl')
	if systemctl_bin is None:
		logger.info('postgresql service status check failed (could not find systemctl)')
	else:
		postgresql_setup = smoke_zephyr.utilities.which('postgresql-setup')
		if postgresql_setup is None:
			logger.debug('postgresql-setup was not found')
		else:
			logger.debug('using postgresql-setup to ensure that the database is initialized')
			startup.run_process([postgresql_setup, '--initdb'])
		results = startup.run_process([systemctl_bin, 'status', 'postgresql.service'])
		# wait for the process to return and check if it's running (status 0)
		if results.status == os.EX_OK:
			logger.debug('postgresql service is already running via systemctl')
		else:
			logger.info('postgresql service is not running, starting it now via systemctl')
			results = startup.run_process([systemctl_bin, 'start', 'postgresql'])
			if results.status != os.EX_OK:
				logger.error('failed to start the postgresql service via systemctl')
				raise errors.KingPhisherDatabaseError('postgresql service failed to start via systemctl')
			logger.debug('postgresql service successfully started via systemctl')
	# create the role for the configured user if it does not already exist
	rows = _popen_psql('SELECT usename FROM pg_user')
	if connection_url.username not in rows:
		logger.info('the specified postgresql user does not exist, adding it now')
		if not is_sanitary(connection_url.username):
			raise errors.KingPhisherInputValidationError('will not create the postgresql user (username contains bad characters)')
		if not is_sanitary(connection_url.password):
			raise errors.KingPhisherInputValidationError('will not create the postgresql user (password contains bad characters)')
		rows = _popen_psql("CREATE USER {url.username} WITH PASSWORD '{url.password}'".format(url=connection_url))
		if rows != ['CREATE ROLE']:
			logger.error('failed to create the postgresql user')
			raise errors.KingPhisherDatabaseError('failed to create the postgresql user')
		logger.debug('the specified postgresql user was successfully created')
	# create the database owned by the configured user if it does not already exist
	rows = _popen_psql('SELECT datname FROM pg_database')
	if connection_url.database not in rows:
		logger.info('the specified postgresql database does not exist, adding it now')
		if not is_sanitary(connection_url.database):
			raise errors.KingPhisherInputValidationError('will not create the postgresql database (name contains bad characters)')
		rows = _popen_psql("CREATE DATABASE {url.database} OWNER {url.username}".format(url=connection_url))
		if rows != ['CREATE DATABASE']:
			logger.error('failed to create the postgresql database')
			raise errors.KingPhisherDatabaseError('failed to create the postgresql database')
		logger.debug('the specified postgresql database was successfully created')
|
Perform additional initialization checks and operations for a PostgreSQL
database. If the database is hosted locally this will ensure that the
service is currently running and start it if it is not. Additionally if the
specified database or user do not exist, they will be created.
:param connection_url: The url for the PostgreSQL database connection.
:type connection_url: :py:class:`sqlalchemy.engine.url.URL`
:return: The initialized database engine.
|
init_database_postgresql
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def register_table(table):
	"""
	Register a database table. This will populate the information provided in
	the ``database_tables`` dictionary. This also forwards signals to the
	appropriate listeners within the :py:mod:`server.signal` module.

	:param cls table: The table to register.
	"""
	metatable = table.metatable()
	database_tables[metatable.name] = metatable
	# forward the SQLAlchemy mapper events to the application's signal handlers
	for event_name, handler in (
			('before_delete', forward_signal_delete),
			('before_insert', forward_signal_insert),
			('before_update', forward_signal_update)):
		sqlalchemy.event.listen(table, event_name, handler)
	return table
|
Register a database table. This will populate the information provided in
DATABASE_TABLES dictionary. This also forwards signals to the appropriate
listeners within the :py:mod:`server.signal` module.
:param cls table: The table to register.
|
register_table
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def assert_session_has_permissions(self, *args, **kwargs):
	"""
	A convenience wrapper around :py:meth:`~.session_has_permissions` which
	raises a :py:exc:`~king_phisher.errors.KingPhisherPermissionError` when
	the session lacks the specified permissions.
	"""
	if not self.session_has_permissions(*args, **kwargs):
		raise errors.KingPhisherPermissionError()
|
A convenience function which wraps :py:meth:`~.session_has_permissions`
and raises a :py:exc:`~king_phisher.errors.KingPhisherPermissionError`
if the session does not have the specified permissions.
|
assert_session_has_permissions
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_permissions(self, access, session):
	"""
	Check that the authenticated session has the permissions specified in
	*access*. The permissions in *access* are abbreviated with the first
	letter of create, read, update, and delete. For example, to check for
	read and update permissions, *access* would be ``'ru'``.

	.. note::
		This will always return ``True`` for sessions which are for
		administrative users. To maintain this logic, this method **should
		not** be overridden in subclasses. Instead override the specific
		``_session_has_*_access`` methods as necessary.

	:param str access: The desired permissions.
	:param session: The authenticated session to check access for.
	:return: Whether the session has the desired permissions.
	:rtype: bool
	"""
	# administrators are implicitly granted every permission
	if session.user_is_admin:
		return True
	cls = self.__class__
	# private models are never accessible to non-administrative sessions
	if cls.is_private:
		return False
	access = access.lower()
	# NOTE(review): with comp=operator.contains and swapped=True each case()
	# appears to test membership of its letter within *access* -- confirm
	# against the utilities.switch implementation
	for case in utilities.switch(access, comp=operator.contains, swapped=True):
		if case('c') and not cls.session_has_create_access(session, instance=self):
			break
		if case('r') and not cls.session_has_read_access(session, instance=self):
			break
		if case('u') and not cls.session_has_update_access(session, instance=self):
			break
		if case('d') and not cls.session_has_delete_access(session, instance=self):
			break
	else:
		# no requested permission check failed
		return True
	return False
|
Check that the authenticated session has the permissions specified in
*access*. The permissions in *access* are abbreviated with the first
letter of create, read, update, and delete. For example, to check for
read and update permissions, *access* would be ``'ru'``.
.. note::
This will always return ``True`` for sessions which are for
administrative users. To maintain this logic, this method **should
not** be overridden in subclasses. Instead override the specific
``_session_has_*_access`` methods as necessary.
:param str access: The desired permissions.
:param session: The authenticated session to check access for.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_permissions
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_create_access(cls, session, instance=None):
	"""
	Determine whether the authenticated *session* is permitted to create the
	specified model *instance*.

	:param session: The authenticated session to check access for.
	:param instance: The optional model instance to inspect.
	:return: Whether the session has the desired permissions.
	:rtype: bool
	"""
	# non-administrative sessions defer to the model-specific hook
	if not session.user_is_admin:
		return cls._session_has_create_access(session, instance=instance)
	return True
|
Check that the authenticated *session* has access to create the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_create_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_delete_access(cls, session, instance=None):
	"""
	Determine whether the authenticated *session* is permitted to delete the
	specified model *instance*.

	:param session: The authenticated session to check access for.
	:param instance: The optional model instance to inspect.
	:return: Whether the session has the desired permissions.
	:rtype: bool
	"""
	# non-administrative sessions defer to the model-specific hook
	if not session.user_is_admin:
		return cls._session_has_delete_access(session, instance=instance)
	return True
|
Check that the authenticated *session* has access to delete the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_delete_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_read_access(cls, session, instance=None):
	"""
	Determine whether the authenticated *session* is permitted to read the
	specified model *instance*.

	:param session: The authenticated session to check access for.
	:param instance: The optional model instance to inspect.
	:return: Whether the session has the desired permissions.
	:rtype: bool
	"""
	# non-administrative sessions defer to the model-specific hook
	if not session.user_is_admin:
		return cls._session_has_read_access(session, instance=instance)
	return True
|
Check that the authenticated *session* has access to read the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_read_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_read_prop_access(cls, session, prop, instance=None):
	"""
	Determine whether the authenticated *session* is permitted to read the
	property of the specified model *instance*. This allows models to
	explicitly control which of their attributes can be read by a particular
	*session*.

	:param session: The authenticated session to check access for.
	:param str prop: The name of the property to check read access on.
	:param instance: The optional model instance to inspect.
	:return: Whether the session has the desired permissions.
	:rtype: bool
	"""
	# non-administrative sessions defer to the model-specific hook
	if not session.user_is_admin:
		return cls._session_has_read_prop_access(session, prop, instance=instance)
	return True
|
Check that the authenticated *session* has access to read the property
of the specified model *instance*. This allows models to only explicitly
control which of their attributes can be read by a particular *session*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_read_prop_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_update_access(cls, session, instance=None):
	"""
	Determine whether the authenticated *session* is permitted to update the
	specified model *instance*.

	:param session: The authenticated session to check access for.
	:param instance: The optional model instance to inspect.
	:return: Whether the session has the desired permissions.
	:rtype: bool
	"""
	# non-administrative sessions defer to the model-specific hook
	if not session.user_is_admin:
		return cls._session_has_update_access(session, instance=instance)
	return True
|
Check that the authenticated *session* has access to update the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_update_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def metatable(cls):
	"""
	Generate a :py:class:`.MetaTable` instance describing this model class.

	:return: The appropriate metadata for the table represented by this model.
	:rtype: :py:class:`.MetaTable`
	"""
	column_names = tuple(column.name for column in cls.__table__.columns)
	return MetaTable(
		column_names=column_names,
		model=cls,
		name=cls.__tablename__,
		table=cls.__table__
	)
|
Generate a :py:class:`.MetaTable` instance for this model class.
:return: The appropriate metadata for the table represented by this model.
:rtype: :py:class:`.MetaTable`
|
metatable
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def __init__(self, namespace=None, order_by='created'):
	"""
	.. versionchanged:: 1.14.0
		Added the *order_by* parameter.

	:param str namespace: The unique identifier of this namespace.
	:param str order_by: The attribute to order stored items by. This must be one of "created", "id", "key", or "modified".
	"""
	# ordering is applied when stored items are iterated or listed
	self.order_by = order_by
	self.namespace = namespace
|
.. versionchanged:: 1.14.0
Added the *order_by* parameter.
:param str namespace: The unique identifier of this namespace.
:param str order_by: The attribute to order stored items by. This must be one of "created", "id", "key", or "modified".
|
__init__
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/storage.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/storage.py
|
BSD-3-Clause
|
def validate_credential(credential, campaign):
	"""
	Validate a *credential* object with regards to the configuration provided in
	*campaign*. This uses :py:func:`~.validate_credential_fields` to validate
	each field individually and then return either ``None``, ``True`` or
	``False``. If no validation took place on any field, ``None`` is returned,
	otherwise if any field was validated then a boolean is returned indicating
	whether or not all validated (non-``None``) fields passed validation.

	:param credential: The credential object to validate.
	:param campaign: The campaign with the validation configuration.
	:return: Either a boolean or ``None`` depending on the results.
	"""
	field_results = validate_credential_fields(credential, campaign)
	values = [getattr(field_results, name) for name in CredentialCollection._fields]
	# None for every field means nothing was validated at all
	if all(value is None for value in values):
		return None
	# otherwise, every validated field must have passed
	return all(value is None or value is True for value in values)
|
Validate a *credential* object with regards to the configuration provided in
*campaign*. This uses :py:func:`~.validate_credential_fields` to validate
each field individually and then return either ``None``, ``True`` or
``False``. If no validation took place on any field, ``None`` is returned,
otherwise if any field was validated then a boolean is returned indicating
whether or not all validated (non-``None``) fields passed validation.
:param credential: The credential object to validate.
:param campaign: The campaign with the validation configuration.
:return: Either a boolean or ``None`` depending on the results.
|
validate_credential
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/validation.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/validation.py
|
BSD-3-Clause
|
def validate_credential_fields(credential, campaign):
	"""
	Validate a *credential* object with regards to the configuration provided in
	*campaign*. Each field in the *credential* object is validated and a new
	:py:class:`~.CredentialCollection` is returned with its fields set to the
	results of the validation. A field's validation result is either ``None``,
	``True`` or ``False``. If no validation took place on the field, either
	because nothing was configured for it in *campaign*, or the validation
	information was invalid (a malformed regex for example) the result will be
	``None``. Otherwise, the result is either ``True`` or ``False`` for the
	field depending on the validation.

	:param credential: The credential object to validate.
	:param campaign: The campaign with the validation configuration.
	:return: A :py:class:`~.CredentialCollection` object with the fields set to the results of their respective validation.
	:rtype: :py:class:`~.CredentialCollection`
	"""
	# note that this uses duck-typing so the *credential* object could be either a db_models.Credential instance or a
	# db_validation.CredentialCollection instance
	results = {}
	for field in CredentialCollection._fields:
		results[field] = None  # default to None (no validation occurred on this field)
		regex = getattr(campaign, 'credential_regex_' + field, None)
		if regex is None:
			continue
		try:
			regex = re.compile(regex)
		except re.error:
			logger.warning("regex compile error while validating credential field: {0}".format(field), exc_info=True)
			continue
		value = getattr(credential, field)
		# each field is validated independently; a failure in one field must not
		# carry over and mark subsequent fields as failed
		validated = value is not None and regex.match(value) is not None
		if not validated:
			logger.debug("credential failed regex validation on field: {0}".format(field))
		results[field] = validated
	return CredentialCollection(**results)
|
Validate a *credential* object with regards to the configuration provided in
*campaign*. Each field in the *credential* object is validated and a new
:py:class:`~.CredentialCollection` is returned with it's fields set to the
results of the validation. A fields validation results are either ``None``,
``True`` or ``False``. If no validation took place on the field, either
because nothing was configured for it in *campaign*,or the validation
information was invalid (a malformed regex for example) the result will be
``None``. Otherwise, the result is either ``True`` or ``False`` for the
field depending on the validation.
:param credential: The credential object to validate.
:param campaign: The campaign with the validation configuration.
:return: A :py:class:`~.CredentialCollection` object with the fields set to the results of their respective validation.
:rtype: :py:class:`~.CredentialCollection`
|
validate_credential_fields
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/validation.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/validation.py
|
BSD-3-Clause
|
def info_has_read_prop_access(cls, info, model, field_name=None, instance=None):
    """
    Check that the context provided by *info* has access to read the
    specified property of the model. This can be used to ensure that
    sessions which can not read a protected field can also not obtain
    indirect access such as filtering or sorting by it.

    :param info: The resolve information for this execution.
    :type info: :py:class:`graphql.execution.base.ResolveInfo`
    :param model: The SQLAlchemy model to check read-property access on.
    :type model: :py:class:`sqlalchemy.ext.declarative.api.Base`
    :param str field_name: The specific field name to check, otherwise ``info.field_name``.
    :param instance: An optional instance of *model* to use for the access check.
    :return: Whether or not the context is authorized to access the property.
    :rtype: bool
    """
    session = info.context.get('rpc_session')
    # no RPC session in the context means no access restriction applies
    if session is None:
        return True
    if not field_name:
        field_name = info.field_name
    return model.session_has_read_prop_access(session, field_name, instance=instance)
|
Check that the context provided by *info* has access to read the
specified property of the model. This can be used to ensure that
sessions which can not read a protected field can also not obtain
indirect access such as filtering or sorting by it.
:param info: The resolve information for this execution.
:type info: :py:class:`graphql.execution.base.ResolveInfo`
:param model: The SQLAlchemy model to check read-property access on.
:type model: :py:class:`sqlalchemy.ext.declarative.api.Base`
:param str field_name: The specific field name to check, otherwise ``info.field_name``.
:param instance: An optional instance of *model* to use for the access check.
:return: Whether or not the context is authorized to access the property.
:rtype: bool
|
info_has_read_prop_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/graphql/middleware.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/graphql/middleware.py
|
BSD-3-Clause
|
def sa_get_relationship(session, model, name):
    """
    Resolve the relationship on a SQLAlchemy model to either an object (in the
    case of one-to-one relationships) or a query to all of the objects (in the
    case of one-to-many relationships).

    :param session: The SQLAlchemy session to associate the query with.
    :param model: The SQLAlchemy model of the object associated with the relationship.
    :param name: The name of the relationship as it exists in the *model*.
    :return: Either the object or a SQLAlchemy query for the objects.
    """
    mapper = sqlalchemy.inspect(model.__class__)
    relationship = mapper.relationships[name]
    # look up the mapped model class for the table this relationship targets
    foreign_model = db_models.database_tables[relationship.target.name].model
    query = session.query(foreign_model)
    if relationship.uselist:
        # one-to-many: return the unevaluated query for all related rows
        # NOTE(review): assumes the local join column is ``model.id`` -- confirm against the schema
        column_name = relationship.primaryjoin.right.name
        return query.filter(getattr(foreign_model, column_name) == model.id)
    # one-to-one: evaluate now and return the single related object (or None)
    column_name = relationship.primaryjoin.left.name
    query = query.filter(getattr(foreign_model, column_name) == getattr(model, relationship.primaryjoin.right.name))
    return query.first()
|
Resolve the relationship on a SQLAlchemy model to either an object (in the
case of one-to-one relationships) or a query to all of the objects (in the
case of one-to-many relationships).
:param session: The SQLAlchemy session to associate the query with.
:param model: The SQLAlchemy model of the object associated with the relationship.
:param name: The name of the relationship as it exists in the *model*.
:return: Either the object or a SQLAlchemy query for the objects.
|
sa_get_relationship
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/graphql/types/database.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/graphql/types/database.py
|
BSD-3-Clause
|
def sa_object_resolver(attname, default_value, model, info, **kwargs):
    """
    Resolve *attname* for the given SQLAlchemy model object. Relationship
    attributes are resolved through :py:func:`.sa_get_relationship`, while
    plain attributes are read directly from the object.

    :param str attname: The name of the attribute to resolve on the object.
    :param default_value: The default value to return if the attribute is unavailable.
    :param model: The SQLAlchemy model to resolve the attribute for.
    :type model: :py:class:`sqlalchemy.ext.declarative.api.Base`
    :param info: The resolve information for this execution.
    :type info: :py:class:`graphql.execution.base.ResolveInfo`
    """
    relationships = sqlalchemy.inspect(model.__class__).relationships
    if attname not in relationships:
        return getattr(model, attname, default_value)
    return sa_get_relationship(info.context['session'], model, attname)
|
Resolve the attribute for the given SQLAlchemy model object. If the
attribute is a relationship, use :py:func:`.sa_get_relationship` to resolve
it.
:param str attname: The name of the attribute to resolve on the object.
:param default_value: The default value to return if the attribute is unavailable.
:param model: The SQLAlchemy model to resolve the attribute for.
:type model: :py:class:`sqlalchemy.ext.declarative.api.Base`
:param info: The resolve information for this execution.
:type info: :py:class:`graphql.execution.base.ResolveInfo`
|
sa_object_resolver
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/graphql/types/database.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/graphql/types/database.py
|
BSD-3-Clause
|
def prepare_latents(
    self,
    batch_size: int,  # Number of videos to generate in parallel
    num_channels_latents: int,  # Number of channels in the latents
    width: int,  # Width of the video frame
    height: int,  # Height of the video frame
    video_length: int,  # Length of the video in frames
    dtype: torch.dtype,  # Data type of the latents
    device: torch.device,  # Device to store the latents on
    generator: Optional[torch.Generator] = None,  # Random number generator for reproducibility
    latents: Optional[torch.Tensor] = None  # Pre-generated latents (optional)
):
    """
    Prepare the initial latents for video generation.

    Args:
        batch_size (int): Number of videos to generate in parallel.
        num_channels_latents (int): Number of channels in the latents.
        width (int): Width of the video frame in pixels.
        height (int): Height of the video frame in pixels.
        video_length (int): Length of the video in frames.
        dtype (torch.dtype): Data type of the latents.
        device (torch.device): Device to store the latents on.
        generator (Optional[torch.Generator]): Random number generator (or a
            list with one generator per batch item) for reproducibility.
        latents (Optional[torch.Tensor]): Pre-generated latents; when given
            they are only moved to *device* rather than sampled.

    Returns:
        torch.Tensor: Latents of shape (batch_size, num_channels_latents,
        video_length, height // vae_scale_factor, width // vae_scale_factor),
        scaled by the scheduler's initial noise sigma.
    """
    shape = (
        batch_size,
        num_channels_latents,
        video_length,
        height // self.vae_scale_factor,
        width // self.vae_scale_factor,
    )
    if isinstance(generator, list) and len(generator) != batch_size:
        raise ValueError(
            f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
            f" size of {batch_size}. Make sure the batch size matches the length of the generators."
        )
    if latents is None:
        # randn_tensor handles per-sample generators; presumably
        # diffusers.utils.torch_utils.randn_tensor -- confirm import site
        latents = randn_tensor(
            shape, generator=generator, device=device, dtype=dtype
        )
    else:
        latents = latents.to(device)

    # scale the initial noise by the standard deviation required by the scheduler
    latents = latents * self.scheduler.init_noise_sigma
    return latents
|
Prepares the initial latents for video generation.
Args:
batch_size (int): Number of videos to generate in parallel.
num_channels_latents (int): Number of channels in the latents.
width (int): Width of the video frame.
height (int): Height of the video frame.
video_length (int): Length of the video in frames.
dtype (torch.dtype): Data type of the latents.
device (torch.device): Device to store the latents on.
generator (Optional[torch.Generator]): Random number generator for reproducibility.
latents (Optional[torch.Tensor]): Pre-generated latents (optional).
Returns:
latents (torch.Tensor): Tensor of shape (batch_size, num_channels_latents, width, height)
containing the initial latents for video generation.
|
prepare_latents
|
python
|
fudan-generative-vision/hallo
|
hallo/animate/face_animate.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/animate/face_animate.py
|
MIT
|
def decode_latents(self, latents):
    """
    Decode latents into a video via the VAE, one frame at a time.

    Parameters:
        latents (torch.Tensor): Latents shaped (batch, channels, frames, height, width).

    Returns:
        numpy.ndarray: The decoded video as float32 with values clamped to [0, 1],
        shaped (batch, channels, frames, H, W).
    """
    num_frames = latents.shape[2]
    scaled = 1 / 0.18215 * latents
    flat = rearrange(scaled, "b c f h w -> (b f) c h w")
    # decode frame-by-frame instead of one batched VAE call to bound peak memory
    decoded_frames = [
        self.vae.decode(flat[idx: idx + 1]).sample
        for idx in tqdm(range(flat.shape[0]))
    ]
    video = rearrange(torch.cat(decoded_frames), "(b f) c h w -> b c f h w", f=num_frames)
    video = (video / 2 + 0.5).clamp(0, 1)
    # cast to float32: negligible overhead and compatible with bfloat16 inputs
    return video.cpu().float().numpy()
|
Decode the latents to produce a video.
Parameters:
latents (torch.Tensor): The latents to be decoded.
Returns:
video (torch.Tensor): The decoded video.
video_length (int): The length of the video in frames.
|
decode_latents
|
python
|
fudan-generative-vision/hallo
|
hallo/animate/face_animate.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/animate/face_animate.py
|
MIT
|
def enable_sequential_cpu_offload(self, gpu_id=0):
    """
    Enable sequential CPU offloading for the pipeline's sub-models to reduce
    GPU memory usage.

    The UNet, text encoder and VAE are wrapped with ``cpu_offload`` so that
    their weights reside on the CPU and are presumably moved to the given GPU
    only while they execute (NOTE(review): assumes ``cpu_offload`` is
    ``accelerate.cpu_offload`` -- confirm the import).

    Args:
        gpu_id (int, optional): The ID of the GPU used for execution when a
            model is offloaded. Defaults to 0.
    """
    device = torch.device(f"cuda:{gpu_id}")
    for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
        # models that the pipeline was constructed without are skipped
        if cpu_offloaded_model is not None:
            cpu_offload(cpu_offloaded_model, device)
|
Offloads selected models to the GPU for increased performance.
Args:
gpu_id (int, optional): The ID of the GPU to offload models to. Defaults to 0.
|
enable_sequential_cpu_offload
|
python
|
fudan-generative-vision/hallo
|
hallo/animate/face_animate_static.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/animate/face_animate_static.py
|
MIT
|
def decode_latents(self, latents):
    """
    Decode the given latents to video frames using the VAE, one frame at a time.

    Parameters:
        latents (torch.Tensor): The latents to be decoded. Shape: (batch_size, num_channels_latents, video_length, height, width).

    Returns:
        numpy.ndarray: The decoded video as float32 with values clamped to
        [0, 1], shaped (batch, channels, frames, height, width).
    """
    video_length = latents.shape[2]
    # 0.18215 is the latent scaling factor; presumably the Stable Diffusion
    # VAE constant -- confirm against the VAE config
    latents = 1 / 0.18215 * latents
    latents = rearrange(latents, "b c f h w -> (b f) c h w")
    # video = self.vae.decode(latents).sample
    video = []
    # decode frame-by-frame rather than in one batched call to bound peak memory
    for frame_idx in tqdm(range(latents.shape[0])):
        video.append(self.vae.decode(
            latents[frame_idx: frame_idx + 1]).sample)
    video = torch.cat(video)
    video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
    video = (video / 2 + 0.5).clamp(0, 1)
    # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
    video = video.cpu().float().numpy()
    return video
|
Decode the given latents to video frames.
Parameters:
latents (torch.Tensor): The latents to be decoded. Shape: (batch_size, num_channels_latents, video_length, height, width).
Returns:
video (torch.Tensor): The decoded video frames. Shape: (batch_size, num_channels_latents, video_length, height, width).
|
decode_latents
|
python
|
fudan-generative-vision/hallo
|
hallo/animate/face_animate_static.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/animate/face_animate_static.py
|
MIT
|
def prepare_latents(
    self,
    batch_size,
    num_channels_latents,
    width,
    height,
    dtype,
    device,
    generator,
    latents=None,
):
    """
    Build (or validate) the initial noise latents for the diffusion pipeline.

    Args:
        batch_size (int): Number of images generated per forward pass.
        num_channels_latents (int): Channel count of the latents tensor.
        width (int): Target image width in pixels.
        height (int): Target image height in pixels.
        dtype (torch.dtype): Data type for the latents.
        device (torch.device): Device the latents are placed on.
        generator (Optional[torch.Generator]): RNG (or one per batch item)
            for reproducible sampling. Defaults to None.
        latents (Optional[torch.Tensor]): Pre-computed latents; when supplied
            they are only moved to *device* instead of being sampled.

    Returns:
        torch.Tensor: The prepared latents, scaled by the scheduler's
        initial noise sigma.
    """
    latent_shape = (
        batch_size,
        num_channels_latents,
        height // self.vae_scale_factor,
        width // self.vae_scale_factor,
    )
    if isinstance(generator, list) and len(generator) != batch_size:
        raise ValueError(
            f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
            f" size of {batch_size}. Make sure the batch size matches the length of the generators."
        )

    if latents is not None:
        latents = latents.to(device)
    else:
        latents = randn_tensor(
            latent_shape, generator=generator, device=device, dtype=dtype
        )

    # the scheduler expects the initial noise at its native sigma
    return latents * self.scheduler.init_noise_sigma
|
Prepares the initial latents for the diffusion pipeline.
Args:
batch_size (int): The number of images to generate in one forward pass.
num_channels_latents (int): The number of channels in the latents tensor.
width (int): The width of the latents tensor.
height (int): The height of the latents tensor.
dtype (torch.dtype): The data type of the latents tensor.
device (torch.device): The device to place the latents tensor on.
generator (Optional[torch.Generator], optional): A random number generator
for reproducibility. Defaults to None.
latents (Optional[torch.Tensor], optional): Pre-computed latents to use as
initial conditions for the diffusion process. Defaults to None.
Returns:
torch.Tensor: The prepared latents tensor.
|
prepare_latents
|
python
|
fudan-generative-vision/hallo
|
hallo/animate/face_animate_static.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/animate/face_animate_static.py
|
MIT
|
def prepare_condition(
    self,
    cond_image,
    width,
    height,
    device,
    dtype,
    do_classififer_free_guidance=False,
):
    """
    Preprocess the conditioning image for the face animation pipeline.

    Args:
        cond_image: The conditioning image (any input accepted by
            ``self.cond_image_processor.preprocess``).
        width (int): Target width used during preprocessing.
        height (int): Target height used during preprocessing.
        device (torch.device): Device the processed tensor is moved to.
        dtype (torch.dtype): Data type of the returned tensor.
        do_classififer_free_guidance (bool, optional): When True the tensor is
            duplicated along the batch dimension to serve both the conditional
            and unconditional passes. Defaults to False. (The parameter name
            keeps its original spelling for API compatibility.)

    Returns:
        torch.Tensor: The preprocessed conditioning image tensor (batch-doubled
        when classifier-free guidance is enabled).
    """
    image = self.cond_image_processor.preprocess(
        cond_image, height=height, width=width
    ).to(dtype=torch.float32)
    image = image.to(device=device, dtype=dtype)
    if do_classififer_free_guidance:
        image = torch.cat([image] * 2)
    return image
|
Prepares the condition for the face animation pipeline.
Args:
cond_image (torch.Tensor): The conditional image tensor.
width (int): The width of the output image.
height (int): The height of the output image.
device (torch.device): The device to run the pipeline on.
dtype (torch.dtype): The data type of the tensor.
do_classififer_free_guidance (bool, optional): Whether to use classifier-free guidance or not. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: A tuple of processed condition and mask tensors.
|
prepare_condition
|
python
|
fudan-generative-vision/hallo
|
hallo/animate/face_animate_static.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/animate/face_animate_static.py
|
MIT
|
def preprocess(self, wav_file: str, clip_length: int=-1):
    """
    Preprocess a WAV audio file into a wav2vec audio embedding.

    When an audio separator is configured, the vocal track is first separated
    from the background and resampled to ``self.sample_rate``; otherwise the
    input file is used as-is.

    Args:
        wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.
        clip_length (int, optional): When positive, the features are zero-padded
            so the frame count is a multiple of this clip length. Defaults to -1 (no padding).

    Raises:
        RuntimeError: If the vocal separation step produces no output.

    Returns:
        tuple: ``(audio_emb, audio_length)`` where ``audio_emb`` is a
        ``torch.Tensor`` embedding on the CPU and ``audio_length`` is the
        number of frames (at ``self.fps``) covered by the unpadded audio.
    """
    if self.audio_separator is not None:
        # 1. separate vocals
        # TODO: process in memory
        outputs = self.audio_separator.separate(wav_file)
        if len(outputs) <= 0:
            raise RuntimeError("Audio separate failed.")

        vocal_audio_file = outputs[0]
        vocal_audio_name, _ = os.path.splitext(vocal_audio_file)
        vocal_audio_file = os.path.join(self.audio_separator.output_dir, vocal_audio_file)
        # resample the separated vocal track to the model's sample rate
        vocal_audio_file = resample_audio(vocal_audio_file, os.path.join(self.audio_separator.output_dir, f"{vocal_audio_name}-16k.wav"), self.sample_rate)
    else:
        vocal_audio_file=wav_file

    # 2. extract wav2vec features
    speech_array, sampling_rate = librosa.load(vocal_audio_file, sr=self.sample_rate)
    audio_feature = np.squeeze(self.wav2vec_feature_extractor(speech_array, sampling_rate=sampling_rate).input_values)

    # number of video frames this audio spans at self.fps
    seq_len = math.ceil(len(audio_feature) / self.sample_rate * self.fps)
    audio_length = seq_len

    audio_feature = torch.from_numpy(audio_feature).float().to(device=self.device)

    if clip_length>0 and seq_len % clip_length != 0:
        # pad so the frame count is an exact multiple of clip_length
        audio_feature = torch.nn.functional.pad(audio_feature, (0, (clip_length - seq_len % clip_length) * (self.sample_rate // self.fps)), 'constant', 0.0)
        seq_len += clip_length - seq_len % clip_length
    audio_feature = audio_feature.unsqueeze(0)

    with torch.no_grad():
        embeddings = self.audio_encoder(audio_feature, seq_len=seq_len, output_hidden_states=True)
    assert len(embeddings) > 0, "Fail to extract audio embedding"

    if self.only_last_features:
        audio_emb = embeddings.last_hidden_state.squeeze()
    else:
        # stack all hidden states except the input embeddings layer
        audio_emb = torch.stack(embeddings.hidden_states[1:], dim=1).squeeze(0)
        audio_emb = rearrange(audio_emb, "b s d -> s b d")

    audio_emb = audio_emb.cpu().detach()

    return audio_emb, audio_length
|
Preprocess a WAV audio file by separating the vocals from the background and resampling it to a 16 kHz sample rate.
The separated vocal track is then converted into wav2vec2 for further processing or analysis.
Args:
wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.
Raises:
RuntimeError: Raises an exception if the WAV file cannot be processed. This could be due to issues
such as file not found, unsupported file format, or errors during the audio processing steps.
Returns:
torch.tensor: Returns an audio embedding as a torch.tensor
|
preprocess
|
python
|
fudan-generative-vision/hallo
|
hallo/datasets/audio_processor.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/datasets/audio_processor.py
|
MIT
|
def get_embedding(self, wav_file: str):
    """
    Load a WAV file and convert it into a wav2vec audio embedding.

    Args:
        wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.

    Returns:
        torch.Tensor: The extracted audio embedding, detached and on the CPU.
    """
    speech_array, sampling_rate = librosa.load(
        wav_file, sr=self.sample_rate)
    assert sampling_rate == 16000, "The audio sample rate must be 16000"
    raw_values = self.wav2vec_feature_extractor(
        speech_array, sampling_rate=sampling_rate).input_values
    audio_feature = np.squeeze(raw_values)
    # number of video frames this audio spans at self.fps
    seq_len = math.ceil(len(audio_feature) / self.sample_rate * self.fps)
    feature_tensor = torch.from_numpy(
        audio_feature).float().to(device=self.device).unsqueeze(0)
    with torch.no_grad():
        embeddings = self.audio_encoder(
            feature_tensor, seq_len=seq_len, output_hidden_states=True)
    assert len(embeddings) > 0, "Fail to extract audio embedding"
    if self.only_last_features:
        audio_emb = embeddings.last_hidden_state.squeeze()
    else:
        audio_emb = torch.stack(
            embeddings.hidden_states[1:], dim=1).squeeze(0)
        audio_emb = rearrange(audio_emb, "b s d -> s b d")
    return audio_emb.cpu().detach()
|
preprocess wav audio file convert to embeddings
Args:
wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.
Returns:
torch.tensor: Returns an audio embedding as a torch.tensor
|
get_embedding
|
python
|
fudan-generative-vision/hallo
|
hallo/datasets/audio_processor.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/datasets/audio_processor.py
|
MIT
|
def preprocess(self, source_image_path: str, cache_dir: str, face_region_ratio: float):
    """
    Preprocess the source image for face analysis and mask-conditioned generation.

    Detects the largest face (falling back to the whole image when none is
    found), extracts its embedding, renders face/lip/background masks into
    *cache_dir* and loads them at the four attention resolutions.

    Parameters:
        source_image_path (str): The path to the source image.
        cache_dir (str): The directory to cache intermediate mask images.
        face_region_ratio (float): Ratio controlling the rendered face region; forwarded to ``get_mask``.

    Returns:
        tuple: ``(pixel_values_ref_img, face_mask, face_emb,
        pixel_values_full_mask, pixel_values_face_mask, pixel_values_lip_mask)``.
    """
    source_image = Image.open(source_image_path)
    ref_image_pil = source_image.convert("RGB")
    # 1. image augmentation
    pixel_values_ref_img = self._augmentation(ref_image_pil, self.pixel_transform)

    # 2.1 detect face
    faces = self.face_analysis.get(cv2.cvtColor(np.array(ref_image_pil.copy()), cv2.COLOR_RGB2BGR))
    if not faces:
        print("No faces detected in the image. Using the entire image as the face region.")
        # Use the entire image as the face region
        face = {
            "bbox": [0, 0, ref_image_pil.width, ref_image_pil.height],
            "embedding": np.zeros(512)
        }
    else:
        # Sort faces by bounding-box area and select the largest one
        faces_sorted = sorted(faces, key=lambda x: (x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]), reverse=True)
        face = faces_sorted[0]  # Select the largest face

    # 2.2 face embedding
    face_emb = face["embedding"]

    # 2.3 render face mask (writes <name>_face_mask.png etc. into cache_dir)
    get_mask(source_image_path, cache_dir, face_region_ratio)
    file_name = os.path.basename(source_image_path).split(".")[0]
    face_mask_pil = Image.open(
        os.path.join(cache_dir, f"{file_name}_face_mask.png")).convert("RGB")

    face_mask = self._augmentation(face_mask_pil, self.cond_transform)

    # 2.4 detect and expand lip, face mask
    sep_background_mask = Image.open(
        os.path.join(cache_dir, f"{file_name}_sep_background.png"))
    sep_face_mask = Image.open(
        os.path.join(cache_dir, f"{file_name}_sep_face.png"))
    sep_lip_mask = Image.open(
        os.path.join(cache_dir, f"{file_name}_sep_lip.png"))

    # each mask is loaded at the four attention resolutions (64/32/16/8)
    pixel_values_face_mask = [
        self._augmentation(sep_face_mask, self.attn_transform_64),
        self._augmentation(sep_face_mask, self.attn_transform_32),
        self._augmentation(sep_face_mask, self.attn_transform_16),
        self._augmentation(sep_face_mask, self.attn_transform_8),
    ]
    pixel_values_lip_mask = [
        self._augmentation(sep_lip_mask, self.attn_transform_64),
        self._augmentation(sep_lip_mask, self.attn_transform_32),
        self._augmentation(sep_lip_mask, self.attn_transform_16),
        self._augmentation(sep_lip_mask, self.attn_transform_8),
    ]
    pixel_values_full_mask = [
        self._augmentation(sep_background_mask, self.attn_transform_64),
        self._augmentation(sep_background_mask, self.attn_transform_32),
        self._augmentation(sep_background_mask, self.attn_transform_16),
        self._augmentation(sep_background_mask, self.attn_transform_8),
    ]

    # flatten each mask to (1, H*W) for use as attention weights
    pixel_values_full_mask = [mask.view(1, -1)
                              for mask in pixel_values_full_mask]
    pixel_values_face_mask = [mask.view(1, -1)
                              for mask in pixel_values_face_mask]
    pixel_values_lip_mask = [mask.view(1, -1)
                             for mask in pixel_values_lip_mask]

    return pixel_values_ref_img, face_mask, face_emb, pixel_values_full_mask, pixel_values_face_mask, pixel_values_lip_mask
|
Apply preprocessing to the source image to prepare for face analysis.
Parameters:
source_image_path (str): The path to the source image.
cache_dir (str): The directory to cache intermediate results.
Returns:
None
|
preprocess
|
python
|
fudan-generative-vision/hallo
|
hallo/datasets/image_processor.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/datasets/image_processor.py
|
MIT
|
def close(self):
    """
    Release resources held by the underlying FaceAnalysis models.

    Every model that exposes a ``Dispose`` method is disposed; models
    without one are left untouched.

    Returns:
        None.
    """
    for model in self.face_analysis.models.values():
        if hasattr(model, "Dispose"):
            model.Dispose()
|
Closes the ImageProcessor and releases any resources held by the FaceAnalysis instance.
Args:
self: The ImageProcessor instance.
Returns:
None.
|
close
|
python
|
fudan-generative-vision/hallo
|
hallo/datasets/image_processor.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/datasets/image_processor.py
|
MIT
|
def preprocess(self, source_image_path: str):
    """
    Extract a face embedding and mask set from a directory of source frames.

    Parameters:
        source_image_path (str): Path to a directory containing frame images.

    Returns:
        tuple: ``(face_mask, face_emb, sep_pose_mask, sep_face_mask,
        sep_lip_mask)``; entries remain ``None`` when the corresponding
        analyzer (``face_analysis`` / ``landmarker``) is not configured.
    """
    # 1. get face embedding from the first frame with a detectable face
    face_mask, face_emb, sep_pose_mask, sep_face_mask, sep_lip_mask = None, None, None, None, None
    if self.face_analysis:
        for frame in sorted(os.listdir(source_image_path)):
            try:
                source_image = Image.open(
                    os.path.join(source_image_path, frame))
                ref_image_pil = source_image.convert("RGB")
                # 2.1 detect face
                faces = self.face_analysis.get(cv2.cvtColor(
                    np.array(ref_image_pil.copy()), cv2.COLOR_RGB2BGR))
                # use max size face
                face = sorted(faces, key=lambda x: (
                    x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]))[-1]
                # 2.2 face embedding
                face_emb = face["embedding"]
                if face_emb is not None:
                    break
            except Exception as _:
                # NOTE(review): frames that fail detection are skipped silently -- confirm intended
                continue

    if self.landmarker:
        # 3.1 get landmark
        landmarks, height, width = get_landmark_overframes(
            self.landmarker, source_image_path)
        assert len(landmarks) == len(os.listdir(source_image_path))

        # 3 render face and lip mask across all frames
        face_mask = get_union_face_mask(landmarks, height, width)
        lip_mask = get_union_lip_mask(landmarks, height, width)

        # 4 gaussian blur to soften the mask edges
        blur_face_mask = blur_mask(face_mask, (64, 64), (51, 51))
        blur_lip_mask = blur_mask(lip_mask, (64, 64), (31, 31))

        # 5 separate masks: face-without-lip, background (pose), and lip
        sep_face_mask = cv2.subtract(blur_face_mask, blur_lip_mask)
        sep_pose_mask = 255.0 - blur_face_mask
        sep_lip_mask = blur_lip_mask

    return face_mask, face_emb, sep_pose_mask, sep_face_mask, sep_lip_mask
|
Apply preprocessing to the source image to prepare for face analysis.
Parameters:
source_image_path (str): The path to the source image.
cache_dir (str): The directory to cache intermediate results.
Returns:
None
|
preprocess
|
python
|
fudan-generative-vision/hallo
|
hallo/datasets/image_processor.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/datasets/image_processor.py
|
MIT
|
def close(self):
    """
    Dispose of the FaceAnalysis models held by this processor.

    Args:
        self: The ImageProcessor instance.

    Returns:
        None.
    """
    models = self.face_analysis.models
    for key in models:
        candidate = models[key]
        if hasattr(candidate, "Dispose"):
            candidate.Dispose()
|
Closes the ImageProcessor and releases any resources held by the FaceAnalysis instance.
Args:
self: The ImageProcessor instance.
Returns:
None.
|
close
|
python
|
fudan-generative-vision/hallo
|
hallo/datasets/image_processor.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/datasets/image_processor.py
|
MIT
|
def augmentation(self, image, transform, state=None):
    """
    Apply *transform* to *image*, optionally restoring a saved RNG state
    first so that random augmentations are reproducible.

    Args:
        image (PIL.Image): The input image.
        transform (torchvision.transforms.Compose): The data augmentation transforms.
        state (torch.ByteTensor, optional): RNG state to restore before
            transforming. Defaults to None (the current RNG state is used).

    Returns:
        PIL.Image: The augmented image.
    """
    if state is None:
        return transform(image)
    torch.set_rng_state(state)
    return transform(image)
|
Apply data augmentation to the input image.
Args:
image (PIL.Image): The input image.
transform (torchvision.transforms.Compose): The data augmentation transforms.
state (dict, optional): The random state for reproducibility. Defaults to None.
Returns:
PIL.Image: The augmented image.
|
augmentation
|
python
|
fudan-generative-vision/hallo
|
hallo/datasets/mask_image.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/datasets/mask_image.py
|
MIT
|
def augmentation(self, images, transform, state=None):
    """
    Apply *transform* to a single image or each image in a list, optionally
    restoring a saved RNG state first for reproducibility.

    Args:
        images (List[PIL.Image] or PIL.Image): The input image(s) to transform.
        transform (torchvision.transforms.Compose): The transformation to apply.
        state (torch.ByteTensor, optional): RNG state to restore before
            transforming. Defaults to None.

    Returns:
        torch.Tensor: The transformed image(s); shape (f, c, h, w) for a list
        input (stacked along a new leading dimension), (c, h, w) for a single image.
    """
    if state is not None:
        torch.set_rng_state(state)
    if not isinstance(images, List):
        return transform(images)  # (c, h, w)
    return torch.stack([transform(frame) for frame in images], dim=0)  # (f, c, h, w)
|
Apply the given transformation to the input images.
Args:
images (List[PIL.Image] or PIL.Image): The input images to be transformed.
transform (torchvision.transforms.Compose): The transformation to be applied to the images.
state (torch.ByteTensor, optional): The state of the random number generator.
If provided, it will set the RNG state to this value before applying the transformation. Defaults to None.
Returns:
torch.Tensor: The transformed images as a tensor.
If the input was a list of images, the tensor will have shape (f, c, h, w),
where f is the number of images, c is the number of channels, h is the height, and w is the width.
If the input was a single image, the tensor will have shape (c, h, w),
where c is the number of channels, h is the height, and w is the width.
|
augmentation
|
python
|
fudan-generative-vision/hallo
|
hallo/datasets/talk_video.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/datasets/talk_video.py
|
MIT
|
def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
    """
    Apply gated self-attention over the concatenation of visual tokens *x*
    and object tokens *objs*, returning updated visual tokens only.

    Args:
        x (torch.Tensor): Visual token tensor; assumed shape (batch, n_visual, dim) -- TODO confirm.
        objs (torch.Tensor): Object token tensor, projected to the same dim by ``self.linear``.

    Returns:
        torch.Tensor: The updated visual tokens (same shape as *x*).
    """
    # disabled gate makes the block an identity mapping over x
    if not self.enabled:
        return x

    n_visual = x.shape[1]
    objs = self.linear(objs)

    # attend over [x; objs] but keep only the first n_visual outputs; the
    # learned alpha parameters (through tanh) gate each residual contribution
    x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
    x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))

    return x
|
Apply the Gated Self-Attention mechanism to the input tensor `x` and object tensor `objs`.
Args:
x (torch.Tensor): The input tensor.
objs (torch.Tensor): The object tensor.
Returns:
torch.Tensor: The output tensor after applying Gated Self-Attention.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/attention.py
|
MIT
|
def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
    """
    Set the chunk size used for chunked feed-forward processing in the
    transformer block.

    Args:
        chunk_size (Optional[int]): Size of the chunks the feed-forward input
            is split into; ``None`` disables chunking.
        dim (int, optional): The dimension along which to split the input tensor into chunks. Defaults to 0.

    Returns:
        None.
    """
    self._chunk_size = chunk_size
    self._chunk_dim = dim
|
Sets the chunk size for feed-forward processing in the transformer block.
Args:
chunk_size (Optional[int]): The size of the chunks to process in feed-forward layers.
If None, the chunk size is set to the maximum possible value.
dim (int, optional): The dimension along which to split the input tensor into chunks. Defaults to 0.
Returns:
None.
|
set_chunk_feed_forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/attention.py
|
MIT
|
def forward(
    self,
    hidden_states: torch.FloatTensor,
    attention_mask: Optional[torch.FloatTensor] = None,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    encoder_attention_mask: Optional[torch.FloatTensor] = None,
    timestep: Optional[torch.LongTensor] = None,
    cross_attention_kwargs: Dict[str, Any] = None,
    class_labels: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
    """
    This function defines the forward pass of the BasicTransformerBlock.
    Args:
        self (BasicTransformerBlock):
            An instance of the BasicTransformerBlock class.
        hidden_states (torch.FloatTensor):
            A tensor containing the hidden states.
        attention_mask (Optional[torch.FloatTensor], optional):
            A tensor containing the attention mask. Defaults to None.
        encoder_hidden_states (Optional[torch.FloatTensor], optional):
            A tensor containing the encoder hidden states. Defaults to None.
        encoder_attention_mask (Optional[torch.FloatTensor], optional):
            A tensor containing the encoder attention mask. Defaults to None.
        timestep (Optional[torch.LongTensor], optional):
            A tensor containing the timesteps. Defaults to None.
        cross_attention_kwargs (Dict[str, Any], optional):
            Additional cross-attention arguments. Defaults to None.
        class_labels (Optional[torch.LongTensor], optional):
            A tensor containing the class labels. Defaults to None.
    Returns:
        torch.FloatTensor:
            A tensor containing the transformed hidden states.
    """
    # Notice that normalization is always applied before the real computation in the following blocks.
    # 0. Self-Attention
    batch_size = hidden_states.shape[0]
    # Gate/scale/shift tensors are produced only by the AdaLayerNormZero and
    # "single" variants; pre-initialized so later branches can reference them.
    gate_msa = None
    scale_mlp = None
    shift_mlp = None
    gate_mlp = None
    # Select the self-attention pre-normalization according to the norm
    # variant chosen at construction time (exactly one flag must be set).
    if self.use_ada_layer_norm:
        norm_hidden_states = self.norm1(hidden_states, timestep)
    elif self.use_ada_layer_norm_zero:
        norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
            hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
        )
    elif self.use_layer_norm:
        norm_hidden_states = self.norm1(hidden_states)
    elif self.use_ada_layer_norm_single:
        # PixArt-style adaLN-single: a learned table plus per-timestep offsets
        # yields six modulation tensors (shift/scale/gate for MSA and MLP).
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
            self.scale_shift_table[None] +
            timestep.reshape(batch_size, 6, -1)
        ).chunk(6, dim=1)
        norm_hidden_states = self.norm1(hidden_states)
        norm_hidden_states = norm_hidden_states * \
            (1 + scale_msa) + shift_msa
        norm_hidden_states = norm_hidden_states.squeeze(1)
    else:
        raise ValueError("Incorrect norm used")
    # Optional positional embedding on the normalized states.
    if self.pos_embed is not None:
        norm_hidden_states = self.pos_embed(norm_hidden_states)
    # 1. Retrieve lora scale.
    lora_scale = (
        cross_attention_kwargs.get("scale", 1.0)
        if cross_attention_kwargs is not None
        else 1.0
    )
    # 2. Prepare GLIGEN inputs
    # Copy before popping so the caller's dict is not mutated.
    cross_attention_kwargs = (
        cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
    )
    gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
    attn_output = self.attn1(
        norm_hidden_states,
        encoder_hidden_states=(
            encoder_hidden_states if self.only_cross_attention else None
        ),
        attention_mask=attention_mask,
        **cross_attention_kwargs,
    )
    # Apply the learned self-attention gate for the adaLN variants.
    if self.use_ada_layer_norm_zero:
        attn_output = gate_msa.unsqueeze(1) * attn_output
    elif self.use_ada_layer_norm_single:
        attn_output = gate_msa * attn_output
    hidden_states = attn_output + hidden_states
    # Drop the singleton dim 1 when the output comes back 4-D.
    if hidden_states.ndim == 4:
        hidden_states = hidden_states.squeeze(1)
    # 2.5 GLIGEN Control
    if gligen_kwargs is not None:
        hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
    # 3. Cross-Attention
    if self.attn2 is not None:
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm2(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero or self.use_layer_norm:
            norm_hidden_states = self.norm2(hidden_states)
        elif self.use_ada_layer_norm_single:
            # For PixArt norm2 isn't applied here:
            # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
            norm_hidden_states = hidden_states
        else:
            raise ValueError("Incorrect norm")
        if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
            norm_hidden_states = self.pos_embed(norm_hidden_states)
        attn_output = self.attn2(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=encoder_attention_mask,
            **cross_attention_kwargs,
        )
        hidden_states = attn_output + hidden_states
    # 4. Feed-forward
    if not self.use_ada_layer_norm_single:
        norm_hidden_states = self.norm3(hidden_states)
    if self.use_ada_layer_norm_zero:
        norm_hidden_states = (
            norm_hidden_states *
            (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        )
    # The "single" variant normalizes the FF input with norm2 (PixArt layout).
    if self.use_ada_layer_norm_single:
        norm_hidden_states = self.norm2(hidden_states)
        norm_hidden_states = norm_hidden_states * \
            (1 + scale_mlp) + shift_mlp
    ff_output = self.ff(norm_hidden_states, scale=lora_scale)
    # Apply the learned feed-forward gate for the adaLN variants.
    if self.use_ada_layer_norm_zero:
        ff_output = gate_mlp.unsqueeze(1) * ff_output
    elif self.use_ada_layer_norm_single:
        ff_output = gate_mlp * ff_output
    hidden_states = ff_output + hidden_states
    if hidden_states.ndim == 4:
        hidden_states = hidden_states.squeeze(1)
    return hidden_states
|
This function defines the forward pass of the BasicTransformerBlock.
Args:
self (BasicTransformerBlock):
An instance of the BasicTransformerBlock class.
hidden_states (torch.FloatTensor):
A tensor containing the hidden states.
attention_mask (Optional[torch.FloatTensor], optional):
A tensor containing the attention mask. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional):
A tensor containing the encoder hidden states. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional):
A tensor containing the encoder attention mask. Defaults to None.
timestep (Optional[torch.LongTensor], optional):
A tensor containing the timesteps. Defaults to None.
cross_attention_kwargs (Dict[str, Any], optional):
Additional cross-attention arguments. Defaults to None.
class_labels (Optional[torch.LongTensor], optional):
A tensor containing the class labels. Defaults to None.
Returns:
torch.FloatTensor:
A tensor containing the transformed hidden states.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/attention.py
|
MIT
|
def __init__(
    self,
    dim: int,
    num_attention_heads: int,
    attention_head_dim: int,
    dropout=0.0,
    cross_attention_dim: Optional[int] = None,
    activation_fn: str = "geglu",
    num_embeds_ada_norm: Optional[int] = None,
    attention_bias: bool = False,
    only_cross_attention: bool = False,
    upcast_attention: bool = False,
    unet_use_cross_frame_attention=None,
    unet_use_temporal_attention=None,
):
    """Build a TemporalBasicTransformerBlock.

    The block stacks self-attention, optional cross-attention, a feed-forward
    network, and (optionally) a temporal attention layer, each with its own
    pre-normalization. Useful for video models where temporal structure
    across frames must be captured.

    Args:
        dim (int): Embedding dimension of inputs/outputs.
        num_attention_heads (int): Number of attention heads.
        attention_head_dim (int): Dimension per attention head.
        dropout (float, optional): Dropout probability. Defaults to 0.0.
        cross_attention_dim (Optional[int]): Encoder dimension for
            cross-attention; ``None`` disables the cross-attention sublayer.
        activation_fn (str, optional): Feed-forward activation. Defaults to "geglu".
        num_embeds_ada_norm (Optional[int]): Embedding count for adaptive
            normalization; ``None`` selects plain LayerNorm.
        attention_bias (bool, optional): Use bias in attention. Defaults to False.
        only_cross_attention (bool, optional): Restrict to cross-attention only.
        upcast_attention (bool, optional): Upcast attention computation.
        unet_use_cross_frame_attention: If truthy, attn1 attends across frames.
        unet_use_temporal_attention: If truthy, adds a temporal attention layer.
    """
    super().__init__()
    self.only_cross_attention = only_cross_attention
    self.use_ada_layer_norm = num_embeds_ada_norm is not None
    self.unet_use_cross_frame_attention = unet_use_cross_frame_attention
    self.unet_use_temporal_attention = unet_use_temporal_attention

    def _build_norm():
        # AdaLayerNorm conditions on the timestep; otherwise plain LayerNorm.
        if self.use_ada_layer_norm:
            return AdaLayerNorm(dim, num_embeds_ada_norm)
        return nn.LayerNorm(dim)

    # Self-attention (SC-Attn).
    self.attn1 = Attention(
        query_dim=dim,
        heads=num_attention_heads,
        dim_head=attention_head_dim,
        dropout=dropout,
        bias=attention_bias,
        upcast_attention=upcast_attention,
    )
    self.norm1 = _build_norm()

    # Cross-attention sublayer, only when an encoder dimension is given.
    if cross_attention_dim is None:
        self.attn2 = None
        self.norm2 = None
    else:
        self.attn2 = Attention(
            query_dim=dim,
            cross_attention_dim=cross_attention_dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            upcast_attention=upcast_attention,
        )
        self.norm2 = _build_norm()

    # Feed-forward sublayer.
    self.ff = FeedForward(dim, dropout=dropout,
                          activation_fn=activation_fn)
    self.norm3 = nn.LayerNorm(dim)
    self.use_ada_layer_norm_zero = False

    # Optional temporal attention; its output projection is zero-initialized
    # so the layer starts as an identity residual branch.
    if unet_use_temporal_attention:
        self.attn_temp = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            upcast_attention=upcast_attention,
        )
        nn.init.zeros_(self.attn_temp.to_out[0].weight.data)
        self.norm_temp = _build_norm()
|
The TemporalBasicTransformerBlock class is a PyTorch module that extends the BasicTransformerBlock to include temporal attention mechanisms.
This is particularly useful for video-related tasks, where the model needs to capture the temporal information within the sequence of frames.
The block consists of self-attention, cross-attention, feed-forward, and temporal attention mechanisms.
dim (int): The dimension of the input and output embeddings.
num_attention_heads (int): The number of attention heads in the multi-head self-attention mechanism.
attention_head_dim (int): The dimension of each attention head.
dropout (float, optional): The dropout probability for the attention scores. Defaults to 0.0.
cross_attention_dim (int, optional): The dimension of the cross-attention mechanism. Defaults to None.
activation_fn (str, optional): The activation function used in the feed-forward layer. Defaults to "geglu".
num_embeds_ada_norm (int, optional): The number of embeddings for adaptive normalization. Defaults to None.
attention_bias (bool, optional): If True, uses bias in the attention mechanism. Defaults to False.
only_cross_attention (bool, optional): If True, only uses cross-attention. Defaults to False.
upcast_attention (bool, optional): If True, upcasts the attention mechanism for better performance. Defaults to False.
unet_use_cross_frame_attention (bool, optional): If True, uses cross-frame attention in the UNet model. Defaults to None.
unet_use_temporal_attention (bool, optional): If True, uses temporal attention in the UNet model. Defaults to None.
Forward method:
hidden_states (torch.FloatTensor): The input hidden states.
encoder_hidden_states (torch.FloatTensor, optional): The encoder hidden states. Defaults to None.
timestep (torch.LongTensor, optional): The current timestep for the transformer model. Defaults to None.
attention_mask (torch.FloatTensor, optional): The attention mask for the self-attention mechanism. Defaults to None.
video_length (int, optional): The length of the video sequence. Defaults to None.
Returns:
torch.FloatTensor: The output hidden states after passing through the TemporalBasicTransformerBlock.
|
__init__
|
python
|
fudan-generative-vision/hallo
|
hallo/models/attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/attention.py
|
MIT
|
def forward(
    self,
    hidden_states,
    encoder_hidden_states=None,
    timestep=None,
    attention_mask=None,
    video_length=None,
):
    """Apply self-attention, optional cross-attention, feed-forward, and
    optional temporal attention, each as a pre-normalized residual branch.

    Args:
        hidden_states (torch.FloatTensor): Input of shape (batch, seq, dim).
        encoder_hidden_states (torch.FloatTensor, optional): Encoder states
            for cross-attention.
        timestep (torch.LongTensor, optional): Timestep for adaptive norms.
        attention_mask (torch.FloatTensor, optional): Attention mask.
        video_length (int, optional): Frame count for temporal attention.

    Returns:
        torch.FloatTensor: Output of shape (batch, seq, dim).
    """
    def _norm(layer, states):
        # Adaptive layer norms additionally condition on the timestep.
        return layer(states, timestep) if self.use_ada_layer_norm else layer(states)

    # Self-attention (optionally across frames) with residual.
    normed = _norm(self.norm1, hidden_states)
    if self.unet_use_cross_frame_attention:
        attn_out = self.attn1(
            normed, attention_mask=attention_mask, video_length=video_length
        )
    else:
        attn_out = self.attn1(normed, attention_mask=attention_mask)
    hidden_states = attn_out + hidden_states

    # Cross-attention with residual, when configured.
    if self.attn2 is not None:
        normed = _norm(self.norm2, hidden_states)
        hidden_states = (
            self.attn2(
                normed,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
            )
            + hidden_states
        )

    # Feed-forward with residual.
    hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states

    # Temporal attention: fold the spatial axis into the batch so attention
    # runs over the frame axis, then restore the original layout.
    if self.unet_use_temporal_attention:
        d = hidden_states.shape[1]
        hidden_states = rearrange(
            hidden_states, "(b f) d c -> (b d) f c", f=video_length
        )
        normed = _norm(self.norm_temp, hidden_states)
        hidden_states = self.attn_temp(normed) + hidden_states
        hidden_states = rearrange(
            hidden_states, "(b d) f c -> (b f) d c", d=d)

    return hidden_states
|
Forward pass for the TemporalBasicTransformerBlock.
Args:
hidden_states (torch.FloatTensor): The input hidden states with shape (batch_size, seq_len, dim).
encoder_hidden_states (torch.FloatTensor, optional): The encoder hidden states with shape (batch_size, src_seq_len, dim).
timestep (torch.LongTensor, optional): The timestep for the transformer block.
attention_mask (torch.FloatTensor, optional): The attention mask with shape (batch_size, seq_len, seq_len).
video_length (int, optional): The length of the video sequence.
Returns:
torch.FloatTensor: The output tensor after passing through the transformer block with shape (batch_size, seq_len, dim).
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/attention.py
|
MIT
|
def __init__(
    self,
    dim: int,
    num_attention_heads: int,
    attention_head_dim: int,
    dropout=0.0,
    cross_attention_dim: Optional[int] = None,
    activation_fn: str = "geglu",
    num_embeds_ada_norm: Optional[int] = None,
    attention_bias: bool = False,
    only_cross_attention: bool = False,
    upcast_attention: bool = False,
    unet_use_cross_frame_attention=None,
    unet_use_temporal_attention=None,
    depth=0,
    unet_block_name=None,
    stack_enable_blocks_name: Optional[List[str]] = None,
    stack_enable_blocks_depth: Optional[List[int]] = None,
):
    """
    Initializes the AudioTemporalBasicTransformerBlock module.
    Args:
        dim (int): The dimension of the input and output embeddings.
        num_attention_heads (int): The number of attention heads in the multi-head self-attention mechanism.
        attention_head_dim (int): The dimension of each attention head.
        dropout (float, optional): The dropout probability for the attention mechanism. Defaults to 0.0.
        cross_attention_dim (Optional[int], optional): The dimension of the cross-attention mechanism. Defaults to None.
        activation_fn (str, optional): The activation function to be used in the feed-forward network. Defaults to "geglu".
        num_embeds_ada_norm (Optional[int], optional): The number of embeddings for adaptive normalization. Defaults to None.
        attention_bias (bool, optional): If True, uses bias in the attention mechanism. Defaults to False.
        only_cross_attention (bool, optional): If True, only uses cross-attention. Defaults to False.
        upcast_attention (bool, optional): If True, upcasts the attention mechanism to float32. Defaults to False.
        unet_use_cross_frame_attention (Optional[bool], optional): If True, uses cross-frame attention in UNet. Defaults to None.
        unet_use_temporal_attention (Optional[bool], optional): If True, uses temporal attention in UNet. Defaults to None.
        depth (int, optional): The depth of the transformer block. Defaults to 0.
        unet_block_name (Optional[str], optional): The name of the UNet block. Defaults to None.
        stack_enable_blocks_name (Optional[List[str]], optional): The list of enabled blocks in the stack. Defaults to None.
        stack_enable_blocks_depth (Optional[List[int]], optional): The list of depths for the enabled blocks in the stack. Defaults to None.
    """
    super().__init__()
    self.only_cross_attention = only_cross_attention
    self.use_ada_layer_norm = num_embeds_ada_norm is not None
    self.unet_use_cross_frame_attention = unet_use_cross_frame_attention
    self.unet_use_temporal_attention = unet_use_temporal_attention
    self.unet_block_name = unet_block_name
    self.depth = depth
    # Zero-initialized 1x1 convolutions gate the full/face/lip audio-attention
    # branches so they contribute nothing at the start of training.
    zero_conv_full = nn.Conv2d(
        dim, dim, kernel_size=1)
    self.zero_conv_full = zero_module(zero_conv_full)
    zero_conv_face = nn.Conv2d(
        dim, dim, kernel_size=1)
    self.zero_conv_face = zero_module(zero_conv_face)
    zero_conv_lip = nn.Conv2d(
        dim, dim, kernel_size=1)
    self.zero_conv_lip = zero_module(zero_conv_lip)
    # SC-Attn
    self.attn1 = Attention(
        query_dim=dim,
        heads=num_attention_heads,
        dim_head=attention_head_dim,
        dropout=dropout,
        bias=attention_bias,
        upcast_attention=upcast_attention,
    )
    self.norm1 = (
        AdaLayerNorm(dim, num_embeds_ada_norm)
        if self.use_ada_layer_norm
        else nn.LayerNorm(dim)
    )
    # Cross-Attn
    if cross_attention_dim is not None:
        # When this block/depth is flagged for "stacked" audio attention,
        # build three parallel cross-attention branches (full frame, face,
        # lip region) instead of the single attn2 branch.
        if (stack_enable_blocks_name is not None and
            stack_enable_blocks_depth is not None and
            self.unet_block_name in stack_enable_blocks_name and
            self.depth in stack_enable_blocks_depth):
            self.attn2_0 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )
            self.attn2_1 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )
            self.attn2_2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )
            # attn2 is None in stacked mode; forward() dispatches on attn2_0.
            self.attn2 = None
        else:
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )
            self.attn2_0=None
    else:
        self.attn2 = None
        self.attn2_0 = None
    if cross_attention_dim is not None:
        self.norm2 = (
            AdaLayerNorm(dim, num_embeds_ada_norm)
            if self.use_ada_layer_norm
            else nn.LayerNorm(dim)
        )
    else:
        self.norm2 = None
    # Feed-forward
    self.ff = FeedForward(dim, dropout=dropout,
                          activation_fn=activation_fn)
    self.norm3 = nn.LayerNorm(dim)
    # This block never uses the AdaLayerNormZero variant.
    self.use_ada_layer_norm_zero = False
|
Initializes the AudioTemporalBasicTransformerBlock module.
Args:
dim (int): The dimension of the input and output embeddings.
num_attention_heads (int): The number of attention heads in the multi-head self-attention mechanism.
attention_head_dim (int): The dimension of each attention head.
dropout (float, optional): The dropout probability for the attention mechanism. Defaults to 0.0.
cross_attention_dim (Optional[int], optional): The dimension of the cross-attention mechanism. Defaults to None.
activation_fn (str, optional): The activation function to be used in the feed-forward network. Defaults to "geglu".
num_embeds_ada_norm (Optional[int], optional): The number of embeddings for adaptive normalization. Defaults to None.
attention_bias (bool, optional): If True, uses bias in the attention mechanism. Defaults to False.
only_cross_attention (bool, optional): If True, only uses cross-attention. Defaults to False.
upcast_attention (bool, optional): If True, upcasts the attention mechanism to float32. Defaults to False.
unet_use_cross_frame_attention (Optional[bool], optional): If True, uses cross-frame attention in UNet. Defaults to None.
unet_use_temporal_attention (Optional[bool], optional): If True, uses temporal attention in UNet. Defaults to None.
depth (int, optional): The depth of the transformer block. Defaults to 0.
unet_block_name (Optional[str], optional): The name of the UNet block. Defaults to None.
stack_enable_blocks_name (Optional[List[str]], optional): The list of enabled blocks in the stack. Defaults to None.
stack_enable_blocks_depth (Optional[List[int]], optional): The list of depths for the enabled blocks in the stack. Defaults to None.
|
__init__
|
python
|
fudan-generative-vision/hallo
|
hallo/models/attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/attention.py
|
MIT
|
def forward(
    self,
    hidden_states,
    encoder_hidden_states=None,
    timestep=None,
    attention_mask=None,
    full_mask=None,
    face_mask=None,
    lip_mask=None,
    motion_scale=None,
    video_length=None,
):
    """
    Forward pass for the AudioTemporalBasicTransformerBlock.
    Args:
        hidden_states (torch.FloatTensor): The input hidden states.
        encoder_hidden_states (torch.FloatTensor, optional): The encoder hidden states. Defaults to None.
        timestep (torch.LongTensor, optional): The timestep for the transformer block. Defaults to None.
        attention_mask (torch.FloatTensor, optional): The attention mask. Defaults to None.
        full_mask (torch.FloatTensor, optional): The full mask. Defaults to None.
        face_mask (torch.FloatTensor, optional): The face mask. Defaults to None.
        lip_mask (torch.FloatTensor, optional): The lip mask. Defaults to None.
        motion_scale (sequence of float, optional): Three weights applied to the
            full/face/lip attention branches in stacked mode; unweighted sum when None.
        video_length (int, optional): The length of the video. Defaults to None.
    Returns:
        torch.FloatTensor: The output tensor after passing through the AudioTemporalBasicTransformerBlock.
    """
    # Self-attention with pre-normalization and a residual connection.
    norm_hidden_states = (
        self.norm1(hidden_states, timestep)
        if self.use_ada_layer_norm
        else self.norm1(hidden_states)
    )
    if self.unet_use_cross_frame_attention:
        hidden_states = (
            self.attn1(
                norm_hidden_states,
                attention_mask=attention_mask,
                video_length=video_length,
            )
            + hidden_states
        )
    else:
        hidden_states = (
            self.attn1(norm_hidden_states, attention_mask=attention_mask)
            + hidden_states
        )
    if self.attn2 is not None:
        # Cross-Attention
        norm_hidden_states = (
            self.norm2(hidden_states, timestep)
            if self.use_ada_layer_norm
            else self.norm2(hidden_states)
        )
        hidden_states = self.attn2(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=attention_mask,
        ) + hidden_states
    elif self.attn2_0 is not None:
        # Stacked mode: three region-specific cross-attention branches
        # (full frame / face / lip), each masked, reshaped to the square
        # spatial layout, and gated by a zero-initialized 1x1 conv.
        norm_hidden_states = (
            self.norm2(hidden_states, timestep)
            if self.use_ada_layer_norm
            else self.norm2(hidden_states)
        )
        # Masks are indexed by this block's depth in the UNet.
        level = self.depth
        full_hidden_states = (
            self.attn2_0(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
            ) * full_mask[level][:, :, None]
        )
        bz, sz, c = full_hidden_states.shape
        # Token sequence is treated as a square spatial grid (sz == sz_sqrt**2).
        sz_sqrt = int(sz ** 0.5)
        full_hidden_states = full_hidden_states.reshape(
            bz, sz_sqrt, sz_sqrt, c).permute(0, 3, 1, 2)
        full_hidden_states = self.zero_conv_full(full_hidden_states).permute(0, 2, 3, 1).reshape(bz, -1, c)
        face_hidden_state = (
            self.attn2_1(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
            ) * face_mask[level][:, :, None]
        )
        face_hidden_state = face_hidden_state.reshape(
            bz, sz_sqrt, sz_sqrt, c).permute(0, 3, 1, 2)
        face_hidden_state = self.zero_conv_face(
            face_hidden_state).permute(0, 2, 3, 1).reshape(bz, -1, c)
        lip_hidden_state = (
            self.attn2_2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
            ) * lip_mask[level][:, :, None]
        )  # [32, 4096, 320]
        lip_hidden_state = lip_hidden_state.reshape(
            bz, sz_sqrt, sz_sqrt, c).permute(0, 3, 1, 2)
        lip_hidden_state = self.zero_conv_lip(
            lip_hidden_state).permute(0, 2, 3, 1).reshape(bz, -1, c)
        # Combine the three branches, optionally weighted per branch.
        if motion_scale is not None:
            hidden_states = (
                motion_scale[0] * full_hidden_states +
                motion_scale[1] * face_hidden_state +
                motion_scale[2] * lip_hidden_state + hidden_states
            )
        else:
            hidden_states = (
                full_hidden_states +
                face_hidden_state +
                lip_hidden_state + hidden_states
            )
    # Feed-forward
    hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states
    return hidden_states
|
Forward pass for the AudioTemporalBasicTransformerBlock.
Args:
hidden_states (torch.FloatTensor): The input hidden states.
encoder_hidden_states (torch.FloatTensor, optional): The encoder hidden states. Defaults to None.
timestep (torch.LongTensor, optional): The timestep for the transformer block. Defaults to None.
attention_mask (torch.FloatTensor, optional): The attention mask. Defaults to None.
full_mask (torch.FloatTensor, optional): The full mask. Defaults to None.
face_mask (torch.FloatTensor, optional): The face mask. Defaults to None.
lip_mask (torch.FloatTensor, optional): The lip mask. Defaults to None.
video_length (int, optional): The length of the video. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the AudioTemporalBasicTransformerBlock.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/attention.py
|
MIT
|
def zero_module(module):
    """
    Zero-initialize all parameters of a module, in place.

    Args:
        module (nn.Module): The module whose parameters are zeroed.

    Returns:
        nn.Module: The same module instance, returned so the call can be
        used inline (e.g. ``self.conv = zero_module(nn.Conv2d(...))``).
    """
    for p in module.parameters():
        nn.init.zeros_(p)
    return module
|
Zeroes out the parameters of a given module.
Args:
module (nn.Module): The module whose parameters need to be zeroed out.
Returns:
None.
|
zero_module
|
python
|
fudan-generative-vision/hallo
|
hallo/models/attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/attention.py
|
MIT
|
def forward(self, audio_embeds):
    """Project per-frame audio embeddings into context tokens.

    Args:
        audio_embeds (torch.Tensor): Audio features of shape
            (batch_size, video_length, window, blocks, channels).

    Returns:
        torch.Tensor: Normalized context tokens of shape
            (batch_size, video_length, context_tokens, output_dim).
    """
    video_length = audio_embeds.shape[1]
    # Fold the frame axis into the batch so each frame is projected independently.
    flat = audio_embeds.flatten(0, 1)
    merged_batch = flat.shape[0]
    # Collapse (window, blocks, channels) into one feature vector per frame.
    features = flat.reshape(merged_batch, -1)

    features = torch.relu(self.proj1(features))
    features = torch.relu(self.proj2(features))
    context_tokens = self.proj3(features).reshape(
        merged_batch, self.context_tokens, self.output_dim
    )
    context_tokens = self.norm(context_tokens)

    # Restore the (batch, frames) layout.
    return context_tokens.reshape(
        -1, video_length, self.context_tokens, self.output_dim
    )
|
Defines the forward pass for the AudioProjModel.
Parameters:
audio_embeds (torch.Tensor): The input audio embeddings with shape (batch_size, video_length, blocks, channels).
Returns:
context_tokens (torch.Tensor): The output context tokens with shape (batch_size, video_length, context_tokens, output_dim).
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/audio_proj.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/audio_proj.py
|
MIT
|
def forward(self, conditioning):
    """Encode a conditioning map into an embedding via the convolution stack.

    Args:
        conditioning (Tensor): Input conditioning tensor.

    Returns:
        Tensor: Embedding produced by the final output convolution.
    """
    out = F.silu(self.conv_in(conditioning))
    # Each intermediate block is followed by a SiLU nonlinearity.
    for layer in self.blocks:
        out = F.silu(layer(out))
    return self.conv_out(out)
|
Forward pass of the FaceLocator model.
Args:
conditioning (Tensor): The input conditioning tensor.
Returns:
Tensor: The output embedding tensor.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/face_locator.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/face_locator.py
|
MIT
|
def forward(self, image_embeds):
    """Project image embeddings into extra context tokens.

    Args:
        image_embeds (torch.Tensor): Image embeddings whose last dimension
            is clip_embeddings_dim.

    Returns:
        torch.Tensor: Normalized context tokens of shape
            (batch_size, clip_extra_context_tokens, cross_attention_dim).
    """
    projected = self.proj(image_embeds)
    # Split the projection into per-token vectors of the cross-attention width.
    tokens = projected.reshape(
        -1, self.clip_extra_context_tokens, self.cross_attention_dim
    )
    return self.norm(tokens)
|
Forward pass of the ImageProjModel, which takes in image embeddings and returns the
projected tokens after reshaping and normalization.
Args:
image_embeds (torch.Tensor): The input image embeddings, with shape
batch_size x num_image_tokens x clip_embeddings_dim.
Returns:
clip_extra_context_tokens (torch.Tensor): The projected tokens after reshaping
and normalization, with shape batch_size x (clip_extra_context_tokens *
cross_attention_dim).
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/image_proj.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/image_proj.py
|
MIT
|
def zero_module(module):
    """
    Reset every parameter of a module to zero, in place.

    Args:
        module: The PyTorch module whose parameters are zeroed.

    Returns:
        The same module, with all of its parameters zeroed out.
    """
    # detach() avoids tracking the in-place zeroing in autograd.
    for param in module.parameters():
        param.detach().zero_()
    return module
|
Zero out the parameters of a module and return it.
Args:
- module: A PyTorch module to zero out its parameters.
Returns:
A zeroed out PyTorch module.
|
zero_module
|
python
|
fudan-generative-vision/hallo
|
hallo/models/motion_module.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/motion_module.py
|
MIT
|
def get_motion_module(in_channels, motion_module_type: str, motion_module_kwargs: dict):
    """
    Build a motion module of the requested type.

    Args:
        in_channels (int): Number of input channels for the motion module.
        motion_module_type (str): Type of motion module to create; currently
            only "Vanilla" is supported.
        motion_module_kwargs (dict): Extra keyword arguments forwarded to the
            motion module constructor.

    Returns:
        VanillaTemporalModule: The constructed motion module.

    Raises:
        ValueError: If an unsupported motion_module_type is provided.
    """
    if motion_module_type == "Vanilla":
        return VanillaTemporalModule(
            in_channels=in_channels,
            **motion_module_kwargs,
        )
    # Name the offending value so misconfigurations are easy to diagnose.
    raise ValueError(f"Unsupported motion_module_type: {motion_module_type!r}")
|
This function returns a motion module based on the given type and parameters.
Args:
- in_channels (int): The number of input channels for the motion module.
- motion_module_type (str): The type of motion module to create. Currently, only "Vanilla" is supported.
- motion_module_kwargs (dict): Additional keyword arguments to pass to the motion module constructor.
Returns:
VanillaTemporalModule: The created motion module.
Raises:
ValueError: If an unsupported motion_module_type is provided.
|
get_motion_module
|
python
|
fudan-generative-vision/hallo
|
hallo/models/motion_module.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/motion_module.py
|
MIT
|
def forward(
    self,
    input_tensor,
    encoder_hidden_states,
    attention_mask=None,
):
    """
    Forward pass that delegates directly to the temporal transformer.

    Args:
        input_tensor (torch.Tensor): Input hidden states.
        encoder_hidden_states (torch.Tensor, optional): Encoder hidden states
            forwarded to the temporal transformer.
        attention_mask (torch.Tensor, optional): Accepted for interface
            compatibility; not used by this wrapper.

    Returns:
        torch.Tensor: The temporal transformer's output.
    """
    return self.temporal_transformer(input_tensor, encoder_hidden_states)
|
Forward pass of the TemporalTransformer3DModel.
Args:
hidden_states (torch.Tensor): The hidden states of the model.
encoder_hidden_states (torch.Tensor, optional): The hidden states of the encoder.
attention_mask (torch.Tensor, optional): The attention mask.
Returns:
torch.Tensor: The output tensor after the forward pass.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/motion_module.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/motion_module.py
|
MIT
|
def forward(self, hidden_states, encoder_hidden_states=None):
    """
    Run the temporal transformer over a 5-D video tensor.

    Args:
        hidden_states (torch.Tensor): Input of shape (batch, channels, frames,
            height, width).
        encoder_hidden_states (torch.Tensor, optional): Conditioning hidden
            states passed to each transformer block.

    Returns:
        torch.Tensor: Output with the same (b, c, f, h, w) shape as the input,
        with the residual added back.
    """
    assert (
        hidden_states.dim() == 5
    ), f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
    video_length = hidden_states.shape[2]
    # Fold the frame axis into the batch axis so per-frame 2D ops apply.
    hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
    batch, _, height, width = hidden_states.shape
    residual = hidden_states

    hidden_states = self.norm(hidden_states)
    inner_dim = hidden_states.shape[1]
    # (b*f, c, h, w) -> (b*f, h*w, c): token layout expected by the blocks.
    hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
        batch, height * width, inner_dim
    )
    hidden_states = self.proj_in(hidden_states)

    for block in self.transformer_blocks:
        hidden_states = block(
            hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            video_length=video_length,
        )

    hidden_states = self.proj_out(hidden_states)
    # Restore the spatial layout before the residual connection.
    hidden_states = (
        hidden_states.reshape(batch, height, width, inner_dim)
        .permute(0, 3, 1, 2)
        .contiguous()
    )
    output = hidden_states + residual
    # Unfold frames back out of the batch axis.
    return rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
|
Forward pass for the TemporalTransformer3DModel.
Args:
hidden_states (torch.Tensor): The input hidden states with shape (batch_size, sequence_length, in_channels).
encoder_hidden_states (torch.Tensor, optional): The encoder hidden states with shape (batch_size, encoder_sequence_length, in_channels).
Returns:
torch.Tensor: The output hidden states with shape (batch_size, sequence_length, in_channels).
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/motion_module.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/motion_module.py
|
MIT
|
def forward(
    self,
    hidden_states,
    encoder_hidden_states=None,
    video_length=None,
):
    """
    Apply each (norm, attention) pair with residual connections, then the
    feed-forward sublayer.

    Args:
        hidden_states (torch.Tensor): Input hidden states.
        encoder_hidden_states (torch.Tensor, optional): Conditioning states,
            only forwarded to attention blocks that perform cross-attention.
        video_length (int, optional): Number of frames, forwarded to each
            attention block.

    Returns:
        torch.Tensor: Hidden states after all attention and feed-forward
        sublayers.
    """
    for attn, norm in zip(self.attention_blocks, self.norms):
        normed = norm(hidden_states)
        # Self-attention blocks must not receive the encoder states.
        context = encoder_hidden_states if attn.is_cross_attention else None
        hidden_states = (
            attn(normed, encoder_hidden_states=context, video_length=video_length)
            + hidden_states
        )
    hidden_states = self.ff(self.ff_norm(hidden_states)) + hidden_states
    return hidden_states
|
Forward pass for the TemporalTransformerBlock.
Args:
hidden_states (torch.Tensor): The input hidden states with shape
(batch_size, video_length, in_channels).
encoder_hidden_states (torch.Tensor, optional): The encoder hidden states
with shape (batch_size, encoder_length, in_channels).
video_length (int, optional): The length of the video.
Returns:
torch.Tensor: The output hidden states with shape
(batch_size, video_length, in_channels).
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/motion_module.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/motion_module.py
|
MIT
|
def set_use_memory_efficient_attention_xformers(
    self,
    use_memory_efficient_attention_xformers: bool,
    attention_op = None,
):
    """
    Enable or disable memory-efficient attention via xformers.

    Args:
        use_memory_efficient_attention_xformers (bool): Whether to use
            xformers' memory-efficient attention.
        attention_op: Unused; kept for signature compatibility — TODO confirm
            against the diffusers interface this mirrors.

    Raises:
        ModuleNotFoundError: If xformers is requested but not installed.
        ValueError: If xformers is requested but CUDA is unavailable.

    Returns:
        None
    """
    if use_memory_efficient_attention_xformers:
        if not is_xformers_available():
            raise ModuleNotFoundError(
                (
                    "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
                    " xformers"
                ),
                name="xformers",
            )
        if not torch.cuda.is_available():
            raise ValueError(
                "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
                " only available for GPU "
            )
        # Smoke-test that memory-efficient attention actually runs on this
        # setup; any failure propagates directly (the original wrapped this in
        # a no-op `except Exception as e: raise e`).
        _ = xformers.ops.memory_efficient_attention(
            torch.randn((1, 2, 40), device="cuda"),
            torch.randn((1, 2, 40), device="cuda"),
            torch.randn((1, 2, 40), device="cuda"),
        )
    # NOTE(review): both branches install the default AttnProcessor; the
    # xformers path above only validates availability. Preserved as-is.
    self.set_processor(AttnProcessor())
|
Sets the use of memory-efficient attention xformers for the VersatileAttention class.
Args:
use_memory_efficient_attention_xformers (bool): A boolean flag indicating whether to use memory-efficient attention xformers or not.
Returns:
None
|
set_use_memory_efficient_attention_xformers
|
python
|
fudan-generative-vision/hallo
|
hallo/models/motion_module.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/motion_module.py
|
MIT
|
def forward(
    self,
    hidden_states,
    encoder_hidden_states=None,
    attention_mask=None,
    video_length=None,
    **cross_attention_kwargs,
):
    """
    Temporal attention forward pass.

    Args:
        hidden_states (`torch.Tensor`): Input of shape (b*f, d, c) where d is
            the flattened spatial extent (H*W).
        encoder_hidden_states (`torch.Tensor`, optional): Conditioning states,
            repeated across the spatial extent before attention.
        attention_mask (`torch.Tensor`, optional): Mask forwarded to the
            attention processor.
        video_length (`int`, optional): Number of frames f used to regroup the
            batch axis.
        cross_attention_kwargs (`dict`, optional): Extra keyword arguments for
            the attention processor.

    Returns:
        `torch.Tensor`: Output with the same (b*f, d, c) layout as the input.

    Raises:
        NotImplementedError: If ``self.attention_mode`` is not "Temporal".
    """
    # Only temporal attention is implemented; bail out early otherwise.
    if self.attention_mode != "Temporal":
        raise NotImplementedError

    spatial_dim = hidden_states.shape[1]  # d means HxW
    # Group by spatial location so attention runs along the frame axis.
    hidden_states = rearrange(
        hidden_states, "(b f) d c -> (b d) f c", f=video_length
    )
    if self.pos_encoder is not None:
        hidden_states = self.pos_encoder(hidden_states)
    if encoder_hidden_states is not None:
        # Each spatial location attends to the same conditioning sequence.
        encoder_hidden_states = repeat(
            encoder_hidden_states, "b n c -> (b d) n c", d=spatial_dim
        )

    hidden_states = self.processor(
        self,
        hidden_states,
        encoder_hidden_states=encoder_hidden_states,
        attention_mask=attention_mask,
        **cross_attention_kwargs,
    )

    # Restore the original (b*f, d, c) layout.
    hidden_states = rearrange(hidden_states, "(b d) f c -> (b f) d c", d=spatial_dim)
    return hidden_states
|
Args:
hidden_states (`torch.Tensor`):
The hidden states to be passed through the model.
encoder_hidden_states (`torch.Tensor`, optional):
The encoder hidden states to be passed through the model.
attention_mask (`torch.Tensor`, optional):
The attention mask to be used in the model.
video_length (`int`, optional):
The length of the video.
cross_attention_kwargs (`dict`, optional):
Additional keyword arguments to be used for cross-attention.
Returns:
`torch.Tensor`:
The output tensor after passing through the model.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/motion_module.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/motion_module.py
|
MIT
|
def torch_dfs(model: torch.nn.Module):
    """
    Collect a module and all of its descendants in depth-first preorder.

    Walks the module tree rooted at ``model``, visiting each module before its
    children and exploring children left to right, exactly as a recursive
    preorder traversal would.

    Args:
        model (torch.nn.Module): The root module of the tree to traverse.

    Returns:
        list: Every module in the tree, in depth-first preorder.
    """
    ordered = []
    stack = [model]
    while stack:
        node = stack.pop()
        ordered.append(node)
        # Push children reversed so the leftmost child is popped first,
        # matching recursive preorder.
        stack.extend(reversed(list(node.children())))
    return ordered
|
Perform a depth-first search (DFS) traversal on a PyTorch model's neural network architecture.
This function recursively traverses all the children modules of a given PyTorch model and returns a list
containing all the modules in the model's architecture. The DFS approach starts with the input model and
explores its children modules depth-wise before backtracking and exploring other branches.
Args:
model (torch.nn.Module): The root module of the neural network to traverse.
Returns:
list: A list of all the modules in the model's architecture.
|
torch_dfs
|
python
|
fudan-generative-vision/hallo
|
hallo/models/mutual_self_attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/mutual_self_attention.py
|
MIT
|
def __init__(
    self,
    unet,
    mode="write",
    do_classifier_free_guidance=False,
    attention_auto_machine_weight=float("inf"),
    gn_auto_machine_weight=1.0,
    style_fidelity=1.0,
    reference_attn=True,
    reference_adain=False,
    fusion_blocks="midup",
    batch_size=1,
) -> None:
    """
    Set up reference attention control over the given UNet.

    Args:
        unet (torch.nn.Module): The UNet whose attention/group-norm layers are
            hooked.
        mode (str, optional): Either "read" or "write". Defaults to "write".
        do_classifier_free_guidance (bool, optional): Whether classifier-free
            guidance is in use. Defaults to False.
        attention_auto_machine_weight (float, optional): Weight for the
            attention auto-machine. Defaults to infinity.
        gn_auto_machine_weight (float, optional): Weight for the group-norm
            auto-machine. Defaults to 1.0.
        style_fidelity (float, optional): Style fidelity. Defaults to 1.0.
        reference_attn (bool, optional): Enable reference attention. Defaults
            to True.
        reference_adain (bool, optional): Enable reference AdaIN. Defaults to
            False.
        fusion_blocks (str, optional): Either "midup" or "full". Defaults to
            "midup".
        batch_size (int, optional): Batch size. Defaults to 1.

    Raises:
        AssertionError: If ``mode`` or ``fusion_blocks`` is not one of the
            accepted values.
    """
    # 10. Modify self attention and group norm
    self.unet = unet
    assert mode in ["read", "write"]
    assert fusion_blocks in ["midup", "full"]
    self.reference_attn = reference_attn
    self.reference_adain = reference_adain
    self.fusion_blocks = fusion_blocks
    self.register_reference_hooks(
        mode,
        do_classifier_free_guidance,
        attention_auto_machine_weight,
        gn_auto_machine_weight,
        style_fidelity,
        reference_attn,
        reference_adain,
        fusion_blocks,
        batch_size=batch_size,
    )
|
Initializes the ReferenceAttentionControl class.
Args:
unet (torch.nn.Module): The UNet model.
mode (str, optional): The mode of operation. Defaults to "write".
do_classifier_free_guidance (bool, optional): Whether to do classifier-free guidance. Defaults to False.
attention_auto_machine_weight (float, optional): The weight for attention auto-machine. Defaults to infinity.
gn_auto_machine_weight (float, optional): The weight for group-norm auto-machine. Defaults to 1.0.
style_fidelity (float, optional): The style fidelity. Defaults to 1.0.
reference_attn (bool, optional): Whether to use reference attention. Defaults to True.
reference_adain (bool, optional): Whether to use reference AdaIN. Defaults to False.
fusion_blocks (str, optional): The fusion blocks to use. Defaults to "midup".
batch_size (int, optional): The batch size. Defaults to 1.
Raises:
ValueError: If the mode is not recognized.
ValueError: If the fusion blocks are not recognized.
|
__init__
|
python
|
fudan-generative-vision/hallo
|
hallo/models/mutual_self_attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/mutual_self_attention.py
|
MIT
|
def update(self, writer, dtype=torch.float16):
    """
    Copy the writer's banked attention features into this reader's modules.

    Pairs up this instance's ``TemporalBasicTransformerBlock`` modules with the
    writer's ``BasicTransformerBlock`` modules (selected according to
    ``self.fusion_blocks``), sorted to match by descending hidden size, and
    clones each writer bank into the corresponding reader module.

    Args:
        writer: The controller whose UNet holds the source feature banks.
        dtype (torch.dtype, optional): Target dtype for the cloned features.
            Defaults to torch.float16.

    Returns:
        None.
    """
    if not self.reference_attn:
        return
    if self.fusion_blocks == "midup":
        # Restrict the search to the mid and up blocks of both UNets.
        reader_modules = [
            m
            for m in (
                torch_dfs(self.unet.mid_block) + torch_dfs(self.unet.up_blocks)
            )
            if isinstance(m, TemporalBasicTransformerBlock)
        ]
        writer_modules = [
            m
            for m in (
                torch_dfs(writer.unet.mid_block) + torch_dfs(writer.unet.up_blocks)
            )
            if isinstance(m, BasicTransformerBlock)
        ]
    elif self.fusion_blocks == "full":
        reader_modules = [
            m for m in torch_dfs(self.unet)
            if isinstance(m, TemporalBasicTransformerBlock)
        ]
        writer_modules = [
            m for m in torch_dfs(writer.unet)
            if isinstance(m, BasicTransformerBlock)
        ]
    assert len(reader_modules) == len(writer_modules)
    # Sort both sides by descending hidden size so modules pair up correctly.
    reader_modules.sort(key=lambda m: -m.norm1.normalized_shape[0])
    writer_modules.sort(key=lambda m: -m.norm1.normalized_shape[0])
    for reader, source in zip(reader_modules, writer_modules):
        reader.bank = [feat.clone().to(dtype) for feat in source.bank]
|
Update the model's parameters.
Args:
writer (torch.nn.Module): The model's writer object.
dtype (torch.dtype, optional): The data type to be used for the update. Defaults to torch.float16.
Returns:
None.
|
update
|
python
|
fudan-generative-vision/hallo
|
hallo/models/mutual_self_attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/mutual_self_attention.py
|
MIT
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.