<SYSTEM_TASK:>
Return the number of entries in the database.
<END_TASK>
<USER_TASK:>
Description:
def count(self):
"""
Return the number of entries in the database.
""" |
Statement = self.get_model('statement')
session = self.Session()
statement_count = session.query(Statement).count()
session.close()
return statement_count |
<SYSTEM_TASK:>
Removes the statement that matches the input text.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, statement_text):
"""
Removes the statement that matches the input text.
Removes any responses from statements where the response text matches
the input text.
""" |
Statement = self.get_model('statement')
session = self.Session()
query = session.query(Statement).filter_by(text=statement_text)
record = query.first()
# Guard against a missing match; session.delete(None) would raise an error
if record:
session.delete(record)
self._session_finish(session) |
<SYSTEM_TASK:>
Returns a list of objects from the database.
<END_TASK>
<USER_TASK:>
Description:
def filter(self, **kwargs):
"""
Returns a list of objects from the database.
The kwargs parameter can contain any number
of attributes. Only objects which contain all
listed attributes and in which all values match
for all listed attributes will be returned.
""" |
from sqlalchemy import or_
Statement = self.get_model('statement')
Tag = self.get_model('tag')
session = self.Session()
page_size = kwargs.pop('page_size', 1000)
order_by = kwargs.pop('order_by', None)
tags = kwargs.pop('tags', [])
exclude_text = kwargs.pop('exclude_text', None)
exclude_text_words = kwargs.pop('exclude_text_words', [])
persona_not_startswith = kwargs.pop('persona_not_startswith', None)
search_text_contains = kwargs.pop('search_text_contains', None)
# Convert a single string into a list if only one tag is provided
if isinstance(tags, str):
tags = [tags]
if len(kwargs) == 0:
statements = session.query(Statement).filter()
else:
statements = session.query(Statement).filter_by(**kwargs)
if tags:
statements = statements.join(Statement.tags).filter(
Tag.name.in_(tags)
)
if exclude_text:
statements = statements.filter(
~Statement.text.in_(exclude_text)
)
if exclude_text_words:
or_word_query = [
Statement.text.ilike('%' + word + '%') for word in exclude_text_words
]
statements = statements.filter(
~or_(*or_word_query)
)
if persona_not_startswith:
statements = statements.filter(
# Use the supplied prefix rather than a hardcoded 'bot:' value
~Statement.persona.startswith(persona_not_startswith)
)
if search_text_contains:
or_query = [
Statement.search_text.contains(word) for word in search_text_contains.split(' ')
]
statements = statements.filter(
or_(*or_query)
)
if order_by:
if 'created_at' in order_by:
index = order_by.index('created_at')
order_by[index] = Statement.created_at.asc()
statements = statements.order_by(*order_by)
total_statements = statements.count()
for start_index in range(0, total_statements, page_size):
for statement in statements.slice(start_index, start_index + page_size):
yield self.model_to_object(statement)
session.close() |
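A hedged usage sketch for the method above, assuming `storage` is an instance of the SQL storage adapter this method is defined on (the instance name and attribute values are illustrative):

# Iterate over statements in a conversation, excluding bot personas
for statement in storage.filter(
    conversation='training',          # exact-match kwarg forwarded to filter_by()
    tags=['greeting'],                # joined against the Tag table
    persona_not_startswith='bot:',
    page_size=500,                    # pagination size used by the generator
):
    print(statement.text)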
<SYSTEM_TASK:>
Modifies an entry in the database.
<END_TASK>
<USER_TASK:>
Description:
def update(self, statement):
"""
Modifies an entry in the database.
Creates an entry if one does not exist.
""" |
Statement = self.get_model('statement')
Tag = self.get_model('tag')
if statement is not None:
session = self.Session()
record = None
if hasattr(statement, 'id') and statement.id is not None:
record = session.query(Statement).get(statement.id)
else:
record = session.query(Statement).filter(
Statement.text == statement.text,
Statement.conversation == statement.conversation,
).first()
# Create a new statement entry if one does not already exist
if not record:
record = Statement(
text=statement.text,
conversation=statement.conversation,
persona=statement.persona
)
# Update the response value
record.in_response_to = statement.in_response_to
record.created_at = statement.created_at
record.search_text = self.tagger.get_bigram_pair_string(statement.text)
if statement.in_response_to:
record.search_in_response_to = self.tagger.get_bigram_pair_string(statement.in_response_to)
for tag_name in statement.get_tags():
tag = session.query(Tag).filter_by(name=tag_name).first()
if not tag:
# Create the record
tag = Tag(name=tag_name)
record.tags.append(tag)
session.add(record)
self._session_finish(session) |
<SYSTEM_TASK:>
Returns a random statement from the database.
<END_TASK>
<USER_TASK:>
Description:
def get_random(self):
"""
Returns a random statement from the database.
""" |
import random
Statement = self.get_model('statement')
session = self.Session()
count = self.count()
if count < 1:
raise self.EmptyDatabaseException()
random_index = random.randrange(0, count)
random_statement = session.query(Statement)[random_index]
statement = self.model_to_object(random_statement)
session.close()
return statement |
<SYSTEM_TASK:>
Populate the database with the tables.
<END_TASK>
<USER_TASK:>
Description:
def create_database(self):
"""
Populate the database with the tables.
""" |
from chatterbot.ext.sqlalchemy_app.models import Base
Base.metadata.create_all(self.engine) |
<SYSTEM_TASK:>
Return a response to the statement in the posted data.
<END_TASK>
<USER_TASK:>
Description:
def post(self, request, *args, **kwargs):
"""
Return a response to the statement in the posted data.
* The JSON data should contain a 'text' attribute.
""" |
input_data = json.loads(request.body.decode('utf-8'))
if 'text' not in input_data:
return JsonResponse({
'text': [
'The attribute "text" is required.'
]
}, status=400)
response = self.chatterbot.get_response(input_data)
response_data = response.serialize()
return JsonResponse(response_data, status=200) |
<SYSTEM_TASK:>
Reads a dotted file path and returns the file path.
<END_TASK>
<USER_TASK:>
Description:
def get_file_path(dotted_path, extension='json'):
"""
Reads a dotted file path and returns the file path.
""" |
# If the operating system's file path separator character is in the string
if os.sep in dotted_path or '/' in dotted_path:
# Assume the path is a valid file path
return dotted_path
parts = dotted_path.split('.')
if parts[0] == 'chatterbot':
parts.pop(0)
parts[0] = DATA_DIRECTORY
corpus_path = os.path.join(*parts)
if os.path.exists(corpus_path + '.{}'.format(extension)):
corpus_path += '.{}'.format(extension)
return corpus_path |
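Illustrative behavior of the path conversion above, assuming the chatterbot corpus data layout (the dotted path and file names are examples):

# A dotted path under 'chatterbot' is rebased onto DATA_DIRECTORY:
path = get_file_path('chatterbot.corpus.english.greetings')
# -> os.path.join(DATA_DIRECTORY, 'corpus', 'english', 'greetings'), plus
#    a '.json' suffix if that file exists on disk.
# A literal file path is returned unchanged:
assert get_file_path('data/my_corpus.json') == 'data/my_corpus.json'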
<SYSTEM_TASK:>
Read and return the data from a corpus YAML file.
<END_TASK>
<USER_TASK:>
Description:
def read_corpus(file_name):
"""
Read and return the data from a corpus YAML file.
""" |
with io.open(file_name, encoding='utf-8') as data_file:
# Use safe_load to avoid executing arbitrary YAML tags
return yaml.safe_load(data_file) |
<SYSTEM_TASK:>
Return a list of file paths to each data file in the specified corpus.
<END_TASK>
<USER_TASK:>
Description:
def list_corpus_files(dotted_path):
"""
Return a list of file paths to each data file in the specified corpus.
""" |
corpus_path = get_file_path(dotted_path, extension=CORPUS_EXTENSION)
paths = []
if os.path.isdir(corpus_path):
paths = glob.glob(corpus_path + '/**/*.' + CORPUS_EXTENSION, recursive=True)
else:
paths.append(corpus_path)
paths.sort()
return paths |
<SYSTEM_TASK:>
Return the data contained within a specified corpus.
<END_TASK>
<USER_TASK:>
Description:
def load_corpus(*data_file_paths):
"""
Return the data contained within a specified corpus.
""" |
for file_path in data_file_paths:
corpus = []
corpus_data = read_corpus(file_path)
conversations = corpus_data.get('conversations', [])
corpus.extend(conversations)
categories = corpus_data.get('categories', [])
yield corpus, categories, file_path |
<SYSTEM_TASK:>
Return a string of text containing part-of-speech, lemma pairs.
<END_TASK>
<USER_TASK:>
Description:
def get_bigram_pair_string(self, text):
"""
Return a string of text containing part-of-speech, lemma pairs.
""" |
bigram_pairs = []
if len(text) <= 2:
text_without_punctuation = text.translate(self.punctuation_table)
if len(text_without_punctuation) >= 1:
text = text_without_punctuation
document = self.nlp(text)
if len(text) <= 2:
bigram_pairs = [
token.lemma_.lower() for token in document
]
else:
tokens = [
token for token in document if token.is_alpha and not token.is_stop
]
if len(tokens) < 2:
tokens = [
token for token in document if token.is_alpha
]
for index in range(1, len(tokens)):
bigram_pairs.append('{}:{}'.format(
tokens[index - 1].pos_,
tokens[index].lemma_.lower()
))
if not bigram_pairs:
bigram_pairs = [
token.lemma_.lower() for token in document
]
return ' '.join(bigram_pairs) |
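A rough sketch of the output format, assuming `tagger` is an instance of the class above with a loaded spaCy model (exact lemmas, POS tags, and stop-word filtering depend on the model version):

pairs = tagger.get_bigram_pair_string('How are you doing today?')
# Each pair joins the previous token's POS tag with the current token's
# lowercased lemma, e.g. something like 'VERB:today' for '... doing today'.
print(pairs)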
<SYSTEM_TASK:>
Removes the statement that matches the input text.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, statement_text):
"""
Removes the statement that matches the input text.
Removes any responses from statements if the response text matches the
input text.
""" |
Statement = self.get_model('statement')
statements = Statement.objects.filter(text=statement_text)
statements.delete() |
<SYSTEM_TASK:>
Remove all data from the database.
<END_TASK>
<USER_TASK:>
Description:
def drop(self):
"""
Remove all data from the database.
""" |
Statement = self.get_model('statement')
Tag = self.get_model('tag')
Statement.objects.all().delete()
Tag.objects.all().delete() |
<SYSTEM_TASK:>
Remove any consecutive whitespace characters from the statement text.
<END_TASK>
<USER_TASK:>
Description:
def clean_whitespace(statement):
"""
Remove any consecutive whitespace characters from the statement text.
""" |
import re
# Replace linebreaks and tabs with spaces
statement.text = statement.text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
# Remove any leading or trailing whitespace
statement.text = statement.text.strip()
# Remove consecutive spaces
statement.text = re.sub(' +', ' ', statement.text)
return statement |
<SYSTEM_TASK:>
Convert strings to numbers
<END_TASK>
<USER_TASK:>
Description:
def convert_string_to_number(value):
"""
Convert strings to numbers
""" |
if value is None:
return 1
if isinstance(value, int):
return value
if value.isdigit():
return int(value)
num_list = map(lambda s: NUMBERS[s], re.findall(numbers + '+', value.lower()))
return sum(num_list) |
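For illustration, assuming the module-level NUMBERS word-to-value mapping and the `numbers` regex pattern behave as the code above expects:

assert convert_string_to_number(None) == 1     # None defaults to 1
assert convert_string_to_number('7') == 7      # digit strings are cast directly
print(convert_string_to_number('twenty one'))  # 21: matched number words are summed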
<SYSTEM_TASK:>
Convert time to hour, minute
<END_TASK>
<USER_TASK:>
Description:
def convert_time_to_hour_minute(hour, minute, convention):
"""
Convert time to hour, minute
""" |
if hour is None:
hour = 0
if minute is None:
minute = 0
if convention is None:
convention = 'am'
hour = int(hour)
minute = int(minute)
# 12-hour to 24-hour conversion; 12 pm stays 12 and 12 am maps to 0
if convention.lower() == 'pm' and hour != 12:
hour += 12
elif convention.lower() == 'am' and hour == 12:
hour = 0
return {'hours': hour, 'minutes': minute} |
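A couple of example conversions for the function above (they rely on the 12 o'clock handling in the body):

assert convert_time_to_hour_minute(3, 30, 'pm') == {'hours': 15, 'minutes': 30}
assert convert_time_to_hour_minute(12, 0, 'pm') == {'hours': 12, 'minutes': 0}
assert convert_time_to_hour_minute(None, None, None) == {'hours': 0, 'minutes': 0}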
<SYSTEM_TASK:>
Extract date from quarter of a year
<END_TASK>
<USER_TASK:>
Description:
def date_from_quarter(base_date, ordinal, year):
"""
Extract date from quarter of a year
""" |
interval = 3
# Quarters cover months 1-3, 4-6, 7-9 and 10-12
month_start = interval * (ordinal - 1) + 1
if month_start < 1 or month_start > 10:
month_start = 1
month_end = month_start + interval - 1
return [
datetime(year, month_start, 1),
datetime(year, month_end, calendar.monthrange(year, month_end)[1])
] |
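A quick check of the quarter boundaries, using the corrected month math above (datetime and calendar are already imported by this module):

start, end = date_from_quarter(datetime(2020, 1, 15), 2, 2020)
assert start == datetime(2020, 4, 1)   # Q2 begins in April
assert end == datetime(2020, 6, 30)    # and ends on the last day of June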
<SYSTEM_TASK:>
Converts relative day to time
<END_TASK>
<USER_TASK:>
Description:
def date_from_relative_week_year(base_date, time, dow, ordinal=1):
"""
Converts relative day to time
E.g. this tuesday, last tuesday
""" |
# If there is an ordinal (next 3 weeks) => return a start and end range
# Reset date to start of the day
relative_date = datetime(base_date.year, base_date.month, base_date.day)
ordinal_value = convert_string_to_number(ordinal)  # avoid shadowing the built-in ord()
if dow in year_variations:
if time == 'this' or time == 'coming':
return datetime(relative_date.year, 1, 1)
elif time == 'last' or time == 'previous':
return datetime(relative_date.year - 1, relative_date.month, 1)
elif time == 'next' or time == 'following':
return relative_date + timedelta(ordinal_value * 365)
elif time == 'end of the':
return datetime(relative_date.year, 12, 31)
elif dow in month_variations:
if time == 'this':
return datetime(relative_date.year, relative_date.month, relative_date.day)
elif time == 'last' or time == 'previous':
return datetime(relative_date.year, relative_date.month - 1, relative_date.day)
elif time == 'next' or time == 'following':
if relative_date.month + ordinal_value >= 12:
month = relative_date.month - 1 + ordinal_value
year = relative_date.year + month // 12
month = month % 12 + 1
day = min(relative_date.day, calendar.monthrange(year, month)[1])
return datetime(year, month, day)
else:
return datetime(relative_date.year, relative_date.month + ordinal_value, relative_date.day)
elif time == 'end of the':
return datetime(
relative_date.year,
relative_date.month,
calendar.monthrange(relative_date.year, relative_date.month)[1]
)
elif dow in week_variations:
if time == 'this':
return relative_date - timedelta(days=relative_date.weekday())
elif time == 'last' or time == 'previous':
return relative_date - timedelta(weeks=1)
elif time == 'next' or time == 'following':
return relative_date + timedelta(weeks=ordinal_value)
elif time == 'end of the':
# Return the last day of the current week (a date, not an int)
return relative_date + timedelta(days=6 - relative_date.weekday())
elif dow in day_variations:
if time == 'this':
return relative_date
elif time == 'last' or time == 'previous':
return relative_date - timedelta(days=1)
elif time == 'next' or time == 'following':
return relative_date + timedelta(days=ordinal_value)
elif time == 'end of the':
return datetime(relative_date.year, relative_date.month, relative_date.day, 23, 59, 59) |
<SYSTEM_TASK:>
Finds coming weekday
<END_TASK>
<USER_TASK:>
Description:
def this_week_day(base_date, weekday):
"""
Finds coming weekday
""" |
day_of_week = base_date.weekday()
# If today is past the requested weekday (e.g. today is Tuesday and the
# query is `this monday`) return next week's occurrence
if day_of_week > weekday:
return next_week_day(base_date, weekday)
start_of_this_week = base_date - timedelta(days=day_of_week + 1)
day = start_of_this_week + timedelta(days=1)
while day.weekday() != weekday:
day = day + timedelta(days=1)
return day |
<SYSTEM_TASK:>
Finds previous weekday
<END_TASK>
<USER_TASK:>
Description:
def previous_week_day(base_date, weekday):
"""
Finds previous weekday
""" |
day = base_date - timedelta(days=1)
while day.weekday() != weekday:
day = day - timedelta(days=1)
return day |
<SYSTEM_TASK:>
Finds next weekday
<END_TASK>
<USER_TASK:>
Description:
def next_week_day(base_date, weekday):
"""
Finds next weekday
""" |
day_of_week = base_date.weekday()
end_of_this_week = base_date + timedelta(days=6 - day_of_week)
day = end_of_this_week + timedelta(days=1)
while day.weekday() != weekday:
day = day + timedelta(days=1)
return day |
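Example usage of the weekday helpers above; weekday follows Python's convention (Monday == 0 ... Sunday == 6) and the dates are illustrative:

base = datetime(2021, 6, 2)                                # a Wednesday
assert next_week_day(base, 0) == datetime(2021, 6, 7)      # following Monday
assert previous_week_day(base, 0) == datetime(2021, 5, 31) # preceding Monday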
<SYSTEM_TASK:>
Search for close matches to the input. Confidence scores for
<END_TASK>
<USER_TASK:>
Description:
def search(self, input_statement, **additional_parameters):
"""
Search for close matches to the input. Confidence scores for
subsequent results will be in order of increasing value.
:param input_statement: A statement.
:type input_statement: chatterbot.conversation.Statement
:param **additional_parameters: Additional parameters to be passed
to the ``filter`` method of the storage adapter when searching.
:rtype: Generator yielding one closest matching statement at a time.
""" |
self.chatbot.logger.info('Beginning search for close text match')
input_search_text = input_statement.search_text
if not input_statement.search_text:
self.chatbot.logger.warning(
'No value for search_text was available on the provided input'
)
input_search_text = self.chatbot.storage.tagger.get_bigram_pair_string(
input_statement.text
)
search_parameters = {
'search_text_contains': input_search_text,
'persona_not_startswith': 'bot:',
'page_size': self.search_page_size
}
if additional_parameters:
search_parameters.update(additional_parameters)
statement_list = self.chatbot.storage.filter(**search_parameters)
closest_match = Statement(text='')
closest_match.confidence = 0
self.chatbot.logger.info('Processing search results')
# Find the closest matching known statement
for statement in statement_list:
confidence = self.compare_statements(input_statement, statement)
if confidence > closest_match.confidence:
statement.confidence = confidence
closest_match = statement
self.chatbot.logger.info('Similar text found: {} {}'.format(
closest_match.text, confidence
))
yield closest_match |
<SYSTEM_TASK:>
Get a response from the chatbot and display it.
<END_TASK>
<USER_TASK:>
Description:
def get_response(self):
"""
Get a response from the chatbot and display it.
""" |
user_input = self.usr_input.get()
self.usr_input.delete(0, tk.END)
response = self.chatbot.get_response(user_input)
self.conversation['state'] = 'normal'
self.conversation.insert(
tk.END, "Human: " + user_input + "\n" + "ChatBot: " + str(response.text) + "\n"
)
self.conversation['state'] = 'disabled'
time.sleep(0.5) |
<SYSTEM_TASK:>
Display svelte components in iPython.
<END_TASK>
<USER_TASK:>
Description:
def SvelteComponent(name, path):
"""Display svelte components in iPython.
Args:
name: name of svelte component (must match component filename when built)
path: path to compile svelte .js file or source svelte .html file.
(If html file, we try to call svelte and build the file.)
Returns:
A function mapping data to a rendered svelte component in ipython.
""" |
if path[-3:] == ".js":
js_path = path
elif path[-5:] == ".html":
print("Trying to build svelte component from html...")
js_path = build_svelte(path)
else:
raise ValueError("path must end in .js or .html: " + path)
js_content = read(js_path, mode='r')
def inner(data):
id_str = js_id(name)
html = _template \
.replace("$js", js_content) \
.replace("$name", name) \
.replace("$data", json.dumps(data)) \
.replace("$id", id_str)
_display_html(html)
return inner |
<SYSTEM_TASK:>
Save dict of numpy array as npz file.
<END_TASK>
<USER_TASK:>
Description:
def save_npz(object, handle):
"""Save dict of numpy array as npz file.""" |
# there is a bug where savez doesn't actually accept a file handle.
log.warning("Saving npz files currently only works locally. :/")
path = handle.name
handle.close()
if type(object) is dict:
np.savez(path, **object)
elif type(object) is list:
np.savez(path, *object)
else:
log.warning("Saving non dict or list as npz file, did you maybe want npy?")
np.savez(path, object) |
<SYSTEM_TASK:>
Save object to file on CNS.
<END_TASK>
<USER_TASK:>
Description:
def save(thing, url_or_handle, **kwargs):
"""Save object to file on CNS.
File format is inferred from path. Use save_img(), save_npy(), or save_json()
if you need to force a particular format.
Args:
thing: object to save.
url_or_handle: CNS path or file handle.
Raises:
RuntimeError: If file extension not supported.
""" |
is_handle = hasattr(url_or_handle, "write") and hasattr(url_or_handle, "name")
if is_handle:
_, ext = os.path.splitext(url_or_handle.name)
else:
_, ext = os.path.splitext(url_or_handle)
if not ext:
raise RuntimeError("No extension in URL: " + url_or_handle)
if ext in savers:
saver = savers[ext]
if is_handle:
saver(thing, url_or_handle, **kwargs)
else:
with write_handle(url_or_handle) as handle:
saver(thing, handle, **kwargs)
else:
saver_names = [(key, fn.__name__) for (key, fn) in savers.items()]
message = "Unknown extension '{}', supports {}."
raise ValueError(message.format(ext, saver_names)) |
<SYSTEM_TASK:>
Compute L2 norms along specified axes.
<END_TASK>
<USER_TASK:>
Description:
def anorm(x, axis=None, keepdims=False):
"""Compute L2 norms alogn specified axes.""" |
return np.sqrt((x*x).sum(axis=axis, keepdims=keepdims)) |
<SYSTEM_TASK:>
L2 Normalize along specified axes.
<END_TASK>
<USER_TASK:>
Description:
def normalize(v, axis=None, eps=1e-10):
"""L2 Normalize along specified axes.""" |
# Use np.maximum: Python's built-in max() is ambiguous on arrays of size > 1
return v / np.maximum(anorm(v, axis=axis, keepdims=True), eps) |
<SYSTEM_TASK:>
Unify lengths of each row of a.
<END_TASK>
<USER_TASK:>
Description:
def _unify_rows(a):
"""Unify lengths of each row of a.""" |
lens = np.fromiter(map(len, a), np.int32)
if not (lens[0] == lens).all():
out = np.zeros((len(a), lens.max()), np.float32)
for i, row in enumerate(a):
out[i, :lens[i]] = row
else:
out = np.float32(a)
return out |
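For intuition, shorter rows are zero-padded to the longest row's length (a minimal sketch; numpy is already imported as np in this module):

ragged = [[1.0, 2.0], [3.0]]
print(_unify_rows(ragged))
# [[1. 2.]
#  [3. 0.]]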
<SYSTEM_TASK:>
Loads sampled activations, which requires network access.
<END_TASK>
<USER_TASK:>
Description:
def activations(self):
"""Loads sampled activations, which requires network access.""" |
if self._activations is None:
self._activations = _get_aligned_activations(self)
return self._activations |
<SYSTEM_TASK:>
Import model GraphDef into the current graph.
<END_TASK>
<USER_TASK:>
Description:
def import_graph(self, t_input=None, scope='import', forget_xy_shape=True):
"""Import model GraphDef into the current graph.""" |
graph = tf.get_default_graph()
assert graph.unique_name(scope, False) == scope, (
'Scope "%s" already exists. Provide explicit scope names when '
'importing multiple instances of the model.') % scope
t_input, t_prep_input = self.create_input(t_input, forget_xy_shape)
tf.import_graph_def(
self.graph_def, {self.input_name: t_prep_input}, name=scope)
self.post_import(scope) |
<SYSTEM_TASK:>
`activations` can be a list of ndarrays. In that case a list of layouts is returned.
<END_TASK>
<USER_TASK:>
Description:
def aligned_umap(activations, umap_options={}, normalize=True, verbose=False):
"""`activations` can be a list of ndarrays. In that case a list of layouts is returned.""" |
umap_defaults = dict(
n_components=2, n_neighbors=50, min_dist=0.05, verbose=verbose, metric="cosine"
)
umap_defaults.update(umap_options)
# if passed a list of activations, we combine them and later split the layouts
if type(activations) is list or type(activations) is tuple:
num_activation_groups = len(activations)
combined_activations = np.concatenate(activations)
else:
num_activation_groups = 1
combined_activations = activations
try:
layout = UMAP(**umap_defaults).fit_transform(combined_activations)
except (RecursionError, SystemError) as exception:
log.error("UMAP failed to fit these activations. We're not yet sure why this sometimes occurs.")
raise ValueError("UMAP failed to fit activations: %s" % exception)
if normalize:
layout = normalize_layout(layout)
if num_activation_groups > 1:
layouts = np.split(layout, num_activation_groups, axis=0)
return layouts
else:
return layout |
<SYSTEM_TASK:>
Render each cell in the tile and stitch it into a single image
<END_TASK>
<USER_TASK:>
Description:
def render_tile(cells, ti, tj, render, params, metadata, layout, summary):
"""
Render each cell in the tile and stitch it into a single image
""" |
image_size = params["cell_size"] * params["n_tile"]
tile = Image.new("RGB", (image_size, image_size), (255,255,255))
keys = cells.keys()
for i,key in enumerate(keys):
print("cell", i+1, "/", len(keys), end='\r')
cell_image = render(cells[key], params, metadata, layout, summary)
# stitch this rendering into the tile image
ci = key[0] % params["n_tile"]
cj = key[1] % params["n_tile"]
xmin = ci*params["cell_size"]
ymin = cj*params["cell_size"]
xmax = (ci+1)*params["cell_size"]
ymax = (cj+1)*params["cell_size"]
if params.get("scale_density", False):
density = len(cells[key]["gi"])
# scale = density/summary["max_density"]
scale = math.log(density)/(math.log(summary["max_density"]) or 1)
owidth = xmax - xmin
width = int(round(owidth * scale))
if width < 1:
width = 1
offsetL = int(round((owidth - width)/2))
offsetR = owidth - width - offsetL # handle odd numbers
# print("\n")
# print("width", width, offsetL, offsetR)
box = [xmin + offsetL, ymin + offsetL, xmax - offsetR, ymax - offsetR]
resample = params.get("scale_type", Image.NEAREST)
cell_image = cell_image.resize(size=(width,width), resample=resample)
# print(cell_image)
else:
box = [xmin, ymin, xmax, ymax]
# print("box", box)
tile.paste(cell_image, box)
print("\n")
return tile |
<SYSTEM_TASK:>
Call the user defined aggregation function on each cell and combine into a single json object
<END_TASK>
<USER_TASK:>
Description:
def aggregate_tile(cells, ti, tj, aggregate, params, metadata, layout, summary):
"""
Call the user defined aggregation function on each cell and combine into a single json object
""" |
tile = []
keys = cells.keys()
for i,key in enumerate(keys):
print("cell", i+1, "/", len(keys), end='\r')
cell_json = aggregate(cells[key], params, metadata, layout, summary)
tile.append({"aggregate":cell_json, "i":int(key[0]), "j":int(key[1])})
return tile |
<SYSTEM_TASK:>
Create offscreen OpenGL context and make it current.
<END_TASK>
<USER_TASK:>
Description:
def create_opengl_context(surface_size=(640, 480)):
"""Create offscreen OpenGL context and make it current.
Users are expected to directly use EGL API in case more advanced
context management is required.
Args:
surface_size: (width, height), size of the offscreen rendering surface.
""" |
egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY)
major, minor = egl.EGLint(), egl.EGLint()
egl.eglInitialize(egl_display, pointer(major), pointer(minor))
config_attribs = [
egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8,
egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24,
egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE
]
config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs)
num_configs = egl.EGLint()
egl_cfg = egl.EGLConfig()
egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1,
pointer(num_configs))
width, height = surface_size
pbuffer_attribs = [
egl.EGL_WIDTH,
width,
egl.EGL_HEIGHT,
height,
egl.EGL_NONE,
]
pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs)
egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs)
egl.eglBindAPI(egl.EGL_OPENGL_API)
egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT,
None)
egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context) |
<SYSTEM_TASK:>
Bilinear resizes a tensor t to have shape target_shape.
<END_TASK>
<USER_TASK:>
Description:
def resize_bilinear_nd(t, target_shape):
"""Bilinear resizes a tensor t to have shape target_shape.
This function bilinearly resizes a n-dimensional tensor by iteratively
applying tf.image.resize_bilinear (which can only resize 2 dimensions).
For bilinear interpolation, the order in which it is applied does not matter.
Args:
t: tensor to be resized
target_shape: the desired shape of the new tensor.
Returns:
The resized tensor
""" |
shape = t.get_shape().as_list()
target_shape = list(target_shape)
assert len(shape) == len(target_shape)
# We progressively move through the shape, resizing dimensions...
d = 0
while d < len(shape):
# If we don't need to deal with the next dimension, step over it
if shape[d] == target_shape[d]:
d += 1
continue
# Otherwise, we'll resize the next two dimensions...
# If d+2 doesn't need to be resized, this will just be a null op for it
new_shape = shape[:]
new_shape[d : d+2] = target_shape[d : d+2]
# The helper collapse_shape() makes our shapes 4-dimensional with
# the two dimensions we want to deal with in the middle.
shape_ = collapse_shape(shape, d, d+2)
new_shape_ = collapse_shape(new_shape, d, d+2)
# We can then reshape and use the 2d tf.image.resize_bilinear() on the
# inner two dimensions.
t_ = tf.reshape(t, shape_)
t_ = tf.image.resize_bilinear(t_, new_shape_[1:3])
# And then reshape back to our uncollapsed version, having finished resizing
# two more dimensions in our shape.
t = tf.reshape(t_, new_shape)
shape = new_shape
d += 2
return t |
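A quick shape check for the function above (the tensor values are zeros; only the shapes matter):

t = tf.zeros([2, 8, 8, 8, 3])
out = resize_bilinear_nd(t, [2, 16, 16, 16, 3])
# Dimensions are resized two at a time: dims (1, 2) first, then dims (3, 4),
# where dim 4 already matches and is effectively a no-op.
print(out.shape)  # (2, 16, 16, 16, 3)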
<SYSTEM_TASK:>
Downloads 100k activations of the specified layer sampled from iterating over
<END_TASK>
<USER_TASK:>
Description:
def get_aligned_activations(layer):
"""Downloads 100k activations of the specified layer sampled from iterating over
ImageNet. Activations of all layers were sampled at the same spatial positions for
each image, allowing the calculation of correlations.""" |
activation_paths = [
PATH_TEMPLATE.format(
sanitize(layer.model_class.name), sanitize(layer.name), page
)
for page in range(NUMBER_OF_PAGES)
]
activations = np.vstack([load(path) for path in activation_paths])
assert np.all(np.isfinite(activations))
return activations |
<SYSTEM_TASK:>
Computes the covariance matrix between the neurons of two layers. If only one
<END_TASK>
<USER_TASK:>
Description:
def layer_covariance(layer1, layer2=None):
"""Computes the covariance matrix between the neurons of two layers. If only one
layer is passed, computes the symmetric covariance matrix of that layer.""" |
layer2 = layer2 or layer1
act1, act2 = layer1.activations, layer2.activations
num_datapoints = act1.shape[0] # cast to avoid numpy type promotion during division
return np.matmul(act1.T, act2) / float(num_datapoints) |
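A minimal NumPy sketch of the same estimator on synthetic data, to make the shapes concrete (the random arrays stand in for layer.activations):

import numpy as np
a1 = np.random.randn(1000, 4).astype(np.float32)  # stand-in for layer1.activations
a2 = np.random.randn(1000, 6).astype(np.float32)  # stand-in for layer2.activations
cov = np.matmul(a1.T, a2) / float(a1.shape[0])    # (4, 6) matrix of neuron-pair covariances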
<SYSTEM_TASK:>
Push activations from one model to another using prerecorded correlations
<END_TASK>
<USER_TASK:>
Description:
def push_activations(activations, from_layer, to_layer):
"""Push activations from one model to another using prerecorded correlations""" |
inverse_covariance_matrix = layer_inverse_covariance(from_layer)
activations_decorrelated = np.dot(inverse_covariance_matrix, activations.T).T
covariance_matrix = layer_covariance(from_layer, to_layer)
activation_recorrelated = np.dot(activations_decorrelated, covariance_matrix)
return activation_recorrelated |
<SYSTEM_TASK:>
A parameterization for interpolating between each pair of N objectives.
<END_TASK>
<USER_TASK:>
Description:
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128,
channels=3):
"""A paramaterization for interpolating between each pair of N objectives.
Sometimes you want to interpolate between optimizing a bunch of objectives,
in a paramaterization that encourages images to align.
Args:
n_objectives: number of objectives you want interpolate between
n_interp_steps: number of interpolation steps
width: width of intepolated images
channel
Returns:
A [n_objectives, n_objectives, n_interp_steps, width, width, channel]
shaped tensor, t, where the final [width, width, channel] should be
seen as images, such that the following properties hold:
t[a, b] = t[b, a, ::-1]
t[a, i, 0] = t[a, j, 0] for all i, j
t[a, a, i] = t[a, a, j] for all i, j
t[a, b, i] = t[b, a, -i] for all i
""" |
N, M, W, Ch = n_objectives, n_interp_steps, width, channels
const_term = sum([lowres_tensor([W, W, Ch], [W//k, W//k, Ch])
for k in [1, 2, 4, 8]])
const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch])
example_interps = [
sum([lowres_tensor([M, W, W, Ch], [2, W//k, W//k, Ch])
for k in [1, 2, 4, 8]])
for _ in range(N)]
example_basis = []
for n in range(N):
col = []
for m in range(N):
interp = example_interps[n] + example_interps[m][::-1]
col.append(interp)
example_basis.append(col)
interp_basis = []
for n in range(N):
# Reuse already-built rows so that basis[n][m] mirrors basis[m][n] reversed
col = [interp_basis[m][n][::-1] for m in range(n)]
col.append(tf.zeros([M, W, W, Ch]))
for m in range(n+1, N):
interp = sum([lowres_tensor([M, W, W, Ch], [M, W//k, W//k, Ch])
for k in [1, 2]])
col.append(interp)
interp_basis.append(col)
basis = []
for n in range(N):
col_ex = tf.stack(example_basis[n])
col_in = tf.stack(interp_basis[n])
basis.append(col_ex + col_in)
basis = tf.stack(basis)
return basis + const_term |
<SYSTEM_TASK:>
Register a gradient function to a random string.
<END_TASK>
<USER_TASK:>
Description:
def register_to_random_name(grad_f):
"""Register a gradient function to a random string.
In order to use a custom gradient in TensorFlow, it must be registered to a
string. This is both a hassle, and -- because only one function can ever be
registered to a string -- annoying to iterate on in an interactive
environment.
This function registers a function to a unique random string of the form:
{FUNCTION_NAME}_{RANDOM_SALT}
And then returns the random string. This is a helper in creating more
convenient gradient overrides.
Args:
grad_f: gradient function to register. Should map (op, grad) -> grad(s)
Returns:
String that gradient function was registered to.
""" |
grad_f_name = grad_f.__name__ + "_" + str(uuid.uuid4())
tf.RegisterGradient(grad_f_name)(grad_f)
return grad_f_name |
<SYSTEM_TASK:>
Decorator for easily setting custom gradients for TensorFlow functions.
<END_TASK>
<USER_TASK:>
Description:
def use_gradient(grad_f):
"""Decorator for easily setting custom gradients for TensorFlow functions.
* DO NOT use this function if you need to serialize your graph.
* This function will cause the decorated function to run slower.
Example:
def _foo_grad(op, grad): ...
@use_gradient(_foo_grad)
def foo(x1, x2, x3): ...
Args:
grad_f: function to use as gradient.
Returns:
A decorator to apply to the function you wish to override the gradient of.
""" |
grad_f_name = register_to_random_name(grad_f)
def function_wrapper(f):
def inner(*inputs):
# TensorFlow only supports (as of writing) overriding the gradient of
# individual ops. In order to override the gradient of `f`, we need to
# somehow make it appear to be an individual TensorFlow op.
#
# Our solution is to create a PyFunc that mimics `f`.
#
# In particular, we construct a graph for `f` and run it, then use a
# stateful PyFunc to stash its results in Python. Then we have another
# PyFunc mimic it by taking all the same inputs and returning the stashed
# output.
#
# I wish we could do this without PyFunc, but I don't see a way to have
# it be fully general.
state = {"out_value": None}
# First, we need to run `f` and store its output.
out = f(*inputs)
def store_out(out_value):
"""Store the value of out to a python variable."""
state["out_value"] = out_value
store_name = "store_" + f.__name__
store = tf.py_func(store_out, [out], (), stateful=True, name=store_name)
# Next, we create the mock function, with an overridden gradient.
# Note that we need to make sure store gets evaluated before the mock
# runs.
def mock_f(*inputs):
"""Mimic f by retrieving the stored value of out."""
return state["out_value"]
with tf.control_dependencies([store]):
with gradient_override_map({"PyFunc": grad_f_name}):
mock_name = "mock_" + f.__name__
mock_out = tf.py_func(mock_f, inputs, out.dtype, stateful=True,
name=mock_name)
mock_out.set_shape(out.get_shape())
# Finally, we can return the mock.
return mock_out
return inner
return function_wrapper |
<SYSTEM_TASK:>
A naive, pixel-based image parameterization.
<END_TASK>
<USER_TASK:>
Description:
def pixel_image(shape, sd=None, init_val=None):
"""A naive, pixel-based image parameterization.
Defaults to a random initialization, but can take a supplied init_val argument
instead.
Args:
shape: shape of resulting image, [batch, width, height, channels].
sd: standard deviation of param initialization noise.
init_val: an initial value to use instead of a random initialization. Needs
to have the same shape as the supplied shape argument.
Returns:
tensor with shape from first argument.
""" |
if sd is not None and init_val is not None:
warnings.warn(
"`pixel_image` received both an initial value and a sd argument. Ignoring sd in favor of the supplied initial value."
)
sd = sd or 0.01
# Use an explicit None check: `or` on a NumPy array is ambiguous
if init_val is None:
init_val = np.random.normal(size=shape, scale=sd).astype(np.float32)
return tf.Variable(init_val) |
<SYSTEM_TASK:>
Simple laplacian pyramid parameterization of an image.
<END_TASK>
<USER_TASK:>
Description:
def laplacian_pyramid_image(shape, n_levels=4, sd=None):
"""Simple laplacian pyramid paramaterization of an image.
For more flexibility, use a sum of lowres_tensor()s.
Args:
shape: shape of resulting image, [batch, width, height, channels].
n_levels: number of levels of laplacian pyramid.
sd: standard deviation of param initialization.
Returns:
tensor with shape from first argument.
""" |
batch_dims = shape[:-3]
w, h, ch = shape[-3:]
pyramid = 0
for n in range(n_levels):
k = 2 ** n
# Coerce batch_dims to a list so the concatenation works for list or tuple shapes
pyramid += lowres_tensor(shape, list(batch_dims) + [w // k, h // k, ch], sd=sd)
return pyramid |
<SYSTEM_TASK:>
Build bilinear texture sampling graph.
<END_TASK>
<USER_TASK:>
Description:
def bilinearly_sampled_image(texture, uv):
"""Build bilinear texture sampling graph.
Coordinate transformation rules match OpenGL GL_REPEAT wrapping and GL_LINEAR
interpolation modes.
Args:
texture: [tex_h, tex_w, channel_n] tensor.
uv: [frame_h, frame_w, 2] tensor with per-pixel UV coordinates in range [0..1]
Returns:
[frame_h, frame_w, channel_n] tensor with per-pixel sampled values.
""" |
h, w = tf.unstack(tf.shape(texture)[:2])
u, v = tf.split(uv, 2, axis=-1)
v = 1.0 - v # vertical flip to match GL convention
u, v = u * tf.to_float(w) - 0.5, v * tf.to_float(h) - 0.5
u0, u1 = tf.floor(u), tf.ceil(u)
v0, v1 = tf.floor(v), tf.ceil(v)
uf, vf = u - u0, v - v0
u0, u1, v0, v1 = map(tf.to_int32, [u0, u1, v0, v1])
def sample(u, v):
vu = tf.concat([v % h, u % w], axis=-1)
return tf.gather_nd(texture, vu)
s00, s01 = sample(u0, v0), sample(u0, v1)
s10, s11 = sample(u1, v0), sample(u1, v1)
s0 = s00 * (1.0 - vf) + s01 * vf
s1 = s10 * (1.0 - vf) + s11 * vf
s = s0 * (1.0 - uf) + s1 * uf
return s |
<SYSTEM_TASK:>
Add Inception bottlenecks and their pre-Relu versions to the graph.
<END_TASK>
<USER_TASK:>
Description:
def _populate_inception_bottlenecks(scope):
"""Add Inception bottlenecks and their pre-Relu versions to the graph.""" |
graph = tf.get_default_graph()
for op in graph.get_operations():
if op.name.startswith(scope+'/') and 'Concat' in op.type:
name = op.name.split('/')[1]
pre_relus = []
for tower in op.inputs[1:]:
if tower.op.type == 'Relu':
tower = tower.op.inputs[0]
pre_relus.append(tower)
concat_name = scope + '/' + name + '_pre_relu'
_ = tf.concat(pre_relus, -1, name=concat_name) |
<SYSTEM_TASK:>
Decorator for creating Objective factories.
<END_TASK>
<USER_TASK:>
Description:
def wrap_objective(f, *args, **kwds):
"""Decorator for creating Objective factories.
Changes f from the closure: (args) => () => TF Tensor
into an Objective factory: (args) => Objective
while preserving function name, arg info, docs... for interactive python.
""" |
objective_func = f(*args, **kwds)
objective_name = f.__name__
args_str = " [" + ", ".join([_make_arg_str(arg) for arg in args]) + "]"
description = objective_name.title() + args_str
return Objective(objective_func, objective_name, description) |
<SYSTEM_TASK:>
Visualize a single neuron of a single channel.
<END_TASK>
<USER_TASK:>
Description:
def neuron(layer_name, channel_n, x=None, y=None, batch=None):
"""Visualize a single neuron of a single channel.
Defaults to the center neuron. When width and height are even numbers, we
choose the neuron in the bottom right of the center 2x2 neurons.
Odd width & height: Even width & height:
+---+---+---+ +---+---+---+---+
| | | | | | | | |
+---+---+---+ +---+---+---+---+
| | X | | | | | | |
+---+---+---+ +---+---+---+---+
| | | | | | | X | |
+---+---+---+ +---+---+---+---+
| | | | |
+---+---+---+---+
""" |
def inner(T):
layer = T(layer_name)
shape = tf.shape(layer)
x_ = shape[1] // 2 if x is None else x
y_ = shape[2] // 2 if y is None else y
if batch is None:
return layer[:, x_, y_, channel_n]
else:
return layer[batch, x_, y_, channel_n]
return inner |
<SYSTEM_TASK:>
Visualize a single channel
<END_TASK>
<USER_TASK:>
Description:
def channel(layer, n_channel, batch=None):
"""Visualize a single channel""" |
if batch is None:
return lambda T: tf.reduce_mean(T(layer)[..., n_channel])
else:
return lambda T: tf.reduce_mean(T(layer)[batch, ..., n_channel]) |
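A hedged usage sketch; the layer name is illustrative, and `T` is the layer-accessor function these objectives expect (see make_vis_T later in this section):

# Maximize the mean activation of channel 42 in layer 'mixed4a':
obj = channel('mixed4a', 42)
# `obj` is a function of T; obj(T) evaluates to a scalar tensor to maximize.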
<SYSTEM_TASK:>
L1 norm of layer. Generally used as penalty.
<END_TASK>
<USER_TASK:>
Description:
def L1(layer="input", constant=0, batch=None):
"""L1 norm of layer. Generally used as penalty.""" |
if batch is None:
return lambda T: tf.reduce_sum(tf.abs(T(layer) - constant))
else:
return lambda T: tf.reduce_sum(tf.abs(T(layer)[batch] - constant)) |
<SYSTEM_TASK:>
L2 norm of layer. Generally used as penalty.
<END_TASK>
<USER_TASK:>
Description:
def L2(layer="input", constant=0, epsilon=1e-6, batch=None):
"""L2 norm of layer. Generally used as penalty.""" |
if batch is None:
return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer) - constant) ** 2))
else:
return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer)[batch] - constant) ** 2)) |
<SYSTEM_TASK:>
Minimizing this objective is equivalent to blurring input each step.
<END_TASK>
<USER_TASK:>
Description:
def blur_input_each_step():
"""Minimizing this objective is equivelant to blurring input each step.
Optimizing (-k)*blur_input_each_step() is equivelant to:
input <- (1-k)*input + k*blur(input)
An operation that was used in early feature visualization work.
See Nguyen, et al., 2015.
""" |
def inner(T):
t_input = T("input")
t_input_blurred = tf.stop_gradient(_tf_blur(t_input))
return 0.5*tf.reduce_sum((t_input - t_input_blurred)**2)
return inner |
<SYSTEM_TASK:>
Interpolate between layer1, n_channel1 and layer2, n_channel2.
<END_TASK>
<USER_TASK:>
Description:
def channel_interpolate(layer1, n_channel1, layer2, n_channel2):
"""Interpolate between layer1, n_channel1 and layer2, n_channel2.
Optimize for a convex combination of layer1, n_channel1 and
layer2, n_channel2, transitioning across the batch.
Args:
layer1: layer to optimize 100% at batch=0.
n_channel1: neuron index to optimize 100% at batch=0.
layer2: layer to optimize 100% at batch=N.
n_channel2: neuron index to optimize 100% at batch=N.
Returns:
Objective
""" |
def inner(T):
batch_n = T(layer1).get_shape().as_list()[0]
arr1 = T(layer1)[..., n_channel1]
arr2 = T(layer2)[..., n_channel2]
weights = (np.arange(batch_n)/float(batch_n-1))
S = 0
for n in range(batch_n):
S += (1-weights[n]) * tf.reduce_mean(arr1[n])
S += weights[n] * tf.reduce_mean(arr2[n])
return S
return inner |
<SYSTEM_TASK:>
Encourage the boundaries of an image to have less variation and to be of color C.
<END_TASK>
<USER_TASK:>
Description:
def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5):
"""Encourage the boundaries of an image to have less variation and of color C.
Args:
shp: shape of T("input") because this may not be known.
w: width of boundary to penalize. Ignored if mask is set.
mask: mask describing what area should be penalized.
Returns:
Objective.
""" |
def inner(T):
arr = T("input")
# print shp
if mask is None:
mask_ = np.ones(shp)
mask_[:, w:-w, w:-w] = 0
else:
mask_ = mask
blur = _tf_blur(arr, w=5)
diffs = (blur-arr)**2
diffs += 0.8*(arr-C)**2
return -tf.reduce_sum(diffs*mask_)
return inner |
<SYSTEM_TASK:>
Encourage neighboring images to be similar.
<END_TASK>
<USER_TASK:>
Description:
def alignment(layer, decay_ratio=2):
"""Encourage neighboring images to be similar.
When visualizing the interpolation between two objectives, it's often
desirable to encourage analogous objects to be drawn in the same position,
to make them more comparable.
This term penalizes L2 distance between neighboring images, as evaluated at
layer.
In general, we find this most effective if used with a parameterization that
shares across the batch. (In fact, that works quite well by itself, so this
function may just be obsolete.)
Args:
layer: layer to penalize at.
decay_ratio: how much to decay penalty as images move apart in batch.
Returns:
Objective.
""" |
def inner(T):
batch_n = T(layer).get_shape().as_list()[0]
arr = T(layer)
accum = 0
for d in [1, 2, 3, 4]:
for i in range(batch_n - d):
a, b = i, i+d
arr1, arr2 = arr[a], arr[b]
accum += tf.reduce_mean((arr1-arr2)**2) / decay_ratio**float(d)
return -accum
return inner |
<SYSTEM_TASK:>
Encourage diversity between each batch element.
<END_TASK>
<USER_TASK:>
Description:
def diversity(layer):
"""Encourage diversity between each batch element.
A neural net feature often responds to multiple things, but naive feature
visualization often only shows us one. If you optimize a batch of images,
this objective will encourage them all to be different.
In particular, it calculates the correlation matrix of activations at layer
for each image, and then penalizes cosine similarity between them. This is
very similar to ideas in style transfer, except we're *penalizing* style
similarity instead of encouraging it.
Args:
layer: layer to evaluate activation correlations on.
Returns:
Objective.
""" |
def inner(T):
layer_t = T(layer)
batch_n, _, _, channels = layer_t.get_shape().as_list()
flattened = tf.reshape(layer_t, [batch_n, -1, channels])
grams = tf.matmul(flattened, flattened, transpose_a=True)
grams = tf.nn.l2_normalize(grams, axis=[1,2], epsilon=1e-10)
return sum([ sum([ tf.reduce_sum(grams[i]*grams[j])
for j in range(batch_n) if j != i])
for i in range(batch_n)]) / batch_n
return inner |
<SYSTEM_TASK:>
Average L2 difference between optimized image and orig_img.
<END_TASK>
<USER_TASK:>
Description:
def input_diff(orig_img):
"""Average L2 difference between optimized image and orig_img.
This objective is usually multiplied by a negative number and used as a
penalty in making adversarial counterexamples.
""" |
def inner(T):
diff = T("input") - orig_img
return tf.sqrt(tf.reduce_mean(diff**2))
return inner |
<SYSTEM_TASK:>
Like channel, but for softmax layers.
<END_TASK>
<USER_TASK:>
Description:
def class_logit(layer, label):
"""Like channel, but for softmax layers.
Args:
layer: A layer name string.
label: Either a string (referring to a label in model.labels) or an int
label position.
Returns:
Objective maximizing a logit.
""" |
def inner(T):
if isinstance(label, int):
class_n = label
else:
class_n = T("labels").index(label)
logits = T(layer)
logit = tf.reduce_sum(logits[:, class_n])
return logit
return inner |
<SYSTEM_TASK:>
Convert obj into Objective class.
<END_TASK>
<USER_TASK:>
Description:
def as_objective(obj):
"""Convert obj into Objective class.
Strings of the form "layer:n" become the Objective channel(layer, n).
Objectives are returned unchanged.
Args:
obj: string or Objective.
Returns:
Objective
""" |
if isinstance(obj, Objective):
return obj
elif callable(obj):
return obj
elif isinstance(obj, str):
layer, n = obj.split(":")
layer, n = layer.strip(), int(n)
return channel(layer, n) |
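Example of the string shorthand handled above (the layer name and channel index are illustrative):

obj = as_objective('mixed4a:42')   # equivalent to channel('mixed4a', 42)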
<SYSTEM_TASK:>
Gradient for constrained optimization on an L2 unit ball.
<END_TASK>
<USER_TASK:>
Description:
def _constrain_L2_grad(op, grad):
"""Gradient for constrained optimization on an L2 unit ball.
This function projects the gradient onto the ball if you are on the boundary
(or outside!), but leaves it untouched if you are inside the ball.
Args:
op: the tensorflow op we're computing the gradient for.
grad: gradient we need to backprop
Returns:
(projected if necessary) gradient.
""" |
inp = op.inputs[0]
inp_norm = tf.norm(inp)
unit_inp = inp / inp_norm
grad_projection = dot(unit_inp, grad)
parallel_grad = unit_inp * grad_projection
is_in_ball = tf.less_equal(inp_norm, 1)
is_pointed_inward = tf.less(grad_projection, 0)
allow_grad = tf.logical_or(is_in_ball, is_pointed_inward)
clip_grad = tf.logical_not(allow_grad)
clipped_grad = tf.cond(clip_grad, lambda: grad - parallel_grad, lambda: grad)
return clipped_grad |
<SYSTEM_TASK:>
A tensorflow variable transformed to be constrained in an L2 unit ball.
<END_TASK>
<USER_TASK:>
Description:
def unit_ball_L2(shape):
"""A tensorflow variable tranfomed to be constrained in a L2 unit ball.
EXPERIMENTAL: Do not use for adverserial examples if you need to be confident
they are strong attacks. We are not yet confident in this code.
""" |
x = tf.Variable(tf.zeros(shape))
return constrain_L2(x) |
<SYSTEM_TASK:>
A tensorflow variable transformed to be constrained in an L_inf unit ball.
<END_TASK>
<USER_TASK:>
Description:
def unit_ball_L_inf(shape, precondition=True):
"""A tensorflow variable tranfomed to be constrained in a L_inf unit ball.
Note that this code also preconditions the gradient to go in the L_inf
direction of steepest descent.
EXPERIMENTAL: Do not use for adverserial examples if you need to be confident
they are strong attacks. We are not yet confident in this code.
""" |
x = tf.Variable(tf.zeros(shape))
if precondition:
return constrain_L_inf_precondition(x)
else:
return constrain_L_inf(x) |
<SYSTEM_TASK:>
Flexible optimization-based feature vis.
<END_TASK>
<USER_TASK:>
Description:
def render_vis(model, objective_f, param_f=None, optimizer=None,
transforms=None, thresholds=(512,), print_objectives=None,
verbose=True, relu_gradient_override=True, use_fixed_seed=False):
"""Flexible optimization-base feature vis.
There's a lot of ways one might wish to customize otpimization-based
feature visualization. It's hard to create an abstraction that stands up
to all the things one might wish to try.
This function probably can't do *everything* you want, but it's much more
flexible than a naive attempt. The basic abstraction is to split the problem
into several parts. Consider the rguments:
Args:
model: The model to be visualized, from Alex' modelzoo.
objective_f: The objective our visualization maximizes.
See the objectives module for more details.
param_f: Parameterization of the image we're optimizing.
See the parameterization module for more details.
Defaults to a naively parameterized [1, 128, 128, 3] image.
optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance,
or a function from (graph, sess) to such an instance.
Defaults to Adam with lr .05.
transforms: A list of stochastic transformations that get composed,
which our visualization should robustly activate the network against.
See the transform module for more details.
Defaults to [transform.jitter(8)].
thresholds: A list of numbers of optimization steps, at which we should
save (and display if verbose=True) the visualization.
print_objectives: A list of objectives separate from those being optimized,
whose values get logged during the optimization.
verbose: Should we display the visualization when we hit a threshold?
This should only be used in IPython.
relu_gradient_override: Whether to use the gradient override scheme
described in lucid/misc/redirected_relu_grad.py. On by default!
use_fixed_seed: Seed the RNG with a fixed value so results are reproducible.
Off by default. As of tf 1.8 this does not work as intended, see:
https://github.com/tensorflow/tensorflow/issues/9171
Returns:
2D array of optimization results containing evaluations of the supplied
param_f snapshotted at specified thresholds. Usually that will mean one or
multiple channel visualizations stacked on top of each other.
""" |
with tf.Graph().as_default() as graph, tf.Session() as sess:
if use_fixed_seed: # does not mean results are reproducible, see Args doc
tf.set_random_seed(0)
T = make_vis_T(model, objective_f, param_f, optimizer, transforms,
relu_gradient_override)
print_objective_func = make_print_objective_func(print_objectives, T)
loss, vis_op, t_image = T("loss"), T("vis_op"), T("input")
tf.global_variables_initializer().run()
images = []
try:
for i in range(max(thresholds)+1):
loss_, _ = sess.run([loss, vis_op])
if i in thresholds:
vis = t_image.eval()
images.append(vis)
if verbose:
print(i, loss_)
print_objective_func(sess)
show(np.hstack(vis))
except KeyboardInterrupt:
log.warning("Interrupted optimization at step {:d}.".format(i+1))
vis = t_image.eval()
show(np.hstack(vis))
return images |
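A minimal usage sketch, assuming the lucid modelzoo is available (the model class, import path, and objective string are illustrative):

from lucid.modelzoo.vision_models import InceptionV1  # assumed import path

model = InceptionV1()
model.load_graphdef()
# Optimize channel 476 of mixed4a (pre-relu), snapshotting at steps 128 and 512.
images = render_vis(model, 'mixed4a_pre_relu:476', thresholds=(128, 512))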
<SYSTEM_TASK:>
Even more flexible optimization-based feature vis.
<END_TASK>
<USER_TASK:>
Description:
def make_vis_T(model, objective_f, param_f=None, optimizer=None,
transforms=None, relu_gradient_override=False):
"""Even more flexible optimization-base feature vis.
This function is the inner core of render_vis(), and can be used
when render_vis() isn't flexible enough. Unfortunately, it's a bit more
tedious to use:
> with tf.Graph().as_default() as graph, tf.Session() as sess:
>
> T = make_vis_T(model, "mixed4a_pre_relu:0")
> tf.initialize_all_variables().run()
>
> for i in range(10):
> T("vis_op").run()
> showarray(T("input").eval()[0])
This approach allows more control over how the visualization is displayed
as it renders. It also allows a lot more flexibility in constructing
objectives / params because the session is already in scope.
Args:
model: The model to be visualized, from Alex' modelzoo.
objective_f: The objective our visualization maximizes.
See the objectives module for more details.
param_f: Parameterization of the image we're optimizing.
See the parameterization module for more details.
Defaults to a naively parameterized [1, 128, 128, 3] image.
optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance,
or a function from (graph, sess) to such an instance.
Defaults to Adam with lr .05.
transforms: A list of stochastic transformations that get composed,
which our visualization should robustly activate the network against.
See the transform module for more details.
Defaults to [transform.jitter(8)].
Returns:
A function T, which allows access to:
* T("vis_op") -- the operation for to optimize the visualization
* T("input") -- the visualization itself
* T("loss") -- the loss for the visualization
* T(layer) -- any layer inside the network
""" |
# pylint: disable=unused-variable
t_image = make_t_image(param_f)
objective_f = objectives.as_objective(objective_f)
transform_f = make_transform_f(transforms)
optimizer = make_optimizer(optimizer, [])
global_step = tf.train.get_or_create_global_step()
init_global_step = tf.variables_initializer([global_step])
init_global_step.run()
if relu_gradient_override:
with gradient_override_map({'Relu': redirected_relu_grad,
'Relu6': redirected_relu6_grad}):
T = import_model(model, transform_f(t_image), t_image)
else:
T = import_model(model, transform_f(t_image), t_image)
loss = objective_f(T)
vis_op = optimizer.minimize(-loss, global_step=global_step)
local_vars = locals()
# pylint: enable=unused-variable
def T2(name):
if name in local_vars:
return local_vars[name]
else: return T(name)
return T2 |
<SYSTEM_TASK:>
Write a file for each tile
<END_TASK>
<USER_TASK:>
Description:
def write_grid_local(tiles, params):
"""
Write a file for each tile
""" |
# TODO: this isn't being used right now, will need to be
# ported to gfile if we want to keep it
for ti,tj,tile in enumerate_tiles(tiles):
filename = "{directory}/{name}/tile_{n_layer}_{n_tile}_{ti}_{tj}".format(ti=ti, tj=tj, **params) #directory=directory, name=name, n_layer=n_layer, n_tile=n_tile,
# write out the tile as a npz
print("saving", filename + ".npz")
np.savez_compressed(filename + ".npz", **tile)
# write out the tile as a csv
print("saving", filename + ".csv")
df = pd.DataFrame(tile)
df.to_csv(filename + ".csv", index=False) |
<SYSTEM_TASK:>
Load image file as numpy array.
<END_TASK>
<USER_TASK:>
Description:
def _load_img(handle, target_dtype=np.float32, size=None, **kwargs):
"""Load image file as numpy array.""" |
image_pil = PIL.Image.open(handle, **kwargs)
# resize the image to the requested size, if one was specified
if size is not None:
if len(size) > 2:
size = size[:2]
log.warning("`_load_img()` received size: {}, trimming to first two dims!".format(size))
image_pil = image_pil.resize(size, resample=PIL.Image.LANCZOS)
image_array = np.asarray(image_pil)
# remove alpha channel if it contains no information
# if image_array.shape[-1] > 3 and 'A' not in image_pil.mode:
# image_array = image_array[..., :-1]
image_dtype = image_array.dtype
image_max_value = np.iinfo(image_dtype).max # ...for uint8 that's 255, etc.
# using np.divide should avoid an extra copy compared to doing division first
ndimage = np.divide(image_array, image_max_value, dtype=target_dtype)
rank = len(ndimage.shape)
if rank == 3:
return ndimage
elif rank == 2:
return np.repeat(np.expand_dims(ndimage, axis=2), 3, axis=2)
else:
message = "Loaded image has more dimensions than expected: {}".format(rank)
raise NotImplementedError(message) |
<SYSTEM_TASK:>
Load and decode a string.
<END_TASK>
<USER_TASK:>
Description:
def _load_text(handle, split=False, encoding="utf-8"):
"""Load and decode a string.""" |
string = handle.read().decode(encoding)
return string.splitlines() if split else string |
<SYSTEM_TASK:>
Load a file.
<END_TASK>
<USER_TASK:>
Description:
def load(url_or_handle, cache=None, **kwargs):
"""Load a file.
File format is inferred from url. File retrieval strategy is inferred from
URL. Returned object type is inferred from url extension.
Args:
url_or_handle: a (reachable) URL, or an already open file handle
Raises:
RuntimeError: If file extension or URL is not supported.
""" |
ext = get_extension(url_or_handle)
try:
loader = loaders[ext.lower()]
message = "Using inferred loader '%s' due to passed file extension '%s'."
log.debug(message, loader.__name__[6:], ext)
return load_using_loader(url_or_handle, loader, cache, **kwargs)
except KeyError:
log.warning("Unknown extension '%s', attempting to load as image.", ext)
try:
with read_handle(url_or_handle, cache=cache) as handle:
result = _load_img(handle)
except Exception as e:
message = "Could not load resource %s as image. Supported extensions: %s"
log.error(message, url_or_handle, list(loaders))
raise RuntimeError(message % (url_or_handle, list(loaders)))
else:
log.info("Unknown extension '%s' successfully loaded as image.", ext)
return result |
<SYSTEM_TASK:>
Ensures the specified spatial shape by either padding or cropping.
<END_TASK>
<USER_TASK:>
Description:
def crop_or_pad_to(height, width):
"""Ensures the specified spatial shape by either padding or cropping.
Meant to be used as a last transform for architectures insisting on a specific
spatial shape of their inputs.
""" |
def inner(t_image):
return tf.image.resize_image_with_crop_or_pad(t_image, height, width)
return inner |
<SYSTEM_TASK:>
Given an arbitrary rank-3 NumPy array, produce one representing an image.
<END_TASK>
<USER_TASK:>
Description:
def _normalize_array(array, domain=(0, 1)):
"""Given an arbitrary rank-3 NumPy array, produce one representing an image.
This ensures the resulting array has a dtype of uint8 and a domain of 0-255.
Args:
array: NumPy array representing the image
domain: expected range of values in array,
defaults to (0, 1), if explicitly set to None will use the array's
own range of values and normalize them.
Returns:
normalized NumPy array of dtype uint8, with values in the range 0 to 255
""" |
# first copy the input so we're never mutating the user's data
array = np.array(array)
# squeeze helps both with batch=1 and B/W and PIL's mode inference
array = np.squeeze(array)
assert len(array.shape) <= 3
assert np.issubdtype(array.dtype, np.number)
assert not np.isnan(array).any()
low, high = np.min(array), np.max(array)
if domain is None:
message = "No domain specified, normalizing from measured (~%.2f, ~%.2f)"
log.debug(message, low, high)
domain = (low, high)
# clip values if domain was specified and array contains values outside of it
if low < domain[0] or high > domain[1]:
message = "Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f})."
log.info(message.format(low, high, domain[0], domain[1]))
array = array.clip(*domain)
min_value, max_value = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max # 0, 255
# convert signed to unsigned if needed
if np.issubdtype(array.dtype, np.inexact):
offset = domain[0]
if offset != 0:
array -= offset
log.debug("Converting inexact array by subtracting -%.2f.", offset)
scalar = max_value / (domain[1] - domain[0])
if scalar != 1:
array *= scalar
log.debug("Converting inexact array by scaling by %.2f.", scalar)
return array.clip(min_value, max_value).astype(np.uint8) |
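For instance, a float array with values in [-1, 1] maps onto the full uint8 range (a sketch, assuming numpy is imported as `np` as elsewhere in the module):
```python
float_img = np.random.uniform(-1, 1, size=(32, 32, 3)).astype(np.float32)
uint8_img = _normalize_array(float_img, domain=(-1, 1))
assert uint8_img.dtype == np.uint8  # values now span 0-255
```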
<SYSTEM_TASK:>
Given a normalized array, returns byte representation of image encoding.
<END_TASK>
<USER_TASK:>
Description:
def _serialize_normalized_array(array, fmt='png', quality=70):
"""Given a normalized array, returns byte representation of image encoding.
Args:
array: NumPy array of dtype uint8 and range 0 to 255
fmt: string describing desired file format, defaults to 'png'
quality: specifies compression quality from 0 to 100 for lossy formats
Returns:
image data as BytesIO buffer
""" |
dtype = array.dtype
assert np.issubdtype(dtype, np.unsignedinteger)
assert np.max(array) <= np.iinfo(dtype).max
assert array.shape[-1] > 1 # array dims must have been squeezed
image = PIL.Image.fromarray(array)
image_bytes = BytesIO()
image.save(image_bytes, fmt, quality=quality)
# TODO: Python 3 could save a copy here by using `getbuffer()` instead.
image_data = image_bytes.getvalue()
return image_data |
<SYSTEM_TASK:>
Given an arbitrary rank-3 NumPy array,
<END_TASK>
<USER_TASK:>
Description:
def serialize_array(array, domain=(0, 1), fmt='png', quality=70):
"""Given an arbitrary rank-3 NumPy array,
returns the byte representation of the encoded image.
Args:
array: NumPy array of any numeric dtype; will be normalized to uint8 first
domain: expected range of values in array, see `_normalize_array()`
fmt: string describing desired file format, defaults to 'png'
quality: specifies compression quality from 0 to 100 for lossy formats
Returns:
image data as BytesIO buffer
""" |
normalized = _normalize_array(array, domain=domain)
return _serialize_normalized_array(normalized, fmt=fmt, quality=quality) |
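A sketch of writing a float image to disk as a JPEG; the output path is illustrative:
```python
float_img = np.random.uniform(0, 1, size=(64, 64, 3))
jpeg_bytes = serialize_array(float_img, domain=(0, 1), fmt='jpeg', quality=85)
with open("out.jpg", "wb") as f:
    f.write(jpeg_bytes)
```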
<SYSTEM_TASK:>
Utility for applying f to inner dimension of acts.
<END_TASK>
<USER_TASK:>
Description:
def _apply_flat(cls, f, acts):
"""Utility for applying f to inner dimension of acts.
Flattens acts into a 2D tensor, applies f, then unflattens so that all
dimensions except the innermost are unchanged.
""" |
orig_shape = acts.shape
acts_flat = acts.reshape([-1, acts.shape[-1]])
new_flat = f(acts_flat)
if not isinstance(new_flat, np.ndarray):
return new_flat
shape = list(orig_shape[:-1]) + [-1]
return new_flat.reshape(shape) |
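A sketch of the flatten-apply-unflatten pattern; since `_apply_flat` is written as a method (its first parameter is `cls`), `None` is passed here purely for illustration:
```python
acts = np.random.randn(2, 7, 7, 512)             # [batch, h, w, channels]
unit = lambda flat: flat / np.linalg.norm(flat, axis=-1, keepdims=True)
out = _apply_flat(None, unit, acts)              # f sees a [2*7*7, 512] matrix
assert out.shape == acts.shape
```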
<SYSTEM_TASK:>
Create a data URL representing an image from a PIL.Image.
<END_TASK>
<USER_TASK:>
Description:
def _image_url(array, fmt='png', mode="data", quality=90, domain=None):
"""Create a data URL representing an image from a PIL.Image.
Args:
array: a NumPy array representing the image
mode: presently only supports "data" for data URL
Returns:
URL representing image
""" |
supported_modes = ("data")
if mode not in supported_modes:
message = "Unsupported mode '%s', should be one of '%s'."
raise ValueError(message, mode, supported_modes)
image_data = serialize_array(array, fmt=fmt, quality=quality)
base64_byte_string = base64.b64encode(image_data).decode('ascii')
return "data:image/" + fmt.upper() + ";base64," + base64_byte_string |
<SYSTEM_TASK:>
Display an image.
<END_TASK>
<USER_TASK:>
Description:
def image(array, domain=None, width=None, format='png', **kwargs):
"""Display an image.
Args:
array: NumPy array representing the image
format: Image format e.g. png, jpeg
domain: Domain of pixel values, inferred from min & max values if None
width: Width of output image, scaled using nearest neighbor interpolation.
Size unchanged if None
""" |
image_data = serialize_array(array, fmt=format, domain=domain)
image = IPython.display.Image(data=image_data, format=format, width=width)
IPython.display.display(image) |
<SYSTEM_TASK:>
Display a numpy array without having to specify what it represents.
<END_TASK>
<USER_TASK:>
Description:
def show(thing, domain=(0, 1), **kwargs):
"""Display a nupmy array without having to specify what it represents.
This module will attempt to infer how to display your tensor based on its
rank, shape and dtype. rank 4 tensors will be displayed as image grids, rank
2 and 3 tensors as images.
""" |
if isinstance(thing, np.ndarray):
rank = len(thing.shape)
if rank == 4:
log.debug("Show is assuming rank 4 tensor to be a list of images.")
images(thing, domain=domain, **kwargs)
elif rank in (2, 3):
log.debug("Show is assuming rank 2 or 3 tensor to be an image.")
image(thing, domain=domain, **kwargs)
else:
log.warning("Show only supports numpy arrays of rank 2-4. Using repr().")
print(repr(thing))
elif isinstance(thing, (list, tuple)):
log.debug("Show is assuming list or tuple to be a collection of images.")
images(thing, domain=domain, **kwargs)
else:
log.warning("Show only supports numpy arrays so far. Using repr().")
print(repr(thing)) |
<SYSTEM_TASK:>
Strip large constant values from graph_def.
<END_TASK>
<USER_TASK:>
Description:
def _strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def.
This is mostly a utility function for graph(), and also originates here:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
""" |
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = tf.compat.as_bytes("<stripped %d bytes>"%size)
return strip_def |
<SYSTEM_TASK:>
Takes two images and composites them.
<END_TASK>
<USER_TASK:>
Description:
def composite(
background_image,
foreground_image,
foreground_width_ratio=0.25,
foreground_position=(0.0, 0.0),
):
"""Takes two images and composites them.""" |
if foreground_width_ratio <= 0:
return background_image
composite = background_image.copy()
width = int(foreground_width_ratio * background_image.shape[1])
foreground_resized = resize(foreground_image, width)
size = foreground_resized.shape
x = int(foreground_position[1] * (background_image.shape[1] - size[1]))
y = int(foreground_position[0] * (background_image.shape[0] - size[0]))
# TODO: warn if resulting coordinates are out of bounds?
composite[y : y + size[0], x : x + size[1]] = foreground_resized
return composite |
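A usage sketch, assuming the module's `resize` helper (called above) accepts a target width; the arrays are placeholders:
```python
frame = np.zeros((480, 640, 3), dtype=np.float32)
logo = np.ones((64, 64, 3), dtype=np.float32)
# logo at 20% of frame width, anchored at the bottom-right corner
out = composite(frame, logo, foreground_width_ratio=0.2,
                foreground_position=(1.0, 1.0))
```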
<SYSTEM_TASK:>
Produces a tensor paramaterized by a interpolated lower resolution tensor.
<END_TASK>
<USER_TASK:>
Description:
def lowres_tensor(shape, underlying_shape, offset=None, sd=None):
"""Produces a tensor paramaterized by a interpolated lower resolution tensor.
This is like what is done in a laplacian pyramid, but a bit more general. It
can be a powerful way to describe images.
Args:
shape: desired shape of resulting tensor
underlying_shape: shape of the tensor being resized into final tensor
offset: Describes how to offset the interpolated vector (like phase in a
Fourier transform). If None, apply no offset. If a scalar, apply the same
offset to each dimension; if a list use each entry for each dimension.
If a int, offset by that much. If False, do not offset. If True, offset by
half the ratio between shape and underlying shape (analagous to 90
degrees).
sd: Standard deviation of initial tensor variable.
Returns:
A tensor paramaterized by a lower resolution tensorflow variable.
""" |
sd = sd or 0.01
init_val = sd * np.random.randn(*underlying_shape).astype("float32")
underlying_t = tf.Variable(init_val)
t = resize_bilinear_nd(underlying_t, shape)
if offset is not None:
# Deal with non-list offset
if not isinstance(offset, list):
offset = len(shape) * [offset]
# Deal with the non-int offset entries
for n in range(len(offset)):
if offset[n] is True:
offset[n] = shape[n] / underlying_shape[n] / 2
if offset[n] is False:
offset[n] = 0
offset[n] = int(offset[n])
# Actually apply offset by padding and then cropping off the excess.
padding = [(pad, 0) for pad in offset]
t = tf.pad(t, padding, "SYMMETRIC")
begin = len(shape) * [0]
t = tf.slice(t, begin, shape)
return t |
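A sketch in TF1 graph mode: a 128x128 RGB image parameterized by a 32x32 variable, bilinearly upsampled with a half-pixel offset on the spatial dimensions only:
```python
t_img = lowres_tensor(
    shape=[1, 128, 128, 3],
    underlying_shape=[1, 32, 32, 3],
    offset=[False, True, True, False],  # offset only height and width
)
```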
<SYSTEM_TASK:>
Read from any URL.
<END_TASK>
<USER_TASK:>
Description:
def read(url, encoding=None, cache=None, mode="rb"):
"""Read from any URL.
Internally differentiates between URLs supported by tf.gfile, such as URLs
with the Google Cloud Storage scheme ('gs://...') or local paths, and HTTP
URLs. This way users don't need to know about the underlying fetch mechanism.
Args:
url: a URL including scheme or a local path
mode: mode in which to open the file. defaults to binary ('rb')
encoding: if specified, encoding that should be used to decode read data
if mode is specified to be text ('r'), this defaults to 'utf-8'.
cache: whether to attempt caching the resource. Defaults to True only if
the given URL specifies a remote resource.
Returns:
All bytes from the specified resource, or a decoded string of those.
""" |
with read_handle(url, cache, mode=mode) as handle:
data = handle.read()
if encoding:
data = data.decode(encoding)
return data |
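Two common call patterns (the bucket and file names are placeholders):
```python
raw = read("gs://some-bucket/blob.bin")     # bytes; cached because remote
text = read("notes.txt", encoding="utf-8")  # bytes read, then decoded to str
```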
<SYSTEM_TASK:>
Read from any URL with a file handle.
<END_TASK>
<USER_TASK:>
Description:
def read_handle(url, cache=None, mode="rb"):
"""Read from any URL with a file handle.
Use this to get a handle to a file rather than eagerly load the data:
```
with read_handle(url) as handle:
result = something.load(handle)
result.do_something()
```
When program execution leaves this `with` block, the handle will be closed
automatically.
Args:
url: a URL including scheme or a local path
Returns:
A file handle to the specified resource if it could be reached.
The handle will be closed automatically once execution leaves this context.
""" |
scheme = urlparse(url).scheme
if cache == 'purge':
_purge_cached(url)
cache = None
if _is_remote(scheme) and cache is None:
cache = True
log.debug("Cache not specified, enabling because resource is remote.")
if cache:
handle = _read_and_cache(url, mode=mode)
else:
if scheme in ("http", "https"):
handle = _handle_web_url(url, mode=mode)
elif scheme in ("gs"):
handle = _handle_gfile(url, mode=mode)
else:
handle = open(url, mode=mode)
try:
yield handle
finally:
handle.close()  # close even if the caller's `with` body raised
<SYSTEM_TASK:>
Returns the path that remote_url would be cached at locally.
<END_TASK>
<USER_TASK:>
Description:
def local_cache_path(remote_url):
"""Returns the path that remote_url would be cached at locally.""" |
local_name = RESERVED_PATH_CHARS.sub("_", remote_url)
return os.path.join(gettempdir(), local_name) |
<SYSTEM_TASK:>
Compositional Pattern Producing Network
<END_TASK>
<USER_TASK:>
Description:
def cppn(
width,
batch=1,
num_output_channels=3,
num_hidden_channels=24,
num_layers=8,
activation_func=_composite_activation,
normalize=False,
):
"""Compositional Pattern Producing Network
Args:
width: width of resulting image, equals height
batch: batch dimension of output, note that all params share the same weights!
num_output_channels: number of channels in the produced image, e.g. 3 for RGB
num_hidden_channels: number of channels in each hidden 1x1 conv layer
num_layers: number of hidden layers
activation_func: nonlinearity applied after each hidden layer
normalize: whether to apply instance normalization before the activation
Returns:
A tensor of shape [batch, width, width, num_output_channels].
""" |
r = 3.0 ** 0.5 # std(coord_range) == 1.0
coord_range = tf.linspace(-r, r, width)
y, x = tf.meshgrid(coord_range, coord_range, indexing="ij")
net = tf.stack([tf.stack([x, y], -1)] * batch, 0)
with slim.arg_scope(
[slim.conv2d],
kernel_size=[1, 1],
activation_fn=None,
weights_initializer=tf.initializers.variance_scaling(),
biases_initializer=tf.initializers.random_normal(0.0, 0.1),
):
for i in range(num_layers):
x = slim.conv2d(net, num_hidden_channels)
if normalize:
x = slim.instance_norm(x)
net = activation_func(x)
rgb = slim.conv2d(
net,
num_output_channels,
activation_fn=tf.nn.sigmoid,
weights_initializer=tf.zeros_initializer(),
)
return rgb |
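A sketch, assuming a TF1 graph and `tf.contrib.slim` imported as `slim` (as the body implies); the output can serve as an image parameterization for optimization-based visualization:
```python
t_rgb = cppn(width=128)  # tensor of shape [1, 128, 128, 3], values in (0, 1)
```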
<SYSTEM_TASK:>
Renders two aligned Activation Atlases of the given models' layers.
<END_TASK>
<USER_TASK:>
Description:
def aligned_activation_atlas(
model1,
layer1,
model2,
layer2,
grid_size=10,
icon_size=80,
num_steps=1024,
whiten_layers=True,
number_activations=NUMBER_OF_AVAILABLE_SAMPLES,
icon_batch_size=32,
verbose=False,
):
"""Renders two aligned Activation Atlases of the given models' layers.
Returns a generator of the two atlases, and a nested generator for intermediate
atlases while they're being rendered.
""" |
combined_activations = _combine_activations(
layer1, layer2, number_activations=number_activations
)
layouts = aligned_umap(combined_activations, verbose=verbose)
for model, layer, layout in zip((model1, model2), (layer1, layer2), layouts):
directions, coordinates, densities = bin_laid_out_activations(
layout, layer.activations[:number_activations, ...], grid_size, threshold=10
)
def _progressive_canvas_iterator():
icons = []
for directions_batch in chunked(directions, icon_batch_size):
icon_batch, losses = render_icons(
directions_batch,
model,
alpha=False,
layer=layer.name,
size=icon_size,
n_steps=num_steps,
S=layer_inverse_covariance(layer) if whiten_layers else None,
)
icons += icon_batch
yield make_canvas(icons, coordinates, grid_size)
yield _progressive_canvas_iterator() |
<SYSTEM_TASK:>
Given two layers, combines their activations according to mode.
<END_TASK>
<USER_TASK:>
Description:
def _combine_activations(
layer1,
layer2,
activations1=None,
activations2=None,
mode=ActivationTranslation.BIDIRECTIONAL,
number_activations=NUMBER_OF_AVAILABLE_SAMPLES,
):
"""Given two layers, combines their activations according to mode.
ActivationTranslation.ONE_TO_TWO:
Translate activations of layer1 into the space of layer2, and return a tuple of
the translated activations and the original layer2 activations.
ActivationTranslation.BIDIRECTIONAL:
Translate activations of layer1 into the space of layer2, activations of layer2
into the space of layer1, concatenate each with the originals along their
channels, and return a tuple of the concatenated activations for each layer.
""" |
# `is None` checks: `or` would raise on numpy arrays (ambiguous truth value)
if activations1 is None:
activations1 = layer1.activations[:number_activations, ...]
if activations2 is None:
activations2 = layer2.activations[:number_activations, ...]
if mode is ActivationTranslation.ONE_TO_TWO:
acts_1_to_2 = push_activations(activations1, layer1, layer2)
return acts_1_to_2, activations2
elif mode is ActivationTranslation.BIDIRECTIONAL:
acts_1_to_2 = push_activations(activations1, layer1, layer2)
acts_2_to_1 = push_activations(activations2, layer2, layer1)
activations_model1 = np.concatenate((activations1, acts_1_to_2), axis=1)
activations_model2 = np.concatenate((acts_2_to_1, activations2), axis=1)
return activations_model1, activations_model2 |
<SYSTEM_TASK:>
Given a layout and activations, overlays a grid on the layout and returns
<END_TASK>
<USER_TASK:>
Description:
def bin_laid_out_activations(layout, activations, grid_size, threshold=5):
"""Given a layout and activations, overlays a grid on the layout and returns
averaged activations for each grid cell. If a cell contains `threshold` or fewer
activations it will be discarded, so the number of returned cells is variable.""" |
assert layout.shape[0] == activations.shape[0]
# calculate which grid cells each activation's layout position falls into
# first bin stays empty because nothing should be < 0, so we add an extra bin
bins = np.linspace(0, 1, num=grid_size + 1)
bins[-1] = np.inf # last bin should include all higher values
indices = np.digitize(layout, bins) - 1 # subtract 1 to account for empty first bin
# because of thresholding we may need to return a variable number of means
means, coordinates, counts = [], [], []
# iterate over all grid cell coordinates to compute their average directions
grid_coordinates = np.indices((grid_size, grid_size)).transpose().reshape(-1, 2)
for xy_coordinates in grid_coordinates:
mask = np.equal(xy_coordinates, indices).all(axis=1)
count = np.count_nonzero(mask)
if count > threshold:
counts.append(count)
coordinates.append(xy_coordinates)
mean = np.average(activations[mask], axis=0)
means.append(mean)
assert len(means) == len(coordinates) == len(counts)
if len(coordinates) == 0:
raise RuntimeError("Binning activations led to 0 cells containing activations!")
return means, coordinates, counts |
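A sketch with synthetic data standing in for a UMAP layout; coordinates are assumed to already be scaled into [0, 1]:
```python
layout = np.random.uniform(0, 1, size=(10000, 2))  # placeholder layout
activations = np.random.randn(10000, 512)          # matching activations
means, coords, counts = bin_laid_out_activations(layout, activations, grid_size=20)
```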
<SYSTEM_TASK:>
Return frozen and simplified graph_def of default graph.
<END_TASK>
<USER_TASK:>
Description:
def frozen_default_graph_def(input_node_names, output_node_names):
"""Return frozen and simplified graph_def of default graph.""" |
sess = tf.get_default_session()
input_graph_def = tf.get_default_graph().as_graph_def()
pruned_graph = tf.graph_util.remove_training_nodes(
input_graph_def, protected_nodes=(output_node_names + input_node_names)
)
pruned_graph = tf.graph_util.extract_sub_graph(pruned_graph, output_node_names)
# remove explicit device assignments
for node in pruned_graph.node:
node.device = ""
all_variable_names = [v.op.name for v in tf.global_variables()]
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=pruned_graph,
output_node_names=output_node_names,
variable_names_whitelist=all_variable_names,
)
return output_graph_def |
<SYSTEM_TASK:>
Embed meta data as a string constant in a TF graph.
<END_TASK>
<USER_TASK:>
Description:
def infuse_metadata(graph_def, info):
"""Embed meta data as a string constant in a TF graph.
This function takes info, converts it into json, and embeds
it in graph_def as a constant op called `__lucid_metadata_json`.
""" |
temp_graph = tf.Graph()
with temp_graph.as_default():
tf.constant(json.dumps(info, cls=NumpyJSONEncoder), name=metadata_node_name)
meta_node = temp_graph.as_graph_def().node[0]
graph_def.node.extend([meta_node]) |
<SYSTEM_TASK:>
Attempt to extract meta data hidden in graph_def.
<END_TASK>
<USER_TASK:>
Description:
def extract_metadata(graph_def):
"""Attempt to extract meta data hidden in graph_def.
Looks for a `__lucid_metadata_json` constant string op.
If present, extract its content and convert it from json to python.
If not, returns None.
""" |
meta_matches = [n for n in graph_def.node if n.name==metadata_node_name]
if meta_matches:
assert len(meta_matches) == 1, "found more than 1 lucid metadata node!"
meta_tensor = meta_matches[0].attr['value'].tensor
return json.loads(meta_tensor.string_val[0])
else:
return None |
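A round-trip sketch; `metadata_node_name` is the module-level constant both functions reference:
```python
graph_def = tf.Graph().as_graph_def()  # an (empty) GraphDef to annotate
info = {"input_name": "input", "image_value_range": [0, 1]}
infuse_metadata(graph_def, info)
assert extract_metadata(graph_def) == info
```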
<SYSTEM_TASK:>
Return all nodes within `degree` edges of the given node.
<END_TASK>
<USER_TASK:>
Description:
def neighborhood(self, node, degree=4):
"""Am I really handcoding graph traversal please no""" |
assert self.by_name[node.name] == node
already_visited = frontier = set([node.name])
for _ in range(degree):
neighbor_names = set()
for node_name in frontier:
outgoing = set(n.name for n in self.by_input[node_name])
incoming = set(self.by_name[node_name].input)
neighbor_names |= incoming | outgoing
frontier = neighbor_names - already_visited
already_visited |= neighbor_names
return [self.by_name[name] for name in already_visited] |
<SYSTEM_TASK:>
An iterator that recursively walks through this cog's commands and subcommands.
<END_TASK>
<USER_TASK:>
Description:
def walk_commands(self):
"""An iterator that recursively walks through this cog's commands and subcommands.""" |
from .core import GroupMixin
for command in self.__cog_commands__:
if command.parent is None:
yield command
if isinstance(command, GroupMixin):
yield from command.walk_commands() |
<SYSTEM_TASK:>
A decorator that marks a function as a listener.
<END_TASK>
<USER_TASK:>
Description:
def listener(cls, name=None):
"""A decorator that marks a function as a listener.
This is the cog equivalent of :meth:`.Bot.listen`.
Parameters
------------
name: :class:`str`
The name of the event being listened to. If not provided, it
defaults to the function's name.
Raises
--------
TypeError
The function is not a coroutine function or a string was not passed as
the name.
""" |
if name is not None and not isinstance(name, str):
raise TypeError('Cog.listener expected str but received {0.__class__.__name__!r} instead.'.format(name))
def decorator(func):
actual = func
if isinstance(actual, staticmethod):
actual = actual.__func__
if not inspect.iscoroutinefunction(actual):
raise TypeError('Listener function must be a coroutine function.')
actual.__cog_listener__ = True
to_assign = name or actual.__name__
try:
actual.__cog_listener_names__.append(to_assign)
except AttributeError:
actual.__cog_listener_names__ = [to_assign]
# we have to return `func` instead of `actual` because
# we need the type to be `staticmethod` for the metaclass
# to pick it up but the metaclass unfurls the function and
# thus the assignments need to be on the actual function
return func
return decorator |
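A usage sketch inside a cog, assuming the usual `from discord.ext import commands` import:
```python
class Greeter(commands.Cog):
    @commands.Cog.listener()                   # name defaults to the function's
    async def on_member_join(self, member):
        print("{} joined".format(member))

    @commands.Cog.listener(name="on_message")  # listen under a different event name
    async def log_message(self, message):
        print(message.content)
```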
<SYSTEM_TASK:>
Sets the footer for the embed content.
<END_TASK>
<USER_TASK:>
Description:
def set_footer(self, *, text=EmptyEmbed, icon_url=EmptyEmbed):
"""Sets the footer for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
text: :class:`str`
The footer text.
icon_url: :class:`str`
The URL of the footer icon. Only HTTP(S) is supported.
""" |
self._footer = {}
if text is not EmptyEmbed:
self._footer['text'] = str(text)
if icon_url is not EmptyEmbed:
self._footer['icon_url'] = str(icon_url)
return self |
<SYSTEM_TASK:>
Sets the author for the embed content.
<END_TASK>
<USER_TASK:>
Description:
def set_author(self, *, name, url=EmptyEmbed, icon_url=EmptyEmbed):
"""Sets the author for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
name: :class:`str`
The name of the author.
url: :class:`str`
The URL for the author.
icon_url: :class:`str`
The URL of the author icon. Only HTTP(S) is supported.
""" |
self._author = {
'name': str(name)
}
if url is not EmptyEmbed:
self._author['url'] = str(url)
if icon_url is not EmptyEmbed:
self._author['icon_url'] = str(icon_url)
return self |
<SYSTEM_TASK:>
Adds a field to the embed object.
<END_TASK>
<USER_TASK:>
Description:
def add_field(self, *, name, value, inline=True):
"""Adds a field to the embed object.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
name: :class:`str`
The name of the field.
value: :class:`str`
The value of the field.
inline: :class:`bool`
Whether the field should be displayed inline.
""" |
field = {
'inline': inline,
'name': str(name),
'value': str(value)
}
try:
self._fields.append(field)
except AttributeError:
self._fields = [field]
return self |
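Because each setter returns `self`, calls chain fluently (a sketch, assuming `discord` is imported):
```python
embed = (
    discord.Embed(title="Server Stats")
    .add_field(name="Members", value="42")
    .add_field(name="Channels", value="7", inline=False)
    .set_footer(text="updated hourly")
)
```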
<SYSTEM_TASK:>
Modifies a field of the embed object.
<END_TASK>
<USER_TASK:>
Description:
def set_field_at(self, index, *, name, value, inline=True):
"""Modifies a field to the embed object.
The index must point to a valid pre-existing field.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
index: :class:`int`
The index of the field to modify.
name: :class:`str`
The name of the field.
value: :class:`str`
The value of the field.
inline: :class:`bool`
Whether the field should be displayed inline.
Raises
-------
IndexError
An invalid index was provided.
""" |
try:
field = self._fields[index]
except (TypeError, IndexError, AttributeError):
raise IndexError('field index out of range')
field['name'] = str(name)
field['value'] = str(value)
field['inline'] = inline
return self |
<SYSTEM_TASK:>
Returns a friendly URL version of the avatar the user has.
<END_TASK>
<USER_TASK:>
Description:
def avatar_url_as(self, *, format=None, static_format='webp', size=1024):
"""Returns a friendly URL version of the avatar the user has.
If the user does not have a traditional avatar, their default
avatar URL is returned instead.
The format must be one of 'webp', 'jpeg', 'jpg', 'png' or 'gif', and
'gif' is only valid for animated avatars. The size must be a power of 2
between 16 and 1024.
Parameters
-----------
format: Optional[:class:`str`]
The format to attempt to convert the avatar to.
If the format is ``None``, then it is automatically
detected as either 'gif' or static_format, depending on whether
the avatar is animated.
static_format: Optional[:class:`str`]
Format to attempt to convert only non-animated avatars to.
Defaults to 'webp'
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or ``static_format``, or
invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
""" |
return Asset._from_avatar(self._state, self, format=format, static_format=static_format, size=size) |
<SYSTEM_TASK:>
Checks if the user is mentioned in the specified message.
<END_TASK>
<USER_TASK:>
Description:
def mentioned_in(self, message):
"""Checks if the user is mentioned in the specified message.
Parameters
-----------
message: :class:`Message`
The message to check if you're mentioned in.
""" |
if message.mention_everyone:
return True
for user in message.mentions:
if user.id == self.id:
return True
return False |
<SYSTEM_TASK:>
Returns a numeric snowflake pretending to be created at the given date.
<END_TASK>
<USER_TASK:>
Description:
def time_snowflake(datetime_obj, high=False):
"""Returns a numeric snowflake pretending to be created at the given date.
When using as the lower end of a range, use time_snowflake(high=False) - 1 to be inclusive, high=True to be exclusive
When using as the higher end of a range, use time_snowflake(high=True) + 1 to be inclusive, high=False to be exclusive
Parameters
-----------
datetime_obj
A timezone-naive datetime object representing UTC time.
high: :class:`bool`
Whether to set the lower 22 bits to their maximum value (high) or to zero (low).
""" |
unix_seconds = (datetime_obj - type(datetime_obj)(1970, 1, 1)).total_seconds()
discord_millis = int(unix_seconds * 1000 - DISCORD_EPOCH)
return (discord_millis << 22) + (2**22-1 if high else 0) |
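A sketch of the inclusive-range recipe the docstring describes, covering one UTC day:
```python
import datetime

day = datetime.datetime(2020, 1, 1)          # timezone-naive, interpreted as UTC
after = time_snowflake(day, high=False) - 1  # inclusive lower bound
before = time_snowflake(day + datetime.timedelta(days=1), high=True) + 1  # inclusive upper bound
```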