markdown (stringlengths 0-1.02M) | code (stringlengths 0-832k) | output (stringlengths 0-1.02M) | license (stringlengths 3-36) | path (stringlengths 6-265) | repo_name (stringlengths 6-127) |
---|---|---|---|---|---|
Assess privacy risks with TensorFlow Privacy Membership Inference Attacks. Overview: In this codelab we'll train a simple image classification model on the CIFAR10 dataset, and then use the "membership inference attack" against this model to assess if the attacker is able to "guess" whether a particular sample was present in the training set. Setup: First, set this notebook's runtime to use a GPU, under Runtime > Change runtime type > Hardware accelerator. Then, begin importing the necessary libraries. | #@title Import statements.
import numpy as np
from typing import Tuple, Text
from scipy import special
import tensorflow as tf
import tensorflow_datasets as tfds
# Set verbosity.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from warnings import simplefilter
from sklearn.exceptions import ConvergenceWarning
simplefilter(action="ignore", category=ConvergenceWarning)
simplefilter(action="ignore", category=FutureWarning) | _____no_output_____ | Apache-2.0 | tensorflow_privacy/privacy/membership_inference_attack/codelab.ipynb | LuluBeatson/privacy |
Install TensorFlow Privacy. | !pip3 install git+https://github.com/tensorflow/privacy
from tensorflow_privacy.privacy.membership_inference_attack import membership_inference_attack as mia | _____no_output_____ | Apache-2.0 | tensorflow_privacy/privacy/membership_inference_attack/codelab.ipynb | LuluBeatson/privacy |
Train a simple model on CIFAR10 with Keras. | dataset = 'cifar10'
num_classes = 10
num_conv = 3
activation = 'relu'
optimizer = 'adam'
lr = 0.02
momentum = 0.9
batch_size = 250
epochs = 100 # Privacy risks are especially visible with lots of epochs.
def small_cnn(input_shape: Tuple[int],
num_classes: int,
num_conv: int,
activation: Text = 'relu') -> tf.keras.models.Sequential:
"""Setup a small CNN for image classification.
Args:
input_shape: Integer tuple for the shape of the images.
num_classes: Number of prediction classes.
num_conv: Number of convolutional layers.
activation: The activation function to use for conv and dense layers.
Returns:
The Keras model.
"""
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(shape=input_shape))
# Conv layers
for _ in range(num_conv):
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation=activation))
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation=activation))
model.add(tf.keras.layers.Dense(num_classes))
return model
print('Loading the dataset.')
train_ds = tfds.as_numpy(
tfds.load(dataset, split=tfds.Split.TRAIN, batch_size=-1))
test_ds = tfds.as_numpy(
tfds.load(dataset, split=tfds.Split.TEST, batch_size=-1))
x_train = train_ds['image'].astype('float32') / 255.
y_train_indices = train_ds['label'][:, np.newaxis]
x_test = test_ds['image'].astype('float32') / 255.
y_test_indices = test_ds['label'][:, np.newaxis]
# Convert class vectors to binary class matrices.
y_train = tf.keras.utils.to_categorical(y_train_indices, num_classes)
y_test = tf.keras.utils.to_categorical(y_test_indices, num_classes)
input_shape = x_train.shape[1:]
model = small_cnn(
input_shape, num_classes, num_conv=num_conv, activation=activation)
print('Optimizer ', optimizer)
print('learning rate %f' % lr)
optimizer = tf.keras.optimizers.SGD(lr=lr, momentum=momentum)
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
model.summary()
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True)
print('Finished training.')
#@title Calculate logits, probabilities and loss values for training and test sets.
#@markdown We will use these values later in the membership inference attack to
#@markdown separate training and test samples.
print('Predict on train...')
logits_train = model.predict(x_train, batch_size=batch_size)
print('Predict on test...')
logits_test = model.predict(x_test, batch_size=batch_size)
print('Apply softmax to get probabilities from logits...')
prob_train = special.softmax(logits_train, axis=1)
prob_test = special.softmax(logits_test, axis=1)
print('Compute losses...')
cce = tf.keras.backend.categorical_crossentropy
constant = tf.keras.backend.constant
loss_train = cce(constant(y_train), constant(prob_train), from_logits=False).numpy()
loss_test = cce(constant(y_test), constant(prob_test), from_logits=False).numpy() | _____no_output_____ | Apache-2.0 | tensorflow_privacy/privacy/membership_inference_attack/codelab.ipynb | LuluBeatson/privacy |
Run membership inference attacks. | #@markdown We will now execute membership inference attack against the
#@markdown previously trained CIFAR10 model. This will generate a number of
#@markdown scores (most notably, attacker advantage and AUC for the membership
#@markdown inference classifier). An AUC of close to 0.5 means that the attack
#@markdown isn't able to identify training samples, which means that the model
#@markdown doesn't have privacy issues according to this test. Higher values,
#@markdown on the contrary, indicate potential privacy issues.
labels_train = np.argmax(y_train, axis=1)
labels_test = np.argmax(y_test, axis=1)
results_without_classifiers = mia.run_all_attacks(
loss_train,
loss_test,
logits_train,
logits_test,
labels_train,
labels_test,
attack_classifiers=[],
)
print(results_without_classifiers)
# Note: This will take a while, since it also trains ML models to
# separate train/test examples. If it's taking too long, use
# the `run_all_attacks` function instead.
attack_result_summary = mia.run_all_attacks_and_create_summary(
loss_train,
loss_test,
logits_train,
logits_test,
labels_train,
labels_test,
)[0]
print(attack_result_summary) | _____no_output_____ | Apache-2.0 | tensorflow_privacy/privacy/membership_inference_attack/codelab.ipynb | LuluBeatson/privacy |
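As a rough cross-check of the AUC and attacker-advantage numbers reported above, the membership signal can also be estimated directly from the per-example losses. The sketch below is an illustrative addition (not part of the original codelab): it scores "low loss means member" and computes the AUC and the advantage max(TPR - FPR) with scikit-learn, reusing the `loss_train` and `loss_test` arrays computed earlier.

```python
# Illustrative loss-threshold attack check (added; not from the original codelab).
from sklearn.metrics import roc_curve, auc

# Members (training examples) are labeled 1, non-members (test examples) 0.
membership_labels = np.concatenate([np.ones_like(loss_train), np.zeros_like(loss_test)])
# Lower loss suggests memorization, so use the negative loss as the attack score.
attack_scores = np.concatenate([-loss_train, -loss_test])

fpr, tpr, _ = roc_curve(membership_labels, attack_scores)
print('Loss-threshold attack AUC:', auc(fpr, tpr))
print('Attacker advantage (max TPR - FPR):', np.max(tpr - fpr))
```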
Naive search with a sliding window | def correspondance_motif(texte, motif, i):
    """Check whether the pattern `motif` matches the text `texte`
    starting at position i"""
if i + len(motif) > len(texte):
return False
for j in range(0, len(motif)):
if motif[j] != texte[i + j]:
return False
return True
def recherche_motif_naive(texte, motif):
"""Retourne la position où le motif a été trouvé par fenetre glissante
ou -1 si le motif ne se trouve pas dans le texte
Si n = len(texte) et m = len(motif), la complexité est en O((n-m)*m)"""
for i in range(len(texte) - len(motif) + 1):
if correspondance_motif(texte, motif,i):
return i
return -1 | _____no_output_____ | CC0-1.0 | bloc5/RechercheTextuelle/Recherche_textuelle.ipynb | frederic-junier/DIU-Junier |
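A quick illustrative check of the naive search (added here; it is not in the original notebook): the pattern used later in the Boyer-Moore examples is found at index 5.

```python
# Sanity check of the naive sliding-window search (illustrative addition).
texte = "GCATCGCAGAGAGTATACAGTACG"
motif = "GCAGAGAG"
print(recherche_motif_naive(texte, motif))   # -> 5: the pattern starts at index 5
print(recherche_motif_naive(texte, "TTTT"))  # -> -1: the pattern is absent
```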
Boyer-Moore algorithm. References: * [https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string-search_algorithm](https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string-search_algorithm)* [http://whocouldthat.be/visualizing-string-matching/](http://whocouldthat.be/visualizing-string-matching/)* [https://www.inf.hs-flensburg.de/lang/algorithmen/pattern/bmen.htm](https://www.inf.hs-flensburg.de/lang/algorithmen/pattern/bmen.htm) Bad character rule | def mauvais_caractere(motif, alphabet):
    """Return a dictionary giving, for each character of the alphabet, the shift
    counted from the end of the pattern before finding that character.
    The last letter of the pattern is not counted, and the shift is m = len(motif)
    if the character does not occur in the pattern."""
    m = len(motif)
    #mc = [0] * len(alphabet)
    mc = {c : 0 for c in alphabet}  # a dictionary is more convenient here than a list
for c in alphabet:
k = 1
while k < m and c != motif[m - 1 - k]:
k = k + 1
mc[c] = k
return mc
mauvais_caractere('GCAGAGAG', 'ACGT')
def correspondance_suffixe(motif, i, j):
m = len(motif)
if motif[j] != motif[i]:
d = 1
while i + d < m and motif[j + d] == motif[i + d]:
d += 1
return i + d == m
return False
def comparaison_prefixe_suffixe(debut_suffixe, motif):
index_prefixe = 0
index_suffixe = debut_suffixe
m = len(motif)
while index_suffixe < m and motif[index_suffixe] == motif[index_prefixe]:
index_prefixe += 1
index_suffixe += 1
return index_suffixe == m
def bon_suffixe(motif):
m = len(motif)
bs = [0] * m
for i in range(m - 1, -1, -1):
j = i - 1
while j >= 0 and not correspondance_suffixe(motif, i, j):
j = j - 1
        if j >= 0:  # first case of the good suffix rule
            bs[i] = i - j
        else:  # second case of the good suffix rule: look for the start of a suffix/prefix
p = i + 1
while p < m and not comparaison_prefixe_suffixe(p, motif):
p = p + 1
bs[i] = p
return bs
bon_suffixe('GCAGAGAG')
bon_suffixe('ABABA')
bon_suffixe('AAA')
def boyer_moore(texte, motif, alphabet):
    # initialize the lengths
    n = len(texte)
    m = len(motif)
    # pre-process the pattern
    bs = bon_suffixe(motif)
    mc = mauvais_caractere(motif, alphabet)
    print(bs, mc)
    # search for the pattern in the text
    i = 0  # index in the text
    while i <= n - m:
        j = m - 1  # read the pattern from right to left
        while j >= 0 and motif[j] == texte[i+j]:
            j = j - 1
        if j < 0:
            print(f"Motif trouvé en {i}")
            # shift the pattern
            i = i + bs[0]
        else:
            # shift the pattern
            i = i + max(bs[j], mc[texte[i+j]] + j - m + 1)
texte = "GCATCGCAGAGAGTATACAGTACG"
motif = "GCAGAGAG"
alphabet = "ACGT"
boyer_moore(texte, motif, alphabet)
T = "GCATCGCAGAGAGTATACAGTACG"
M = "GCAGAGAG"
alphabet = "ACGT"
boyer_moore(T, M, alphabet)
bon_suffixe(M)
T='CBABABA'
M='ABABA'
alphabet = "ACB"
print("Mauvais caractère : ", mauvais_caractere(M, 'ABC'))
print("Bon suffixe : ", bon_suffixe(M))
print(f"Recherche de {M} dans {T} avec Boyer-Moore")
boyer_moore(T, M, alphabet)
bon_suffixe("TATATA")
bon_suffixe("AAA") | _____no_output_____ | CC0-1.0 | bloc5/RechercheTextuelle/Recherche_textuelle.ipynb | frederic-junier/DIU-Junier |
Instructor's version | T = "GCATCGCAGAGAGTATACAGTACG"
M = "GCAGAGAG"
#M = "CCGGTGAA"
#T = "AAAAAAAAAAAAAAAAAAAA"
#M = "AAAAAA"
#T = "AAAAAAAAAAAAAAAAAAAA"
#M = "ACGT"
#M = "ACGCA"
n = len(T)
m = len(M)
for i in range(n-m+1):
for j in range(m):
        if T[i+j] != M[j]: # stop as soon as we see a difference (mismatch)
            break
    if (j == (m-1)): # stopping criterion is (j == (m-1)) because j is not incremented at the end
        print("motif trouvé en " + str(i))
nb_comp = 0 # total number of comparisons
i = 0
while (i <= (n-m)):
    j = 0
    while (j < m) and (T[i+j] == M[j]): # keep going as long as the characters match
        nb_comp += 1
        j = j + 1
    if (j == m): # note that the stopping criterion is (j == m) here
        print("motif trouvé en " + str(i))
    else:
        nb_comp += 1 # don't forget to count the failed comparison (mismatch)
i = i + 1
print("Nombre total de comparaisons : " + str(nb_comp))
| motif trouvé en 5
Nombre total de comparaisons : 30
| CC0-1.0 | bloc5/RechercheTextuelle/Recherche_textuelle.ipynb | frederic-junier/DIU-Junier |
Bad character heuristic | symboles = ["A", "C", "G", "T"] # this is the alphabet
# precompute MC
MC = {}
for s in symboles: # initialize to m by default (character not found in the pattern)
    MC[s] = m
for i in range(m-1):
MC[M[i]] = m-i-1
MC
import numpy as np
nb_comp = 0 # total number of comparisons
i = 0
while (i <= (n-m)):
    print("Position : " + str(i))
    j = m - 1 # start from the end of the pattern
    while (j >= 0) and (T[i+j] == M[j]): # keep going as long as the characters match
        #print("comp de " + str(i+j) + " et " + str(j))
        nb_comp += 1
        j = j - 1
    if (j >= 0):
        nb_comp += 1
        i = i + np.max([1, MC[T[i+j]] + j - m + 1])
    else: # note that the stopping criterion is now (j < 0)
print("motif trouvé en " + str(i))
i = i + 1
print("Nombre total de comparaisons : " + str(nb_comp))
| Position : 0
Position : 1
Position : 5
motif trouvé en 5
Position : 6
Position : 14
Position : 15
Nombre total de comparaisons : 15
| CC0-1.0 | bloc5/RechercheTextuelle/Recherche_textuelle.ipynb | frederic-junier/DIU-Junier |
Good suffix heuristic (BS) | M = "AAAA"
m = len(M)
# precompute BS
# (note: this is probably the least efficient implementation,
# but perhaps the clearest one)
# compute the longest prefix that is also a suffix (but not all of M)
pref_suff = m
for i in range(m-1):
if M[0:i+1] == M[m-(i+1):m]:
pref_suff = m-(i+1)
print(pref_suff)
BS = [pref_suff] * m
BS[m-1] = 1 # cas particulier pour le dernier symbole de M
# recherche du prochain motif le plus à droite
i = m - 2
while (i >= 0):
# motif à rechercher
MM = M[i+1:m]
l_MM = len(MM)
k = i
# on cherche le motif "à rebours"
while (k>=0):
if (M[k:k+l_MM] == MM) and ((k==0) or (M[k-1]!=M[i])):
print("à l'index " + str(i) + " : sous-motif " + MM + " trouvé en " + str(k))
BS[i] = i - k + 1
break;
k = k - 1
i = i - 1
BS
import numpy as np
nb_comp = 0 # total number of comparisons
i = 0
while (i <= (n-m)):
print("Position : " + str(i))
    j = m - 1 # start from the end of the pattern
    while (j >= 0) and (T[i+j] == M[j]): # keep going as long as the characters match
nb_comp += 1
j = j - 1
if (j >= 0):
nb_comp += 1
i = i + BS[j]
else:
print("motif trouvé en " + str(i))
i = i + BS[0]
print("Nombre total de comparaisons : " + str(nb_comp)) | Position : 0
Position : 1
Position : 2
Position : 3
Position : 4
Position : 7
Position : 8
Position : 11
Position : 14
Position : 15
Position : 18
Nombre total de comparaisons : 16
| CC0-1.0 | bloc5/RechercheTextuelle/Recherche_textuelle.ipynb | frederic-junier/DIU-Junier |
Boyer-Moore: putting it all together | import numpy as np
nb_comp = 0 # total number of comparisons
i = 0
while (i <= (n-m)):
    print("Position : " + str(i))
    j = m - 1 # start from the end of the pattern
    while (j >= 0) and (T[i+j] == M[j]): # keep going as long as the characters match
nb_comp += 1
j = j - 1
if (j >= 0):
nb_comp += 1
i = i + np.max([BS[j], MC[T[i+j]] + j - m + 1])
else:
print("motif trouvé en " + str(i))
i = i + BS[0]
print("Nombre total de comparaisons : " + str(nb_comp)) | Position : 0
Position : 8
Position : 11
Position : 18
Nombre total de comparaisons : 7
| CC0-1.0 | bloc5/RechercheTextuelle/Recherche_textuelle.ipynb | frederic-junier/DIU-Junier |
Test | T='CBABABA'
M='AEBBBA'
n = len(T)
m = len(M)
symboles = ["A", "C", "B"] # c'est l'alphabet
# calcul préalable de MC
MC = {}
for s in symboles: # on initialise à m par défaut (caractère introuvable dans le motif)
MC[s] = m
for i in range(m-1):
MC[M[i]] = m-i-1
# precompute BS
# (note: this is probably the least efficient implementation,
# but perhaps the clearest one)
# compute the longest prefix that is also a suffix (but not all of M)
pref_suff = m
for i in range(m-1):
if M[0:i+1] == M[m-(i+1):m]:
pref_suff = m-(i+1)
BS = [pref_suff] * m
print(pref_suff)
BS[m-1] = 1 # special case for the last symbol of M
# look for the next rightmost occurrence of the sub-pattern
i = m - 2
while (i >= 0):
    # sub-pattern to search for
MM = M[i+1:m]
l_MM = len(MM)
k = i
    # search for the sub-pattern "backwards"
while (k>=0):
if (M[k:k+l_MM] == MM) and ((k==0) or (M[k-1]!=M[i])):
#print("à l'index " + str(i) + " : sous-motif " + MM + " trouvé en " + str(k))
BS[i] = i - k + 1
break;
k = k - 1
i = i - 1
nb_comp = 0 # total number of comparisons
i = 0
while (i <= (n-m)):
print("Position : " + str(i))
    j = m - 1 # start from the end of the pattern
    while (j >= 0) and (T[i+j] == M[j]): # keep going as long as the characters match
nb_comp += 1
j = j - 1
if (j >= 0):
nb_comp += 1
i = i + np.max([BS[j], MC[T[i+j]] + j - m + 1])
else:
print("motif trouvé en " + str(i))
i = i + BS[0]
print(MC)
print(BS)
print("Nombre total de comparaisons : " + str(nb_comp)) | 5
Position : 0
Position : 1
{'A': 5, 'C': 6, 'B': 1, 'E': 4}
[5, 5, 5, 5, 5, 1]
Nombre total de comparaisons : 4
| CC0-1.0 | bloc5/RechercheTextuelle/Recherche_textuelle.ipynb | frederic-junier/DIU-Junier |
Setup Data Fetching | import pandas as pd
import tensortrade.env.default as default
from tensortrade.data.cdd import CryptoDataDownload
from tensortrade.feed.core import Stream, DataFeed
from tensortrade.oms.exchanges import Exchange
from tensortrade.oms.services.execution.simulated import execute_order
from tensortrade.oms.instruments import USD, BTC, ETH
from tensortrade.oms.wallets import Wallet, Portfolio
from tensortrade.agents import DQNAgent
from ta import add_all_ta_features
# gather data
def get_feed(n_events=None):
cdd = CryptoDataDownload()
data = cdd.fetch("Bitstamp", "USD", "BTC", "1h")
data = add_all_ta_features(data, 'open', 'high', 'low', 'close', 'volume')
if n_events is not None:
data = data.iloc[n_events:]
print(len(data))
features = []
for c in data.columns[2:]:
s = Stream.source(list(data[c]), dtype="float").rename(data[c].name)
features += [s]
feed = DataFeed(features)
feed.compile()
return data, feed
data, feed = get_feed()
# Create environment
def create_env(config=None):
bitstamp = Exchange("bitstamp", service=execute_order)(
Stream.source(list(data["close"]), dtype="float").rename("USD-BTC")
)
portfolio = Portfolio(USD, [
Wallet(bitstamp, 10000 * USD),
Wallet(bitstamp, 10 * BTC)
])
renderer_feed = DataFeed([
Stream.source(list(data["date"])).rename("date"),
Stream.source(list(data["open"]), dtype="float").rename("open"),
Stream.source(list(data["high"]), dtype="float").rename("high"),
Stream.source(list(data["low"]), dtype="float").rename("low"),
Stream.source(list(data["close"]), dtype="float").rename("close"),
Stream.source(list(data["volume"]), dtype="float").rename("volume")
])
env = default.create(
portfolio=portfolio,
action_scheme="simple",
reward_scheme="risk-adjusted",
feed=feed,
renderer_feed=renderer_feed,
renderer=default.renderers.FileLogger(),
window_size=20
)
return env
env = create_env() | _____no_output_____ | Apache-2.0 | examples/train_and_evaluate_save_restore.ipynb | 8ball030/tensortrade |
Setup and Train DQN Agent | %load_ext autoreload
%autoreload 2
# create agent
def get_agent(env, agent_id=None):
agent = DQNAgent(env)
    if agent_id is not None:
        agent.id = agent_id
return agent
agent = get_agent(env=env, agent_id="TEST_AGENT")
# train the agent
mean_reward = agent.train(n_steps=len(data) // 100,
n_episodes=1,
save_every=1
)
agent.save("./")
print(mean_reward)
# remove the agent
del agent
# we restore the agent
agent = get_agent(env=env, agent_id="TEST_AGENT")
agent.restore("./policy_network__TEST_AGENT.hdf5")
# now we have restored our agent, we can save our model
agent.save("./")
# we reset the environment
initial_state = agent.env.reset()
initial_state
# predict our next action
agent.get_action(state=initial_state)
env.action_space | _____no_output_____ | Apache-2.0 | examples/train_and_evaluate_save_restore.ipynb | 8ball030/tensortrade |
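With the agent restored, one way to roll the policy forward is a plain interaction loop. This is a hedged sketch (not part of the original notebook) that assumes the TensorTrade environment created above exposes the usual gym-style `reset()`/`step()` interface returning (observation, reward, done, info).

```python
# Illustrative rollout of the restored policy (assumes a gym-style step() API).
state = env.reset()
done = False
total_reward = 0.0
while not done:
    action = agent.get_action(state)               # action from the restored policy network
    state, reward, done, info = env.step(action)   # advance the simulated exchange one step
    total_reward += reward
print("Episode finished with cumulative reward:", total_reward)
```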
_____no_output_____ | MIT | VariantesMexico/SARS.ipynb | JManuelRG/sarscov2IPN |
Coin Price display. Code cribbed from [this notebook](http://nbviewer.ipython.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb) by [Thomas Wiecki](https://github.com/twiecki). | COIN = ['bitcoin','eth','doge'] # Display names are stored in notebook metadata
import requests
try:
headers = {
'X-Mboum-Secret': "demo"
}
res = requests.get(
f"https://mboum.com/api/v1/cr/crypto/coin/quote?key={COIN}",
headers=headers
)
data = res.json()['data']
for key in data:
print(key,"\t", data[key])
except Exception as e:
print(e) | _____no_output_____ | MIT | examples/Coin Price.ipynb | sam2332/nbparameterise |
Bug Helper
> A friendly bug interceptor | # default_exp bug
# export
from IPython.core.ultratb import AutoFormattedTB
from traceback import format_exc
from datetime import datetime
from forgebox.html import list_group, list_group_kv, HTML
import html
import json
import base64
from jinja2 import Template
from unpackai.utils import STATIC
import logging
from inspect import isfunction
from typing import Union, Callable, Dict, Any
# export
try:
ishell = get_ipython()
except NameError as e:
from IPython.testing.globalipapp import get_ipython
ishell = get_ipython() | _____no_output_____ | MIT | nbs/12_bug.ipynb | vtecftwy/unpackai |
BugBook
> Collects the known bugs | # export
class BugBook(dict):
"""
A collection of bugs, and how to handle them
"""
def __init__(self, **kwargs):
self.rules = dict(kwargs)
def __repr__(self): return "Bug Book"
def __getitem__(
self, key
) -> Dict[str, Any]:
if isfunction(key):
return self.rules[key.__name__]
        return self.rules[str(key)]
def __setitem__(self,
key: Union[str, Callable],
value: Union[str, Callable]
) -> None:
if type(key) == str:
self.rules[key] = {"key": key,
"value": value,
"keytype": "string"}
elif isfunction(key):
self.rules[key.__name__] = {"key": key,
"value": value,
"keytype": "function"}
else:
self.rules[str(key)] = {"key": key, "value": value,
"keytype": "unknown"}
return
def __call__(self, etype, evalue, tb):
custom = None
type_name = etype.__name__
for d in self.rules.values():
if d["keytype"] == "function":
if d['key'](etype, evalue, tb):
custom = d["value"]
break
if custom is None:
if type_name in self.rules:
custom = self.rules[type_name]["value"]
if custom is None:
return None
else:
if type(custom) == str:
return custom
elif isfunction(custom):
return custom(etype, evalue, tb)
else:
logging.error(
f"{type(custom)} is not a valid type for bugbook")
return None | _____no_output_____ | MIT | nbs/12_bug.ipynb | vtecftwy/unpackai |
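A minimal usage sketch of `BugBook` (added for illustration): rules can be registered under an exception name, and calling the book with an exception triple returns the custom message, if any.

```python
# Illustrative BugBook usage (not part of the original notebook).
import sys

book = BugBook()
book["ZeroDivisionError"] = "You divided by zero - check the denominator."

try:
    1 / 0
except ZeroDivisionError:
    etype, evalue, tb = sys.exc_info()
    print(book(etype, evalue, tb))  # -> the custom message registered above
```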
Filter Error Rules | # export
# functions that we can use as value of the rule
def module_not_found_message1(etype, evalue, tb):
libname = str(evalue).replace("No module named ", "")[1:-1]
return f'Library "{libname}" not installed, run a cell like "pip install -q {libname}"'
def module_not_found_message2(etype, evalue, tb):
libname = str(evalue).replace("No module named ", "")[1:-1]
return f'''
Are you sure the library name <strong>{libname}</strong> is correct? <br>
If so run "pip install -q {libname}" to install again📦 <br><br>
Or ⏯ re-run the cell contains "pip install ..."
'''
# functions that we can use as key of the fule
def module_not_found_error_filter(etype, evalue, tb):
if etype.__name__ == "ModuleNotFoundError":
libname = str(evalue).replace("No module named ", "")[1:-1]
if libname in ["fastai", "unpackai", "transformers","test_filter"]:
return True
return False | _____no_output_____ | MIT | nbs/12_bug.ipynb | vtecftwy/unpackai |
Assign filter to configuration | # export
BUGBOOK = BugBook()
BUGBOOK["ImportError"] = "Make sure all the libraries are installed for the required version🦮🐩"
BUGBOOK["SyntaxError"] ="""
<h5>There is a <strong>grammatical</strong> error in your python code</h5>
<p>Please check the following</p>
<p>Every '(' or '[' or '{' or '"' or "'" is properly closed</p>
<p>':' should be followed by a new line with 1 extra <strong>indent</strong> (4 spaces)</p>
<p>For other grammatical errors, please check the traceback below for clues, usually <strong>near the ^ mark</strong></p>
"""
BUGBOOK["ModuleNotFoundError"] = module_not_found_message2
BUGBOOK[module_not_found_error_filter] = module_not_found_message1
# export
itb = AutoFormattedTB(mode = 'Plain', tb_offset = 1)
def render_download_button(
bytes_data:bytes,
filename: str,
description: str="Download",
color:str = "default"):
"""
Loads data from buffer into base64 payload
embedded into a HTML button.
Recommended for small files only.
    bytes_data: bytes
        The raw bytes to embed as the base64 download payload.
filename: str
The name when it is downloaded.
description: str
The text that goes into the button.
"""
payload = base64.b64encode(bytes_data).decode()
with open(STATIC/"html"/"download_button.html","r") as f:
temp = Template(f.read())
download_button = temp.render(
filename=filename,
payload=payload,
color=color,
description=description)
return download_button
def custom_exc(shell, etype, evalue, tb, tb_offset=None, ):
"""
    A custom exception handler
    that we can assign to the ishell kernel.
    Arguments follow the format of the default exception handler.
"""
# gathering data on this error
# the colorful traceback
stb = itb.structured_traceback(etype, evalue, tb)
sstb = itb.stb2text(stb)
# the plain string of traceback
traceback_string = format_exc()
# input_history, sanitized(escape) for html
input_history = list(html.escape(i)
for i in ishell.history_manager.input_hist_parsed[-20:])
# now time stamp
now_full = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
now = datetime.now().strftime("%m%d_%H%M%S")
error_data = {
"error_type_name": etype.__name__,
"error_value":str(evalue),
"traceback_string":html.escape(traceback_string),
"timestamp":now_full,
"input_history":input_history,
}
# custom made error text
msg = BUGBOOK(etype, evalue, tb)
if msg is not None:
error_data.update({"msg":msg})
error_data = json.dumps(error_data, indent=2)
# create an error report in html format
# by rendering a jinja2 template with error_data
with open(STATIC/"html"/"bug"/"error_report.html","r") as f:
temp = Template(f.read())
error_report_page = temp.render(
data = json.dumps(
error_data,
))
# create a mini error panel
# a download button with embedded data
download_button = render_download_button(
error_report_page.encode(),
filename=f"npakai_{etype.__name__}_{now}.html",
description="🦋 Download Report",
color="success")
with open(STATIC/"html"/"bug"/"error_tiny_page.html", "r") as f:
temp2 = Template(f.read())
error_tiny_page = temp2.render(
download_button=download_button,
error_type_name=etype.__name__,
msg=msg,
error_value=str(evalue),
)
display(HTML(error_tiny_page))
print(sstb) | _____no_output_____ | MIT | nbs/12_bug.ipynb | vtecftwy/unpackai |
Assign our customized function | # export
ishell.set_custom_exc((Exception,), custom_exc) | _____no_output_____ | MIT | nbs/12_bug.ipynb | vtecftwy/unpackai |
Stack - Data Science Bootcamp: Machine Learning. | import pandas as pd
import datetime
import glob
from minio import Minio
import numpy as np
import matplotlib.pyplot as plt
client = Minio(
"localhost:9000",
access_key="minioadmin",
secret_key="minioadmin",
secure=False
) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Downloading the dataset from the Data Lake. | client.fget_object(
"processing",
"employees_dataset.parquet",
"temp_.parquet",
)
df = pd.read_parquet("temp_.parquet")
df.head() | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Organizing the dataset. | df = df[['department', 'salary', 'mean_work_last_3_months',
'number_projects', 'satisfaction_level', 'last_evaluation',
'time_in_company', 'work_accident','left']]
df.head() | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Checking for missing records. | df.isnull().sum()
df[df.notnull()]
df = df[:14998] | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Changing the data types. | df["number_projects"] = df["number_projects"].astype(int)
df["mean_work_last_3_months"] = df["mean_work_last_3_months"].astype(int)
df["time_in_company"] = df["time_in_company"].astype(int)
df["work_accident"] = df["work_accident"].astype(int)
df["left"] = df["left"].astype(int)
df.info()
df.head()
df = df[:14998] | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Renaming attributes | df = df.rename(columns={'satisfaction_level': 'satisfaction',
'last_evaluation': 'evaluation',
'number_projects': 'projectCount',
'mean_work_last_3_months': 'averageMonthlyHours',
'time_in_company': 'yearsAtCompany',
'work_accident': 'workAccident',
'left' : 'turnover'
})
df.head() | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Feature importance. Converting the attributes to categorical codes. | df["department"] = df["department"].astype('category').cat.codes
df["salary"] = df["salary"].astype('category').cat.codes
df.head() | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Splitting the data into features and target class. Ours is a `classification` problem. | # our target (class) attribute is turnover
target_name = 'turnover'
# X holds our features, without turnover
X = df.drop('turnover', axis=1)
y = df[target_name] | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Transformando os dados. | from sklearn.preprocessing import MinMaxScaler | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
MinMaxScaler: for each value in a feature, `MinMaxScaler` subtracts the feature's minimum value and then divides by the range. - The range is the difference between the original maximum and the original minimum. - MinMaxScaler preserves the shape of the original distribution, so it does not significantly change the information embedded in the original data. - Note that MinMaxScaler does not reduce the impact of outliers. - The default range for a feature returned by MinMaxScaler is 0 to 1. | scaler = MinMaxScaler()
# Apply the transformation
X = scaler.fit_transform(X)
X | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
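To make the scaling rule above concrete, here is a tiny worked example (an illustrative addition): for a feature with values [2, 4, 10], the range is 10 - 2 = 8, so each value becomes (x - 2) / 8.

```python
# Tiny illustrative min-max scaling example (not part of the original notebook).
toy = np.array([[2.0], [4.0], [10.0]])
print(MinMaxScaler().fit_transform(toy).ravel())  # -> [0.   0.25 1.  ], i.e. (x - 2) / (10 - 2)
```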
Splitting into training and test sets | from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X # the feature set
    ,y # the class
    ,test_size = 0.2 # 20% of the data will be used for testing
    ,random_state = 123 # random seed
    ,stratify = y # split the data while keeping the class proportions (class balancing)
) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
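Because of `stratify=y`, the turnover rate should be roughly the same in both splits; a quick illustrative check (added here):

```python
# Check that stratification preserved the turnover ratio (illustrative addition).
print("turnover rate - train: %.3f, test: %.3f" % (y_train.mean(), y_test.mean()))
```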
Training the decision tree algorithm. | from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree = dtree.fit(X_train,y_train)
importances = dtree.feature_importances_
feat_names = df.drop(['turnover'],axis=1).columns
indices = np.argsort(importances)[::-1]
plt.figure(figsize=(12,4))
plt.title("Feature importances by DecisionTreeClassifier")
plt.bar(range(len(indices)), importances[indices], color='lightblue', align="center")
plt.xticks(range(len(indices)), feat_names[indices], rotation='vertical',fontsize=14)
plt.xlim([-1, len(indices)])
plt.show() | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Keeping only the relevant attributes. | X = df[["satisfaction","evaluation","averageMonthlyHours","yearsAtCompany"]] | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Splitting the data sets. | scaler = MinMaxScaler()
X = scaler.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(
X
,y
,test_size = 0.2
,random_state = 123
,stratify = y
)
X_train | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Baseline model function. | def base_rate_model(X):
y = np.zeros(X.shape[0])
return y | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Importing evaluation metric methods. | from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
def accuracy_result(y_test,y_predict):
acc = accuracy_score(y_test, y_predict)
print ("Accuracy = %2.2f" % acc)
def roc_classification_report_results(model,y_test,y_predict):
roc_ = roc_auc_score(y_test, y_predict)
classfication_report = classification_report(y_test, y_predict)
print ("\n{} AUC = {}\n".format(model, roc_))
print(classfication_report) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Analysis of the baseline model | y_predict = base_rate_model(X_test)
accuracy_result(y_test, y_predict)
roc_classification_report_results("Base Model", y_test, y_predict) |
Base Model AUC = 0.5
precision recall f1-score support
0 0.76 1.00 0.86 2286
1 0.00 0.00 0.00 714
accuracy 0.76 3000
macro avg 0.38 0.50 0.43 3000
weighted avg 0.58 0.76 0.66 3000
| MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Logistic regression model. Instantiating the algorithm. | from sklearn.linear_model import LogisticRegression
logis = LogisticRegression() | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Running the training. | logis.fit(X_train, y_train) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Computing the predictions. | y_predict = logis.predict(X_test) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Evaluating the result. | accuracy_result(y_test, y_predict)
roc_classification_report_results("Logistic Regression", y_test, y_predict) |
Logistic Regression AUC = 0.5897884088018409
precision recall f1-score support
0 0.80 0.92 0.85 2286
1 0.50 0.26 0.34 714
accuracy 0.76 3000
macro avg 0.65 0.59 0.60 3000
weighted avg 0.73 0.76 0.73 3000
| MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Decision tree model. Instantiating the algorithm. | from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier() | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Running the training. | dtree = dtree.fit(X_train,y_train) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Computing the predictions. | y_predict = dtree.predict(X_test) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Evaluating the result. | accuracy_result(y_test, y_predict)
roc_classification_report_results("Decision Tree", y_test, y_predict) |
Decision Tree AUC = 0.9462622319268915
precision recall f1-score support
0 0.98 0.97 0.97 2286
1 0.90 0.93 0.91 714
accuracy 0.96 3000
macro avg 0.94 0.95 0.94 3000
weighted avg 0.96 0.96 0.96 3000
| MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Random Forest model. Instantiating the algorithm. | from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier() | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Running the training. | rf = rf.fit(X_train,y_train) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Computing the predictions. | y_predict = rf.predict(X_test) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Evaluating the result. | accuracy_result(y_test, y_predict)
roc_classification_report_results("Random Forest", y_test, y_predict) |
Random Forest AUC = 0.9535664659564612
precision recall f1-score support
0 0.98 0.99 0.98 2286
1 0.95 0.92 0.94 714
accuracy 0.97 3000
macro avg 0.96 0.95 0.96 3000
weighted avg 0.97 0.97 0.97 3000
| MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Pycaret | #pip install pycaret | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Importing the methods. | from pycaret.classification import * | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Defining the setup. | s = setup( df[["satisfaction","evaluation","averageMonthlyHours","yearsAtCompany","turnover"]]
,target = "turnover"
,numeric_features = ["yearsAtCompany"]
,normalize = True
,normalize_method = "minmax"
,data_split_stratify = True
,fix_imbalance = True,
) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Comparing different models. | best = compare_models(fold = 5,sort = 'AUC',) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Creating the model. | gbc = create_model('gbc', fold = 5) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Tuning the model. | tuned_gbc = tune_model(gbc
,fold = 5
,custom_grid = {"learning_rate":[0.1,0.2,0.5]
,"n_estimators":[100,500,1000]
,"min_samples_split":[1,2,5,10]
,"max_depth":[1,3,9]
}
,optimize = 'AUC') | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Finalizing the model. | final_model = finalize_model(tuned_gbc)
save_model(final_model,'model') | Transformation Pipeline and Model Successfully Saved
| MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
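For completeness, here is a hedged sketch of how the saved pipeline could be loaded back and used for scoring, assuming PyCaret's `load_model`/`predict_model` API; this step is not in the original notebook, which instead ships `model.pkl` to the Data Lake below.

```python
# Illustrative reload-and-score step (assumes PyCaret's load_model / predict_model).
from pycaret.classification import load_model, predict_model

reloaded = load_model('model')  # reads the model.pkl saved above
scored = predict_model(reloaded,
                       data=df[["satisfaction", "evaluation",
                                "averageMonthlyHours", "yearsAtCompany"]].head())
print(scored.head())
```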
Transferring the files to the Data Lake. Classification model. | client.fput_object(
"curated",
"model.pkl",
"model.pkl"
) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Exporting the dataset to disk. | df.to_csv("dataset.csv",index=False)
client.fput_object(
"curated",
"dataset.csv",
"dataset.csv"
) | _____no_output_____ | MIT | notebooks/machine_learning_deploy.ipynb | MieleSantos/bootcamp_ds |
Impact Craters | TestLat = np.array([0,45,60,-45,-60])
TestLon = np.array([0,-45,-90,45,90])
LatD,LatM,LonD,LonM = np.loadtxt('World.csv', delimiter=',', usecols=(2,3,4,5), unpack=True)
ImY,ImX = Topo.shape[0:-1]
MapY = -ImY/180.0 * LatD + ImY/2.0
MapX = (ImX/360.0 * LonD) + ImX/2.0
#mask1 = np.where(MapX > ImX)
#MapX[mask1] -= ImX
#fig = plt.figure()
#ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
fig, ax = plt.subplots(1,1)
fig.set_size_inches(10,5)
fig.tight_layout()
ax.autoscale_view('tight')
ax.set_axis_off()
ax.scatter(MapX,MapY,marker='o',s=30,color='y', edgecolor='k')
ax.imshow(Topo)
fig.savefig('EarthCraters_RAW.png', dpi=300, bbox_inches='tight',pad_inches=0) | _____no_output_____ | MIT | PlanetMaps_Earth.ipynb | tobyrsmith/BasemapPlots |
Copyright 2019 The TensorFlow Authors. | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
How to train Boosted Trees models in TensorFlow. This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models. Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic dataset: You will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc. | from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from IPython.display import clear_output
from matplotlib import pyplot as plt
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
try:
!pip install tf-nightly-2.0-preview
except Exception:
pass
import tensorflow as tf
tf.random.set_seed(123) | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
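The introduction above describes a Boosted Trees model as an ensemble that combines the predictions of many trees. As a rough illustration of that idea (an added sketch, assuming scikit-learn is available; it is far simpler than what `BoostedTreesClassifier` actually does), the snippet below fits shallow regression trees to the residuals of the running prediction and sums their outputs:

```python
# Illustrative gradient-boosting sketch with squared loss (not the estimator's algorithm).
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X_toy = rng.uniform(-3, 3, size=(200, 1))
y_toy = np.sin(X_toy[:, 0]) + rng.normal(scale=0.1, size=200)

learning_rate = 0.3
prediction = np.zeros_like(y_toy)
for _ in range(50):
    residual = y_toy - prediction                       # negative gradient of squared loss
    tree = DecisionTreeRegressor(max_depth=2).fit(X_toy, residual)
    prediction += learning_rate * tree.predict(X_toy)   # ensemble = sum of scaled tree outputs

print('Training MSE of the toy ensemble:', np.mean((y_toy - prediction) ** 2))
```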
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`. For training you will use the following features: Feature Name Description sex Gender of passenger age Age of passenger n_siblings_spouses # of siblings and partners aboard parch # of parents and children aboard fare Fare passenger paid. class Passenger's class on ship deck Which deck passenger was on embark_town Which town passenger embarked from alone If passenger was alone Explore the data Let's first preview some of the data and create summary statistics on the training set. | dftrain.head()
dftrain.describe() | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
There are 627 and 264 examples in the training and evaluation sets, respectively. | dftrain.shape[0], dfeval.shape[0] | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
The majority of passengers are in their 20's and 30's. | dftrain.age.hist(bins=20)
plt.show() | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
There are approximately twice as male passengers as female passengers aboard. | dftrain.sex.value_counts().plot(kind='barh')
plt.show() | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
The majority of passengers were in the "third" class. | dftrain['class'].value_counts().plot(kind='barh')
plt.show() | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
Most passengers embarked from Southampton. | dftrain['embark_town'].value_counts().plot(kind='barh')
plt.show() | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model. | pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive')
plt.show() | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)): | fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32)) | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example: | example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy()) | Feature value: "Third"
One-hot encoded: [[ 0. 0. 1.]]
| Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
Additionally, you can view all of the feature column transformations together: | tf.keras.layers.DenseFeatures(feature_columns)(example).numpy() | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory. | # Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle thru dataset as many times as need (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1) | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters.2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with simpler model to establish a benchmark. | linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result)) | accuracy 0.765152
accuracy_baseline 0.625000
auc 0.832844
auc_precision_recall 0.789631
average_loss 0.478908
global_step 100.000000
label/mean 0.375000
loss 0.478908
precision 0.703297
prediction/mean 0.350790
recall 0.646465
dtype: float64
| Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`. | # Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result)) | accuracy 0.829545
accuracy_baseline 0.625000
auc 0.872788
auc_precision_recall 0.857807
average_loss 0.411839
global_step 100.000000
label/mean 0.375000
loss 0.411839
precision 0.793478
prediction/mean 0.381942
recall 0.737374
dtype: float64
| Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
Now you can use the train model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` is defined using the entire evaluation set. | pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
plt.show() | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate. | from sklearn.metrics import roc_curve
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show() | _____no_output_____ | Apache-2.0 | site/en/r2/tutorials/estimators/boosted_trees.ipynb | NexusXi/docs |
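If you also want the scalar area under this curve, to compare against the `auc` value printed by the estimator above, you can integrate the curve you just computed (a small added example):

```python
# Scalar AUC from the ROC curve computed above (illustrative addition).
from sklearn.metrics import auc
print('AUC from the ROC curve:', auc(fpr, tpr))
```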
"data/nine_dreams/ninedreams.txt" IS REQUIRED SPECIFY FILE ENCODING TYOE IN PYTHON | # -*- coding: utf-8 -*-
print ("UTF-8 ENCODING") | UTF-8 ENCODING
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
LOAD PACKAGES | import chardet # https://github.com/chardet/chardet
import glob
import codecs
import sys
import os
from TextLoader import *
from Hangulpy3 import *
print ("PACKAGES LOADED") | PACKAGES LOADED
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
CONVERT UTF8-ENCODED TXT FILE | def conv_file(fromfile, tofile):
with open(fromfile, "rb") as f:
sample_text=f.read(10240)
pred = chardet.detect(sample_text)
if not pred['encoding'] in ('EUC-KR', 'UTF-8', 'CP949', 'UTF-16LE'):
print ("WARNING! Unknown encoding! : %s = %s") % (fromfile, pred['encoding'])
pred['encoding'] = "CP949" # 못찾으면 기본이 CP949
formfile = fromfile + ".unknown"
elif pred['confidence'] < 0.9:
print ("WARNING! Unsured encofing! : %s = %s / %s")
% (fromfile, pred['confidence'], pred['encoding'])
formfile = fromfile + ".notsure"
with codecs.open(fromfile, "r", encoding=pred['encoding'], errors="ignore") as f:
with codecs.open(tofile, "w+", encoding="utf8") as t:
all_text = f.read()
t.write(all_text) | _____no_output_____ | MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
"data/nine_dreams/ninedreams_utf8.txt" IS GENERATED | # SOURCE TXT FILE
fromfile = "data/nine_dreams/ninedreams.txt"
# TARGET TXT FILE
tofile = "data/nine_dreams/ninedreams_utf8.txt"
conv_file(fromfile, tofile)
print ("UTF8-CONVERTING DONE")
print (" [%s] IS GENERATED" % (tofile)) | UTF8-CONVERTING DONE
[data/nine_dreams/ninedreams_utf8.txt] IS GENERATED
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
DECOMPOSE HANGUL (THIS PART IS IMPORTANT!) | def dump_file(filename):
result=u"" # <= UNICODE STRING
with codecs.open(filename,"r", encoding="UTF8") as f:
for line in f.readlines():
line = tuple(line)
result = result + decompose_text(line)
return result
print ("FUNCTION READY") | FUNCTION READY
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
PYTHON 2 AND 3 COMPATIBILITY | if sys.version_info.major == 2:
parsed_txt = dump_file(tofile).encode("utf8")
else:
parsed_txt = dump_file(tofile)
print ("Parsing %s done" % (tofile))
# PRINT FIRST 100 CHARACTERS
print (parsed_txt[:100]) | Parsing data/nine_dreams/ninedreams_utf8.txt done
ㅎㅏㄴᴥㄱㅜㄱᴥ ㄱㅜㄱᴥㅁㅜㄴᴥㅎㅏㄱᴥㅅㅏᴥㅅㅏㅇᴥ ㅇㅕㅇᴥ�
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
"data/nine_dreams/input.txt" IS GENERATED | with open("data/nine_dreams/input.txt", "w") as text_file:
text_file.write(parsed_txt)
print ("Saved to a txt file")
print (text_file) | Saved to a txt file
<closed file 'data/nine_dreams/input.txt', mode 'w' at 0x7f62ae9a58a0>
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
COMPOSE HANGUL CHARACTER FROM PHONEME | data=[u'\u3147', u'\u3157', u'\u1d25', u'\u3134', u'\u3161', u'\u3139', u'\u1d25'
, u' ', u'\u314f', u'\u3147', u'\u3145', u'\u314f', u'\u1d25', u'\u1d25'
, u'\u3163', u'\u1d25', u' ', u'\u3147', u'\u1d25', u'\u3155', u'\u1d25'
, u'\u3134', u'\u314f', u'\u1d25', u'\u3155', u'\u3147', u'\u1d25'
, u'\u315b', u'\u3131', u'\u1d25', u'\u3147', u'\u3139', u'\u3146'
, u'\u1d25', u'\u3137', u'\u314f', u'\u314e', u'\u3139', u'\u1d25'
, u'\u3134', u'\u1d25', u'\u3145', u'\u3163', u'\u1d25', u'\u1d25'
, u'\u314f', u'\u1d25', u'\u314e', u'\u314f', u'\u3147', u'\u3131'
, u'\u3157', u'\u3134', u'\u1d25', u'\u1d25', u'\u315b', u'\u1d25'
, u'\u3148', u'\u3153', u'\u3136', u'\u1d25', u' ', u'\u3145', u'\u3150'
, u'\u3141', u'\u3136', u'\u3161', u'\u3134', u'\u3163', u'\u1d25', u'.'
, u'\u3148', u'\u3153', u'\u3134', u'\u314e', u'\u3153', u'\u1d25', u'\u1d25'
, u'\u3147', u'\u314f', u'\u3134', u'\u3148', u'\u314f', u'\u3139', u'\u315d'
, u'\u314c', u'\u1d25', u'\u3161', u'\u3134', u'\u3148', u'\u3163', u'\u313a'
, u'\u1d25', u' ', u'\u3147', u'\u3161', u'\u3146', u'\u1d25', u'?', u'\u3134'
, u'\u1d25', u'\u314e', u'\u3163', u'\u1d25', u'\u3147', u'\u3148', u'\u314f'
]
print automata("".join(data))
| 오늘 ㅏㅇ사ㅣ ㅇㅕ나ㅕㅇㅛㄱㅇㄹㅆ닿ㄹㄴ시ㅏ항곤ㅛ젆 샘ㄶㅡ니.젆ㅓ앉ㅏ뤝ㅡㄴ짉 읐?ㄴ히ㅇ
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
GENERATE "vocab.pkl" and "data.npy" in "data/nine_dreams/" FROM "data/nine_dreams/input.txt" | data_dir = "data/nine_dreams"
batch_size = 50
seq_length = 50
data_loader = TextLoader(data_dir, batch_size, seq_length) | loading preprocessed files
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
DATA_LOADER IS: | print ( "type of 'data_loader' is %s, length is %d"
% (type(data_loader.vocab), len(data_loader.vocab)) ) | type of 'data_loader' is <type 'dict'>, length is 76
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
DATA_LOADER.VOCAB IS: | print ("data_loader.vocab looks like \n%s " % (data_loader.vocab,)) | data_loader.vocab looks like
{u'_': 69, u'6': 59, u':': 57, u'\n': 19, u'4': 67, u'5': 63, u'>': 75, u'!': 52, u' ': 1, u'"': 28, u'\u1d25': 0, u"'": 49, u')': 46, u'(': 45, u'-': 65, u',': 27, u'.': 24, u'\u3131': 7, u'0': 73, u'\u3133': 60, u'\u3132': 29, u'\u3135': 50, u'\u3134': 4, u'\u3137': 13, u'\u3136': 44, u'\u3139': 5, u'\u3138': 32, u'\u313b': 55, u'\u313a': 48, u'\u313c': 54, u'?': 41, u'3': 66, u'\u3141': 12, u'\u3140': 51, u'\u3143': 47, u'\u3142': 17, u'\u3145': 10, u'\u3144': 43, u'\u3147': 2, u'\u3146': 22, u'\u3149': 40, u'\u3148': 15, u'\u314b': 42, u'\u314a': 23, u'\u314d': 31, u'\u314c': 30, u'\u314f': 3, u'\u314e': 14, u'\u3151': 34, u'\u3150': 21, u'\u3153': 11, u'\u3152': 74, u'\u3155': 18, u'\u3154': 20, u'\u3157': 9, u'\u3156': 39, u'\u3159': 53, u'\u3158': 26, u'\u315b': 38, u'\u315a': 33, u'\u315d': 36, u'\u315c': 16, u'\u315f': 35, u'\u315e': 61, u'\u3161': 8, u'\u3160': 37, u'\u3163': 6, u'\u3162': 25, u'\x1a': 72, u'9': 64, u'7': 71, u'2': 62, u'1': 58, u'\u313f': 56, u'\u313e': 70, u'8': 68}
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
DATA_LOADER.CHARS IS: | print ( "type of 'data_loader.chars' is %s, length is %d"
% (type(data_loader.chars), len(data_loader.chars)) ) | type of 'data_loader.chars' is <type 'tuple'>, length is 76
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
CHARS CONVERTS INDEX -> CHAR | print ("data_loader.chars looks like \n%s " % (data_loader.chars,))
for i, char in enumerate(data_loader.chars):
# GET INDEX OF THE CHARACTER
idx = data_loader.vocab[char]
print ("[%02d] %03s (%02d)"
% (i, automata("".join(char)), idx)) | [00] (00)
[01] (01)
[02] (02)
[03] ㅏ (03)
[04] (04)
[05] (05)
[06] ㅣ (06)
[07] (07)
[08] ㅡ (08)
[09] ㅗ (09)
[10] (10)
[11] ㅓ (11)
[12] (12)
[13] (13)
[14] (14)
[15] (15)
[16] ㅜ (16)
[17] (17)
[18] ㅕ (18)
[19]
(19)
[20] ㅔ (20)
[21] ㅐ (21)
[22] (22)
[23] (23)
[24] . (24)
[25] ㅢ (25)
[26] ㅘ (26)
[27] , (27)
[28] " (28)
[29] (29)
[30] (30)
[31] (31)
[32] (32)
[33] ㅚ (33)
[34] ㅑ (34)
[35] ㅟ (35)
[36] ㅝ (36)
[37] ㅠ (37)
[38] ㅛ (38)
[39] ㅖ (39)
[40] (40)
[41] ? (41)
[42] (42)
[43] ㅄ (43)
[44] ㄶ (44)
[45] ( (45)
[46] ) (46)
[47] (47)
[48] ㄺ (48)
[49] ' (49)
[50] ㄵ (50)
[51] ㅀ (51)
[52] ! (52)
[53] ㅙ (53)
[54] ㄼ (54)
[55] ㄻ (55)
[56] ㄿ (56)
[57] : (57)
[58] 1 (58)
[59] 6 (59)
[60] ㄳ (60)
[61] ㅞ (61)
[62] 2 (62)
[63] 5 (63)
[64] 9 (64)
[65] - (65)
[66] 3 (66)
[67] 4 (67)
[68] 8 (68)
[69] _ (69)
[70] ㄾ (70)
[71] 7 (71)
[72] (72)
[73] 0 (73)
[74] ㅒ (74)
[75] > (75)
| MIT | notebooks/demo_Hangul.ipynb | Badissane/TensorFlow-101 |
Command line functions> Console commands added by the nbdev library | # default_exp cli
# export
from nbdev.imports import *
from nbdev.export import *
from nbdev.sync import *
from nbdev.merge import *
from nbdev.export2html import *
from nbdev.test import *
from fastscript import call_parse,Param,bool_arg | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |
`nbdev` comes with the following commands. To use any of them, you must be in one of the subfolders of your project: they will search for the `settings.ini` recursively in the parent directory but need to access it to be able to work. Their names all begin with nbdev so you can easily get a list with tab completion.- `nbdev_build_lib` builds the library from the notebooks- `nbdev_update_lib` propagates any change in the library back to the notebooks- `nbdev_diff_nbs` gives you the diff between the notebooks and the exported library- `nbdev_build_docs` builds the documentation from the notebooks- `nbdev_nb2md` to convert a notebook to a markdown file- `nbdev_clean_nbs` removes all superfluous metadata from the notebooks, to avoid merge conflicts- `nbdev_read_nbs` read all notebooks to make sure none are broken- `nbdev_trust_nbs` trust all notebooks (so that the HTML content is shown)- `nbdev_fix_merge` will fix merge conflicts in a notebook file- `nbdev_install_git_hooks` install the git hooks that use the last two commands automatically on each commit/merge. Navigating from notebooks to script and back | #export
@call_parse
def nbdev_build_lib(fname:Param("A notebook name or glob to convert", str)=None):
"Export notebooks matching `fname` to python modules"
write_tmpls()
notebook2script(fname=fname) | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |
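The commands above can also be driven from a notebook or a Python session through the helpers they wrap; `nbdev_build_lib`, for instance, simply calls `notebook2script`. A minimal sketch, assuming it is run from inside an nbdev project so `settings.ini` can be found, and with a purely illustrative notebook name:

from nbdev.export import notebook2script

notebook2script()                        # export every notebook, as `nbdev_build_lib` does by default
notebook2script(fname='00_core.ipynb')   # or restrict the export to a single notebook or glob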
By default (`fname` left to `None`), the whole library is built from the notebooks in the `lib_folder` set in your `settings.ini`. | #export
@call_parse
def nbdev_update_lib(fname:Param("A notebook name or glob to convert", str)=None):
"Propagates any change in the modules matching `fname` to the notebooks that created them"
script2notebook(fname=fname) | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |
By default (`fname` left to `None`), the whole library is treated. Note that this tool is only designed for small changes such as typos or small bug fixes. You can't add new cells to a notebook from the library. | #export
@call_parse
def nbdev_diff_nbs():
"Prints the diff between an export of the library in notebooks and the actual modules"
diff_nb_script() | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |
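For small edits made directly in the generated modules, the round trip can likewise be done from Python with the helpers the two commands above wrap (both are already available through the wildcard imports at the top of this notebook); a minimal sketch:

script2notebook()   # propagate module edits back to the notebooks, as `nbdev_update_lib` does
diff_nb_script()    # then verify that notebooks and exported modules agree, as `nbdev_diff_nbs` does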
Extracting tests | # export
def _test_one(fname, flags=None, verbose=True):
print(f"testing: {fname}")
start = time.time()
try:
test_nb(fname, flags=flags)
return True,time.time()-start
except Exception as e:
if "Kernel died before replying to kernel_info" in str(e):
time.sleep(random.random())
      return _test_one(fname, flags=flags)  # return the retry's result rather than discarding it
if verbose: print(f'Error in {fname}:\n{e}')
return False,time.time()-start
# export
@call_parse
def nbdev_test_nbs(fname:Param("A notebook name or glob to convert", str)=None,
flags:Param("Space separated list of flags", str)=None,
n_workers:Param("Number of workers to use", int)=None,
verbose:Param("Print errors along the way", bool)=True,
                   timing:Param("Time each notebook to see which ones are slow", bool)=False):
"Test in parallel the notebooks matching `fname`, passing along `flags`"
if flags is not None: flags = flags.split(' ')
if fname is None:
files = [f for f in Config().nbs_path.glob('*.ipynb') if not f.name.startswith('_')]
else: files = glob.glob(fname)
files = [Path(f).absolute() for f in sorted(files)]
if len(files)==1 and n_workers is None: n_workers=0
# make sure we are inside the notebook folder of the project
os.chdir(Config().nbs_path)
results = parallel(_test_one, files, flags=flags, verbose=verbose, n_workers=n_workers)
passed,times = [r[0] for r in results],[r[1] for r in results]
if all(passed): print("All tests are passing!")
else:
msg = "The following notebooks failed:\n"
raise Exception(msg + '\n'.join([f.name for p,f in zip(passed,files) if not p]))
if timing:
for i,t in sorted(enumerate(times), key=lambda o:o[1], reverse=True):
print(f"Notebook {files[i].name} took {int(t)} seconds") | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |
By default (`fname` left to `None`), the whole library is tested from the notebooks in the `lib_folder` set in your `settings.ini`. Building documentation The following functions complement the ones in `export2html` to fully build the documentation of your library. | #export
import time,random,warnings
#export
def _leaf(k,v):
url = 'external_url' if "http" in v else 'url'
#if url=='url': v=v+'.html'
return {'title':k, url:v, 'output':'web,pdf'}
#export
_k_names = ['folders', 'folderitems', 'subfolders', 'subfolderitems']
def _side_dict(title, data, level=0):
k_name = _k_names[level]
level += 1
res = [(_side_dict(k, v, level) if isinstance(v,dict) else _leaf(k,v))
for k,v in data.items()]
return ({k_name:res} if not title
else res if title.startswith('empty')
else {'title': title, 'output':'web', k_name: res})
#export
_re_catch_title = re.compile('^title\s*:\s*(\S+.*)$', re.MULTILINE)
#export
def _get_title(fname):
"Grabs the title of html file `fname`"
with open(fname, 'r') as f: code = f.read()
src = _re_catch_title.search(code)
return fname.stem if src is None else src.groups()[0]
#hide
test_eq(_get_title(Config().doc_path/'export.html'), "Export to modules")
#export
from nbdev.export2html import _nb2htmlfname
#export
def create_default_sidebar():
"Create the default sidebar for the docs website"
dic = {"Overview": "/"}
files = [f for f in Config().nbs_path.glob('*.ipynb') if not f.name.startswith('_')]
fnames = [_nb2htmlfname(f) for f in sorted(files)]
titles = [_get_title(f) for f in fnames if 'index' not in f.stem!='index']
if len(titles) > len(set(titles)): print(f"Warning: Some of your Notebooks use the same title ({titles}).")
dic.update({_get_title(f):f'/{f.stem}' for f in fnames if f.stem!='index'})
dic = {Config().lib_name: dic}
json.dump(dic, open(Config().doc_path/'sidebar.json', 'w'), indent=2) | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |
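As a rough illustration of the structure `create_default_sidebar` writes (and that you can also author by hand, as explained below), a custom `sidebar.json` might look like the following; the library and page names are hypothetical, and values containing `http` are turned into external links by `_leaf`:

import json

custom_sidebar = {
    "my_lib": {                                # hypothetical lib_name
        "Overview": "/",
        "Core": "/core",                       # pages are mapped as title -> /<notebook stem>
        "Project page": "https://example.com"  # http(s) values become external_url entries
    }
}
json.dump(custom_sidebar, open(Config().doc_path/'sidebar.json', 'w'), indent=2)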
The default sidebar lists all html pages with their respective title, except the index, which is named "Overview". To build a custom sidebar, set the flag `custom_sidebar` in your `settings.ini` to `True`, then change the `sidebar.json` file in the `doc_folder` to your liking. Otherwise, the sidebar is updated at each doc build. | #hide
#create_default_sidebar()
#export
def make_sidebar():
"Making sidebar for the doc website form the content of `doc_folder/sidebar.json`"
if not (Config().doc_path/'sidebar.json').exists() or Config().custom_sidebar == 'False': create_default_sidebar()
sidebar_d = json.load(open(Config().doc_path/'sidebar.json', 'r'))
res = _side_dict('Sidebar', sidebar_d)
res = {'entries': [res]}
res_s = yaml.dump(res, default_flow_style=False)
res_s = res_s.replace('- subfolders:', ' subfolders:').replace(' - - ', ' - ')
res_s = f"""
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# Instead edit {'../../sidebar.json'}
"""+res_s
open(Config().doc_path/'_data/sidebars/home_sidebar.yml', 'w').write(res_s)
# export
_re_index = re.compile(r'^(?:\d*_|)index\.ipynb$')
# export
def make_readme():
"Convert the index notebook to README.md"
index_fn = None
for f in Config().nbs_path.glob('*.ipynb'):
if _re_index.match(f.name): index_fn = f
assert index_fn is not None, "Could not locate index notebook"
print(f"converting {index_fn} to README.md")
convert_md(index_fn, Config().config_file.parent, jekyll=False)
n = Config().config_file.parent/index_fn.with_suffix('.md').name
shutil.move(n, Config().config_file.parent/'README.md')
# export
@call_parse
def nbdev_build_docs(fname:Param("A notebook name or glob to convert", str)=None,
force_all:Param("Rebuild even notebooks that haven't changed", bool)=False,
mk_readme:Param("Also convert the index notebook to README", bool)=True,
n_workers:Param("Number of workers to use", int)=None):
"Build the documentation by converting notebooks mathing `fname` to html"
notebook2html(fname=fname, force_all=force_all, n_workers=n_workers)
if fname is None: make_sidebar()
if mk_readme: make_readme() | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |
By default (`fname` left to `None`), the whole documentation is built from the notebooks in the `lib_folder` set in your `settings.ini`, only converting the ones that have been modified since their corresponding html was last touched unless you pass `force_all=True`. The index is also converted to make the README file, unless you pass along `mk_readme=False`. | # export
@call_parse
def nbdev_nb2md(fname:Param("A notebook file name to convert", str),
dest:Param("The destination folder", str)='.',
img_path:Param("Folder to export images to")="",
jekyll:Param("To use jekyll metadata for your markdown file or not", bool_arg)=False,):
"Convert the notebook in `fname` to a markdown file"
nb_detach_cells(fname, dest=img_path)
convert_md(fname, dest, jekyll=jekyll, img_path=img_path)
# export
@call_parse
def nbdev_detach(path_nb:Param("Path to notebook"),
dest:Param("Destination folder", str)=""):
"Export cell attachments to `dest` and update references"
nb_detach_cells(path_nb, dest=dest) | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |
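As with the other commands, the documentation build can be driven from Python via the helpers wrapped above (`notebook2html` and `convert_md`, both available through the imports at the top of this notebook); a minimal sketch with illustrative file and folder names:

notebook2html(force_all=True)                      # rebuild every page, not only the modified notebooks
convert_md('00_core.ipynb', 'docs', jekyll=False)  # convert one notebook to markdown (the conversion step of `nbdev_nb2md`)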
Other utils | # export
@call_parse
def nbdev_read_nbs(fname:Param("A notebook name or glob to convert", str)=None):
"Check all notebooks matching `fname` can be opened"
files = Config().nbs_path.glob('**/*.ipynb') if fname is None else glob.glob(fname)
for nb in files:
try: _ = read_nb(nb)
except Exception as e:
print(f"{nb} is corrupted and can't be opened.")
raise e | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |
By default (`fname` left to `None`), all the notebooks in `lib_folder` are checked. | # export
@call_parse
def nbdev_trust_nbs(fname:Param("A notebook name or glob to convert", str)=None,
force_all:Param("Trust even notebooks that haven't changed", bool)=False):
"Trust noteboks matching `fname`"
check_fname = Config().nbs_path/".last_checked"
last_checked = os.path.getmtime(check_fname) if check_fname.exists() else None
files = Config().nbs_path.glob('**/*.ipynb') if fname is None else glob.glob(fname)
for fn in files:
if last_checked and not force_all:
last_changed = os.path.getmtime(fn)
if last_changed < last_checked: continue
nb = read_nb(fn)
if not NotebookNotary().check_signature(nb): NotebookNotary().sign(nb)
check_fname.touch(exist_ok=True) | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |
By default (`fname` left to `None`), all the notebooks in `lib_folder` are trusted. To speed things up, only the ones touched since the last time this command was run are trusted unless you pass along `force_all=True`. | # export
@call_parse
def nbdev_fix_merge(fname:Param("A notebook filename to fix", str),
fast:Param("Fast fix: automatically fix the merge conflicts in outputs or metadata", bool)=True,
                     trust_us:Param("Use local outputs/metadata when fast merging", bool)=True):
"Fix merge conflicts in notebook `fname`"
fix_conflicts(fname, fast=fast, trust_us=trust_us) | _____no_output_____ | Apache-2.0 | nbs/06_cli.ipynb | maarten990/nbdev |