File and network functions
#export
#NB: Please don't move this to a different line or module, since it's used in testing `get_source_link`
@patch
def ls(self:Path, file_type=None, file_exts=None):
    "Contents of path as a list"
    extns=L(file_exts)
    if file_type: extns += L(k for k,v in mimetypes.types_map.items() if v.startswith(file_type))
    return L(self.iterdir()).filtered(lambda x: len(extns)==0 or x.suffix in extns)
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
We add an `ls()` method to `pathlib.Path` which is simply defined as `list(Path.iterdir())`, mainly for convenience in REPL environments such as notebooks.
path = Path()
t = path.ls()
assert len(t)>0
t[0]
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
You can also pass an optional `file_type` MIME prefix and/or a list of file extensions.
txt_files=path.ls(file_type='text') assert len(txt_files) > 0 and txt_files[0].suffix=='.py' ipy_files=path.ls(file_exts=['.ipynb']) assert len(ipy_files) > 0 and ipy_files[0].suffix=='.ipynb' txt_files[0],ipy_files[0] #hide pkl = pickle.dumps(path) p2 =pickle.loads(pkl) test_eq(path.ls()[0], p2.ls()[0]) def bunzip(fn): "bunzip `fn`, raising exception if output already exists" fn = Path(fn) assert fn.exists(), f"{fn} doesn't exist" out_fn = fn.with_suffix('') assert not out_fn.exists(), f"{out_fn} already exists" with bz2.BZ2File(fn, 'rb') as src, out_fn.open('wb') as dst: for d in iter(lambda: src.read(1024*1024), b''): dst.write(d) f = Path('files/test.txt') if f.exists(): f.unlink() bunzip('files/test.txt.bz2') t = f.open().readlines() test_eq(len(t),1) test_eq(t[0], 'test\n') f.unlink()
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
Tensor functions
#export def apply(func, x, *args, **kwargs): "Apply `func` recursively to `x`, passing on args" if is_listy(x): return type(x)(apply(func, o, *args, **kwargs) for o in x) if isinstance(x,dict): return {k: apply(func, v, *args, **kwargs) for k,v in x.items()} return retain_type(func(x, *args, **kwargs), x) #export def to_detach(b, cpu=True): "Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`." def _inner(x, cpu=True): if not isinstance(x,Tensor): return x x = x.detach() return x.cpu() if cpu else x return apply(_inner, b, cpu=cpu) #export def to_half(b): "Recursively map lists of tensors in `b ` to FP16." return apply(lambda x: x.half() if torch.is_floating_point(x) else x, b) #export def to_float(b): "Recursively map lists of int tensors in `b ` to float." return apply(lambda x: x.float() if torch.is_floating_point(x) else x, b) #export # None: True if available; True: error if not availabe; False: use CPU defaults.use_cuda = None #export def default_device(use_cuda=-1): "Return or set default device; `use_cuda`: None - CUDA if available; True - error if not availabe; False - CPU" if use_cuda != -1: defaults.use_cuda=use_cuda use = defaults.use_cuda or (torch.cuda.is_available() and defaults.use_cuda is None) assert torch.cuda.is_available() or not use return torch.device(torch.cuda.current_device()) if use else torch.device('cpu') #cuda _td = torch.device(torch.cuda.current_device()) test_eq(default_device(None), _td) test_eq(default_device(True), _td) test_eq(default_device(False), torch.device('cpu')) default_device(None); #export def to_device(b, device=None): "Recursively put `b` on `device`." if device is None: device=default_device() def _inner(o): return o.to(device, non_blocking=True) if isinstance(o,Tensor) else o return apply(_inner, b) t = to_device((3,(tensor(3),tensor(2)))) t1,(t2,t3) = t test_eq_type(t,(3,(tensor(3).cuda(),tensor(2).cuda()))) test_eq(t2.type(), "torch.cuda.LongTensor") test_eq(t3.type(), "torch.cuda.LongTensor") #export def to_cpu(b): "Recursively map lists of tensors in `b ` to the cpu." return to_device(b,'cpu') t3 = to_cpu(t3) test_eq(t3.type(), "torch.LongTensor") test_eq(t3, 2) def to_np(x): "Convert a tensor to a numpy array." return x.data.cpu().numpy() t3 = to_np(t3) test_eq(type(t3), np.ndarray) test_eq(t3, 2) #export def item_find(x, idx=0): "Recursively takes the `idx`-th element of `x`" if is_listy(x): return item_find(x[idx]) if isinstance(x,dict): key = list(x.keys())[idx] if isinstance(idx, int) else idx return item_find(x[key]) return x #export def find_device(b): "Recursively search the device of `b`." return item_find(b).device dev = default_device() test_eq(find_device(t2), dev) test_eq(find_device([t2,t2]), dev) test_eq(find_device({'a':t2,'b':t2}), dev) test_eq(find_device({'a':[[t2],[t2]],'b':t2}), dev) #export def find_bs(b): "Recursively search the batch size of `b`." return item_find(b).shape[0] x = torch.randn(4,5) test_eq(find_bs(x), 4) test_eq(find_bs([x, x]), 4) test_eq(find_bs({'a':x,'b':x}), 4) test_eq(find_bs({'a':[[x],[x]],'b':x}), 4) def np_func(f): "Convert a function taking and returning numpy arrays to one taking and returning tensors" def _inner(*args, **kwargs): nargs = [to_np(arg) if isinstance(arg,Tensor) else arg for arg in args] return tensor(f(*nargs, **kwargs)) functools.update_wrapper(_inner, f) return _inner
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
This decorator is particularly useful for using numpy functions as fastai metrics, for instance:
from sklearn.metrics import f1_score @np_func def f1(inp,targ): return f1_score(targ, inp) a1,a2 = array([0,1,1]),array([1,0,1]) t = f1(tensor(a1),tensor(a2)) test_eq(f1_score(a1,a2), t) assert isinstance(t,Tensor) class Module(nn.Module, metaclass=PrePostInitMeta): "Same as `nn.Module`, but no need for subclasses to call `super().__init__`" def __pre_init__(self): super().__init__() def __init__(self): pass show_doc(Module, title_level=3) class _T(Module): def __init__(self): self.f = nn.Linear(1,1) def forward(self,x): return self.f(x) t = _T() t(tensor([1.]))
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
Sorting objects from before/after. Transforms and callbacks may have `run_after`/`run_before` attributes, and this function sorts them to respect those requirements (when possible). Sometimes we also want a transform/callback to run at the end while still being able to use the `run_after`/`run_before` behaviors; for those, the function checks for a `toward_end` attribute (which needs to be `True`).
#export def _is_instance(f, gs): tst = [g if type(g) in [type, 'function'] else g.__class__ for g in gs] for g in tst: if isinstance(f, g) or f==g: return True return False def _is_first(f, gs): for o in L(getattr(f, 'run_after', None)): if _is_instance(o, gs): return False for g in gs: if _is_instance(f, L(getattr(g, 'run_before', None))): return False return True def sort_by_run(fs): end = L(getattr(f, 'toward_end', False) for f in fs) inp,res = L(fs)[~end] + L(fs)[end], [] while len(inp) > 0: for i,o in enumerate(inp): if _is_first(o, inp): res.append(inp.pop(i)) break else: raise Exception("Impossible to sort") return res class Tst(): pass class Tst1(): run_before=[Tst] class Tst2(): run_before=Tst run_after=Tst1 tsts = [Tst(), Tst1(), Tst2()] test_eq(sort_by_run(tsts), [tsts[1], tsts[2], tsts[0]]) Tst2.run_before,Tst2.run_after = Tst1,Tst test_fail(lambda: sort_by_run([Tst(), Tst1(), Tst2()])) def tst1(x): return x tst1.run_before = Tst test_eq(sort_by_run([tsts[0], tst1]), [tst1, tsts[0]]) class Tst1(): toward_end=True class Tst2(): toward_end=True run_before=Tst1 tsts = [Tst(), Tst1(), Tst2()] test_eq(sort_by_run(tsts), [tsts[0], tsts[2], tsts[1]])
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
Other helpers
#export def round_multiple(x, mult, round_down=False): "Round `x` to nearest multiple of `mult`" def _f(x_): return (int if round_down else round)(x_/mult)*mult res = L(x).mapped(_f) return res if is_listy(x) else res[0] test_eq(round_multiple(63,32), 64) test_eq(round_multiple(50,32), 64) test_eq(round_multiple(40,32), 32) test_eq(round_multiple( 0,32), 0) test_eq(round_multiple(63,32, round_down=True), 32) test_eq(round_multiple((63,40),32), (64,32)) #export def num_cpus(): "Get number of cpus" try: return len(os.sched_getaffinity(0)) except AttributeError: return os.cpu_count() defaults.cpus = min(16, num_cpus()) #export def add_props(f, n=2): "Create properties passing each of `range(n)` to f" return (property(partial(f,i)) for i in range(n)) class _T(): a,b = add_props(lambda i,x:i*2) t = _T() test_eq(t.a,0) test_eq(t.b,2)
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
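The `_T` test above just doubles the index; as the next cell notes, the same trick is typically used to expose *train* and *valid* versions of the same property. Here is a minimal sketch of that pattern (the `_Data` class and its attributes are made up for illustration, not the actual `DataBunch`):

```python
# Hypothetical example of `add_props`: index 0 maps to the train items,
# index 1 to the valid items, so one definition yields two properties.
class _Data():
    def __init__(self, train_items, valid_items): self.items = [train_items, valid_items]
    train,valid = add_props(lambda i,self: self.items[i])

d = _Data([1,2,3], [4,5])
test_eq(d.train, [1,2,3])
test_eq(d.valid, [4,5])
```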
This is a quick way to generate, for instance, *train* and *valid* versions of a property. See the `DataBunch` definition for an example of this. Image helpers
#export def make_cross_image(bw=True): "Create a tensor containing a cross image, either `bw` (True) or color" if bw: im = torch.zeros(5,5) im[2,:] = 1. im[:,2] = 1. else: im = torch.zeros(3,5,5) im[0,2,:] = 1. im[1,:,2] = 1. return im plt.imshow(make_cross_image(), cmap="Greys"); plt.imshow(make_cross_image(False).permute(1,2,0)); #export def show_title(o, ax=None, ctx=None, label=None, **kwargs): "Set title of `ax` to `o`, or print `o` if `ax` is `None`" ax = ifnone(ax,ctx) if ax is None: print(o) elif hasattr(ax, 'set_title'): ax.set_title(o) elif isinstance(ax, pd.Series): while label in ax: label += '_' ax = ax.append(pd.Series({label: o})) return ax test_stdout(lambda: show_title("title"), "title") # ensure that col names are unique when showing to a pandas series assert show_title("title", ctx=pd.Series(dict(a=1)), label='a').equals(pd.Series(dict(a=1,a_='title'))) #export def show_image(im, ax=None, figsize=None, title=None, ctx=None, **kwargs): "Show a PIL or PyTorch image on `ax`." ax = ifnone(ax,ctx) if ax is None: _,ax = plt.subplots(figsize=figsize) # Handle pytorch axis order if isinstance(im,Tensor): im = to_cpu(im) if im.shape[0]<5: im=im.permute(1,2,0) elif not isinstance(im,np.ndarray): im=array(im) # Handle 1-channel images if im.shape[-1]==1: im=im[...,0] ax.imshow(im, **kwargs) if title is not None: ax.set_title(title) ax.axis('off') return ax
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
`show_image` can show b&w images...
im = make_cross_image()
ax = show_image(im, cmap="Greys", figsize=(2,2))
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
...and color images with standard `c*h*w` dim order...
im2 = make_cross_image(False)
ax = show_image(im2, figsize=(2,2))
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
...and color images with `h*w*c` dim order...
im3 = im2.permute(1,2,0) ax = show_image(im3, figsize=(2,2)) ax = show_image(im, cmap="Greys", figsize=(2,2)) show_title("Cross", ax) #export def show_titled_image(o, **kwargs): "Call `show_image` destructuring `o` to `(img,title)`" show_image(o[0], title=str(o[1]), **kwargs) #export def show_image_batch(b, show=show_titled_image, items=9, cols=3, figsize=None, **kwargs): "Display batch `b` in a grid of size `items` with `cols` width" rows = (items+cols-1) // cols if figsize is None: figsize = (cols*3, rows*3) fig,axs = plt.subplots(rows, cols, figsize=figsize) for *o,ax in zip(*to_cpu(b), axs.flatten()): show(o, ax=ax, **kwargs) show_image_batch(([im,im2,im3],['bw','chw','hwc']), items=3)
_____no_output_____
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
Export -
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
Converted 00_test.ipynb. Converted 01_core.ipynb. Converted 01a_dataloader.ipynb. Converted 01a_script.ipynb. Converted 02_transforms.ipynb. Converted 03_pipeline.ipynb. Converted 04_data_external.ipynb. Converted 05_data_core.ipynb. Converted 06_data_source.ipynb. Converted 07_vision_core.ipynb. Converted 08_pets_tutorial.ipynb. Converted 09_vision_augment.ipynb. Converted 11_layers.ipynb. Converted 12_optimizer.ipynb. Converted 13_learner.ipynb. Converted 14_callback_schedule.ipynb. Converted 15_callback_hook.ipynb. Converted 16_callback_progress.ipynb. Converted 17_callback_tracker.ipynb. Converted 18_callback_fp16.ipynb. Converted 19_callback_mixup.ipynb. Converted 20_metrics.ipynb. Converted 21_tutorial_imagenette.ipynb. Converted 30_text_core.ipynb. Converted 31_text_data.ipynb. Converted 32_text_models_awdlstm.ipynb. Converted 33_test_models_core.ipynb. Converted 34_callback_rnn.ipynb. Converted 35_tutorial_wikitext.ipynb. Converted 36_text_models_qrnn.ipynb. Converted 40_tabular_core.ipynb. Converted 41_tabular_model.ipynb. Converted 50_data_block.ipynb. Converted 60_vision_models_xresnet.ipynb. Converted 90_notebook_core.ipynb. Converted 91_notebook_export.ipynb. Converted 92_notebook_showdoc.ipynb. Converted 93_notebook_export2html.ipynb. Converted 94_index.ipynb. Converted 95_synth_learner.ipynb. Converted notebook2jekyll.ipynb.
Apache-2.0
dev/01_core.ipynb
nareshr8/fastai_dev
Two-dimensional optimization. Let's think about a cost function as follows: $$C(x_0, x_1) = \frac{x_0^2}{2^2} + \frac{x_1^2}{1^2}$$ We may implement it in Python as follows.
def c(x:np.ndarray, a:float=2, b:float=1) -> float:
    x0 = x[0]
    x1 = x[1]
    return (x0 * x0) / (a * a) + (x1 * x1) / (b * b)
_____no_output_____
BSD-3-Clause
15_Optimization/015_two_dimensional_optimization.ipynb
kangwonlee/2109eca-nmisp-template
์‹œ๊ฐํ™” ํ•ด ๋ณด์ž.Let's visualize.
def plot_cost():
    # ref : https://matplotlib.org/stable/gallery/
    fig = plt.figure(figsize=(15, 6))
    ax1 = plt.subplot(1, 2, 1)
    ax2 = plt.subplot(1, 2, 2, projection="3d")

    x = np.linspace(-4, 4)
    y = np.linspace(-2, 2)

    X, Y = np.meshgrid(x, y)
    Z = c((X, Y))

    cset = ax1.contour(X, Y, Z, cmap=cm.coolwarm)
    surf = ax2.plot_surface(X, Y, Z, antialiased=True, cmap=cm.viridis, alpha=0.5)
    fig.colorbar(surf)

    return ax1, ax2

plot_cost()
plt.show()
_____no_output_____
BSD-3-Clause
15_Optimization/015_two_dimensional_optimization.ipynb
kangwonlee/2109eca-nmisp-template
Declare another cost function that plots the intermediate results.
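The function below is written as a closure: an inner function that is returned by the enclosing function and keeps access to its variables (`x0_history`, etc.) between calls. A minimal sketch of the pattern, with made-up names:

```python
# Minimal closure sketch: `count` keeps using the `history` list from the
# call of `make_counter` that created it, just like cost_with_plot below
# keeps appending to x0_history, x1_history and c_history.
def make_counter():
    history = []
    def count(x):
        history.append(x)
        return len(history)
    return count

count = make_counter()
count(10), count(20)   # -> (1, 2): the list persists between calls
```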
def get_cost_with_plot(a=2, b=1, b_triangle=True):
    x0_history = []
    x1_history = []
    c_history = []

    def cost_with_plot(x, a=a, b=b):
        '''
        This is a closure: an inner function of another function
        that is also that function's return value.
        '''
        ax1, ax2 = plot_cost()

        result = c(x)

        x0_history.append(x[0])
        x1_history.append(x[1])
        c_history.append(result)

        ax1.plot(x0_history, x1_history, '.')
        ax2.plot(x0_history, x1_history, c_history, '.')

        if b_triangle and (3 <= len(x0_history)):
            ax1.plot(
                x0_history[-3:]+[x0_history[-3]],
                x1_history[-3:]+[x1_history[-3]],
                '-'
            )
            ax2.plot(
                x0_history[-3:]+[x0_history[-3]],
                x1_history[-3:]+[x1_history[-3]],
                c_history[-3:]+[c_history[-3]],
                '-'
            )

        plt.show()

        return result

    return cost_with_plot

cost_with_plot = get_cost_with_plot()
_____no_output_____
BSD-3-Clause
15_Optimization/015_two_dimensional_optimization.ipynb
kangwonlee/2109eca-nmisp-template
Nelder-Mead method (ref: [[0]](https://en.wikipedia.org/wiki/Nelder-Mead_method)). If the independent variables of the cost function are $n$-dimensional, the Nelder-Mead method uses a **simplex** of $n+1$ vertices.
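To make the simplex idea concrete, here is a minimal sketch of the reflection step only, using the `c` cost function defined above (scipy's `fmin` below also expands, contracts and shrinks the simplex, so this is an illustration rather than a full implementation):

```python
import numpy as np

def reflect_worst(simplex, cost):
    # sort the n+1 vertices from best (lowest cost) to worst
    simplex = sorted(simplex, key=cost)
    worst = simplex[-1]
    # centroid of every vertex except the worst one
    centroid = np.mean(simplex[:-1], axis=0)
    # mirror the worst vertex through the centroid
    reflected = centroid + (centroid - worst)
    if cost(reflected) < cost(worst):
        simplex[-1] = reflected   # keep the better point
    return simplex

simplex = [np.array([3.0, 1.0]), np.array([3.5, 1.0]), np.array([3.0, 1.5])]
for _ in range(30):
    simplex = reflect_worst(simplex, c)
simplex[0]   # the best vertex has moved toward the minimum at (0, 0)
```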
fmin_result = so.fmin(cost_with_plot, [3.0, 1.0])
fmin_result
_____no_output_____
BSD-3-Clause
15_Optimization/015_two_dimensional_optimization.ipynb
kangwonlee/2109eca-nmisp-template
Newton-CG method. Let's get the partial derivatives of the cost function with respect to $x_0$ and $x_1$: $$C(x_0, x_1) = \frac{x_0^2}{2^2} + \frac{x_1^2}{1^2} \\ \frac{\partial C}{\partial x_0} = 2 \cdot \frac{x_0}{2^2} \\ \frac{\partial C}{\partial x_1} = 2 \cdot \frac{x_1}{1^2}$$ One may implement them in Python as follows.
def jacobian(x, a=2, b=1):
    x0 = x[0]
    x1 = x[1]
    return (2 * x0 / (a*a), 2 * x1 / (b*b),)
_____no_output_____
BSD-3-Clause
15_Optimization/015_two_dimensional_optimization.ipynb
kangwonlee/2109eca-nmisp-template
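As a quick sanity check (just a sketch, not part of the original notebook), the analytic `jacobian` above can be compared against central finite differences of `c`:

```python
import numpy as np

def numerical_jacobian(f, x, delta=1e-6):
    # central finite differences, one coordinate at a time
    x = np.asarray(x, dtype=float)
    grad = np.zeros_like(x)
    for i in range(x.size):
        e = np.zeros_like(x)
        e[i] = delta
        grad[i] = (f(x + e) - f(x - e)) / (2 * delta)
    return grad

x_test = np.array([3.0, 1.0])
jacobian(x_test), numerical_jacobian(c, x_test)   # both approximately (1.5, 2.0)
```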
์ตœ์ ํ™”์—๋„ ๊ธฐ์šธ๊ธฐ๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋‹ค.We can also use the slopes in the optimization.
cost_with_plot = get_cost_with_plot(b_triangle=False)
fmin_newton = so.minimize(cost_with_plot, [3.0, 1.0], jac=jacobian, method="newton-cg")
fmin_newton
_____no_output_____
BSD-3-Clause
15_Optimization/015_two_dimensional_optimization.ipynb
kangwonlee/2109eca-nmisp-template
Build a medium-size KG from a CSV dataset. First let's initialize the KG object as we did previously:
import kglab

namespaces = {
    "wtm":  "http://purl.org/heals/food/",
    "ind":  "http://purl.org/heals/ingredient/",
    "skos": "http://www.w3.org/2004/02/skos/core#",
    }

kg = kglab.KnowledgeGraph(
    name = "A recipe KG example based on Food.com",
    base_uri = "https://www.food.com/recipe/",
    namespaces = namespaces,
    )
_____no_output_____
MIT
examples/ex2_0.ipynb
vzkqwvku/kglab
Here's a way to describe the namespaces that are available to use:
kg.describe_ns()
_____no_output_____
MIT
examples/ex2_0.ipynb
vzkqwvku/kglab
Next, we'll define a dictionary that maps (somewhat magically) from strings (i.e., "labels") to ingredients defined in the vocabulary:
common_ingredient = { "water": kg.get_ns("ind").Water, "salt": kg.get_ns("ind").Salt, "pepper": kg.get_ns("ind").BlackPepper, "black pepper": kg.get_ns("ind").BlackPepper, "dried basil": kg.get_ns("ind").Basil, "butter": kg.get_ns("ind").Butter, "milk": kg.get_ns("ind").CowMilk, "egg": kg.get_ns("ind").ChickenEgg, "eggs": kg.get_ns("ind").ChickenEgg, "bacon": kg.get_ns("ind").Bacon, "sugar": kg.get_ns("ind").WhiteSugar, "brown sugar": kg.get_ns("ind").BrownSugar, "honey": kg.get_ns("ind").Honey, "vanilla": kg.get_ns("ind").VanillaExtract, "vanilla extract": kg.get_ns("ind").VanillaExtract, "flour": kg.get_ns("ind").AllPurposeFlour, "all-purpose flour": kg.get_ns("ind").AllPurposeFlour, "whole wheat flour": kg.get_ns("ind").WholeWheatFlour, "olive oil": kg.get_ns("ind").OliveOil, "vinegar": kg.get_ns("ind").AppleCiderVinegar, "garlic": kg.get_ns("ind").Garlic, "garlic clove": kg.get_ns("ind").Garlic, "garlic cloves": kg.get_ns("ind").Garlic, "onion": kg.get_ns("ind").Onion, "onions": kg.get_ns("ind").Onion, "cabbage": kg.get_ns("ind").Cabbage, "carrot": kg.get_ns("ind").Carrot, "carrots": kg.get_ns("ind").Carrot, "celery": kg.get_ns("ind").Celery, "potato": kg.get_ns("ind").Potato, "potatoes": kg.get_ns("ind").Potato, "tomato": kg.get_ns("ind").Tomato, "tomatoes": kg.get_ns("ind").Tomato, "baking powder": kg.get_ns("ind").BakingPowder, "baking soda": kg.get_ns("ind").BakingSoda, }
_____no_output_____
MIT
examples/ex2_0.ipynb
vzkqwvku/kglab
This is where the use of NLP to produce *annotations* begins to overlap with KG practices. Now let's load our dataset of recipes (the `dat/recipes.csv` file in CSV format) into a `pandas` dataframe:
import pandas as pd

df = pd.read_csv("../dat/recipes.csv")
df.head()
_____no_output_____
MIT
examples/ex2_0.ipynb
vzkqwvku/kglab
Then iterate over the rows in the dataframe, representing a recipe in the KG for each row:
import rdflib

for index, row in df.iterrows():
    recipe_id = row["id"]
    node = rdflib.URIRef("https://www.food.com/recipe/{}".format(recipe_id))

    kg.add(node, kg.get_ns("rdf").type, kg.get_ns("wtm").Recipe)

    recipe_name = row["name"]
    kg.add(node, kg.get_ns("skos").definition, rdflib.Literal(recipe_name))

    cook_time = row["minutes"]
    cook_time_literal = "PT{}M".format(int(cook_time))
    code_time_node = rdflib.Literal(cook_time_literal, datatype=kg.get_ns("xsd").duration)
    kg.add(node, kg.get_ns("wtm").hasCookTime, code_time_node)

    ind_list = eval(row["ingredients"])

    for ind in ind_list:
        ingredient = ind.strip()
        ingredient_obj = common_ingredient[ingredient]
        kg.add(node, kg.get_ns("wtm").hasIngredient, ingredient_obj)
_____no_output_____
MIT
examples/ex2_0.ipynb
vzkqwvku/kglab
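The loop above assumes every ingredient string is present in `common_ingredient` (a `KeyError` would stop the load otherwise). A small sketch, not from the original notebook, of a more defensive lookup that skips and tallies unknown ingredients:

```python
from collections import Counter

unknown_ingredients = Counter()

def lookup_ingredient(name):
    "Return the vocabulary entry for `name`, or None if it isn't mapped yet"
    obj = common_ingredient.get(name.strip())
    if obj is None:
        unknown_ingredients[name.strip()] += 1   # remember what couldn't be mapped
    return obj

# inside the loading loop one could then write:
#     ingredient_obj = lookup_ingredient(ind)
#     if ingredient_obj is not None:
#         kg.add(node, kg.get_ns("wtm").hasIngredient, ingredient_obj)
```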
Notice how the `xsd:duration` literal is now getting used to represent cooking times. We've structured this example such that each of the recipes in the CSV file has a known representation for all of its ingredients. There are nearly 250K recipes in the full dataset from Food.com, so the `common_ingredient` dictionary would need to be extended quite a lot to handle all of those possible ingredients. At this stage, our graph has grown by a couple of orders of magnitude, so its visualization should be more interesting now. Let's take a look:
VIS_STYLE = {
    "wtm": {
        "color": "orange",
        "size": 20,
    },
    "ind":{
        "color": "blue",
        "size": 35,
    },
}

subgraph = kglab.SubgraphTensor(kg)
pyvis_graph = subgraph.build_pyvis_graph(notebook=True, style=VIS_STYLE)

pyvis_graph.force_atlas_2based()
pyvis_graph.show("tmp.fig01.html")
_____no_output_____
MIT
examples/ex2_0.ipynb
vzkqwvku/kglab
Given the defaults for this kind of visualization, there's likely a dense mass of orange (recipes) at the center, with a close cluster of common ingredients (dark blue), surrounded by less common ingredients and cooking times (light blue). Performance analysis of serialization methods Let's serialize this recipe KG constructed from the CSV dataset to a local TTL file, while measuring the time and disk space required:
import time

write_times = []

t0 = time.time()
kg.save_rdf("tmp.ttl")
write_times.append(round((time.time() - t0) * 1000.0, 2))
_____no_output_____
MIT
examples/ex2_0.ipynb
vzkqwvku/kglab
Let's also serialize the KG into the other formats that we've been using, to compare relative sizes for a medium size KG:
t0 = time.time()
kg.save_rdf("tmp.xml", format="xml")
write_times.append(round((time.time() - t0) * 1000.0, 2))

t0 = time.time()
kg.save_jsonld("tmp.jsonld")
write_times.append(round((time.time() - t0) * 1000.0, 2))

t0 = time.time()
kg.save_parquet("tmp.parquet")
write_times.append(round((time.time() - t0) * 1000.0, 2))

import pandas as pd
import os

file_paths = ["tmp.ttl", "tmp.xml", "tmp.jsonld", "tmp.parquet"]
file_sizes = [os.path.getsize(file_path) for file_path in file_paths]

df = pd.DataFrame({"file_path": file_paths, "file_size": file_sizes, "write_time": write_times})
df["ms_per_byte"] = df["write_time"] / df["file_size"]

df
_____no_output_____
MIT
examples/ex2_0.ipynb
vzkqwvku/kglab
https://colab.research.google.com/drive/1OmAdxU_Lw7r-tMXiTOeSI7NWgB3AF9QF Issue with image translation
from keras.datasets import mnist import numpy from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.utils import np_utils import matplotlib.pyplot as plt %matplotlib inline (X_train, y_train), (X_test, y_test) = mnist.load_data() X_train1 = X_train[y_train==1] num_pixels = X_train.shape[1] * X_train.shape[2] X_train = X_train.reshape(X_train.shape[0],num_pixels).astype('float32') X_test = X_test.reshape(X_test.shape[0],num_pixels).astype('float32') X_train = X_train / 255 X_test = X_test / 255 y_train = np_utils.to_categorical(y_train) y_test = np_utils.to_categorical(y_test) num_classes = y_train.shape[1] model = Sequential() model.add(Dense(1000, input_dim=num_pixels, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy']) history = model.fit(X_train, y_train, validation_data=(X_test, y_test),epochs=5, batch_size=1024, verbose=1) import numpy as np pic=np.zeros((28,28)) pic2=np.copy(pic) for i in range(X_train1.shape[0]): pic2=X_train1[i,:,:] pic=pic+pic2 pic=(pic/X_train1.shape[0]) plt.imshow(pic) for i in range(pic.shape[0]): if i<20: pic[:,i]=pic[:,i+1] plt.imshow(pic) model.predict(pic.reshape(1,784)/255)
_____no_output_____
MIT
Chapter04/Issue_with_image_translation.ipynb
PacktPublishing/Neural-Networks-with-Keras-Cookbook
Deep Learning project * Gianfranco Di Marco - 1962292 * Giacomo Colizzi Coin - 1794538 **- Trajectory Prediction -** This is the problem of predicting the short-term (1-3 seconds) and long-term (3-5 seconds) spatial coordinates of various road-agents such as cars, buses, pedestrians, rickshaws, animals, etc. These road-agents have different dynamic behaviors that may correspond to aggressive or conservative driving styles. **- nuScenes Dataset -** Available at https://www.nuscenes.org/nuscenes. The nuScenes dataset is a large-scale autonomous driving dataset. The dataset has 3D bounding boxes for 1000 scenes collected in Boston and Singapore. Each scene is 20 seconds long and annotated at 2 Hz. This results in a total of 28130 samples for training, 6019 samples for validation and 6008 samples for testing. The dataset has the full autonomous vehicle data suite: 32-beam LiDAR, 6 cameras and radars with complete 360° coverage. > Holger Caesar and Varun Bankiti and Alex H. Lang and Sourabh Vora and Venice Erin Liong and Qiang Xu and Anush Krishnan and Yu Pan and Giancarlo Baldan and Oscar Beijbom: "*nuScenes: A multimodal dataset for autonomous driving*", arXiv preprint arXiv:1903.11027, 2019. The most important part of this dataset for our project is the Map Expansion Pack, which simplifies the trajectory prediction problem. Requirements **Environment**
# Necessary since Google Colab supports only Python 3.7 # -> some libraries can be different from local and Colab try: import google.colab from google.colab import drive ENVIRONMENT = 'colab' %pip install tf-estimator-nightly==2.8.0.dev2021122109 %pip install folium==0.2.1 except: ENVIRONMENT = 'local'
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
**Libraries**
%pip install nuscenes-devkit %pip install pytorch-lightning # Learning import torch import torch.nn as nn import torch.nn.functional as F from torchvision.models import resnet50 from torchvision.transforms import Normalize from torchmetrics import functional import pytorch_lightning as pl from pytorch_lightning.callbacks import ModelCheckpoint # Math import numpy as np # Dataset from nuscenes.nuscenes import NuScenes from nuscenes.prediction import PredictHelper from nuscenes.prediction.input_representation.static_layers import StaticLayerRasterizer from nuscenes.prediction.input_representation.agents import AgentBoxesWithFadedHistory from nuscenes.prediction.input_representation.interface import InputRepresentation from nuscenes.prediction.input_representation.combinators import Rasterizer from nuscenes.eval.prediction.config import PredictionConfig, load_prediction_config from nuscenes.eval.prediction.splits import get_prediction_challenge_split from nuscenes.eval.prediction import metrics, data_classes # File system import os import shutil import pickle import zipfile import tarfile import urllib.request # Generic import time from tqdm import tqdm from typing import List, Dict, Tuple, Any from collections import defaultdict from abc import abstractmethod import multiprocessing as mp import matplotlib.pyplot as plt
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
Configuration **Generic Parameters**
# Environment-dependent parameters if ENVIRONMENT == 'colab': ROOT = '/content/drive/MyDrive/DL/Trajectory-Prediction-PyTorch/' MAX_NUM_WORKERS = 0 MAX_BATCH_SIZE = 8 PROGRESS_BAR_REFRESH_RATE = 20 elif ENVIRONMENT == 'local': ROOT = os.getcwd() # TODO: solve problem with VRAM with PL if os.name == 'nt': MAX_NUM_WORKERS = 0 MAX_BATCH_SIZE = 16 else: MAX_NUM_WORKERS = 4 MAX_BATCH_SIZE = 8 PROGRESS_BAR_REFRESH_RATE = 10 else: raise ValueError("Wrong 'environment' value") # Train parameters BATCH_SIZE = MAX_BATCH_SIZE NUM_WORKERS = MAX_NUM_WORKERS LEARNING_RATE = 1e-4 MOMENTUM = 0.9 TRAIN_EPOCHES = 20 PLOT_PERIOD = 1 # 1 = plot at each epoch CHECKPOINT_DIR = os.path.join(ROOT, 'checkpoints') BEST_CHECKPOINT_DIR = os.path.join(CHECKPOINT_DIR, 'best') CHECKPOINT_MONITOR = "val_loss" TOP_K_SAVE = 10 # Test parameters DEBUG_MODE = False # Hardcoded parameters HELPER_NEEDED = False
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
**Network Parameters**
# TODO: add other baselines PREDICTION_MODEL = 'CoverNet' if PREDICTION_MODEL == 'CoverNet': # - Architecture parameters BACKBONE_WEIGHTS = 'ImageNet' BACKBONE_MODEL = 'ResNet18' K_SIZE = 20000 # - Trajectory parameters AGENT_HISTORY = 1 SHORT_TERM_HORIZON = 3 LONG_TERM_HORIZON = 6 TRAJ_HORIZON = SHORT_TERM_HORIZON TRAJ_LINK = 'https://www.nuscenes.org/public/nuscenes-prediction-challenge-trajectory-sets.zip' TRAJ_DIR = os.path.join(ROOT, 'trajectory_sets') EPSILON = 2
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
**Dataset Parameters**
# Organization parameters PREPARE_DATASET = False PREPROCESSED = True # File system parameters PL_SEED = 42 DATAROOT = os.path.join(ROOT, 'data', 'sets', 'nuscenes') PREPROCESSED_FOLDER = 'preprocessed' GT_SUFFIX = '-gt' FILENAME_EXT = '.pt' DATASET_VERSION = 'v1.0-trainval' AGGREGATORS = [{'name': "RowMean"}] # Other parameters MAX_PREDICTED_MODES = 25 SAMPLES_PER_SECOND = 2 NORMALIZATION = 'imagenet'
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
Dataset **Initialization** N.B.: The download links passed to *urllib.request.urlretrieve()* should be replaced periodically because they expire. Steps to download correctly (on Firefox): 1. Download the Map Expansion pack (or Trainval metadata) from the website 2. Stop the download 3. Right-click on the file -> copy download link 4. Paste the copied link into the first argument of the urlretrieve function. The second argument is the final name of the file
# Drive initialization if ENVIRONMENT == 'colab': drive.mount('/content/drive') if PREPARE_DATASET: # Creating dataset dir os.makedirs(DATAROOT, exist_ok=True) os.chdir(DATAROOT) # Downloading Map Expansion Pack os.mkdir('maps') os.chdir('maps') print("Downloading and extracting Map Expansion pack ...") urllib.request.urlretrieve('https://s3.amazonaws.com/data.nuscenes.org/public/v1.0/nuScenes-map-expansion-v1.3.zip?AWSAccessKeyId=AKIA6RIK4RRMFUKM7AM2&Signature=AvzxB6d7CxtpCUYIUChItvDSA3Q%3D&Expires=1651141974', 'nuScenes-map-expansion-v1.3.zip') with zipfile.ZipFile('nuScenes-map-expansion-v1.3.zip', 'r') as zip_ref: zip_ref.extractall(os.getcwd()) os.remove('nuScenes-map-expansion-v1.3.zip') # Downloading Trainval Metadata os.chdir('..') print("Downloading and extracting TrainVal metadata ...") urllib.request.urlretrieve('https://s3.amazonaws.com/data.nuscenes.org/public/v1.0/v1.0-trainval_meta.tgz?AWSAccessKeyId=AKIA6RIK4RRMFUKM7AM2&Signature=ZDr9UgOoV3UpYCI5RCY%2BNKiZVZ4%3D&Expires=1651142002', 'v1.0-trainval_meta.tgz') tar_ref = tarfile.open('v1.0-trainval_meta.tgz', 'r:gz') tar_ref.extractall(os.getcwd()) tar_ref.close() os.remove('v1.0-trainval_meta.tgz') os.chdir(DATAROOT)
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
**Dataset definition**
class TrajPredDataset(torch.utils.data.Dataset): """ Trajectory Prediction Dataset Base Class for Trajectory Prediction Datasets """ def __init__(self, dataset, name, data_type, preprocessed, split, dataroot, preprocessed_folder, filename_ext, gt_suffix, traj_horizon, max_traj_horizon, num_workers): """ Dataset Initialization Parameters ---------- dataset: the instantiated dataset name: name of the dataset data_type: data type of the dataset elements preprocessed: True if data has already been preprocessed split: the dataset split ('train', 'train_val', 'val') dataroot: the root directory of the dataset preprocessed_folder: the folder containing preprocessed data filename_ext: the extension of the generated filenames gt_suffix: the suffix added after each GT filename (before ext) traj_horizon: horizon (in seconds) for the future trajectory max_traj_horizon: maximum trajectory horizon possible (in seconds) num_workers: num of processes that collect data """ super(TrajPredDataset, self).__init__() self.dataset = dataset self.name = name self.data_type = data_type self.preprocessed = preprocessed self.split = split self.dataroot = dataroot self.preprocessed_folder = preprocessed_folder self.filename_ext = filename_ext self.gt_suffix = gt_suffix self.traj_horizon = traj_horizon self.max_traj_horizon = max_traj_horizon self.num_workers = num_workers self.helper = None self.tokens = None self.static_layer_rasterizer = None self.agent_rasterizer = None self.input_representation = None def __len__(self): """ Return the size of the dataset """ raise NotImplementedError def __getitem__(self, idx): """ Return an element of the dataset """ raise NotImplementedError @abstractmethod def generate_data(self): """ Data generation If self.preprocessed, directly collect data. Otherwise, generate data without preprocess it. 
""" raise NotImplementedError @abstractmethod def get_raster(self, token): """ Convert a token split into a raster Parameters ---------- token: token containing instance token and sample token Return ------ raster: the raster image """ raise NotImplementedError class nuScenesDataset(TrajPredDataset): """ nuScenes Dataset for Trajectory Prediction challenge """ def __init__(self, helper, data_type='raster', preprocessed=False, split='train', dataroot=DATAROOT, preprocessed_folder=PREPROCESSED_FOLDER, filename_ext=FILENAME_EXT, gt_suffix=GT_SUFFIX, traj_horizon=TRAJ_HORIZON, max_traj_horizon=LONG_TERM_HORIZON, samples_per_second=SAMPLES_PER_SECOND, agent_history=AGENT_HISTORY, normalization=NORMALIZATION, num_workers=NUM_WORKERS): """ nuScenes Dataset Initialization Parameters ---------- helper: the helper of the instantiated nuScenes dataset (None if not needed) data_type: data type of the dataset elements preprocessed: True if data has already been preprocessed split: the dataset split ('train', 'train_val', 'val') dataroot: the root directory of the dataset preprocessed_folder: the folder containing preprocessed data filename_ext: the extension of the generated filenames gt_suffix: the suffix added after each GT filename (before ext) traj_horizon: horizon (in seconds) for the future trajectory max_traj_horizon: maximum trajectory horizon possible (in seconds) samples_per_second: sampling frequency (in Hertz) agent_history: the seconds of considered agent history normalization: which kind of normalization to apply to input num_workers: num of processes that collect data """ # General initialization super(nuScenesDataset, self).__init__( None, 'nuScenes', data_type, preprocessed, split, dataroot, preprocessed_folder, filename_ext, gt_suffix, traj_horizon, max_traj_horizon, num_workers) self.helper = helper self.tokens = get_prediction_challenge_split( split, dataroot=dataroot) self.samples_per_second = samples_per_second if data_type == 'raster': if helper is not None: self.static_layer_rasterizer = StaticLayerRasterizer(self.helper) self.agent_rasterizer = AgentBoxesWithFadedHistory( self.helper, seconds_of_history=agent_history) self.input_representation = InputRepresentation( self.static_layer_rasterizer, self.agent_rasterizer, Rasterizer()) else: self.static_layer_rasterizer = None self.agent_rasterizer = None self.input_representation = None else: # NOTE: possible also other type of input data pass if not self.preprocessed: print("Preprocessing data ...") self.generate_data() # Normalization function if normalization == 'imagenet': self.normalization = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) else: raise ValueError("Available only 'imagenet' normalization") def __len__(self) -> int: """ Return the size of the dataset """ return len(self.tokens) def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]: """ Return an element of the dataset """ # Select subfolder if idx < 0: idx = len(self) + idx subfolder = f'batch_{idx//128}' # Load files complete_tensor = torch.load( os.path.join(self.dataroot, self.preprocessed_folder, self.split, subfolder, self.tokens[idx] + self.filename_ext)) gt_trajectory = torch.load( os.path.join(self.dataroot, self.preprocessed_folder, self.split, subfolder, self.tokens[idx] + self.gt_suffix + self.filename_ext)) # Adjust tensors # NOTE: maybe it's better to handle this section in data generation while gt_trajectory.shape[0] < self.samples_per_second * self.max_traj_horizon: gt_trajectory = torch.concat((gt_trajectory, 
gt_trajectory[-1].unsqueeze(0))) gt_trajectory = gt_trajectory[:(self.samples_per_second * self.traj_horizon)] agent_state_vector, raster_img = self.tensor_io_conversion( "read", None, None, complete_tensor) raster_img = self.normalization(raster_img) nan_mask = agent_state_vector != agent_state_vector if nan_mask.any(): agent_state_vector[nan_mask] = 0 return agent_state_vector, raster_img, gt_trajectory, idx def generate_data(self): """ Data generation If self.preprocessed, directly collect data. Otherwise, generate data without preprocess it. """ # Generate directories if don't exist preprocessed_dir = os.path.join(self.dataroot, self.preprocessed_folder) split_dir = os.path.join(preprocessed_dir, self.split) if self.preprocessed_folder not in os.listdir(self.dataroot): os.mkdir(preprocessed_dir) if self.split not in os.listdir(preprocessed_dir): os.mkdir(split_dir) # Variable useful to restore interrupted preprocessing preprocessed_batches = os.listdir(split_dir) already_preproc = \ len([f for f in preprocessed_batches if os.path.isfile(os.path.join(split_dir, f))]) # Create subfolders if len(preprocessed_batches) == 0: n_subfolders = len(self.tokens) // 128 + int(len(self.tokens) % 128 != 0) for i in range(n_subfolders): subfolder = 'batch_' + str(i) os.mkdir(os.path.join(split_dir, subfolder)) # Generate data if self.data_type == 'raster': for i, t in enumerate(tqdm(self.tokens)): subfolder = f'batch_{i//128}' if i >= int(already_preproc/2): self.generate_raster_data(t, split_dir, subfolder) else: pass def generate_raster_data(self, token, batches_dir, subfolder): """ Generate a raster map and agent state vector from token split The generated input data consists in a tensor like this: [raster map | agent state vector] The generated ground truth data is the future agent trajectory tensor Parameters ---------- token: token containing instance token and sample token batches_dir: the directory in which the batches will be generated subfolder: the data is divided into subfolders in order to avoid Drive timeouts; this parameter tells which is the actual subfolder towhere place data """ # Generate and concatenate input tensors instance_token, sample_token = token.split("_") raster_img = self.input_representation.make_input_representation( instance_token, sample_token) raster_tensor = torch.Tensor(raster_img).permute(2, 0, 1) / 255. 
agent_state_vector = torch.Tensor( [[self.helper.get_velocity_for_agent(instance_token, sample_token), self.helper.get_acceleration_for_agent(instance_token, sample_token), self.helper.get_heading_change_rate_for_agent(instance_token, sample_token)]]) raster_agent_tensor, _ = \ self.tensor_io_conversion('write', raster_tensor, agent_state_vector) # Generate ground truth gt_trajectory = torch.Tensor( self.helper.get_future_for_agent(instance_token, sample_token, seconds=self.max_traj_horizon, in_agent_frame=True)) # Save to disk torch.save(raster_agent_tensor, os.path.join( batches_dir, subfolder, token + self.filename_ext)) torch.save(gt_trajectory, os.path.join( batches_dir, subfolder, token + self.gt_suffix + self.filename_ext)) @staticmethod def tensor_io_conversion(mode, big_t=None, small_t=None, complete_t=None) -> Tuple[torch.Tensor, torch.Tensor]: """ Utility IO function to concatenate tensors of different shape Normally used to concatenate (or separate) raster map and agent state vector in order to speed up IO Parameters ---------- mode: 'write' (concatenate) or 'read' (separate) big_t: the bigger tensor (None if we are going to separate tensors) small_t: the smaller tensor (None if we are going to separate tensors) complete_t: the concatenated tensor (None if we are going to concatenate tensors) Return ------ out1: big tensor (mode == 'read') or complete tensor (mode == 'write') out2: small tensor (mode == 'read') or empty tensor (mode == 'write') """ out1, out2 = None, None if mode == 'write': # concatenate if big_t is None or small_t is None: raise ValueError("Wrong argument: 'big_t' and 'small_t' cannot be None") small_t = small_t.permute(1, 0).unsqueeze(2) small_t = small_t.expand(-1, -1, big_t.shape[-1]) out1 = torch.cat((big_t, small_t), dim=1) out2 = torch.empty(small_t.shape) elif mode == 'read': # separate if complete_t is None: raise ValueError("Wrong argument: 'complete_t' cannot be None") out1 = complete_t[..., -1, -1].unsqueeze(0) out2 = complete_t[..., :-1, :] else: raise ValueError( "Wrong argument 'mode'; available 'read' or 'write'") return out1, out2 class nuScenesDataModule(pl.LightningDataModule): """ PyTorch Lightning Data Module for the nuScenes dataset """ def __init__(self, nuscenes_train, nuscenes_val, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS): """ Data Module initialization Parameters ---------- nuscenes_train: instance of the nuScenesDataset class (split='train') nuscenes_val: instance of the nuScenesDataset class (split='val') batch_size: number of samples to extract from the dataset at each step num_workers: number of cores implied in data collection """ super(nuScenesDataModule, self).__init__() self.batch_size = batch_size self.num_workers = num_workers self.nuscenes_train = nuscenes_train self.nuscenes_val = nuscenes_val def setup(self, stage=None): """ Setup the data module """ if stage == "fit" or stage is None: self.nusc_train = self.nuscenes_train self.nusc_val = self.nuscenes_val if stage == "test" or stage is None: self.nusc_test = self.nuscenes_val def train_dataloader(self): """ Dataloader for the training part """ return torch.utils.data.DataLoader(self.nusc_train, self.batch_size, shuffle=True, num_workers=self.num_workers, drop_last=True) def val_dataloader(self): """ Dataloader for the validation part """ return torch.utils.data.DataLoader(self.nusc_val, self.batch_size, shuffle=False, num_workers=self.num_workers, drop_last=True) def test_dataloader(self): """ Dataloader for the testing part """ return 
torch.utils.data.DataLoader(self.nusc_test, self.batch_size, shuffle=False, num_workers=self.num_workers, drop_last=True)
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
Models **Covernet**
class CoverNet(pl.LightningModule): """ CoverNet model for Trajectory Prediction """ def __init__(self, K_size, epsilon, traj_link, traj_dir, device, lr=LEARNING_RATE, momentum=MOMENTUM, traj_samples=SAMPLES_PER_SECOND*TRAJ_HORIZON): """ CoverNet initialization Parameters ---------- K_size: number of modes (trajectories) (needed ?) epsilon: value (in meters) relative to the space coverage traj_link: link from which to download the trajectories device: target device of the model (e.g. 'cuda:0') lr: learning rate of the optimizer momentum: momentum of the optimizer traj_samples: number of samples to consider in the trajectory """ super().__init__() self.K_size = K_size self.convModel = resnet50(pretrained=True) self.activation = {} def get_activation(name): def hook(model, input, output): self.activation[name] = output return hook self.convModel.layer4.register_forward_hook(get_activation('layer4')) self.trajectories = prepare_trajectories(epsilon, traj_link, traj_dir) self.fc1 = nn.Linear(2051, 4096) self.fc2 = nn.Linear(4096, self.trajectories.size()[0]) self.traj_samples = traj_samples self.tgt_device = device self.momentum = momentum self.lr = lr def forward(self, x) -> torch.Tensor: """ Network inference """ img, state = x self.convModel(img) resnet_output = torch.flatten(self.convModel.avgpool(self.activation['layer4']),start_dim=1) x = torch.cat([resnet_output, state], 1) x = self.fc1(x) x = self.fc2(x) return x def training_step(self, batch, batch_idx): """ Training step of the model Parameters ---------- batch: batch of data batch_idx: index of the actual batch (from 0 to len(dataset)) """ # Collect data x_state, x_img, gt, _ = batch x_state = torch.flatten(x_state, 0, 1) reduced_traj = self.trajectories[:, :self.traj_samples] # Prepare positive samples with torch.no_grad(): y = get_positives(reduced_traj, gt.to('cpu')) y = y.to(self.tgt_device) # Inference y_hat = self((x_img, x_state)) loss = F.cross_entropy(y_hat, y) # Log self.log('train_loss', loss.item(), on_step=True) return loss def validation_step(self, batch, batch_idx): """ Validation step of the model Parameters ---------- batch: batch of data batch_idx: index of the actual batch (from 0 to len(dataset)) """ with torch.no_grad(): # Collect data x_state, x_img, gt, _ = batch x_state = torch.flatten(x_state, 0, 1) reduced_traj = self.trajectories[:, :self.traj_samples] # Prepare positive samples y = get_positives(reduced_traj, gt.to('cpu')) y = y.to(self.tgt_device) # Inference y_hat = self((x_img, x_state)) loss = F.cross_entropy(y_hat, y) # Log self.log('val_loss', loss.item(), on_epoch=True) return loss def configure_optimizers(self): """ Set the optimizer for the model """ # TODO: find best optimizer and parameters #return torch.optim.Adam(self.parameters(), lr=self.lr) return torch.optim.SGD(self.parameters(), lr=self.lr, momentum=self.momentum) # TODO: check if generated trajectory are expressed in the same frame of the agent def get_positives(trajectories, ground_truth) -> torch.Tensor: """ Get positive samples wrt the actual GT Parameters ---------- trajectories: the pre-generated set of trajectories ground_truth: the future trajectory for the agent Return ------ positive_traj: as defined in the original CoverNet paper, 'positive samples determined by the element in the trajectory set closest to the actual ground truth in minimum average of point-wise Euclidean distances' """ euclidean_dist = torch.stack([torch.pow(torch.sub(trajectories, gt), 2) for gt in ground_truth]).sum(dim=3).sqrt() mean_euclidean_dist = 
euclidean_dist.mean(dim=2) positive_traj = mean_euclidean_dist.argmin(dim=1) return positive_traj def prepare_trajectories(epsilon, download_link, directory) -> torch.Tensor: """ Function to download and extract trajectory sets for CoverNet Parameters ---------- epsilon: value (in meters) relative to the space coverage download_link: link from which to download trajectory sets directory: directory where to download trajectory sets Return ------ trajectories: tensor of the trajectory set for the specified epsilon """ # 1. Download and extract trajectories filename_zip = 'nuscenes-prediction-challenge-trajectory-sets.zip' filename = filename_zip[:-4] filename_dir = os.path.join(directory, filename) filename_zipdir = os.path.join(directory, filename_zip) if (not os.path.isdir(filename_dir) or any(e not in os.listdir(filename_dir) for e in ['epsilon_2.pkl', 'epsilon_4.pkl', 'epsilon_8.pkl'])): print("Downloading trajectories ...") os.makedirs(directory, exist_ok=True) urllib.request.urlretrieve(download_link, filename_zipdir) with zipfile.ZipFile(filename_zipdir, 'r') as archive: archive.extractall(directory) os.remove(filename_zipdir) # 2. Generate trajectories traj_set_path = os.path.join(filename_dir, 'epsilon_' + str(epsilon) + '.pkl') trajectories = pickle.load(open(traj_set_path, 'rb')) return torch.Tensor(trajectories)
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
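The key step in `get_positives` above is worth seeing on a toy example: CoverNet turns trajectory prediction into classification over a fixed trajectory set, and the positive label is the index of the set element closest to the ground truth in mean pointwise Euclidean distance. A tiny sketch with made-up numbers:

```python
import torch

trajectory_set = torch.tensor([        # 3 candidate trajectories, 2 timesteps, (x, y)
    [[0.0, 0.0], [0.0, 1.0]],          # going straight
    [[0.0, 0.0], [1.0, 1.0]],          # veering right
    [[0.0, 0.0], [-1.0, 1.0]],         # veering left
])
ground_truth = torch.tensor([[0.0, 0.0], [0.9, 1.1]])

# pointwise Euclidean distance per timestep, averaged over the trajectory
dist = (trajectory_set - ground_truth).pow(2).sum(dim=2).sqrt().mean(dim=1)
positive_index = dist.argmin().item()   # -> 1, the "veering right" candidate
```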
Utilities **Metrics**
def compute_metrics(predictions: List[data_classes.Prediction], ground_truths: List[np.ndarray], helper, aggregators=AGGREGATORS) -> Dict[str, Any]:#Dict[str, Dict[str, List[float]]]: """ Utility eval function to compute dataset metrics Parameters ---------- predictions: list of predictions made by the model (in Prediction class format) ground_truths: the real trajectories of the agent (SHAPE -> [len(dataset), n_samples, state_dim]) helper: nuScenes dataset helper aggregators: functions to aggregate metrics (e.g. mean) Return ------ metric_output: dictionary of the computed metrics: - minADE_5: The average of pointwise L2 distances between the predicted trajectory and ground truth over the 5 most likely predictions. - minADE_10: The average of pointwise L2 distances between the predicted trajectory and ground truth over the 10 most likely predictions. - missRateTop_2_5: Proportion of misses relative to the 5 most likely trajectories over all agents - missRateTop_2_10: Proportion of misses relative to the 10 most likely trajectories over all agents - minFDE_1: The final displacement error (FDE) is the L2 distance between the final points of the prediction and ground truth, computed on the most likely trajectory - offRoadRate: the fraction of trajectories that are not entirely contained in the drivable area of the map. """ # 1. Define metrics print("\t - Metrics definition ...") aggregators = \ [metrics.deserialize_aggregator(agg) for agg in aggregators] min_ade = metrics.MinADEK([5, 10], aggregators) miss_rate = metrics.MissRateTopK([5, 10], aggregators) min_fde = metrics.MinFDEK([1], aggregators) if helper is not None: # FIXME: instantiating offRoadRate class makes RAM explode #offRoadRate = metrics.OffRoadRate(self.helper, self.aggregators) pass else: offRoadRate = None # 2. Compute metrics metric_list = [] print("\t - Effective metrics computation ...") for p, pred in enumerate(tqdm(predictions)): # TODO: check for argument shapes minADE_5 = min_ade(ground_truths[p], pred)[0][0] minADE_10 = min_ade(ground_truths[p], pred)[0][1] missRateTop_2_5 = miss_rate(ground_truths[p], pred)[0][0] missRateTop_2_10 = miss_rate(ground_truths[p], pred)[0][1] minFDE_1 = min_fde(ground_truths[p], pred) #offRoadRate = offRoadRate(ground_truth[i], prediction) metric = {'minADE_5': minADE_5, 'missRateTop_2_5': missRateTop_2_5, 'minADE_10': minADE_10, 'missRateTop_2_10': missRateTop_2_10, 'minFDE_1': minFDE_1}#, 'offRoadRate': offRoadRate} metric_list.append(metric) # 3. Aggregate print("\t - Metrics aggregation ...") aggregations: Dict[str, Dict[str, List[float]]] = defaultdict(dict) metric_names = list(metric_list[0].keys()) metrics_dict = {name: np.array([metric_list[i][name] for i in range(len(metric_list))]) for name in metric_names} for metric in metric_names: for agg in aggregators: aggregations[metric][agg.name] = agg(metrics_dict[metric]) return aggregations
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
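For reference, here is what the displacement errors reduce to for a single trajectory (a minimal numpy sketch with illustrative numbers; the nuScenes metrics above apply the same idea to the k most likely modes):

```python
import numpy as np

pred = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])   # predicted (x, y) per timestep
gt   = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])   # ground truth (x, y) per timestep

pointwise_l2 = np.linalg.norm(pred - gt, axis=1)   # distance at every timestep
ade = pointwise_l2.mean()    # Average Displacement Error  -> 1.0
fde = pointwise_l2[-1]       # Final Displacement Error    -> 2.0
```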
**Plotting**
def plot_train_data(train_iterations, val_iterations, epoches, train_losses, val_losses): """ Plot a graph with the training trend Parameters ---------- train_iterations: number of iterations for each epoch [train] val_iterations: number of iterations for each epoch [val] epoches: actual epoch number (starting from 1) train_losses: array of loss values [train] val_losses: array of loss values [val] """ # Data preparation train_iterations_list = list(range(epoches*(train_iterations))) val_iterations_list = list(range(epoches*(val_iterations))) epoches_list = list(range(epoches)) # Adjust validation array dimension val_error = len(val_losses) - len(val_iterations_list) if val_error > 0: val_losses = val_losses[:-val_error] # Per-iteration plot fig = plt.figure() plt.title('Per-iteration Loss [train]') plt.xlabel('Iterations') plt.ylabel('Value') l1, = plt.plot(train_iterations_list, train_losses, c='blue') plt.legend(handles=[l1], labels=['Train loss'], loc='best') plt.show() fig = plt.figure() plt.title('Per-iteration Loss [val]') plt.xlabel('Iterations') plt.ylabel('Value') l2, = plt.plot(val_iterations_list, val_losses, c='red') plt.legend(handles=[l2], labels=['Validation loss'], loc='best') plt.show() # Per-epoch plot fig = plt.figure() plt.title('Per-epoch Loss') plt.xlabel('Epoches') plt.ylabel('Value') train_avg_losses = [np.array(train_losses[i:i+train_iterations]).mean() for i in range(0, len(train_losses), train_iterations)] val_avg_losses = [np.array(val_losses[i:i+val_iterations]).mean() for i in range(0, len(val_losses), val_iterations)] l1, = plt.plot(epoches_list, train_avg_losses, c='blue') l2, = plt.plot(epoches_list, val_avg_losses, c='red') plt.legend(handles=[l1, l2], labels=['Train loss', 'Validation loss'], loc='best') plt.show() def plot_agent_future(raster, future, agent_pos=(0,0), reference_frame='local', color='green'): """ Plot agent's future trajectory Parameters ---------- raster: raster map tensor (image) future: future trajectory of the agent (predicted or GT) [x,y] agent_pos: position of the agent (needed in case of local coords) reference_frame: frame to which future coordinates refer color: color of the plotted trajectory """ # Show raster map plt.imshow(raster.permute(1, 2, 0)) # Show trajectory x, y = [], [] for i in range(len(future)): point = (agent_pos[0], agent_pos[1]) if i == 0 else future[i].numpy() if reference_frame == 'local' and i > 0: point = (point[0] + agent_pos[0], -point[1] + agent_pos[1]) x.append(point[0]) y.append(point[1]) plt.plot(x, y, color=color, markersize=10, linewidth=5) plt.show()
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
Main **Initialization**
# ---------- Dataset initialization ---------- # # Initialize nuScenes helper print("nuScenes Helper initialization ...") start_time = time.time() pl.seed_everything(PL_SEED) if ENVIRONMENT == 'local': if PREPARE_DATASET: nusc = NuScenes(version=DATASET_VERSION, dataroot=DATAROOT, verbose=True) with open(os.path.join(ROOT, 'nuscenes_checkpoint'+FILENAME_EXT), 'wb') as f: pickle.dump(nusc, f, protocol=pickle.HIGHEST_PROTOCOL) elif not 'nusc' in locals(): if HELPER_NEEDED: with open(os.path.join(ROOT, 'nuscenes_checkpoint'+FILENAME_EXT), 'rb') as f: nusc = pickle.load(f) elif ENVIRONMENT == 'colab': if PREPARE_DATASET or HELPER_NEEDED: nusc = NuScenes(version=DATASET_VERSION, dataroot=DATAROOT, verbose=True) helper = PredictHelper(nusc) if HELPER_NEEDED else None print("nuScenes Helper initialization done in %f s\n" % (time.time() - start_time)) # Initialize dataset and data module print("\nDataset and Data Module initialization ...") start_time = time.time() train_dataset = nuScenesDataset(helper, preprocessed=PREPROCESSED, split='train') val_dataset = nuScenesDataset(helper, preprocessed=PREPROCESSED, split='val') trainval_dm = nuScenesDataModule(train_dataset, val_dataset, num_workers=NUM_WORKERS) trainval_dm.setup(stage='fit') print("Dataset and Data Module initialization done in %f s\n" % (time.time() - start_time)) # ---------- Network initialization ---------- # print("\nCoverNet model initialization ...") start_time = time.time() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = CoverNet(K_SIZE, EPSILON, TRAJ_LINK, TRAJ_DIR, device) print("CoverNet model intialization done in %f s\n" % (time.time() - start_time)) # ---------- Training initialization ---------- # print("\nTrainer initialization ...") start_time = time.time() GPUS = min(1, torch.cuda.device_count()) checkpoint_callback = ModelCheckpoint(dirpath=CHECKPOINT_DIR, save_top_k=TOP_K_SAVE, monitor=CHECKPOINT_MONITOR) trainer = pl.Trainer(callbacks=[checkpoint_callback], progress_bar_refresh_rate=PROGRESS_BAR_REFRESH_RATE, gpus=GPUS, max_epochs=TRAIN_EPOCHES) print("Trainer intialization done in %f s\n" % (time.time() - start_time))
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
**Training loop**
trainer.fit(model, trainval_dm)
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
**Testing**
# Dataloader initialization print("Loading test dataloader ...") trainval_dm.setup(stage='test') test_dataloader = trainval_dm.test_dataloader() test_generator = iter(test_dataloader) # Trained model initialization # TODO: istantiate kwargs for network in a better way print("\nCoverNet trained model initialization ...") checkpoint_name = 'epoch=19-step=80460.ckpt' net_args = {'K_size': K_SIZE, 'epsilon': EPSILON, 'traj_link': TRAJ_LINK, 'traj_dir': TRAJ_DIR, 'device': device} model = CoverNet.load_from_checkpoint(checkpoint_path=os.path.join(BEST_CHECKPOINT_DIR, checkpoint_name), map_location=None, hparams_file=None, strict=True, K_size=K_SIZE, epsilon=EPSILON, traj_link=TRAJ_LINK, traj_dir=TRAJ_DIR, device=device).to(device) model.eval() # ---------- CoverNet Metrics computation ---------- # # TODO: generalize metrics computation predictions = [] ground_truths = [] start = time.time() reduced_traj = model.trajectories[:, :model.traj_samples].numpy() print("\nCoverNet metrics computation ...") print("1 - Producing predictions ...") for i, token in enumerate(tqdm(val_dataset.tokens)): with torch.no_grad(): x_state, x_img, gt, _ = val_dataset[i] x_state = x_state.to(device) x_img = x_img.to(device) x_state = torch.unsqueeze(torch.flatten(x_state, 0, 1), 0) x_img = torch.unsqueeze(x_img, 0) pred_logits = model((x_img, x_state)) pred_probs = F.softmax(pred_logits, dim=1)[0] top_indices = pred_probs.argsort()[-MAX_PREDICTED_MODES:] cutted_probs = pred_probs[top_indices].cpu().numpy() cutted_traj = reduced_traj[top_indices.cpu()] i_t, s_t = token.split("_") ground_truths.append(gt.numpy()) predictions.append(data_classes.Prediction(i_t, s_t, cutted_traj, cutted_probs)) print("2 - Computing metrics ...") convernet_metrics = compute_metrics(predictions, ground_truths, helper) print("Metric computation done in %f s" % (time.time() - start)) convernet_metrics
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
Code Debugging **Training loop** (manual - debug only)
if DEBUG_MODE: # Dataset preparation train_dataloader = torch.utils.data.DataLoader(train_dataset, BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, drop_last=True) val_dataloader = torch.utils.data.DataLoader(val_dataset, BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS, drop_last=True) # Training preparation optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=MOMENTUM) model = model.to(device) # Plotting preparation train_loss_arr = [] val_loss_arr = [] train_iterations = len(train_dataset) // BATCH_SIZE val_iterations = len(val_dataset) // BATCH_SIZE # Training loop for i in range(TRAIN_EPOCHES): print("-------- Epoch %d --------" % i) model.train() # Training for j, data in enumerate(train_dataloader): # Data preparation x_state, x_img, gt, idx = data x_state = x_state.to(device) x_img = x_img.to(device) x_state = torch.flatten(x_state, 0, 1) with torch.no_grad(): reduced_traj = model.trajectories[:, :SAMPLES_PER_SECOND*TRAJ_HORIZON] y = get_positives(reduced_traj, gt) # Inference optimizer.zero_grad() traj_logits = model((x_img, x_state)) y = y.to(device) loss = F.cross_entropy(traj_logits, y) loss.backward() optimizer.step() # Logging loss_val = loss.item() train_loss_arr.append(loss_val) print("[%d] %d - train loss = %f" % (i, j, loss_val)) # Validation model.train(mode=False) for j, data in enumerate(val_dataloader): # Data preparation x_state, x_img, gt, idx = data x_state = x_state.to(device) x_img = x_img.to(device) x_state = torch.flatten(x_state, 0, 1) reduced_traj = model.trajectories[:, :SAMPLES_PER_SECOND*TRAJ_HORIZON] y = get_positives(reduced_traj, gt) # Inference traj_logits = model((x_img, x_state)) y = y.to(device) loss = F.cross_entropy(traj_logits, y) # Logging loss_val = loss.item() val_loss_arr.append(loss_val) print("[%d] %d - val loss = %f" % (i, j, loss_val)) # Plotting if (i+1) % PLOT_PERIOD == 0: plot_train_data(train_iterations, val_iterations, i+1, train_loss_arr, val_loss_arr) a = input("Press Enter to continue...") plt.close('all')
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
**Dataset debugging**
# Initialize nuScenes HELPER_NEEDED = True if ENVIRONMENT == 'local': if PREPARE_DATASET: nusc = NuScenes(version=DATASET_VERSION, dataroot=DATAROOT, verbose=True) with open(os.path.join(ROOT, 'nuscenes_checkpoint'+FILENAME_EXT), 'wb') as f: pickle.dump(nusc, f, protocol=pickle.HIGHEST_PROTOCOL) elif not 'nusc' in locals(): if HELPER_NEEDED: with open(os.path.join(ROOT, 'nuscenes_checkpoint'+FILENAME_EXT), 'rb') as f: nusc = pickle.load(f) elif ENVIRONMENT == 'colab': if PREPARE_DATASET or HELPER_NEEDED: nusc = NuScenes(version=DATASET_VERSION, dataroot=DATAROOT, verbose=True) helper = PredictHelper(nusc) dataset = nuScenesDataset(helper, preprocessed=PREPROCESSED) train_dataloader = torch.utils.data.DataLoader(dataset, BATCH_SIZE, True, num_workers=NUM_WORKERS) train_generator = iter(train_dataloader) # Useful to check ideal number of workers and batch size x = time.time() try: state, img, gt, idxs = next(train_generator) except StopIteration: train_generator = iter(train_dataloader) state, img, gt, idxs = next(train_generator) print(time.time() - x) state, img, gt, idx = dataset[np.random.randint(len(dataset))] plt.imshow(img.permute(1, 2, 0)) plt.show() print("State input size:", state.shape) print("Ground truth size:", gt.shape) instance_token, sample_token = dataset.tokens[idx].split("_") long_gt = torch.Tensor( dataset.helper.get_future_for_agent(instance_token, sample_token, seconds=100, in_agent_frame=True)) # TODO: check how to get agent position in the map plot_agent_future(img, long_gt, agent_pos=(250,400), reference_frame='local')
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
**Network debugging**
test_states, test_imgs, test_gts, _ = next(train_generator) test_states = torch.flatten(test_states, 0, 1) print(test_imgs.size()) print(test_states.size()) # Prediction model = CoverNet(K_SIZE, EPSILON, TRAJ_LINK, TRAJ_DIR, device='cuda:0') traj_logits = model((test_imgs, test_states)) # Output 5 and 10 most likely trajectories for this batch top_5_trajectories = model.trajectories[traj_logits.argsort(descending=True)[:5]] top_10_trajectories = model.trajectories[traj_logits.argsort(descending=True)[:10]]
_____no_output_____
MIT
Trajectory_Prediction.ipynb
Gianfranco-98/Trajectory-Prediction-PyTorch
OpenVisus Enabled Jupyter Notebook OpenViSUS: imports and utilities
%matplotlib notebook import os,sys import matplotlib.pyplot as plt import numpy as np from ipywidgets import * import OpenVisus as ov # Enable I/O component of OpenVisus ov.DbModule.attach() # function to plot the image data with matplotlib # optional parameters: colormap, existing plot to reuse (for more interactivity) def showData(data, cmap=None, plot=None): if len(data.shape)==3 and data.shape[0]==1: data=data[0,:,:] if len(data.shape)==3 and data.shape[1]==1: data=data[:,0,:] if len(data.shape)==3 and data.shape[2]==1: data=data[:,:,0] if(plot==None or cmap!=None): fig=plt.figure(figsize = (7,5)) plot = plt.imshow(data, origin='lower', cmap=cmap) plt.show() return plot else: plot.set_data(data) plt.show() return plot
_____no_output_____
MIT
jupyter/OpenVisus-Template.ipynb
ComputingElevatedLab/sciviscourse
Concurrent Computing Multiprocessing The Python 'multiprocessing' module allows the manipulation and synchronization of processes, and it offers both local and remote concurrency. A motivating example...
import time def calc_cuad(numeros): print('Calcula el cuadrado:') for n in numeros: time.sleep(0.2) print('cuadrado:', n*n) def calc_cubo(numeros): print('Calcula el cubo:') for n in numeros: time.sleep(0.2) print('cubo:', n*n*n) nums = range(10) t = time.time() calc_cuad(nums) calc_cubo(nums) print('Finaliza la ejecucion') print('Tiempo de ejecucion', time.time()-t)
Calcula el cuadrado: cuadrado: 0 cuadrado: 1 cuadrado: 4 cuadrado: 9 cuadrado: 16 cuadrado: 25 cuadrado: 36 cuadrado: 49 cuadrado: 64 cuadrado: 81 Calcula el cubo: cubo: 0 cubo: 1 cubo: 8 cubo: 27 cubo: 64 cubo: 125 cubo: 216 cubo: 343 cubo: 512 cubo: 729 Finaliza la ejecucion Tiempo de ejecucion 4.024327278137207
MIT
T2.ProcHilosPy/T2-MultiprocessingPython.ipynb
patoba/ComputacionConcurrente
A simple way to spawn processes in Python is to create a `Process` object and launch it by calling its `start()` method.
import multiprocessing as mp def tarea(nombre): print('Hola', nombre) for n in range(10000): n**(1/(n+1)) if __name__ == '__main__': # Esta condicion se interpreta como una verificacion de si este proceso es el principal p = mp.Process(target=tarea, args=('Saul', )) ## Ejecuta la funcion tarea con el los argumentos de args p.start() ## Ejecuta p el objeto multiprocess p.join() import multiprocessing as mp import time def calc_cuad(numeros): print('Calcula el cuadrado:') for n in numeros: time.sleep(0.2) print('cuadrado:', n*n) def calc_cubo(numeros): print('Calcula el cubo:') for n in numeros: time.sleep(0.2) print('cubo:', n*n*n) nums = range(10) t1 = time.time() p1 = mp.Process(target=calc_cuad, args=(nums,)) p2 = mp.Process(target=calc_cubo, args=(nums,)) p1.start() p2.start() p1.join() p2.join() print('Tiempo de ejecucion', time.time()-t1) print('Finaliza la ejecucion')
Calcula el cuadrado: Calcula el cubo: cuadrado: cubo:0 0 cuadrado: cubo:1 1 cuadrado:cubo: 48 cuadrado:cubo: 927 cuadrado: cubo:16 64 cuadrado: cubo:25 125 cuadrado: 36 cubo: 216 cuadrado: 49 cubo: 343 cuadrado: 64 cubo: 512 cuadrado: 81 cubo: 729 Tiempo de ejecucion 2.1406450271606445 Finaliza la ejecucion
MIT
T2.ProcHilosPy/T2-MultiprocessingPython.ipynb
patoba/ComputacionConcurrente
Homework Investigate in the documentation of the `Multiprocessing` module how it works and all of the methods or functions implemented in it. PID and PPID identifiers
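As a starting point for that homework, here is a small sketch (not exhaustive, purely illustrative) touching a few `Process` attributes and methods worth looking up, together with the PID/PPID identifiers used below:

import multiprocessing as mp
import os

def tarea():
    # each process can report its own PID and its parent's PID
    print('child PID:', os.getpid(), '- parent PID:', os.getppid())

if __name__ == '__main__':
    p = mp.Process(target=tarea, name='worker-1')
    p.start()
    print(p.name, p.pid, p.is_alive())   # name, pid and liveness of the child
    p.join()
    print('exit code:', p.exitcode)      # 0 if the child finished without errors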
import multiprocessing as mp import os print('Nombre del proceso:', __name__) print('Proceso padre:', os.getppid()) print('Proceso actual:', os.getpid()) import multiprocessing as mp import os def info(titulo): print(titulo) print('Nombre del proceso:', __name__) print('Proceso padre:', os.getppid()) print('Proceso actual:', os.getpid()) def f(nombre): info('Funcion f') print('Hola', nombre) print('------------') info('Inicio') p = mp.Process(target = f, args=('Valeriano',)) p.start() p.join()
Inicio Nombre del proceso: __main__ Proceso padre: 7448 Proceso actual: 8016 Funcion f Nombre del proceso: __main__ Proceso padre: 8016 Proceso actual: 9690 Hola Valeriano ------------
MIT
T2.ProcHilosPy/T2-MultiprocessingPython.ipynb
patoba/ComputacionConcurrente
Exercise Create 3 child processes, where:- The first multiplies 3 numbers (a,b,c)- The second adds (a,b,c)- The third computes (a+b)/c- All of them return the computed value, the name of each child process, and the PID of the parent process.
import multiprocessing as mp import os def info(titulo): print(titulo) print('Nombre del proceso:', __name__) print('Proceso actual:', os.getpid()) print('Proceso padre:', os.getppid()) def primero(a,b,c): info('a*b*c =') print(a*b*c) def segundo(a,b,c): info('a+b+c =') print(a+b+c) def tercero(a,b,c): info('(a+b)/c =') print((a+b)/c) info('Inicio') a = 3 b = 5 c = 1 p1 = mp.Process(target = primero, args=(a,b,c)) p2 = mp.Process(target = segundo, args=(a,b,c)) p3 = mp.Process(target = tercero, args=(a,b,c)) p1.start() p1.join() p2.start() p2.join() p3.start() p3.join() import time nums_res = [] def calc_cuad(numeros): global nums_res for n in numeros: print('Cuadrado:', n*n) nums_res.append(n*n) nums = range(10) t = time.time() p1 = mp.Process(target=calc_cuad, args = (nums,)) p1.start() p1.join() print('Tiempo de ejecucion: ', time.time()-t) print('Resultado del proceso:', nums_res) print('Finaliza ejecucion')
Cuadrado: 0 Cuadrado: 1 Cuadrado: 4 Cuadrado: 9 Cuadrado: 16 Cuadrado: 25 Cuadrado: 36 Cuadrado: 49 Cuadrado: 64 Cuadrado: 81 Tiempo de ejecucion: 0.04141974449157715 Resultado del proceso: [] Finaliza ejecucion
MIT
T2.ProcHilosPy/T2-MultiprocessingPython.ipynb
patoba/ComputacionConcurrente
2020-10-27 Names and termination of processes
import multiprocessing

multiprocessing.cpu_count()
_____no_output_____
MIT
T2.ProcHilosPy/T2-MultiprocessingPython.ipynb
patoba/ComputacionConcurrente
With the `cpu_count()` method we can find out how many CPU cores are available on the machine.
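For instance, a minimal sketch (the `work` function and its arguments are just placeholders) of using that count to size a pool of worker processes:

import multiprocessing as mp

def work(x):
    return x * x

if __name__ == '__main__':
    n_workers = mp.cpu_count()              # one worker per available core
    with mp.Pool(processes=n_workers) as pool:
        print(pool.map(work, range(10)))    # [0, 1, 4, ..., 81]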
import time def TareaHijo(): print("Proceso HIJO con PID: {}".format(multiprocessing.current_process().pid)) time.sleep(3) print("Fin del proceso hijo") def main(): print("Proceso Padre PID: {}".format(multiprocessing.current_process().pid)) myProcess = multiprocessing.Process(target=TareaHijo) # Define el objeto myProcess como el objeto que llam[a al proceso] myProcess.start() myProcess.join() # Se acostumbra usar la variable __name__ # para hacer la ejecuciรณn desde el progragrama # principal, puede omitirse en los notebooks if __name__ == '__main__': main()
Proceso Padre PID: 6703 Proceso HIJO con PID: 7714 Fin del proceso hijo
MIT
T2.ProcHilosPy/T2-MultiprocessingPython.ipynb
patoba/ComputacionConcurrente
It is possible to assign a name to a child process that has been created; the child process's name is set by means of the `name` argument.
def myProcess(): print("Proceso con nombre: {}".format(multiprocessing.current_process().name)) ## Metodo current process para obtener el nombre del proceso def main(): childProcess = multiprocessing.Process(target=myProcess, name='Proceso-LCD-cc') childProcess.start() childProcess.join() main() from multiprocessing import Process, current_process import time def f1(): pname = current_process().name print('Starting process %s...' % pname) time.sleep(2) print('Exiting process %s...' % pname) def f2(): pname = current_process().name print('Starting process %s...' % pname) time.sleep(4) print('Exiting process %s...' % pname) if __name__ == '__main__': p1 = Process(name='Worker 1', target=f1) p2 = Process(name='Worker 2', target=f2) p3 = Process(target=f1) p1.start() p2.start() p3.start() p1.join() p2.join() p3.join() def TareaProceso(): proceso_actual = multiprocessing.current_process() print("Procesos Hijo PID: {}".format(proceso_actual.pid)) time.sleep(20) proceso_actual = multiprocessing.current_process() print("Procesos Padre PID: {}".format(proceso_actual.pid)) miProceso = multiprocessing.Process(target=TareaProceso) miProceso.start() print("Proceso Padre ha terminado, termina el proceso main") print("Terminando el proceso Hijo...") time.sleep(1) miProceso.terminate() #miProceso.join() print("Proceso Hijo ha terminado exitosamente") multiprocessing.cpu_count()
_____no_output_____
MIT
T2.ProcHilosPy/T2-MultiprocessingPython.ipynb
patoba/ComputacionConcurrente
Exercise:1. We will create 3 processes whose names and code are defined as funP1, funP2, funP3. Each child will write its name, its PID and the parent's PID, and perform a calculation on three values a, b and c.2. Process 1 computes a*b + c, process 2 computes a*b*c, and process 3 computes (a*b)/c.3. Create a mechanism to terminate one of the processes at random.
import multiprocessing as mp import os import random def info(titulo): pname = current_process().name print('Nombre del proceso: %s...' % pname) print(titulo) print('Proceso actual:', os.getpid()) print('Proceso padre:', os.getppid()) def funP1(a,b,c): info('a*b + c =') print(a*b + c) def funP2(a,b,c): info('a*b*c =') print(a*b*c) def funP3(a,b,c): info('(a+b)/c =') print((a+b)/c) a = 3 b = 5 c = 1 p1 = mp.Process(name = 'funP1',target = funP1, args=(a,b,c)) p2 = mp.Process(name = 'funP2',target = funP2, args=(a,b,c)) p3 = mp.Process(name = 'funP3',target = funP3, args=(a,b,c)) p1.start() p2.start() p3.start() i = random.randint(1,3) if i==1: p1.terminate() elif i == 2: p2.terminate() elif i == 3: p3.terminate() p1.join() p2.join() p3.join()
Nombre del proceso: funP2... a*b*c = Proceso actual: 15004 Proceso padre: 6703 15 Nombre del proceso: funP1... a*b + c = Proceso actual: 15003 Proceso padre: 6703 16
MIT
T2.ProcHilosPy/T2-MultiprocessingPython.ipynb
patoba/ComputacionConcurrente
However, sometimes it is necessary to create processes that run silently in the *background* and do not block the main process when it finishes. This is commonly used when the main process cannot be sure whether it should interrupt a child after waiting a certain amount of time, or may finish without the child having terminated, without affecting the final result. These processes are called **daemon processes**. A process of this kind is created through the `daemon` attribute of the `Process` class. The default value of the `daemon` attribute is False, so it is set to `True` to create a daemon process.
from multiprocessing import Process, current_process import time def f1(): p = current_process() print('Starting process %s, ID %s....' %(p.name, p.pid)) time.sleep(8) print('Starting process %s, ID, %s....' %(p.name, p.pid)) def f2(): p = current_process() print('Starting process %s, ID %s....' %(p.name, p.pid)) time.sleep(2) print('Starting process %s, ID %s....' %(p.name, p.pid)) if __name__ == '__main__': p1 = Process(name='Worker 1', target=f1) p1.daemon = True p2 = Process(name='Worker 2', target=f2) p1.start() time.sleep(1) p2.start() # p1.join() # p2.join() # p3.join()
Starting process Worker 1, ID 15700.... Starting process Worker 2, ID 15705.... Starting process Worker 2, ID 15705.... Starting process Worker 1, ID, 15700....
MIT
T2.ProcHilosPy/T2-MultiprocessingPython.ipynb
patoba/ComputacionConcurrente
Sentiment Classification & How To "Frame Problems" for a Neural Networkby Andrew Trask- **Twitter**: @iamtrask- **Blog**: http://iamtrask.github.io What You Should Already Know- neural networks, forward and back-propagation- stochastic gradient descent- mean squared error- and train/test splits Where to Get Help if You Need it- Re-watch previous Udacity Lectures- Leverage the recommended Course Reading Material - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) (Check inside your classroom for a discount code)- Shoot me a tweet @iamtrask Tutorial Outline:- Intro: The Importance of "Framing a Problem" (this lesson)- [Curate a Dataset](lesson_1)- [Developing a "Predictive Theory"](lesson_2)- [**PROJECT 1**: Quick Theory Validation](project_1)- [Transforming Text to Numbers](lesson_3)- [**PROJECT 2**: Creating the Input/Output Data](project_2)- Putting it all together in a Neural Network (video only - nothing in notebook)- [**PROJECT 3**: Building our Neural Network](project_3)- [Understanding Neural Noise](lesson_4)- [**PROJECT 4**: Making Learning Faster by Reducing Noise](project_4)- [Analyzing Inefficiencies in our Network](lesson_5)- [**PROJECT 5**: Making our Network Train and Run Faster](project_5)- [Further Noise Reduction](lesson_6)- [**PROJECT 6**: Reducing Noise by Strategically Reducing the Vocabulary](project_6)- [Analysis: What's going on in the weights?](lesson_7) Lesson: Curate a DatasetThe cells from here until Project 1 include code Andrew shows in the videos leading up to mini project 1. We've included them so you can run the code along with the videos without having to type in everything.
def pretty_print_review_and_label(i):
    print(labels[i] + "\t:\t" + reviews[i][:80] + "...")

g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()

g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
**Note:** The data in `reviews.txt` we're using has already been preprocessed a bit and contains only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like `The`, `the`, and `THE`, all the same way.
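That extra step would amount to something like this (shown only for illustration; it isn't needed for the provided data):

# normalize case so 'The', 'the' and 'THE' are counted as the same word
reviews = [review.lower() for review in reviews]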
len(reviews)

reviews[0]

labels[0]
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Lesson: Develop a Predictive Theory
print("labels.txt \t : \t reviews.txt\n") pretty_print_review_and_label(2137) pretty_print_review_and_label(12816) pretty_print_review_and_label(6267) pretty_print_review_and_label(21934) pretty_print_review_and_label(5297) pretty_print_review_and_label(4998)
labels.txt : reviews.txt NEGATIVE : this movie is terrible but it has some good effects . ... POSITIVE : adrian pasdar is excellent is this film . he makes a fascinating woman . ... NEGATIVE : comment this movie is impossible . is terrible very improbable bad interpretat... POSITIVE : excellent episode movie ala pulp fiction . days suicides . it doesnt get more... NEGATIVE : if you haven t seen this it s terrible . it is pure trash . i saw this about ... POSITIVE : this schiffer guy is a real genius the movie is of excellent quality and both e...
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Project 1: Quick Theory ValidationThere are multiple ways to implement these projects, but in order to get your code closer to what Andrew shows in his solutions, we've provided some hints and starter code throughout this notebook.You'll find the [Counter](https://docs.python.org/2/library/collections.htmlcollections.Counter) class to be useful in this exercise, as well as the [numpy](https://docs.scipy.org/doc/numpy/reference/) library.
from collections import Counter
import numpy as np
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
We'll create three `Counter` objects, one for words from postive reviews, one for words from negative reviews, and one for all the words.
# Create three Counter objects to store positive, negative and total counts
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
**TODO:** Examine all the reviews. For each word in a positive review, increase the count for that word in both your positive counter and the total words counter; likewise, for each word in a negative review, increase the count for that word in both your negative counter and the total words counter.**Note:** Throughout these projects, you should use `split(' ')` to divide a piece of text (such as a review) into individual words. If you use `split()` instead, you'll get slightly different results than what the videos and solutions show.
for i in range(len(reviews)):
    if(labels[i] == 'POSITIVE'):
        for word in reviews[i].split(" "):
            positive_counts[word] += 1
            total_counts[word] += 1
    else:
        for word in reviews[i].split(" "):
            negative_counts[word] += 1
            total_counts[word] += 1
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used.
# Examine the counts of the most common words in positive reviews
positive_counts.most_common()

# Examine the counts of the most common words in negative reviews
negative_counts.most_common()
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
As you can see, common words like "the" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the **ratios** of word usage between positive and negative reviews.**TODO:** Check all the words you've seen and calculate the ratio of positive to negative uses and store that ratio in `pos_neg_ratios`. >Hint: the positive-to-negative ratio for a given word can be calculated with `positive_counts[word] / float(negative_counts[word]+1)`. Notice the `+1` in the denominator, which ensures we don't divide by zero for words that are only seen in positive reviews.
# Create Counter object to store positive/negative ratios
pos_neg_ratios = Counter()

# TODO: Calculate the ratios of positive and negative uses of the most common words
# Consider words to be "common" if they've been used at least 100 times
# for word, count in total_counts.most_common():
#     if count > 100:
#         neg_ratio = positive_counts[word] / float(negative_counts[word] + 1)
#         pos_neg_ratios[word] = neg_ratio

for term,count in total_counts.most_common():
    if(count > 100):
        pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
        pos_neg_ratios[term] = pos_neg_ratio
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Examine the ratios you've calculated for a few words:
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"])) print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"])) print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
Pos-to-neg ratio for 'the' = 1.0607993145235326 Pos-to-neg ratio for 'amazing' = 4.022813688212928 Pos-to-neg ratio for 'terrible' = 0.17744252873563218
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Looking closely at the values you just calculated, we see the following:* Words that you would expect to see more often in positive reviews, like "amazing", have a ratio greater than 1. The more skewed a word is toward positive, the farther from 1 its positive-to-negative ratio will be.* Words that you would expect to see more often in negative reviews, like "terrible", have positive values that are less than 1. The more skewed a word is toward negative, the closer to zero its positive-to-negative ratio will be.* Neutral words, which don't really convey any sentiment because you would expect to see them in all sorts of reviews, like "the", have values very close to 1. A perfectly neutral word, one that was used in exactly the same number of positive reviews as negative reviews, would be almost exactly 1. The `+1` we suggested you add to the denominator slightly biases words toward negative, but it won't matter because it will be a tiny bias and later we'll be ignoring words that are too close to neutral anyway. Ok, the ratios tell us which words are used more often in positive or negative reviews, but the specific values we've calculated are a bit difficult to work with. A very positive word like "amazing" has a value above 4, whereas a very negative word like "terrible" has a value around 0.18. Those values aren't easy to compare for a couple of reasons:* Right now, 1 is considered neutral, but the absolute value of the positive-to-negative ratios of very positive words is larger than the absolute value of the ratios for the very negative words. So there is no way to directly compare two numbers and see if one word conveys the same magnitude of positive sentiment as another word conveys negative sentiment. So we should center all the values around neutral so the absolute value from neutral of the positive-to-negative ratio for a word would indicate how much sentiment (positive or negative) that word conveys.* When comparing absolute values it's easier to do that around zero than one. To fix these issues, we'll convert all of our ratios to new values using logarithms.**TODO:** Go through all the ratios you calculated and convert them to logarithms. (i.e. use `np.log(ratio)`) In the end, extremely positive and extremely negative words will have positive-to-negative ratios with similar magnitudes but opposite signs.
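A quick check of the symmetry this buys us, using `np.log` on a couple of made-up ratios:

import numpy as np

# a word 4x more common in positive reviews and a word 4x more common in
# negative reviews end up the same distance from 0, with opposite signs
print(np.log(4.0))    # ~ 1.386
print(np.log(0.25))   # ~ -1.386
print(np.log(1.0))    # 0.0 -> a perfectly neutral word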
# TODO: Convert ratios to logs
for word, ratio in pos_neg_ratios.most_common():
    pos_neg_ratios[word] = np.log(ratio)
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Examine the new ratios you've calculated for the same words from before:
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"])) print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"])) print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
Pos-to-neg ratio for 'the' = 0.05902269426102881 Pos-to-neg ratio for 'amazing' = 1.3919815802404802 Pos-to-neg ratio for 'terrible' = -1.7291085042663878
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
If everything worked, now you should see neutral words with values close to zero. In this case, "the" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at "amazing"'s ratio - it's above `1`, showing it is clearly a word with positive sentiment. And "terrible" has a similar score, but in the opposite direction, so it's below `-1`. It's now clear that both of these words are associated with specific, opposing sentiments. Now run the following cells to see more ratios. The first cell displays all the words, ordered by how associated they are with positive reviews. (Your notebook will most likely truncate the output so you won't actually see *all* the words in the list.) The second cell displays the 30 words most associated with negative reviews by reversing the order of the first list and then looking at the first 30 words. (If you want the second cell to display all the words, ordered by how associated they are with negative reviews, you could just write `reversed(pos_neg_ratios.most_common())`.) You should continue to see values similar to the earlier ones we checked: neutral words will be close to `0`, words will get more positive as their ratios approach and go above `1`, and words will get more negative as their ratios approach and go below `-1`. That's why we decided to use the logs instead of the raw ratios.
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()

# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]

# Note: Above is the code Andrew uses in his solution video,
# so we've included it here to avoid confusion.
# If you explore the documentation for the Counter class,
# you will see you could also find the 30 least common
# words like this: pos_neg_ratios.most_common()[:-31:-1]
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
End of Project 1. Watch the next video to see Andrew's solution, then continue on to the next lesson. Transforming Text into NumbersThe cells here include code Andrew shows in the next video. We've included it so you can run the code along with the video without having to type in everything.
from IPython.display import Image

review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')

review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Project 2: Creating the Input/Output Data**TODO:** Create a [set](https://docs.python.org/3/tutorial/datastructures.htmlsets) named `vocab` that contains every word in the vocabulary.
# TODO: Create set named "vocab" containing all of the words from all of the reviews
# (iterating over the Counter yields its keys, i.e. every unique word seen)
vocab = list(total_counts)
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following cell to check your vocabulary size. If everything worked correctly, it should print **74074**
vocab_size = len(vocab)
print(vocab_size)
74074
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. `layer_0` is the input layer, `layer_1` is a hidden layer, and `layer_2` is the output layer.
from IPython.display import Image
Image(filename='sentiment_network_2.png')
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
**TODO:** Create a numpy array called `layer_0` and initialize it to all zeros. You will find the [zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html) function particularly helpful here. Be sure you create `layer_0` as a 2-dimensional matrix with 1 row and `vocab_size` columns.
# TODO: Create layer_0 matrix with dimensions 1 by vocab_size, initially filled with zeros
layer_0 = np.zeros((1, vocab_size))
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following cell. It should display `(1, 74074)`
layer_0.shape

from IPython.display import Image
Image(filename='sentiment_network.png')
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
`layer_0` contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.
# Create a dictionary of words in the vocabulary mapped to index positions
# (to be used in layer_0)
word2index = {}
for i, word in enumerate(vocab):
    word2index[word] = i

# display the map of words to indices
word2index
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
**TODO:** Complete the implementation of `update_input_layer`. It should count how many times each word is used in the given review, and then store those counts at the appropriate indices inside `layer_0`.
def update_input_layer(review):
    """ Modify the global layer_0 to represent the vector form of review.
    The element at a given index of layer_0 should represent
    how many times the given word occurs in the review.
    Args:
        review(string) - the string of the review
    Returns:
        None
    """
    global layer_0
    # clear out previous state by resetting the layer to be all 0s
    layer_0 *= 0

    # TODO: count how many times each word is used in the given review and store the results in layer_0
    # print(total_counts.most_common())
    for word in review.split(" "):
        layer_0[0][word2index[word]] += 1
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in `layer_0`.
update_input_layer(reviews[0])
layer_0
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
**TODO:** Complete the implementation of `get_target_for_labels`. It should return `0` or `1`, depending on whether the given label is `NEGATIVE` or `POSITIVE`, respectively.
def get_target_for_label(label):
    """Convert a label to `0` or `1`.
    Args:
        label(string) - Either "POSITIVE" or "NEGATIVE".
    Returns:
        `0` or `1`.
    """
    # TODO: Your code here
    if label == 'POSITIVE':
        return 1
    return 0
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following two cells. They should print out`'POSITIVE'` and `1`, respectively.
labels[0]

get_target_for_label(labels[0])
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following two cells. They should print out `'NEGATIVE'` and `0`, respectively.
labels[1]

get_target_for_label(labels[1])
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
End of Project 2. Watch the next video to see Andrew's solution, then continue on to the next lesson. Project 3: Building a Neural Network **TODO:** We've included the framework of a class called `SentimentNetwork`. Implement all of the items marked `TODO` in the code. These include doing the following:- Create a basic neural network much like the networks you've seen in earlier lessons and in Project 1, with an input layer, a hidden layer, and an output layer. - Do **not** add a non-linearity in the hidden layer. That is, do not use an activation function when calculating the hidden layer outputs.- Re-use the code from earlier in this notebook to create the training data (see `TODO`s in the code)- Implement the `pre_process_data` function to create the vocabulary for our training data generating functions- Ensure `train` trains over the entire corpus Where to Get Help if You Need it- Re-watch earlier Udacity lectures- Chapters 3-5 - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) - (Check inside your classroom for a discount code)
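Before diving into the full class below, the forward pass described above reduces to something like this toy-sized sketch (the shapes and weights here are made up purely for illustration):

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

vocab_size, hidden_nodes = 6, 3
layer_0 = np.zeros((1, vocab_size))
layer_0[0, [1, 4]] = 1                           # pretend two vocabulary words were seen
weights_0_1 = np.random.normal(0.0, 1.0, (vocab_size, hidden_nodes))
weights_1_2 = np.random.normal(0.0, 1.0, (hidden_nodes, 1))

layer_1 = layer_0.dot(weights_0_1)               # hidden layer: no activation function
layer_2 = sigmoid(layer_1.dot(weights_1_2))      # output layer: sigmoid
print(layer_2)                                   # a single value between 0 and 1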
import time import sys import numpy as np # Encapsulate our neural network in a class class SentimentNetwork: def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.001): """Create a SentimenNetwork with the given settings Args: reviews(list) - List of reviews used for training labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews hidden_nodes(int) - Number of nodes to create in the hidden layer learning_rate(float) - Learning rate to use while training """ # Assign a seed to our random number generator to ensure we get # reproducable results during development np.random.seed(1) # process the reviews and their associated labels so that everything # is ready for training self.pre_process_data(reviews, labels) # Build the network to have the number of hidden nodes and the learning rate that # were passed into this initializer. Make the same number of input nodes as # there are vocabulary words and create a single output node. self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate) def pre_process_data(self, reviews, labels): # populate review_vocab with all of the words in the given reviews review_vocab = set() for review in reviews: for word in review.split(" "): review_vocab.add(word) # Convert the vocabulary set to a list so we can access words via indices self.review_vocab = list(review_vocab) # populate label_vocab with all of the words in the given labels. label_vocab = set() for label in labels: label_vocab.add(label) # Convert the label vocabulary set to a list so we can access labels via indices self.label_vocab = list(label_vocab) # Store the sizes of the review and label vocabularies. self.review_vocab_size = len(self.review_vocab) self.label_vocab_size = len(self.label_vocab) # Create a dictionary of words in the vocabulary mapped to index positions self.word2index = {} for i, word in enumerate(self.review_vocab): self.word2index[word] = i # Create a dictionary of labels mapped to index positions self.label2index = {} for i, label in enumerate(self.label_vocab): self.label2index[label] = i def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Store the number of nodes in input, hidden, and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Store the learning rate self.learning_rate = learning_rate # Initialize weights self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes)) # These are the weights between the hidden layer and the output layer. self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) # The input layer, a two-dimensional matrix with shape 1 x input_nodes self.layer_0 = np.zeros((1,input_nodes)) def update_input_layer(self,review): # clear out previous state, reset the layer to be all 0s self.layer_0 *= 0 for word in review.split(" "): # NOTE: This if-check was not in the version of this method created in Project 2, # and it appears in Andrew's Project 3 solution without explanation. # It simply ensures the word is actually a key in word2index before # accessing it, which is important because accessing an invalid key # with raise an exception in Python. This allows us to ignore unknown # words encountered in new reviews. 
if(word in self.word2index.keys()): self.layer_0[0][self.word2index[word]] += 1 def get_target_for_label(self,label): if(label == 'POSITIVE'): return 1 else: return 0 def sigmoid(self,x): return 1 / (1 + np.exp(-x)) def sigmoid_output_2_derivative(self,output): return output * (1 - output) def train(self, training_reviews, training_labels): # make sure out we have a matching number of reviews and labels assert(len(training_reviews) == len(training_labels)) # Keep track of correct predictions to display accuracy during training correct_so_far = 0 # Remember when we started for printing time statistics start = time.time() # loop through all the given reviews and run a forward and backward pass, # updating weights for every item for i in range(len(training_reviews)): review = training_reviews[i] label = training_labels[i] #### Implement the forward pass here #### ### Forward pass ### # Input Layer self.update_input_layer(review) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output. layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2) # Backpropagated error layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error # Update the weights self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # TODO: Implement the back propagation pass here. # That means calculate the error for the forward pass's prediction # and update the weights in the network according to their # contributions toward the error, as calculated via the # gradient descent and back propagation algorithms you # learned in class. # TODO: Keep track of correct predictions. To determine if the prediction was # correct, check that the absolute value of the output error # is less than 0.5. If so, add one to the correct_so_far count. if(layer_2 >= 0.5 and label == 'POSITIVE'): correct_so_far += 1 elif(layer_2 < 0.5 and label == 'NEGATIVE'): correct_so_far += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the training process. elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \ + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%") if(i % 2500 == 0): print("") def test(self, testing_reviews, testing_labels): """ Attempts to predict the labels for the given testing_reviews, and uses the test_labels to calculate the accuracy of those predictions. """ # keep track of how many correct predictions we make correct = 0 # we'll time how many predictions per second we make start = time.time() # Loop through each of the given reviews and call run to predict # its label. for i in range(len(testing_reviews)): pred = self.run(testing_reviews[i]) # print(layer_2.shape) if(pred == testing_labels[i]): correct += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the prediction process. 
elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \ + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%") def run(self, review): """ Returns a POSITIVE or NEGATIVE prediction for the given review. """ # Run a forward pass through the network, like in the "train" function. # Input Layer self.update_input_layer(review.lower()) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer; # return NEGATIVE for other values if(layer_2 >= 0.5): return "POSITIVE" else: return "NEGATIVE"
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following cell to create a `SentimentNetwork` that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of `0.1`.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set). **We have not trained the model yet, so the results should be about 50% as it will just be guessing and there are only two possible values to choose from.**
mlp.test(reviews[-1000:],labels[-1000:])
Progress:48.8% Speed(reviews/sec):800.1 #Correct:245 #Tested:489 Testing Accuracy:50.1%Progress:99.9% Speed(reviews/sec):777.5 #Correct:500 #Tested:1000 Testing Accuracy:50.0%
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing.
mlp.train(reviews[:-1000],labels[:-1000])
Progress:0.0% Speed(reviews/sec):0.0 #Correct:1 #Trained:1 Training Accuracy:100.% Progress:10.4% Speed(reviews/sec):246.6 #Correct:1251 #Trained:2501 Training Accuracy:50.0% Progress:11.4% Speed(reviews/sec):247.1 #Correct:1369 #Trained:2737 Training Accuracy:50.0%
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
That most likely didn't train very well. Part of the reason may be because the learning rate is too high. Run the following cell to recreate the network with a smaller learning rate, `0.01`, and then train the new network.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
Progress:0.0% Speed(reviews/sec):0.0 #Correct:1 #Trained:1 Training Accuracy:100.% Progress:9.72% Speed(reviews/sec):239.4 #Correct:1165 #Trained:2334 Training Accuracy:49.9%
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, `0.001`, and then train the new network.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
mlp.train(reviews[:-1000],labels[:-1000])
Progress:0.0% Speed(reviews/sec):0.0 #Correct:1 #Trained:1 Training Accuracy:100.% Progress:10.4% Speed(reviews/sec):249.0 #Correct:1267 #Trained:2501 Training Accuracy:50.6% Progress:20.8% Speed(reviews/sec):248.8 #Correct:2655 #Trained:5001 Training Accuracy:53.0% Progress:31.2% Speed(reviews/sec):249.5 #Correct:4087 #Trained:7501 Training Accuracy:54.4% Progress:41.6% Speed(reviews/sec):250.9 #Correct:5535 #Trained:10001 Training Accuracy:55.3% Progress:49.3% Speed(reviews/sec):248.1 #Correct:6674 #Trained:11835 Training Accuracy:56.3%
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
With a learning rate of `0.001`, the network should finally have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson. End of Project 3. Watch the next video to see Andrew's solution, then continue on to the next lesson. Understanding Neural Noise The following cells include the code Andrew shows in the next video. We've included it so you can run the code along with the video without having to type in everything.
from IPython.display import Image
Image(filename='sentiment_network.png')

def update_input_layer(review):
    global layer_0
    # clear out previous state, reset the layer to be all 0s
    layer_0 *= 0
    for word in review.split(" "):
        layer_0[0][word2index[word]] += 1

update_input_layer(reviews[0])
layer_0

review_counter = Counter()
for word in reviews[0].split(" "):
    review_counter[word] += 1

review_counter.most_common()
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Project 4: Reducing Noise in Our Input Data**TODO:** Attempt to reduce the noise in the input data like Andrew did in the previous video. Specifically, do the following:* Copy the `SentimentNetwork` class you created earlier into the following cell.* Modify `update_input_layer` so it does not count how many times each word is used, but rather just stores whether or not a word was used.
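A tiny self-contained illustration of that change, using a toy vocabulary rather than the real one: with a presence flag instead of a count, repeated words no longer inflate the input.

import numpy as np

word2index = {'the': 0, 'movie': 1, 'was': 2, 'terrible': 3}
layer_0 = np.zeros((1, len(word2index)))

for word in "the movie was terrible terrible terrible".split(" "):
    if word in word2index:
        layer_0[0][word2index[word]] = 1   # presence flag instead of a count

print(layer_0)   # [[1. 1. 1. 1.]] -- 'terrible' no longer dominates the input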
# TODO: -Copy the SentimentNetwork class from Projet 3 lesson # -Modify it to reduce noise, like in the video import time import sys import numpy as np # Encapsulate our neural network in a class class SentimentNetwork: def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.001): """Create a SentimenNetwork with the given settings Args: reviews(list) - List of reviews used for training labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews hidden_nodes(int) - Number of nodes to create in the hidden layer learning_rate(float) - Learning rate to use while training """ # Assign a seed to our random number generator to ensure we get # reproducable results during development np.random.seed(1) # process the reviews and their associated labels so that everything # is ready for training self.pre_process_data(reviews, labels) # Build the network to have the number of hidden nodes and the learning rate that # were passed into this initializer. Make the same number of input nodes as # there are vocabulary words and create a single output node. self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate) def pre_process_data(self, reviews, labels): # populate review_vocab with all of the words in the given reviews review_vocab = set() for review in reviews: for word in review.split(" "): review_vocab.add(word) # Convert the vocabulary set to a list so we can access words via indices self.review_vocab = list(review_vocab) # populate label_vocab with all of the words in the given labels. label_vocab = set() for label in labels: label_vocab.add(label) # Convert the label vocabulary set to a list so we can access labels via indices self.label_vocab = list(label_vocab) # Store the sizes of the review and label vocabularies. self.review_vocab_size = len(self.review_vocab) self.label_vocab_size = len(self.label_vocab) # Create a dictionary of words in the vocabulary mapped to index positions self.word2index = {} for i, word in enumerate(self.review_vocab): self.word2index[word] = i # Create a dictionary of labels mapped to index positions self.label2index = {} for i, label in enumerate(self.label_vocab): self.label2index[label] = i def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Store the number of nodes in input, hidden, and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Store the learning rate self.learning_rate = learning_rate # Initialize weights self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes)) # These are the weights between the hidden layer and the output layer. self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) # The input layer, a two-dimensional matrix with shape 1 x input_nodes self.layer_0 = np.zeros((1,input_nodes)) def update_input_layer(self,review): # clear out previous state, reset the layer to be all 0s self.layer_0 *= 0 for word in review.split(" "): # NOTE: This if-check was not in the version of this method created in Project 2, # and it appears in Andrew's Project 3 solution without explanation. # It simply ensures the word is actually a key in word2index before # accessing it, which is important because accessing an invalid key # with raise an exception in Python. This allows us to ignore unknown # words encountered in new reviews. 
if(word in self.word2index.keys()): self.layer_0[0][self.word2index[word]] = 1 def get_target_for_label(self,label): if(label == 'POSITIVE'): return 1 else: return 0 def sigmoid(self,x): return 1 / (1 + np.exp(-x)) def sigmoid_output_2_derivative(self,output): return output * (1 - output) def train(self, training_reviews, training_labels): # make sure out we have a matching number of reviews and labels assert(len(training_reviews) == len(training_labels)) # Keep track of correct predictions to display accuracy during training correct_so_far = 0 # Remember when we started for printing time statistics start = time.time() # loop through all the given reviews and run a forward and backward pass, # updating weights for every item for i in range(len(training_reviews)): review = training_reviews[i] label = training_labels[i] #### Implement the forward pass here #### ### Forward pass ### # Input Layer self.update_input_layer(review) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output. layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2) # Backpropagated error layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error # Update the weights self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # TODO: Implement the back propagation pass here. # That means calculate the error for the forward pass's prediction # and update the weights in the network according to their # contributions toward the error, as calculated via the # gradient descent and back propagation algorithms you # learned in class. # TODO: Keep track of correct predictions. To determine if the prediction was # correct, check that the absolute value of the output error # is less than 0.5. If so, add one to the correct_so_far count. if(layer_2 >= 0.5 and label == 'POSITIVE'): correct_so_far += 1 elif(layer_2 < 0.5 and label == 'NEGATIVE'): correct_so_far += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the training process. elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \ + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%") if(i % 2500 == 0): print("") def test(self, testing_reviews, testing_labels): """ Attempts to predict the labels for the given testing_reviews, and uses the test_labels to calculate the accuracy of those predictions. """ # keep track of how many correct predictions we make correct = 0 # we'll time how many predictions per second we make start = time.time() # Loop through each of the given reviews and call run to predict # its label. for i in range(len(testing_reviews)): pred = self.run(testing_reviews[i]) # print(layer_2.shape) if(pred == testing_labels[i]): correct += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the prediction process. 
elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \ + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%") def run(self, review): """ Returns a POSITIVE or NEGATIVE prediction for the given review. """ # Run a forward pass through the network, like in the "train" function. # Input Layer self.update_input_layer(review.lower()) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer; # return NEGATIVE for other values if(layer_2 >= 0.5): return "POSITIVE" else: return "NEGATIVE"
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of `0.1`.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
Progress:0.0% Speed(reviews/sec):0.0 #Correct:1 #Trained:1 Training Accuracy:100.% Progress:10.4% Speed(reviews/sec):83.64 #Correct:1838 #Trained:2501 Training Accuracy:73.4% Progress:20.8% Speed(reviews/sec):83.27 #Correct:3820 #Trained:5001 Training Accuracy:76.3% Progress:31.2% Speed(reviews/sec):83.09 #Correct:5911 #Trained:7501 Training Accuracy:78.8% Progress:41.6% Speed(reviews/sec):82.75 #Correct:8044 #Trained:10001 Training Accuracy:80.4% Progress:52.0% Speed(reviews/sec):82.98 #Correct:10188 #Trained:12501 Training Accuracy:81.4% Progress:62.5% Speed(reviews/sec):83.07 #Correct:12317 #Trained:15001 Training Accuracy:82.1% Progress:72.9% Speed(reviews/sec):83.04 #Correct:14438 #Trained:17501 Training Accuracy:82.4% Progress:83.3% Speed(reviews/sec):83.06 #Correct:16609 #Trained:20001 Training Accuracy:83.0% Progress:93.7% Speed(reviews/sec):82.96 #Correct:18788 #Trained:22501 Training Accuracy:83.4% Progress:99.9% Speed(reviews/sec):82.92 #Correct:20105 #Trained:24000 Training Accuracy:83.7%
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
That should have trained much better than the earlier attempts. It's still not wonderful, but it should have improved dramatically. Run the following cell to test your model with 1000 predictions.
mlp.test(reviews[-1000:],labels[-1000:])
Progress:99.9% Speed(reviews/sec):942.5 #Correct:849 #Tested:1000 Testing Accuracy:84.9%
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
End of Project 4. Andrew's solution was actually in the previous video, so rewatch that video if you had any problems with that project. Then continue on to the next lesson.

Analyzing Inefficiencies in our Network

The following cells include the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.
Image(filename='sentiment_network_sparse.png')

layer_0 = np.zeros(10)
layer_0

layer_0[4] = 1
layer_0[9] = 1
layer_0

weights_0_1 = np.random.randn(10,5)
layer_0.dot(weights_0_1)

indices = [4,9]
layer_1 = np.zeros(5)
for index in indices:
    layer_1 += (1 * weights_0_1[index])
layer_1

Image(filename='sentiment_network_sparse_2.png')

layer_1 = np.zeros(5)
for index in indices:
    layer_1 += (weights_0_1[index])
layer_1
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
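To make the inefficiency concrete, here is a small check that reuses the `layer_0`, `weights_0_1`, and `indices` variables from the cell above (a sketch, not part of Andrew's code): summing just the selected rows of the weight matrix gives the same hidden-layer values as the full vector-matrix product, while skipping every multiplication by zero.

import numpy as np

# full product: multiplies every weight, including the 8 rows whose input is 0
full = layer_0.dot(weights_0_1)

# sparse version: only touches the rows for the indices that are "on"
sparse = np.zeros(5)
for index in indices:
    sparse += weights_0_1[index]

print(np.allclose(full, sparse))   # expected: True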
Project 5: Making our Network More Efficient

**TODO:** Make the `SentimentNetwork` class more efficient by eliminating unnecessary multiplications and additions that occur during forward and backward propagation. To do that, you can do the following:

* Copy the `SentimentNetwork` class from the previous project into the following cell.
* Remove the `update_input_layer` function - you will not need it in this version.
* Modify `init_network`:
>* You no longer need a separate input layer, so remove any mention of `self.layer_0`
>* You will be dealing with the old hidden layer more directly, so create `self.layer_1`, a two-dimensional matrix with shape 1 x hidden_nodes, with all values initialized to zero
* Modify `train`:
>* Change the name of the input parameter `training_reviews` to `training_reviews_raw`. This will help with the next step.
>* At the beginning of the function, you'll want to preprocess your reviews to convert them to a list of indices (from `word2index`) that are actually used in the review. This is equivalent to what you saw in the video when Andrew set specific indices to 1. Your code should create a local `list` variable named `training_reviews` that should contain a `list` for each review in `training_reviews_raw`. Those lists should contain the indices for words found in the review. (A short sketch of this preprocessing step follows this list.)
>* Remove call to `update_input_layer`
>* Use `self`'s `layer_1` instead of a local `layer_1` object.
>* In the forward pass, replace the code that updates `layer_1` with new logic that only adds the weights for the indices used in the review.
>* When updating `weights_0_1`, only update the individual weights that were used in the forward pass.
* Modify `run`:
>* Remove call to `update_input_layer`
>* Use `self`'s `layer_1` instead of a local `layer_1` object.
>* Much like you did in `train`, you will need to pre-process the `review` so you can work with word indices, then update `layer_1` by adding weights for the indices used in the review.
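Before looking at the full solution below, here is a minimal, hypothetical sketch of the preprocessing step described above (the function name is illustrative; it assumes a `word2index` dictionary like the one built in `pre_process_data`). Each raw review is reduced to the set of word indices that appear in it, so the forward pass only has to visit those rows of `weights_0_1`.

# hypothetical standalone version of the preprocessing done at the top of train()
def reviews_to_index_lists(reviews_raw, word2index):
    processed = []
    for review in reviews_raw:
        indices = set()
        for word in review.split(" "):
            if word in word2index:
                indices.add(word2index[word])
        processed.append(list(indices))
    return processed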
import time import sys import numpy as np # Encapsulate our neural network in a class class SentimentNetwork: def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1): """Create a SentimenNetwork with the given settings Args: reviews(list) - List of reviews used for training labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews hidden_nodes(int) - Number of nodes to create in the hidden layer learning_rate(float) - Learning rate to use while training """ # Assign a seed to our random number generator to ensure we get # reproducable results during development np.random.seed(1) # process the reviews and their associated labels so that everything # is ready for training self.pre_process_data(reviews, labels) # Build the network to have the number of hidden nodes and the learning rate that # were passed into this initializer. Make the same number of input nodes as # there are vocabulary words and create a single output node. self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate) def pre_process_data(self, reviews, labels): # populate review_vocab with all of the words in the given reviews review_vocab = set() for review in reviews: for word in review.split(" "): review_vocab.add(word) # Convert the vocabulary set to a list so we can access words via indices self.review_vocab = list(review_vocab) # populate label_vocab with all of the words in the given labels. label_vocab = set() for label in labels: label_vocab.add(label) # Convert the label vocabulary set to a list so we can access labels via indices self.label_vocab = list(label_vocab) # Store the sizes of the review and label vocabularies. self.review_vocab_size = len(self.review_vocab) self.label_vocab_size = len(self.label_vocab) # Create a dictionary of words in the vocabulary mapped to index positions self.word2index = {} for i, word in enumerate(self.review_vocab): self.word2index[word] = i # Create a dictionary of labels mapped to index positions self.label2index = {} for i, label in enumerate(self.label_vocab): self.label2index[label] = i def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Store the learning rate self.learning_rate = learning_rate # Initialize weights # These are the weights between the input layer and the hidden layer. self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes)) # These are the weights between the hidden layer and the output layer. 
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) ## New for Project 5: Removed self.layer_0; added self.layer_1 # The input layer, a two-dimensional matrix with shape 1 x hidden_nodes self.layer_1 = np.zeros((1,hidden_nodes)) ## New for Project 5: Removed update_input_layer function def get_target_for_label(self,label): if(label == 'POSITIVE'): return 1 else: return 0 def sigmoid(self,x): return 1 / (1 + np.exp(-x)) def sigmoid_output_2_derivative(self,output): return output * (1 - output) def train(self, training_reviews_raw, training_labels): ## New for Project 5: pre-process training reviews so we can deal # directly with the indices of non-zero inputs training_reviews = list() for review in training_reviews_raw: indices = set() for word in review.split(" "): if(word in self.word2index.keys()): indices.add(self.word2index[word]) training_reviews.append(list(indices)) # make sure out we have a matching number of reviews and labels assert(len(training_reviews) == len(training_labels)) # Keep track of correct predictions to display accuracy during training correct_so_far = 0 # Remember when we started for printing time statistics start = time.time() # loop through all the given reviews and run a forward and backward pass, # updating weights for every item for i in range(len(training_reviews)): # Get the next review and its correct label review = training_reviews[i] label = training_labels[i] #### Implement the forward pass here #### ### Forward pass ### ## New for Project 5: Removed call to 'update_input_layer' function # because 'layer_0' is no longer used # Hidden layer ## New for Project 5: Add in only the weights for non-zero items self.layer_1 *= 0 for index in review: self.layer_1 += self.weights_0_1[index] # Output layer ## New for Project 5: changed to use 'self.layer_1' instead of 'local layer_1' layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) #### Implement the backward pass here #### ### Backward pass ### # Output error layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output. layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2) # Backpropagated error layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error # Update the weights ## New for Project 5: changed to use 'self.layer_1' instead of local 'layer_1' self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step ## New for Project 5: Only update the weights that were used in the forward pass for index in review: self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step # Keep track of correct predictions. if(layer_2 >= 0.5 and label == 'POSITIVE'): correct_so_far += 1 elif(layer_2 < 0.5 and label == 'NEGATIVE'): correct_so_far += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the training process. 
elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \ + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%") if(i % 2500 == 0): print("") def test(self, testing_reviews, testing_labels): correct = 0 start = time.time() for i in range(len(testing_reviews)): pred = self.run(testing_reviews[i]) if(pred == testing_labels[i]): correct += 1 elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \ + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%") def run(self, review): """ Returns a POSITIVE or NEGATIVE prediction for the given review. """ # Run a forward pass through the network, like in the "train" function. ## New for Project 5: Removed call to update_input_layer function # because layer_0 is no longer used # Hidden layer ## New for Project 5: Identify the indices used in the review and then add # just those weights to layer_1 self.layer_1 *= 0 unique_indices = set() for word in review.lower().split(" "): if word in self.word2index.keys(): unique_indices.add(self.word2index[word]) for index in unique_indices: self.layer_1 += self.weights_0_1[index] # Output layer ## New for Project 5: changed to use self.layer_1 instead of local layer_1 layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer; # return NEGATIVE for other values if(layer_2[0] >= 0.5): return "POSITIVE" else: return "NEGATIVE"
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following cell to recreate the network and train it once again.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
Progress:0.0% Speed(reviews/sec):0.0 #Correct:1 #Trained:1 Training Accuracy:100.% Progress:10.4% Speed(reviews/sec):1756. #Correct:1823 #Trained:2501 Training Accuracy:72.8% Progress:20.8% Speed(reviews/sec):1710. #Correct:3810 #Trained:5001 Training Accuracy:76.1% Progress:31.2% Speed(reviews/sec):1707. #Correct:5884 #Trained:7501 Training Accuracy:78.4% Progress:41.6% Speed(reviews/sec):1720. #Correct:8023 #Trained:10001 Training Accuracy:80.2% Progress:52.0% Speed(reviews/sec):1715. #Correct:10147 #Trained:12501 Training Accuracy:81.1% Progress:62.5% Speed(reviews/sec):1716. #Correct:12277 #Trained:15001 Training Accuracy:81.8% Progress:72.9% Speed(reviews/sec):1712. #Correct:14388 #Trained:17501 Training Accuracy:82.2% Progress:83.3% Speed(reviews/sec):1709. #Correct:16559 #Trained:20001 Training Accuracy:82.7% Progress:93.7% Speed(reviews/sec):1707. #Correct:18745 #Trained:22501 Training Accuracy:83.3% Progress:99.9% Speed(reviews/sec):1707. #Correct:20078 #Trained:24000 Training Accuracy:83.6%
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.
mlp.test(reviews[-1000:],labels[-1000:])
Progress:99.9% Speed(reviews/sec):1970. #Correct:853 #Tested:1000 Testing Accuracy:85.3%
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
End of Project 5. Watch the next video to see Andrew's solution, then continue on to the next lesson.

Further Noise Reduction
Image(filename='sentiment_network_sparse_2.png')

# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()

# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]

from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
output_notebook()

# density=True already normalizes the histogram; the deprecated 'normed' argument is not needed
hist, edges = np.histogram(list(map(lambda x:x[1],pos_neg_ratios.most_common())), density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="Word Positive/Negative Affinity Distribution")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)

frequency_frequency = Counter()
for word, cnt in total_counts.most_common():
    frequency_frequency[cnt] += 1

hist, edges = np.histogram(list(map(lambda x:x[1],frequency_frequency.most_common())), density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="The frequency distribution of the words in our corpus")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
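The frequency plot above shows a long tail of words that appear only a handful of times. A quick way to see how aggressively a given `min_count` threshold would prune the vocabulary (a sketch that assumes the `total_counts` Counter built earlier in the notebook is still in scope):

# how many distinct words survive each candidate threshold
for min_count in (0, 10, 20, 50):
    print(min_count, sum(1 for word, cnt in total_counts.items() if cnt > min_count))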
Project 6: Reducing Noise by Strategically Reducing the Vocabulary

**TODO:** Improve `SentimentNetwork`'s performance by reducing more noise in the vocabulary. Specifically, do the following:

* Copy the `SentimentNetwork` class from the previous project into the following cell.
* Modify `pre_process_data`: (a small sketch of this filtering logic follows this list)
>* Add two additional parameters: `min_count` and `polarity_cutoff`
>* Calculate the positive-to-negative ratios of words used in the reviews. (You can use code you've written elsewhere in the notebook, but we are moving it into the class like we did with other helper code earlier.)
>* Andrew's solution only calculates a positive-to-negative ratio for words that occur at least 50 times. This keeps the network from attributing too much sentiment to rarer words. You can choose to add this to your solution if you would like.
>* Change so words are only added to the vocabulary if they occur more than `min_count` times.
>* Change so words are only added to the vocabulary if the absolute value of their positive-to-negative ratio is at least `polarity_cutoff`
* Modify `__init__`:
>* Add the same two parameters (`min_count` and `polarity_cutoff`) and use them when you call `pre_process_data`
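As a rough sketch of the filtering described above (the function name is illustrative, and `total_counts` and `pos_neg_ratios` are assumed to be built the same way as earlier in the notebook), the vocabulary test inside `pre_process_data` boils down to a pair of conditions per word:

# keep a word only if it is frequent enough and, when we have a ratio for it,
# only if that ratio is polarized enough
def keep_word(word, total_counts, pos_neg_ratios, min_count, polarity_cutoff):
    if total_counts[word] <= min_count:
        return False
    if word in pos_neg_ratios:
        return abs(pos_neg_ratios[word]) >= polarity_cutoff
    return True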
import time import sys import numpy as np from collections import Counter # Encapsulate our neural network in a class class SentimentNetwork: def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1, min_count = 10, polarity_cutoff = 0.1): """Create a SentimenNetwork with the given settings Args: reviews(list) - List of reviews used for training labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews hidden_nodes(int) - Number of nodes to create in the hidden layer learning_rate(float) - Learning rate to use while training """ # Assign a seed to our random number generator to ensure we get # reproducable results during development np.random.seed(1) # process the reviews and their associated labels so that everything # is ready for training self.pre_process_data(reviews, labels, min_count, polarity_cutoff) # Build the network to have the number of hidden nodes and the learning rate that # were passed into this initializer. Make the same number of input nodes as # there are vocabulary words and create a single output node. self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate) def pre_process_data(self, reviews, labels, min_count, polarity_cutoff): positive_counts = Counter() negative_counts = Counter() total_counts = Counter() for i in range(len(reviews)): if(labels[i] == 'POSITIVE'): for word in reviews[i].split(" "): positive_counts[word] += 1 total_counts[word] += 1 else: for word in reviews[i].split(" "): negative_counts[word] += 1 total_counts[word] += 1 pos_neg_ratios = Counter() for term,count in total_counts.most_common(): if(count > 50): pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1) try: pos_neg_ratios[term] = np.log(pos_neg_ratio) except: pass review_vocab = set() for review in reviews: for word in review.split(" "): ## New for Project 6: only add words that occur at least min_count times # and for words with pos/neg ratios, only add words # that meet the polarity_cutoff if(total_counts[word] > min_count): if(word in pos_neg_ratios.keys() and np.abs([pos_neg_ratios[word]]) >= polarity_cutoff): # if((pos_neg_ratios[word] >= polarity_cutoff) or (pos_neg_ratios[word] <= -polarity_cutoff)): review_vocab.add(word) else: review_vocab.add(word) # Convert the vocabulary set to a list so we can access words via indices self.review_vocab = list(review_vocab) # populate label_vocab with all of the words in the given labels. label_vocab = set() for label in labels: label_vocab.add(label) # Convert the label vocabulary set to a list so we can access labels via indices self.label_vocab = list(label_vocab) # Store the sizes of the review and label vocabularies. self.review_vocab_size = len(self.review_vocab) self.label_vocab_size = len(self.label_vocab) # Create a dictionary of words in the vocabulary mapped to index positions self.word2index = {} for i, word in enumerate(self.review_vocab): self.word2index[word] = i # Create a dictionary of labels mapped to index positions self.label2index = {} for i, label in enumerate(self.label_vocab): self.label2index[label] = i def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Store the learning rate self.learning_rate = learning_rate # Initialize weights # These are the weights between the input layer and the hidden layer. 
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes)) # These are the weights between the hidden layer and the output layer. self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) ## New for Project 5: Removed self.layer_0; added self.layer_1 # The input layer, a two-dimensional matrix with shape 1 x hidden_nodes self.layer_1 = np.zeros((1,hidden_nodes)) ## New for Project 5: Removed update_input_layer function def get_target_for_label(self,label): if(label == 'POSITIVE'): return 1 else: return 0 def sigmoid(self,x): return 1 / (1 + np.exp(-x)) def sigmoid_output_2_derivative(self,output): return output * (1 - output) def train(self, training_reviews_raw, training_labels): ## New for Project 5: pre-process training reviews so we can deal # directly with the indices of non-zero inputs training_reviews = list() for review in training_reviews_raw: indices = set() for word in review.split(" "): if(word in self.word2index.keys()): indices.add(self.word2index[word]) training_reviews.append(list(indices)) # make sure out we have a matching number of reviews and labels assert(len(training_reviews) == len(training_labels)) # Keep track of correct predictions to display accuracy during training correct_so_far = 0 # Remember when we started for printing time statistics start = time.time() # loop through all the given reviews and run a forward and backward pass, # updating weights for every item for i in range(len(training_reviews)): # Get the next review and its correct label review = training_reviews[i] label = training_labels[i] #### Implement the forward pass here #### ### Forward pass ### ## New for Project 5: Removed call to 'update_input_layer' function # because 'layer_0' is no longer used # Hidden layer ## New for Project 5: Add in only the weights for non-zero items self.layer_1 *= 0 for index in review: self.layer_1 += self.weights_0_1[index] # Output layer ## New for Project 5: changed to use 'self.layer_1' instead of 'local layer_1' layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) #### Implement the backward pass here #### ### Backward pass ### # Output error layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output. layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2) # Backpropagated error layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error # Update the weights ## New for Project 5: changed to use 'self.layer_1' instead of local 'layer_1' self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step ## New for Project 5: Only update the weights that were used in the forward pass for index in review: self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step # Keep track of correct predictions. if(layer_2 >= 0.5 and label == 'POSITIVE'): correct_so_far += 1 elif(layer_2 < 0.5 and label == 'NEGATIVE'): correct_so_far += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the training process. 
elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \ + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%") if(i % 2500 == 0): print("") def test(self, testing_reviews, testing_labels): correct = 0 start = time.time() for i in range(len(testing_reviews)): pred = self.run(testing_reviews[i]) if(pred == testing_labels[i]): correct += 1 elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \ + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%") def run(self, review): self.layer_1 *= 0 unique_indices = set() for word in review.lower().split(" "): if word in self.word2index.keys(): unique_indices.add(self.word2index[word]) for index in unique_indices: self.layer_1 += self.weights_0_1[index] # Output layer ## New for Project 5: changed to use self.layer_1 instead of local layer_1 layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer; # return NEGATIVE for other values if(layer_2[0] >= 0.5): return "POSITIVE" else: return "NEGATIVE"
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following cell to train your network with a small polarity cutoff.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:51: RuntimeWarning: divide by zero encountered in log
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
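The `RuntimeWarning` above comes from words that never appear in a positive review: their positive-to-negative ratio is 0, and `np.log(0)` evaluates to `-inf` while emitting a divide-by-zero warning (the `try`/`except` around the call doesn't catch it, because warnings aren't exceptions). Training still proceeds, but if you want to silence the warning you could smooth the ratio before taking the log. The two lines below are a sketch of that change inside `pre_process_data` (not part of Andrew's solution), where `positive_counts`, `negative_counts`, and `term` are already defined:

# add-one smoothing on both counts keeps the ratio strictly positive
pos_neg_ratio = (positive_counts[term] + 1) / float(negative_counts[term] + 1)
pos_neg_ratios[term] = np.log(pos_neg_ratio)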
And run the following cell to test its performance.
mlp.test(reviews[-1000:],labels[-1000:])
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
Run the following cell to train your network with a much larger polarity cutoff.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
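A larger `polarity_cutoff` prunes far more words, which typically makes training much faster at some cost in accuracy. To see how much each setting shrinks the vocabulary, a quick sketch using the `review_vocab_size` attribute the class already stores (constructing the network twice just for the comparison):

for cutoff in (0.05, 0.8):
    net = SentimentNetwork(reviews[:-1000], labels[:-1000],
                           min_count=20, polarity_cutoff=cutoff, learning_rate=0.01)
    print(cutoff, net.review_vocab_size)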
And run the following cell to test its performance.
mlp.test(reviews[-1000:],labels[-1000:])
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
End of Project 6. Watch the next video to see Andrew's solution, then continue on to the next lesson.

Analysis: What's Going on in the Weights?
mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)
mlp_full.train(reviews[:-1000],labels[:-1000])

Image(filename='sentiment_network_sparse.png')

def get_most_similar_words(focus = "horrible"):
    most_similar = Counter()

    for word in mlp_full.word2index.keys():
        most_similar[word] = np.dot(mlp_full.weights_0_1[mlp_full.word2index[word]],mlp_full.weights_0_1[mlp_full.word2index[focus]])

    return most_similar.most_common()

get_most_similar_words("excellent")

get_most_similar_words("terrible")

import matplotlib.colors as colors

words_to_visualize = list()
for word, ratio in pos_neg_ratios.most_common(500):
    if(word in mlp_full.word2index.keys()):
        words_to_visualize.append(word)

for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
    if(word in mlp_full.word2index.keys()):
        words_to_visualize.append(word)

pos = 0
neg = 0

colors_list = list()
vectors_list = list()
for word in words_to_visualize:
    if word in pos_neg_ratios.keys():
        vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])
        if(pos_neg_ratios[word] > 0):
            pos+=1
            colors_list.append("#00ff00")
        else:
            neg+=1
            colors_list.append("#000000")

from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
words_top_ted_tsne = tsne.fit_transform(vectors_list)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="vector T-SNE for most polarized words")

source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],
                                    x2=words_top_ted_tsne[:,1],
                                    names=words_to_visualize,
                                    color=colors_list))

p.scatter(x="x1", y="x2", size=8, source=source, fill_color="color")

word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6,
                       text_font_size="8pt", text_color="#555555",
                       source=source, text_align='center')
p.add_layout(word_labels)

show(p)

# green indicates positive words, black indicates negative words
_____no_output_____
MIT
sentiment-network/sentiment-classification-project.ipynb
eugli/udacity-deep-learning
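`get_most_similar_words` above ranks words by the raw dot product between their rows of `weights_0_1`, so words with large weight norms can dominate the ranking. A common variation (a sketch, not part of the lesson, reusing the `mlp_full` network trained above) is to normalize the vectors and rank by cosine similarity instead:

import numpy as np
from collections import Counter

def get_most_similar_words_cosine(focus="horrible"):
    focus_vec = mlp_full.weights_0_1[mlp_full.word2index[focus]]
    focus_norm = np.linalg.norm(focus_vec)
    most_similar = Counter()
    for word, idx in mlp_full.word2index.items():
        vec = mlp_full.weights_0_1[idx]
        denom = np.linalg.norm(vec) * focus_norm
        if denom > 0:
            most_similar[word] = np.dot(vec, focus_vec) / denom
    return most_similar.most_common()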
Advanced usage

This notebook replicates what was done in the *simple_usage* notebooks, but this time with the advanced API. The advanced API is required if we want to use non-standard affinity methods that better preserve global structure.

If you are comfortable with the advanced API, please refer to the *preserving_global_structure* notebook for a guide on how to obtain better embeddings and preserve more global structure.
from openTSNE import TSNEEmbedding
from openTSNE import affinity
from openTSNE import initialization

from examples import utils

import numpy as np
from sklearn.model_selection import train_test_split

import matplotlib.pyplot as plt
_____no_output_____
BSD-3-Clause
examples/02_advanced_usage.ipynb
gavehan/openTSNE
Load data
import gzip
import pickle

with gzip.open("data/macosko_2015.pkl.gz", "rb") as f:
    data = pickle.load(f)

x = data["pca_50"]
y = data["CellType1"].astype(str)

print("Data set contains %d samples with %d features" % x.shape)
Data set contains 44808 samples with 50 features
BSD-3-Clause
examples/02_advanced_usage.ipynb
gavehan/openTSNE
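With the data loaded, the advanced API builds each piece of the embedding explicitly instead of calling a single `TSNE` object. The sketch below follows the advanced-usage pattern from the openTSNE documentation; the specific parameter values (perplexity, iteration counts, exaggeration) are illustrative assumptions rather than prescriptions, and should be checked against the installed version:

# 1. compute affinities between samples
affinities = affinity.PerplexityBasedNN(
    x,
    perplexity=30,
    metric="euclidean",
    n_jobs=8,
    random_state=42,
)

# 2. build an initial (PCA-based) embedding of the data
init = initialization.pca(x, random_state=42)

# 3. wrap them in a TSNEEmbedding and optimize in two phases
embedding = TSNEEmbedding(init, affinities, negative_gradient_method="fft", n_jobs=8)
embedding = embedding.optimize(n_iter=250, exaggeration=12, momentum=0.5)  # early exaggeration phase
embedding = embedding.optimize(n_iter=500, momentum=0.8)                   # main optimization phase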