% !TEX root = text_processing.tex
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{}

\vfill
\centering
\Huge{\edinred{[Text processing]\\Deep Learning}}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Text Processing: Deep Learning: Overview}

\begin{itemize}
\item The shortest introduction to neural networks
\item Representing words
\item Representing sentences
\item Classifying sentences
\item Deep Learning for Sentiment Analysis
\item Deep Learning for Information Extraction (NER)
\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Biological neuron / nerve cell}

\begin{center}
\includegraphics[width=0.95\textwidth]{figures/neuron_en}
\end{center}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Hebb principle}

\begin{center}
\includegraphics[width=0.5\textwidth]{figures/neuron_en}
\vfill
Hebb: \myemph{``Neurons that fire together, wire together''}
\end{center}


%\vspace{1cm}
\begin{itemize}
  \item Cells that are active together \ra\ reinforce their connection
  \item Cells that are not active together \ra\ weaken their connection
  \item[] \Ra\ a \myemph{local process}: there is no global supervision
\end{itemize}
\end{frame}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{The perceptron}

\textbf{Perceptron}: computing unit loosely inspired by the biological neuron

\begin{columns}
\begin{column}{.5\textwidth}
\begin{center}
	\includegraphics[width=0.6\textwidth]{figures/BpNeurone}
\end{center}
\end{column}
\begin{column}{.5\textwidth}
\begin{center}
  \begin{tabular}[c]{rl}
    \textbf{input}: & $\vx = \{x_i\}$ \\
    \myemph{weights}: & $\vw = \{w_i\}$ \\
    threshold: & $s$ \\
    activity: & $\displaystyle a = \sum_i w_i x_i + s$ \\
    \myemph{activation function}: & $f=\text{threshold function}$ \\
    \textbf{output}: & $\hat{y}=f(a)$ \\
  \end{tabular}
\end{center}
\end{column}
\end{columns}
\vspace{.5cm}
Training method: change the weights $\vw$ if a training example $\vx$ is misclassified as follows:
\begin{itemize}
	\item[]   $\vw^{new} = \vw^{cur} + y \, \vx $ ~~~ with ~~~ $ y \in \{+1, -1\}$
\end{itemize}
	
\end{frame}
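
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{The perceptron: update rule in code}

A minimal NumPy sketch of the update rule from the previous slide (variable names are ours, not a reference implementation):

\begin{verbatim}
import numpy as np

def perceptron_train(X, y, epochs=10):
    """X: (n, d) inputs; y: (n,) labels in {+1, -1}."""
    w = np.zeros(X.shape[1])  # weights (the threshold s is
    b = 0.0                   # kept as a separate bias b)
    for _ in range(epochs):
        for x_i, y_i in zip(X, y):
            y_hat = 1 if x_i @ w + b > 0 else -1
            if y_hat != y_i:     # misclassified example
                w += y_i * x_i   # w_new = w_cur + y * x
                b += y_i
    return w, b
\end{verbatim}
\end{frame}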

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{The Perceptron and the logical functions}
\hspace{1cm}
\begin{tabular}[t]{c}
  y = a OR b \\[5pt]
  \includegraphics[height=0.7\textheight]{figures/or}
\end{tabular}
\hspace{1cm}%
\begin{tabular}[t]{c}
  y = a AND b \\[5pt]
  \includegraphics[height=0.69\textheight]{figures/and}
\end{tabular}
\hspace{1cm}%
\begin{tabular}[t]{c}
  y = a XOR b \\[5pt]
  \includegraphics[height=0.73\textheight]{figures/xor}
\end{tabular}

\medskip
\Ra\ XOR is \textbf{not linearly separable}: a single perceptron cannot learn it!
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Multilayer perceptron}

\begin{columns}
\begin{column}{.5\textwidth}

\begin{center}
\includegraphics[width=0.95\textwidth]{figures/mlp}
\end{center}

\end{column}
\begin{column}{.5\textwidth}
\begin{eqnarray*}
y_i^{2} & = & f\left(\sum_j w^{1}_{ij} ~ x_j^{1}\right) \\
y_i^{3} & = & f\left(\sum_j w^{2}_{ij} ~ y_j^{2}\right) \\
       & \vdots & \\
y_i^{c} & = & f \left(\sum_j w^{c-1}_{ij} ~ y_j^{c-1}\right) \\
\end{eqnarray*}
\end{column}
\end{columns}
\Ra\ \myemph{propagation} of the input $\vx$ towards the output $\vy$
\end{frame}
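
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Multilayer perceptron: forward pass in code}

A NumPy sketch of the propagation equations from the previous slide (the layer sizes and the sigmoid activation are our illustrative choices):

\begin{verbatim}
import numpy as np

def f(a):                            # activation function
    return 1.0 / (1.0 + np.exp(-a))  # here: sigmoid

def forward(x, weights):
    """weights: list of matrices W^1, W^2, ..., W^{c-1}."""
    y = x
    for W in weights:
        y = f(W @ y)        # y^{l+1} = f(W^l y^l)
    return y

rng = np.random.default_rng(0)
Ws = [rng.normal(size=(5, 3)), rng.normal(size=(2, 5))]
print(forward(rng.normal(size=3), Ws))  # propagated output y
\end{verbatim}
\end{frame}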

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{How to train a multilayer perceptron?}
\begin{block}{\center \myemphb{Backpropagation}: Backward propagation of errors}
%\begin{center}
\begin{columns}
\begin{column}{.5\textwidth}
\[  \wij^{new} = \wij^{cur} - \lambda \frac{\partial E}{\partial \wij}   \]
\end{column}
\begin{column}{.5\textwidth}
\begin{itemize}
\item $E$: \textbf{loss function}
\item $\lambda$: \textbf{learning rate}
\item $\wij$: weight between neuron $i$ and $j$
\end{itemize}
\end{column}
\end{columns}
%\end{center}
\end{block}

\begin{itemize}
\item The loss function depends on the task
\item Classification task \Ra\ estimate a probability distribution
\[
\begin{array}[t]{rcl@{\hspace{1cm}}rcl}
  y_i & = & \ds \frac{e^{a_i}}{\sum_k e^{a_k}}
  & \ds {\partial y_i} / {\partial a_k} & = & \delta_{ik}y_i - y_i y_k \\[10pt]
  \ds E(\vy,\vc) & = & \ds -\sum_i c_i \log y_i
  & \ds {\partial E} / {\partial y_i} & = & \ds -\frac{c_i}{y_i}
\end{array}
\]
\end{itemize}

\end{frame}
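
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Softmax and cross-entropy in code}

A NumPy sketch of the two formulas on the previous slide; combining their derivatives gives the well-known compact gradient $\partial E / \partial a_i = y_i - c_i$:

\begin{verbatim}
import numpy as np

def softmax(a):
    e = np.exp(a - a.max())  # shift for numerical stability
    return e / e.sum()

def cross_entropy(y, c):
    return -np.sum(c * np.log(y))  # E = -sum_i c_i log y_i

a = np.array([2.0, 1.0, 0.1])   # activities (scores)
c = np.array([1.0, 0.0, 0.0])   # one-hot target
y = softmax(a)
print(cross_entropy(y, c))      # loss E(y, c)
print(y - c)                    # gradient dE/da
\end{verbatim}
\end{frame}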

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{How to train a multilayer perceptron?}

\begin{columns}[c]
\begin{column}{.5\textwidth}

\begin{block}{\center \myemphb{Chain rule}}
\begin{center}
$
\ds \frac{\partial \mathbf{E}}{\partial \mathbf{W}} = 
	\frac{\color{liumgreen} \partial \mathbf{E}}{\color{edinorange} \partial \mathbf{h^{2}}}
	\frac{\color{edinorange} \partial \mathbf{h^{2}}}{\color{cyan} \partial \mathbf{h^{1}}}
	\frac{\color{cyan} \partial \mathbf{h^{1}}}{\partial \mathbf{W}}
$
\end{center}
\end{block}
\end{column}

\begin{column}{.5\textwidth}
\begin{center} \includegraphics[width=4cm]{mlp_bp_grad} \end{center}
\end{column}
\end{columns}


\textbf{Output layer}
\[
\ds \frac{\partial E}{\partial \wij} = \ds \underbrace{\frac{\partial E}{\partial a_i}}_{\delta_i} \, \frac{\partial a_i}{\partial \wij} = \delta_i \, h_j 
\text{~~with~~} 
\delta_i = \ds \frac{\partial E}{\partial y_i}  \, \frac{\partial y_i}{\partial a_i} = \ds \frac{\partial E}{\partial y_i} \, f'(a_i)
\]

\textbf{Hidden layer}
\[
\ds \frac{\partial E}{\partial v_{jk}} = \ds \underbrace{\frac{\partial E}{\partial z_j}}_{\gamma_j} \, \frac{\partial z_j}{\partial v_{jk}} = \gamma_j  \,x_k
\text{~~with~~} 
\gamma_j = \ds \sum_i \frac{\partial E}{\partial a_i} \, \frac{\partial a_i}{\partial h_j} \, \frac{\partial h_j}{\partial z_j} 
    		= \ds \sum_i \delta_i \, \wij \, f'(z_j)
    		= f'(z_j) \ds \sum_i \delta_i \wij
\]

\end{frame}
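
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Backpropagation: the two gradients in code}

A NumPy sketch of the output-layer and hidden-layer gradients from the previous slide, for one hidden layer with weights $\mathbf{V}$ (input side, $v_{jk}$) and $\mathbf{W}$ (output side, $w_{ij}$); the notation follows the slide, the code itself is ours:

\begin{verbatim}
import numpy as np

def f(z):  return np.tanh(z)         # activation
def fp(z): return 1 - np.tanh(z)**2  # its derivative f'

def backprop(x, c, V, W):
    # forward pass
    z = V @ x; h = f(z)              # hidden layer
    a = W @ h                        # output activities
    y = np.exp(a) / np.exp(a).sum()  # softmax
    # backward pass
    delta = y - c                    # dE/da  (output layer)
    dW = np.outer(delta, h)          # dE/dw_ij = delta_i h_j
    gamma = fp(z) * (W.T @ delta)    # f'(z_j) sum_i delta_i w_ij
    dV = np.outer(gamma, x)          # dE/dv_jk = gamma_j x_k
    return dW, dV
\end{verbatim}
\end{frame}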

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Multilayer perceptron: training}

\begin{itemize}
  \item[1.] Normalise data
  \item[2.] Initialise the weights $\mW$
  \item[3.] \alert{Repeat}
        \begin{itemize}
        \item Pick a \textbf{batch} of examples $(\vx,\vc)$
        \item \textbf{Forward} pass: propagate the batch $\vx$ through the network \ra\ $\vy$
        \item Calculate the error $E(\vy,\vc)$
        \item \textbf{Backward} pass: \myemphb{backpropagation} \ra\ $\nabla \wij$
        \item Update weights $\wij^{new} = \wij^{cur} - \lambda \frac{\partial E}{\partial \wij}$
        \item Optionally adjust the training hyper-parameters (e.g. the learning rate $\lambda$)
        \end{itemize}
  \item[  ] \alert{until convergence}
\end{itemize}

\end{frame}
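
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Multilayer perceptron: the training loop in code}

The recipe above made concrete as a NumPy sketch, reusing the backprop function from the previous slides; a fixed epoch budget and batch size 1 stand in for a real convergence test and real batching, and the toy data is invented:

\begin{verbatim}
import numpy as np
rng = np.random.default_rng(0)

# toy data: 100 examples, 3 features, 2 one-hot classes
X = rng.normal(size=(100, 3))
C = np.eye(2)[(X[:, 0] > 0).astype(int)]
X = (X - X.mean(0)) / X.std(0)           # 1. normalise data
V = rng.normal(scale=0.1, size=(5, 3))   # 2. initialise weights
W = rng.normal(scale=0.1, size=(2, 5))
lam = 0.1                                # learning rate
for epoch in range(50):                  # 3. repeat
    for i in rng.permutation(100):       # pick examples (x, c)
        dW, dV = backprop(X[i], C[i], V, W)  # fwd + bwd pass
        W -= lam * dW                    # update weights
        V -= lam * dV
    # optionally adjust lam here (e.g. learning-rate decay)
\end{verbatim}
\end{frame}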

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{}

\vfill
\centering
\Huge{\liumcyan{That's great, but where is the text?!?}}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{How to represent words?}

\begin{block}{\center \myemphb{Word Embedding}}
\begin{center}
Vector representation of a word \Ra\ vector of real values\\
\end{center}
\end{block}
Also called continuous space representation.

\begin{itemize}
\item<2-> What would be the simplest way of obtaining vectors? \only<3->{\Ra\ the so-called \myemphb{1-hot vector}:}
\item[]<3-> \begin{itemize}
\item vector of size equal to the \textbf{vocabulary size}
\item contains 0 everywhere except for a single 1 at a specific position
\end{itemize}

\vspace{1cm}

\item<4-> Is that a good representation? \only<5->{\Ra\ \textbf{NO!}}
\item[]<5-> \begin{itemize}
\item the distance between any two words is the same for all word pairs
\item the position of the ``1'' is arbitrary
\item \ra\ it is just a \textbf{coding}
 \end{itemize}

\end{itemize}

\only<3->{
\begin{textblock*}{50mm}[0,0](105mm,40mm)
\includegraphics[width=4cm]{one-hot} 
\end{textblock*}
}

\end{frame}
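
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{1-hot vectors in code}

A tiny NumPy sketch of the 1-hot coding just described (the toy vocabulary is ours):

\begin{verbatim}
import numpy as np

vocab = ["the", "cat", "sat", "on", "mat"]
index = {w: i for i, w in enumerate(vocab)}

def one_hot(word):
    v = np.zeros(len(vocab))  # vocabulary-sized vector of 0s
    v[index[word]] = 1.0      # single 1 at the word's position
    return v

# every pair of distinct words is equally far apart:
d1 = np.linalg.norm(one_hot("cat") - one_hot("mat"))
d2 = np.linalg.norm(one_hot("cat") - one_hot("on"))
print(d1, d2)                 # same distance -> just a coding
\end{verbatim}
\end{frame}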

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{How to represent words?}

\myemph{The semantic properties of the words are encoded in the dimensions of the vector}

\begin{minipage}[t][.7\textheight]{\textwidth}

\centering
\includegraphics[width=.7\textwidth]{king-white-embedding}<1->

\includegraphics[width=.7\textwidth]{king-colored-embedding}<2->

\includegraphics[width=.4\textwidth]{queen-woman-girl-embeddings}<3-> 



\end{minipage}

\vfill

\source{ \textbf{\url{http://jalammar.github.io/illustrated-word2vec/}} \la\ \myemphb{Must read!} } 

\smallskip

\end{frame}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{How to represent words?}

\myemph{The semantic properties of the words are encoded in the dimensions of the vector}


\begin{center}
\includegraphics[width=.5\textwidth]{king-analogy-viz} 
\end{center}

Embeddings can be learned in several ways:
\begin{itemize}
\item Extract handcrafted meaningful features
\item<2-> \myemph{Use a neural network!}
\end{itemize}

\vfill

\source{ \textbf{\url{http://jalammar.github.io/illustrated-word2vec/}} \la\ \myemphb{Must read!} } 

\end{frame}
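
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Word analogies in code}

The king $-$ man $+$ woman $\approx$ queen analogy from the figure as a NumPy sketch; the 3-dimensional vectors are made up for illustration (real embeddings have hundreds of dimensions):

\begin{verbatim}
import numpy as np

emb = {  # toy vectors, invented for the example
    "king":  np.array([0.9, 0.8, 0.1]),
    "man":   np.array([0.5, 0.1, 0.1]),
    "woman": np.array([0.5, 0.1, 0.9]),
    "queen": np.array([0.9, 0.8, 0.9]),
}

def cosine(u, v):  # similarity in the embedding space
    return u @ v / (np.linalg.norm(u) * np.linalg.norm(v))

target = emb["king"] - emb["man"] + emb["woman"]
best = max(emb, key=lambda w: cosine(emb[w], target))
print(best)        # -> "queen" with these toy vectors
\end{verbatim}
\end{frame}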

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Word embeddings: word2vec}

Training task inspired by language modelling: predict a word from its context (\textbf{CBOW}), or the context from a word (\textbf{SkipGram})

\begin{columns}[c]
\begin{column}{.5\textwidth}
	\begin{center}
		\textbf{CBOW}\\
	 	\includegraphics[width=4cm]{cbow} 
	\end{center}
\end{column}
\begin{column}{.5\textwidth}
	\begin{center}
		\textbf{SkipGram}\\
		\includegraphics[width=4cm]{skipgram} 
	\end{center}
\end{column}
\end{columns}

\source{ \textbf{\url{http://jalammar.github.io/illustrated-word2vec/}} \la\ \myemphb{Must read!} } 

\source{Mikolov et al. \textbf{Efficient Estimation of Word Representations in Vector Space}  \cite{mikolov2013}}
\end{frame}
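
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{word2vec in practice}

In practice one rarely reimplements word2vec; a sketch with the gensim library (assuming gensim $\geq 4$; the toy corpus is ours and far too small to learn anything meaningful):

\begin{verbatim}
from gensim.models import Word2Vec

corpus = [["i", "have", "ten", "euros"],
          ["this", "item", "costs", "eleven", "euros"],
          ["it", "is", "eleven", "dollars"]]

# sg=1 selects SkipGram, sg=0 selects CBOW
model = Word2Vec(sentences=corpus, vector_size=50,
                 window=2, min_count=1, sg=1)

vec = model.wv["euros"]   # the learned embedding vector
print(model.wv.most_similar("euros", topn=2))
\end{verbatim}
\end{frame}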

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Why does it work?}

\begin{center}
\includegraphics[width=0.8\textwidth]{word_embeddings}
\end{center}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Why does it work?}
\begin{itemize}
\item Let's assume that the word representations are \myemph{organised semantically}
\item words $w_1$ and $w_2$ having similar meaning would be \textbf{close to each other} in this space
\item[] \ra\ Consequently $\mathcal{F}(w_1) \approx \mathcal{F}(w_2)$
\end{itemize}

\begin{columns}[c]
\begin{column}{.5\textwidth}
\begin{itemize}
\item[] Language modelling:
\end{itemize}
\begin{enumerate}
\item I have got \edinred{10} \blue{euros} in my wallet
\item This item costs \liumgreen{11} \blue{euros}
\item In the U.S. it is \liumgreen{11} \edinorange{dollars} !
\end{enumerate} 
\end{column}
\begin{column}{.5\textwidth}

\begin{center}
\includegraphics<1>[width=0.8\textwidth]{fflm_generalisation}
\includegraphics<2>[width=0.8\textwidth]{fflm_generalisation2}
\end{center}
\end{column}
\end{columns}

\Ra\ What is the probability that \edinred{10} is followed by \edinorange{dollars}?
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{How to represent sentences?}

Sentence = sequence of words \Ra\ we need an \myemphb{encoder} 

Several possibilities have been developed:
\begin{itemize}
\item<2-> \myemph{Recurrent neural network} (RNN) 
\begin{itemize}
\item and its \textbf{bidirectional} version
\item representation = single vector or matrix
\item[]
\end{itemize}
\item<4-> \myemph{Convolutional neural network} (CNN)
\begin{itemize}
\item produces a single vector representation
\item[]
\end{itemize}

\item<5-> Very recently \myemph{Transformers} = self-attention
\begin{itemize}
\item representation = matrix (1 vector per word)
\item Must read: \textbf{\url{http://jalammar.github.io/illustrated-transformer/}}
\end{itemize}

\end{itemize}


\begin{textblock*}{30mm}[0,0](110mm,0mm)
	    \includegraphics<2>[height=4cm]{figures/rnn_proj}
	    \includegraphics<3->[height=4cm]{figures/rnn_proj2}
\end{textblock*}  

\begin{textblock*}{30mm}[0,0](110mm,35mm)
	\includegraphics<4->[height=0.25\textheight]{figures/conv_sent_encoder}
\end{textblock*}  

\end{frame}
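
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{A convolutional sentence encoder in code}

A PyTorch sketch of the CNN encoder mentioned on the previous slide: convolve over the word embeddings, then max over time to get a single vector (all sizes are illustrative):

\begin{verbatim}
import torch
import torch.nn as nn

vocab_size, emb_dim, n_filters = 1000, 64, 128
emb  = nn.Embedding(vocab_size, emb_dim)
conv = nn.Conv1d(emb_dim, n_filters, kernel_size=3, padding=1)

words = torch.randint(vocab_size, (1, 10))  # 1 sentence of 10 ids
x = emb(words).transpose(1, 2)   # (batch, emb_dim, length)
h = torch.relu(conv(x))          # (batch, n_filters, length)
sent = h.max(dim=2).values       # max over time -> 1 vector
print(sent.shape)                # torch.Size([1, 128])
\end{verbatim}
\end{frame}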

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{How to classify sentences?}

\begin{itemize}
\item The classifier is a neural network implementing a complex function $\mathcal{F}$ 
\begin{itemize}
	\item that operates in the \textbf{continuous space}
	\item that maps input vectors to a \textbf{probability distribution} over the desired classes
\end{itemize}
\end{itemize}

\begin{enumerate}
\item Encode the sentence
\begin{itemize}
\item get a vector
\item get a matrix (1 vector per word) \ra\ compress into 1 vector
	\begin{itemize}
	\item \textbf{pooling} operation (usually mean or max)
	\item concatenation
	\end{itemize}
\end{itemize}
\item Non-linear classification layer(s) \ra\ get a vector of scores $\vz$ (1 for each class)
\item Get a probability distribution by normalization \ra\ softmax
\begin{itemize}
\item[] \begin{center} $p(\vc = j | \theta) = \ds \frac{ e^{\vz_j}}{\ds \sum_{k=1}^{K} e^{\vz_k}}$ ~~~ ($K$: number of classes)  \end{center}
\end{itemize}
\end{enumerate}

\end{frame}
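
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{The classification pipeline in code}

The three steps above as a PyTorch sketch (the class and parameter names are ours): encode with a bi-RNN, mean-pool into one vector, apply a non-linear layer to get scores, normalise with softmax:

\begin{verbatim}
import torch
import torch.nn as nn

class SentenceClassifier(nn.Module):
    def __init__(self, vocab=1000, emb=64, hid=128, n_classes=3):
        super().__init__()
        self.emb = nn.Embedding(vocab, emb)
        self.rnn = nn.GRU(emb, hid, batch_first=True,
                          bidirectional=True)      # 1. encode
        self.out = nn.Sequential(
            nn.Linear(2 * hid, hid), nn.Tanh(),    # 2. non-linear
            nn.Linear(hid, n_classes))             #    scores z

    def forward(self, words):              # (batch, length)
        h, _ = self.rnn(self.emb(words))   # 1 vector per word
        s = h.mean(dim=1)                  # mean pooling
        return self.out(s)                 # 1 score per class

z = SentenceClassifier()(torch.randint(1000, (2, 10)))
p = torch.softmax(z, dim=-1)   # 3. normalise -> probabilities
\end{verbatim}
\end{frame}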

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Encoding a sentence with a (bi-)RNN}

Sentence: ``\textbf{A long time ago in a galaxy far , far away}''

 \begin{center}
%  \includegraphics[height=0.8\textheight]{figures/rnn_seq_1}<+>% if you remove the '%' then the 
%  \includegraphics[height=0.8\textheight]{figures/rnn_seq_2}<+>%
%  \includegraphics[height=0.8\textheight]{figures/rnn_seq_3}<+>%   
%  \includegraphics[height=0.8\textheight]{figures/rnn_seq_7}<+>% 
% \includegraphics[height=0.8\textheight]{figures/rnn_seq_10}<+>% 
% \includegraphics[height=0.8\textheight]{figures/rnn_seq_all}<+>% 

  \includegraphics[height=0.8\textheight]{figures/bi_rnn_seq_1}<+>%
  \includegraphics[height=0.8\textheight]{figures/bi_rnn_seq_2}<+>%
  \includegraphics[height=0.8\textheight]{figures/bi_rnn_seq_7}<+>%   
    \includegraphics[height=0.8\textheight]{figures/bi_rnn_seq_fall}<+>%   

  \includegraphics[height=0.8\textheight]{figures/bi_rnn_seq_r1}<+>%
  \includegraphics[height=0.8\textheight]{figures/bi_rnn_seq_r2}<+>%
  \includegraphics[height=0.8\textheight]{figures/bi_rnn_seq_r3}<+>%   
  \includegraphics[height=0.8\textheight]{figures/bi_rnn_seq_rall}<+>% 

  \includegraphics[height=0.8\textheight]{figures/bi_rnn_seq_all}<+>% 

  \end{center}%centering  

\end{frame}
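
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Encoding a sentence with a (bi-)RNN: code}

A NumPy sketch of what the animation shows, with an explicit recurrence instead of a library RNN: one pass reads the words left to right, the other right to left, and each position concatenates both states. The simple $\tanh$ cell and the sizes are our choices, and for brevity both directions share weights here (a real bi-RNN has separate parameters per direction):

\begin{verbatim}
import numpy as np
rng = np.random.default_rng(0)

d, h = 8, 16                       # embedding / state size
Wx, Wh = rng.normal(size=(h, d)), rng.normal(size=(h, h))

def rnn(embs):                     # one directional pass
    state, states = np.zeros(h), []
    for x in embs:                 # read one word at a time
        state = np.tanh(Wx @ x + Wh @ state)
        states.append(state)
    return states

sent = [rng.normal(size=d) for _ in range(10)]  # 10 "words"
fwd = rnn(sent)                    # left-to-right states
bwd = rnn(sent[::-1])[::-1]        # right-to-left states
H = [np.concatenate(p) for p in zip(fwd, bwd)]  # 1 per word
\end{verbatim}
\end{frame}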

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Pooling operation}

Compute the feature-wise \myemph{average} or \myemph{maximum} \textbf{activation} of a set of vectors\\
Aim: sub-sampling \ra\ result is a vector!

 \begin{center}
  \includegraphics[height=0.5\textheight]{figures/pooling}%
 \end{center}
 
\source{A comment on max pooling to read: \url{https://mirror2image.wordpress.com/2014/11/11/geoffrey-hinton-on-max-pooling-reddit-ama/}}
 
\end{frame}
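
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Pooling in code}

The pooling operation above is a one-liner in NumPy: reduce a set of vectors feature-wise to a single vector (the sizes are illustrative):

\begin{verbatim}
import numpy as np

# 10 word vectors of dimension 128 (e.g. bi-RNN states)
H = np.random.default_rng(0).normal(size=(10, 128))

mean_pool = H.mean(axis=0)   # feature-wise average
max_pool  = H.max(axis=0)    # feature-wise maximum

print(mean_pool.shape, max_pool.shape)  # (128,) (128,)
\end{verbatim}
\end{frame}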

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Classification layer \Ra\ Softmax}

Get a probability distribution by normalization \ra\ softmax:  $p(\vc = j | \theta) = \ds \frac{ e^{\vz_j}}{\ds \sum_{k=1}^{K} e^{\vz_k}}$ ~~~ ($K$: number of classes)

 \begin{center}
  \includegraphics[height=0.6\textheight]{figures/classif_layer}%
 \end{center}
\end{frame}






%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Deep Learning for Sentiment Analysis}

\begin{block}{Principle}
\myemph{Project} or represent the \textbf{text} into a \myemph{continuous space} and train an estimator operating in this space to compute the probability of each sentiment class.
\end{block}

\begin{center}
\includegraphics[height=0.6\textheight]{sa_nn}
\end{center}

\end{frame}
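
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Deep Learning for Sentiment Analysis: code}

The principle above as one PyTorch training step, reusing the SentenceClassifier sketch from the classification slides with 2 classes (negative/positive); the batch is a dummy stand-in for real data:

\begin{verbatim}
import torch
import torch.nn as nn

model = SentenceClassifier(n_classes=2)  # from earlier sketch
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()          # softmax + cross-entropy

words  = torch.randint(1000, (4, 12))    # dummy batch, 4 texts
labels = torch.tensor([0, 1, 1, 0])      # 0 = neg, 1 = pos

z = model(words)                 # scores over the 2 sentiments
loss = loss_fn(z, labels)        # error E(y, c)
opt.zero_grad(); loss.backward() # backpropagation
opt.step()                       # update the weights
\end{verbatim}
\end{frame}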


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Text Processing: Deep Learning: Resources}


Deep Learning book: \url{https://www.deeplearningbook.org/}
\cite{Goodfellow-et-al-2016}

\end{frame}