From e6436f90fe73408cc510693d80cc7f63eb6e986b Mon Sep 17 00:00:00 2001 From: Mirko Birbaumer <mirko.birbaumer@hslu.ch> Date: Fri, 2 Apr 2021 20:45:38 +0000 Subject: [PATCH] Minor Editing --- ...k Block 6 - RNN and Image Captioning.ipynb | 126 ++++++++++-------- 1 file changed, 70 insertions(+), 56 deletions(-) diff --git a/notebooks/Block_6/Jupyter Notebook Block 6 - RNN and Image Captioning.ipynb b/notebooks/Block_6/Jupyter Notebook Block 6 - RNN and Image Captioning.ipynb index bee73b0..b01ccf6 100644 --- a/notebooks/Block_6/Jupyter Notebook Block 6 - RNN and Image Captioning.ipynb +++ b/notebooks/Block_6/Jupyter Notebook Block 6 - RNN and Image Captioning.ipynb @@ -20,7 +20,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 48, "metadata": {}, "outputs": [ { @@ -99,7 +99,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -128,7 +128,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -157,7 +157,7 @@ "source": [ "We also get some statistics about the clean document.\n", "\n", - "We can see that there are just under 33'722 words in the clean text and a vocabulary of just under 7'019 words. This is smallish and models fit on this data should be manageable on modest hardware." + "We can see that there are approximately 33'600 words in the clean text and a vocabulary of just under 7'005 words. This is smallish and models fit on this data should be manageable on modest hardware." ] }, { @@ -177,7 +177,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -209,7 +209,7 @@ "metadata": {}, "source": [ "Running this piece creates a long list of lines. Printing statistics \n", - "on the list, we can see that we will have exactly 33'518 training \n", + "on the list, we can see that we will have exactly 33'549 training \n", "patterns to fit our model." 
] }, @@ -227,7 +227,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -248,7 +248,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -301,7 +301,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -351,7 +351,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -409,7 +409,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -437,7 +437,7 @@ "\n", "We can do this with array slicing.\n", "\n", - "After separating, we need to one hot encode the output word. This means converting it from \n", + "After separating, we need to one-hot-encode the output word. This means converting it from \n", "an integer to a vector of 0 values, one for each word in the vocabulary, with a 1 to indicate \n", "the specific word at the index of the words integer value.\n", "\n", @@ -445,7 +445,7 @@ "the ground truth from which to learn from is 0 for all words except the actual word that comes \n", "next.\n", "\n", - "Keras provides the `to_categorical()` that can be used to one hot encode the output words for each \n", + "Keras provides the `to_categorical()` that can be used to one-hot-encode the output words for each \n", "input-output sequence pair.\n", "\n", "Finally, we need to specify to the Embedding layer how long input sequences are. We know that there \n", @@ -456,7 +456,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -500,14 +500,16 @@ "\n", "Common values are 50, 100, and 300. We will use 50 here, but consider testing smaller or larger values.\n", "\n", - "We will use two LSTM hidden layers with 100 memory cells each. 
More memory cells and a deeper network may achieve better results.\n", + "We will use two LSTM hidden layers with 100 memory units each. By means of `return_sequences=True`, the outputs of the first LSTM hidden \n", + "layer (consisting of 100 units) at each position serve in turn as inputs to the second LSTM hidden layer. \n", + "More memory cells and a deeper network may achieve better results.\n", "\n", - "A dense fully connected layer with 100 neurons connects to the LSTM hidden layers to interpret the features extracted from the sequence. The output layer predicts the next word as a single vector the size of the vocabulary with a probability for each word in the vocabulary. A softmax activation function is used to ensure the outputs have the characteristics of normalized probabilities." + "A dense fully connected layer with 100 neurons connects to the final output of the second LSTM hidden layer to interpret the features extracted from the sequence. The output layer predicts the next word as a single vector the size of the vocabulary with a probability for each word in the vocabulary. A softmax activation function is used to ensure the outputs have the characteristics of normalized probabilities." 
] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -529,26 +531,26 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Model: \"sequential\"\n", + "Model: \"sequential_1\"\n", "_________________________________________________________________\n", "Layer (type) Output Shape Param # \n", "=================================================================\n", - "embedding (Embedding) (None, 50, 50) 350300 \n", + "embedding_1 (Embedding) (None, 50, 50) 350300 \n", "_________________________________________________________________\n", - "lstm (LSTM) (None, 50, 100) 60400 \n", + "lstm_2 (LSTM) (None, 50, 100) 60400 \n", "_________________________________________________________________\n", - "lstm_1 (LSTM) (None, 100) 80400 \n", + "lstm_3 (LSTM) (None, 100) 80400 \n", "_________________________________________________________________\n", - "dense (Dense) (None, 100) 10100 \n", + "dense_2 (Dense) (None, 100) 10100 \n", "_________________________________________________________________\n", - "dense_1 (Dense) (None, 7006) 707606 \n", + "dense_3 (Dense) (None, 7006) 707606 \n", "=================================================================\n", "Total params: 1,208,806\n", "Trainable params: 1,208,806\n", @@ -1409,7 +1411,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 22, "metadata": {}, "outputs": [], "source": [ @@ -1444,7 +1446,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -1477,7 +1479,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -1506,9 +1508,20 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 58, "metadata": {}, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + 
"text": [ + "Requirement already satisfied: h5py<3.0.0 in /opt/conda/lib/python3.7/site-packages (2.10.0)\n", + "Requirement already satisfied: six in /opt/conda/lib/python3.7/site-packages (from h5py<3.0.0) (1.14.0)\n", + "Requirement already satisfied: numpy>=1.7 in /opt/conda/lib/python3.7/site-packages (from h5py<3.0.0) (1.19.1)\n", + "\u001b[33mWARNING: You are using pip version 20.1.1; however, version 21.0.1 is available.\n", + "You should consider upgrading via the '/opt/conda/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n" + ] + }, { "ename": "AttributeError", "evalue": "'str' object has no attribute 'decode'", @@ -1516,7 +1529,7 @@ "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m<ipython-input-26-51509c3e7321>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# load the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mload_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'model_goethe_generator.h5'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m<ipython-input-58-7352f2d33ec0>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# load the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0mget_ipython\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msystem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"pip3 install 'h5py<3.0.0'\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mload_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'model_goethe_generator.h5'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m/opt/conda/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/save.py\u001b[0m in \u001b[0;36mload_model\u001b[0;34m(filepath, custom_objects, compile)\u001b[0m\n\u001b[1;32m 144\u001b[0m if (h5py is not None and (\n\u001b[1;32m 145\u001b[0m isinstance(filepath, h5py.File) or h5py.is_hdf5(filepath))):\n\u001b[0;32m--> 146\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mhdf5_format\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_model_from_hdf5\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcustom_objects\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcompile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 147\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msix\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstring_types\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/conda/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/hdf5_format.py\u001b[0m in \u001b[0;36mload_model_from_hdf5\u001b[0;34m(filepath, custom_objects, compile)\u001b[0m\n\u001b[1;32m 164\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmodel_config\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 165\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'No model found in config file.'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 166\u001b[0;31m \u001b[0mmodel_config\u001b[0m 
\u001b[0;34m=\u001b[0m \u001b[0mjson\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloads\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel_config\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdecode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'utf-8'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 167\u001b[0m model = model_config_lib.model_from_config(model_config,\n\u001b[1;32m 168\u001b[0m custom_objects=custom_objects)\n", "\u001b[0;31mAttributeError\u001b[0m: 'str' object has no attribute 'decode'" @@ -1525,6 +1538,7 @@ ], "source": [ "# load the model\n", + "!pip3 install 'h5py<3.0.0'\n", "model = load_model('model_goethe_generator.h5')" ] }, @@ -1537,7 +1551,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 54, "metadata": {}, "outputs": [], "source": [ @@ -1558,14 +1572,14 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 55, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "ich fühls du schwebst um mich erflehter geist enthülle dich ha wies in meinem herzen reißt zu neuen gefühlen all meine sinnen sich erwühlen ich fühle ganz mein herz dir hingegeben du mußt du mußt und kostet es mein leben er faßt das buch und spricht das zeichen des geistes geheimnißvoll\n", + "gleich in die hölle springen xenien als insekten sind wir da mit kleinen scharfen scheren satan unsern herrn papa nach würden zu verehren hennings seht wie sie in gedrängter schaar naiv zusammen scherzen am ende sagen sie noch gar sie hätten gute herzen musaget ich mag in diesem hexenheer mich gar\n", "\n" ] } @@ -1589,7 +1603,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 56, "metadata": {}, "outputs": [ { @@ -1597,10 +1611,10 @@ "output_type": "stream", "text": [ "(1, 50)\n", - "[[ 2 1813 11 3053 69 22 3054 83 3055 36 848 696 9 128\n", - " 169 3056 8 343 3057 174 105 341 19 3058 2 607 140 44\n", - " 134 45 1814 11 255 11 
255 1 3059 13 44 115 24 264\n", - " 7 370 1 817 7 454 49 844]]\n" + "[[ 86 9 3 337 1409 6293 42 6294 57 38 37 17 635 6295\n", + " 6296 2294 1254 149 6297 52 2405 8 1249 6298 250 25 12 9\n", + " 1715 1403 6299 1004 1557 75 261 167 12 51 88 12 6300 421\n", + " 169 6301 2 117 9 166 2472 22]]\n" ] } ], @@ -1621,14 +1635,14 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 29, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[1309]\n" + "[183]\n" ] } ], @@ -1647,14 +1661,14 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 30, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "geheimnißvoll\n" + "liebe\n" ] } ], @@ -1678,7 +1692,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 43, "metadata": {}, "outputs": [], "source": [ @@ -1695,7 +1709,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 44, "metadata": {}, "outputs": [], "source": [ @@ -1732,14 +1746,14 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 45, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "aus die kunst herum thut das leben sitzt dadrinne und meiner gestein mit euch stehn wir habt ihr euch ums ältste bitten die hölle schon darneben und meiner wach schleicht mag einen geist den wedel immer geschäftiger wirkenskraft und ganzes geisterzahn uns zeigte mir mich ein große loch in die\n" + "ringsum gegenwart erstanden erstanden erstanden judex judex fremder fremder fremder seltsamen seltsamen moralisch moralisch wiederholt zittert zittert wiederholt wiederholt zittert wiederholt seltsamen seltsamen wiederholt wiederholt zittert zittert werden wohlbekannte wohlbekannte wohlbekannte einzunehmen gegenwart meinem meinem meinem himmelslicht himmelslicht himmelslicht hieß hieß hieß hieß damaged damaged samstags genoß genoß genoß genoß\n" ] } ], @@ -1788,7 +1802,7 @@ }, { 
"cell_type": "code", - "execution_count": 36, + "execution_count": 63, "metadata": {}, "outputs": [], "source": [ @@ -1804,7 +1818,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 64, "metadata": {}, "outputs": [ { @@ -1813,7 +1827,7 @@ "<AxesSubplot:>" ] }, - "execution_count": 37, + "execution_count": 64, "metadata": {}, "output_type": "execute_result" }, @@ -1850,7 +1864,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 65, "metadata": {}, "outputs": [], "source": [ @@ -1859,7 +1873,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 66, "metadata": {}, "outputs": [ { @@ -1868,7 +1882,7 @@ "<AxesSubplot:>" ] }, - "execution_count": 39, + "execution_count": 66, "metadata": {}, "output_type": "execute_result" }, @@ -2227,7 +2241,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 59, "metadata": {}, "outputs": [ { @@ -2325,7 +2339,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 60, "metadata": {}, "outputs": [ { @@ -2333,7 +2347,7 @@ "output_type": "stream", "text": [ "Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5\n", - "553467904/553467096 [==============================] - 13s 0us/step\n", + "553467904/553467096 [==============================] - 16s 0us/step\n", "Model: \"model\"\n", "_________________________________________________________________\n", "Layer (type) Output Shape Param # \n", @@ -2396,8 +2410,8 @@ "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m<ipython-input-50-9545493bef71>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0;31m# extract features from all 
images\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0mdirectory\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'Flicker8k_Dataset/'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m \u001b[0mfeatures\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mextract_features\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdirectory\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Extracted Features: %d'\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfeatures\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;31m# save to file\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m<ipython-input-50-9545493bef71>\u001b[0m in \u001b[0;36mextract_features\u001b[0;34m(directory)\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0;31m# extract features from each photo\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0mfeatures\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 20\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mlistdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdirectory\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 21\u001b[0m \u001b[0;31m# load an image from file\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0mfilename\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdirectory\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m'/'\u001b[0m 
\u001b[0;34m+\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m<ipython-input-60-9545493bef71>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0;31m# extract features from all images\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0mdirectory\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'Flicker8k_Dataset/'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m \u001b[0mfeatures\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mextract_features\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdirectory\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Extracted Features: %d'\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfeatures\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;31m# save to file\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m<ipython-input-60-9545493bef71>\u001b[0m in \u001b[0;36mextract_features\u001b[0;34m(directory)\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0;31m# extract features from each photo\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0mfeatures\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 20\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mlistdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdirectory\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 21\u001b[0m 
\u001b[0;31m# load an image from file\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0mfilename\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdirectory\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m'/'\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'Flicker8k_Dataset/'" ] } -- GitLab