Add files via upload

This commit is contained in:
Vita Aeterna 2023-01-04 23:57:18 +01:00 committed by GitHub
commit e8340bf05a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 543 additions and 0 deletions

124
AI.ipynb Normal file
View File

@@ -0,0 +1,124 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "8189e15a",
"metadata": {},
"outputs": [],
"source": [
"# Importieren der benötigten Pakete\n",
"import tensorflow as tf\n",
"from tensorflow.keras.datasets import cifar10\n",
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n",
"from tensorflow.keras.layers import Conv2D, MaxPooling2D\n",
"from tensorflow.keras.callbacks import TensorBoard\n",
"\n",
"import numpy as np\n",
"import pickle\n",
"import time\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"# Öffnen der Input Datei um sie in Binär zu lesen (rb = read-binary)\n",
"pickle_in = open(\"X.pickle\",\"rb\")\n",
"# Geöffnete Datei in das Programm laden\n",
"X = pickle.load(pickle_in)\n",
"\n",
"# Selbe nochmal nur mit der Output Layer\n",
"pickle_in = open(\"y.pickle\",\"rb\")\n",
"y = pickle.load(pickle_in)\n",
"\n",
"# Den Output in ein Numpy-Array umwandeln, um ihn später zu verwenden\n",
"y = np.array(y)\n",
"# Input (Bild-Pixel) durch 255 (max. RGB-Wert) dividieren, um normalisierte Werte zwischen 0 und 1 zu erhalten\n",
"X = X/255.0\n",
"\n",
"\n",
"dense_layer = [1]\n",
"layer_size = [64]\n",
"conv_layer = [3]\n",
"\n",
"\n",
"\n",
"for dense_layer in dense_layer:\n",
" for layer_size in layer_size:\n",
" for conv_layer in conv_layer:\n",
"            # Definieren des Datei-Namens\n",
" NAME = \"{}-conv-{}-nodes-{}-dense\".format(conv_layer, layer_size, dense_layer)\n",
" # Ein Tensorboard erstellen\n",
" tensorboard = tf.keras.callbacks.TensorBoard(log_dir='logs/{}'.format(NAME))\n",
"\n",
" print(NAME)\n",
"\n",
" # Sequential wird für Layers benutzt die nur einen Input und einen Output haben\n",
" model = Sequential()\n",
"\n",
"        # Hinzufügen einer \"Hidden Layer\" mit der oben definierten Größe 64, (3,3)-Kernen und dem vorher im Skript definierten Input-Shape\n",
" model.add(Conv2D(layer_size, (3,3), input_shape=X.shape[1:]))\n",
" # Activation Layer ist relu sprich es gibt uns eine lineare Funktion aus wenn der Input Positiv ist\n",
" model.add(Activation('relu'))\n",
" # Das Maximum-Pooling oder Max-Pooling ist eine Pooling-Operation, bei der der maximale oder größte Wert berechnet wird\n",
" model.add(MaxPooling2D(pool_size=(2, 2)))\n",
" \n",
" \n",
" for l in range(conv_layer-1):\n",
" model.add(Conv2D(layer_size, (3, 3)))\n",
" model.add(Activation('relu'))\n",
" model.add(MaxPooling2D(pool_size=(2, 2)))\n",
" \n",
" # Alle Dimensionen bis auf eine entfernen\n",
" model.add(Flatten()) \n",
" \n",
" for l in range(dense_layer):\n",
" model.add(Dense(512))\n",
" model.add(Activation('relu'))\n",
"\n",
"\n",
" # Output Layer\n",
" model.add(Dense(1))\n",
" # Die Sigmoid Kurve ähnelt der Form eines S und kann einen Wert zwischen 0 und 1 annehmen (loss and accuracy)\n",
" model.add(Activation('sigmoid'))\n",
" \n",
" \n",
"        # Binary_Crossentropy wird für binäre Klassifikation verwendet (Output zwischen 0 und 1)\n",
"        # Der Adam-Optimizer ist ein Optimizer in TensorFlow, der eine adaptive Lernrate verwendet, um die Gewichte in einem neuronalen Netzwerk zu aktualisieren\n",
"        # Es soll nach Genauigkeit trainieren und diese aufzeichnen\n",
" model.compile(loss='binary_crossentropy',\n",
" optimizer='adam',\n",
" metrics=['accuracy'])\n",
"        # Anfangen, das Modell mit den Pickle-Dateien für 10 Epochen zu trainieren. Außerdem die Logs für das Tensorboard speichern und anzeigen\n",
" model.fit(X, y, batch_size=32, epochs=10, validation_split=0.1, callbacks=tensorboard)\n",
" \n",
" model.save('64x3-CNN.model')\n",
" "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

71
EndProduct.ipynb Normal file
View File

@@ -0,0 +1,71 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "bc93610d",
"metadata": {},
"outputs": [],
"source": [
"# Importieren der Pakete\n",
"import cv2\n",
"import tensorflow as tf\n",
"\n",
"# Kategorien definieren. In diesem Fall nur 2, da wir mit einem Binary_Crossentropy arbeiten (nur 2 Klassifizierungen)\n",
"category = ['Dog', 'Cat'] \n",
"\n",
"# Dateipfad und funktion definieren\n",
"def prepare(filepath):\n",
" \n",
"    # Größe des Bildes bestimmen (je kleiner, desto weniger Lernmaterial)\n",
" IMG_SIZE = 50\n",
" \n",
"    # Bild in ein Array umwandeln und in Schwarz-Weiß (Graustufen) einlesen\n",
" img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)\n",
" \n",
"    # Bildgröße anpassen\n",
" new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n",
" \n",
" # Bild als Schwarz-Weiß und mit neuer Größe zurückgeben\n",
" return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)\n",
"\n",
"#Model das wir bereits erstellt haben, hier reinladen und verwenden (WICHTIG: Name MUSS übereinstimmen)\n",
"model = tf.keras.models.load_model(\"64x3-CNN.model\")\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f07fa08",
"metadata": {},
"outputs": [],
"source": [
"prediction = model.predict([prepare('cat.jpg')])\n",
"print(prediction)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

348
ImageUsing.ipynb Normal file

File diff suppressed because one or more lines are too long