Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
104 changes: 97 additions & 7 deletions 2. Training and Detection.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -3160,13 +3160,103 @@
" break"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 11. Real Time Detections from your Webcam"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This part helps you to use your cellphone camera to do the detection\n",
"1-Install IP Webcam on your cellphone from app store\n",
"2-Run the app and copy the IP that is shown on the screen in the \"URL\" section in the cell below.\n",
"3-Run the cell and detect using your cellphone webcam"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# opencv-python-headless has no GUI support (no cv2.imshow); uninstall it so\n",
"# the full opencv-python build's window functions work in the loop below.\n",
"!pip uninstall opencv-python-headless -y"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Dependencies for pulling JPEG frames over HTTP from the IP Webcam app.\n",
"import requests\n",
"import cv2\n",
"import numpy as np\n",
"import imutils  # NOTE(review): only referenced by commented-out resize code below -- confirm it is still needed\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Continuously fetch JPEG snapshots from the IP Webcam app and run the object\n",
"# detector on each frame.  Press 'q' in the display window to stop.\n",
"# NOTE(review): tf, detect_fn, viz_utils and category_index are defined by\n",
"# earlier cells of this notebook and must have been run first.\n",
"url = \"http://192.168.137.34:8080/shot.jpg\"\n",
"\n",
"while True:\n",
"    # Timeout so a dropped phone connection cannot hang the loop forever.\n",
"    img_resp = requests.get(url, timeout=5)\n",
"    img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)\n",
"    img = cv2.imdecode(img_arr, -1)\n",
"    if img is None:\n",
"        # Skip corrupt or partially transferred JPEGs instead of crashing.\n",
"        continue\n",
"    image_np = np.array(img)\n",
"    \n",
"    input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\n",
"    detections = detect_fn(input_tensor)\n",
"    \n",
"    num_detections = int(detections.pop('num_detections'))\n",
"    detections = {key: value[0, :num_detections].numpy()\n",
"                  for key, value in detections.items()}\n",
"    detections['num_detections'] = num_detections\n",
"\n",
"    # detection_classes should be ints.\n",
"    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\n",
"\n",
"    # Model class ids are 0-based; category_index is 1-based.\n",
"    label_id_offset = 1\n",
"    image_np_with_detections = image_np.copy()\n",
"\n",
"    viz_utils.visualize_boxes_and_labels_on_image_array(\n",
"        image_np_with_detections,\n",
"        detections['detection_boxes'],\n",
"        detections['detection_classes']+label_id_offset,\n",
"        detections['detection_scores'],\n",
"        category_index,\n",
"        use_normalized_coordinates=True,\n",
"        max_boxes_to_draw=50,\n",
"        min_score_thresh=.8,\n",
"        agnostic_mode=False)\n",
"\n",
"    cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600)))\n",
"    \n",
"    if cv2.waitKey(10) & 0xFF == ord('q'):\n",
"        cv2.destroyAllWindows()\n",
"        break"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "rzlM4jt0pfDJ"
},
"source": [
"# 10. Freezing the Graph"
"# 12. Freezing the Graph"
]
},
{
Expand Down Expand Up @@ -3435,7 +3525,7 @@
"id": "wTPmdqaXpfDK"
},
"source": [
"# 11. Conversion to TFJS"
"# 13. Conversion to TFJS"
]
},
{
Expand Down Expand Up @@ -3676,7 +3766,7 @@
"id": "VtUw73FHpfDK"
},
"source": [
"# 12. Conversion to TFLite"
"# 14. Conversion to TFLite"
]
},
{
Expand Down Expand Up @@ -4125,7 +4215,7 @@
"id": "5NQqZRdA21Uc"
},
"source": [
"# 13. Zip and Export Models "
"# 15. Zip and Export Models "
]
},
{
Expand Down Expand Up @@ -4163,9 +4253,9 @@
"provenance": []
},
"kernelspec": {
"display_name": "tfod",
"display_name": "tfodj",
"language": "python",
"name": "tfod"
"name": "tfodj"
},
"language_info": {
"codemirror_mode": {
Expand All @@ -4177,7 +4267,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
"version": "3.9.1"
}
},
"nbformat": 4,
Expand Down