יסודות בינה מלאכותית : 07-RB27 – אנימציה , OPENPOSE YOLO
תכנון מערכת בקרה בעזרת CHATGPT
- צפה בשני הסרטונים: האחד על חיישנים אינדוקטיביים, והשני על חיישנים מגנטיים
חיישן אינדוקטיבי
זרם משתנה (AC או DC משתנה) יוצר שטף מגנטי משתנה, מה שנחוץ להשראת זרמים או מתח במעגלים סמוכים.
חוק לנץ (Lenz's Law) :
חיישן פוטואלקטרי
2. בנה בעזרת בינה מלאכותית מערכת של חיישנים למפעל – שיודעת לזהות סיבוב של זרוע רובוטית ללא מגע – איזה חיישן כדאי לקחת – בקש טבלת השוואה
3. בנה בעזרת בינה מלאכותית מערכת למפעל מזון שבודקת אם יש חלילה נוכחות של ברגים או חלקי מתכת בתוך אוכל? איזה חיישן נדרש
YOLO
חלק 2 :
1 |
pip install ultralytics opencv-python |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
from pathlib import Path

from ultralytics import YOLO

# Detect people in a single image with YOLOv8 and display the result.

# Load the YOLOv8 medium model pre-trained on the COCO dataset.
# ('yolov8n.pt' is the nano version; 'yolov8s.pt' is the small version.)
model = YOLO('yolov8m.pt')

# Define the image path.
image_path = Path(r'd:\temp\1.jpg')

# Run YOLOv8 detection on the image.
results = model(image_path)

# Filter results to include only humans (class id 0 is "person" in COCO).
humans = [box for box in results[0].boxes if box.cls[0] == 0]

# FIX: the original built `humans` but never used it, and results[0].show()
# displays ALL detections (every COCO class), not only people. Report the
# person count explicitly so the filtered list actually serves a purpose.
print(f"Detected {len(humans)} person(s)")

# Open the image with bounding boxes for all detections.
results[0].show()
ספירה של אנשים בתמונה
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 |
from ultralytics import YOLO

# Count how many people YOLOv8 finds in an image and classify the crowd size.

# Load the YOLOv8 nano model and run detection on the image.
detector = YOLO('yolov8n.pt')
detections = detector(r'd:\temp\1.jpg')

# COCO class id 0 corresponds to "person"; count matching detections.
people_count = sum(1 for det in detections[0].boxes if det.cls[0] == 0)

# Report whether the scene is sparsely or densely populated.
if people_count < 4:
    print("Low people")
else:
    print("Many people")
ספירה של אנשים מתקדם
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 |
from ultralytics import YOLO
import cv2

# Detect people with >60% confidence, draw their boxes, and overlay the count.

# Load the YOLOv8 nano model and run detection on the image.
model = YOLO('yolov8n.pt')
results = model(r'd:\temp\many-car.jpeg')

# Keep only high-confidence person boxes
# (COCO class id 0 is "person"; confidence threshold is 60%).
person_boxes = [
    box
    for box in results[0].boxes
    if box.cls[0] == 0 and box.conf[0] > 0.6
]
# FIX: the original maintained a separate people_count counter in lockstep
# with person_boxes inside a manual loop; the count is simply the list length.
people_count = len(person_boxes)

# Draw only the high-confidence person boxes on a copy of the original image.
annotated_img = results[0].orig_img.copy()
for person_box in person_boxes:
    x1, y1, x2, y2 = map(int, person_box.xyxy[0])  # integer pixel coordinates
    cv2.rectangle(annotated_img, (x1, y1), (x2, y2), (0, 255, 0), 2)

# Overlay the people count in the top-left corner of the image.
cv2.putText(
    annotated_img,
    f'People Count: {people_count}',
    (10, 30),
    cv2.FONT_HERSHEY_SIMPLEX,
    1,
    (0, 255, 0),
    2
)

# Display the annotated image until a key is pressed, then close the window.
cv2.imshow("People Detection", annotated_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
זיהוי בוידאו :
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 |
from ultralytics import YOLO
import cv2
from IPython.display import display, clear_output
from PIL import Image
import numpy as np

# Run YOLOv8 person detection frame-by-frame on a video and render the
# annotated frames inline in a Jupyter notebook.

# YOLOv8 medium model, pre-trained on the COCO dataset.
detector = YOLO('yolov8m.pt')

# Open the input video.
video_path = r'd:\temp\AK4.mp4'
capture = cv2.VideoCapture(video_path)
if not capture.isOpened():
    print("Error opening video file")

MAX_FRAMES = 100  # cap displayed frames for notebook display purposes
RED = (0, 0, 255)  # BGR color for boxes and labels

processed = 0
while capture.isOpened() and processed < MAX_FRAMES:
    ok, frame = capture.read()
    if not ok:
        break

    # Run YOLO detection on the current frame.
    detections = detector(frame)

    # Annotate every person (COCO class id 0) detected above 60% confidence.
    for det in detections[0].boxes:
        is_person = det.cls[0] == 0
        is_confident = det.conf[0] > 0.6
        if is_person and is_confident:
            # Bounding box as integer pixel coordinates.
            left, top, right, bottom = map(int, det.xyxy[0])
            cv2.rectangle(frame, (left, top), (right, bottom), RED, 2)

            # Confidence as a percentage, placed just above the box.
            label = f"{det.conf[0] * 100:.1f}%"
            cv2.putText(
                frame,
                label,
                (left, top - 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                RED,
                2
            )

    # Convert BGR -> RGB and show the annotated frame in the notebook.
    clear_output(wait=True)
    display(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
    processed += 1

# Release the video capture handle.
capture.release()
. אנימציה – בקאנבה – CANVA
4.1 דרך אחת
4.2 דרך 2