adjust to ailab
parent a882a63b6d
commit 7708eab0f4
@@ -0,0 +1,27 @@
name: Gitea Actions Gradio Demo
run-name: ${{ github.actor }} is testing out Gitea Actions 🚀
on: [push]
jobs:
  Explore-Gitea-Actions:
    runs-on: ubuntu-18.04
    steps:
      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Build and push
        uses: docker/build-push-action@v4
        with:
          push: false
          tags: artifacts.iflytek.com/docker-private/atp/whybeyoung/yolov5:latest
      - run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
      - name: Check out repository code
        uses: actions/checkout@v3
      - run: echo "💡 The ${{ github.repository }} repository has been cloned to the runner."
      - run: echo "🖥️ The workflow is now ready to test your code on the runner."
      - name: List files in the repository
        run: |
          ls ${{ github.workspace }}
      - run: echo "🍏 This job's status is ${{ job.status }}."
@@ -48,4 +48,6 @@
!img_example/*
!packages.txt

app copy.py

model_download/yolov5_model_all.sh
@@ -0,0 +1,24 @@
FROM docker.io/library/python:3.8.9

RUN sed -i 's http://deb.debian.org http://mirrors.ustc.edu.cn g' /etc/apt/sources.list && sed -i 's http://archive.ubuntu.com http://mirrors.ustc.edu.cn g' /etc/apt/sources.list && sed -i '/security/d' /etc/apt/sources.list && apt-get update && apt-get install -y git git-lfs ffmpeg libsm6 libxext6 cmake libgl1-mesa-glx && rm -rf /var/lib/apt/lists/* && git lfs install


WORKDIR /home/user/app
RUN useradd -m -u 1000 user
RUN chown -R 1000.1000 /home/user

RUN --mount=target=/root/packages.txt,source=packages.txt sed -i 's http://deb.debian.org http://mirrors.ustc.edu.cn g' /etc/apt/sources.list && sed -i 's http://archive.ubuntu.com http://mirrors.ustc.edu.cn g' /etc/apt/sources.list && sed -i '/security/d' /etc/apt/sources.list && apt-get update && xargs -r -a /root/packages.txt apt-get install -y && rm -rf /var/lib/apt/lists/*

USER 1000
RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple/
RUN pip install --no-cache-dir gradio==3.0.9
RUN pip install --no-cache-dir pip==22.3.1 && pip install --no-cache-dir datasets "huggingface-hub>=0.12.1" "protobuf<4" "click<8.1"
#COPY --link --chown=1000 --from=lfs /app /home/user/app
#RUN --mount=target=pre-requirements.txt,source=pre-requirements.txt pip install --no-cache-dir -r pre-requirements.txt
RUN --mount=target=requirements.txt,source=requirements.txt pip install --no-cache-dir -r requirements.txt
COPY --link --chown=1000 ./ /home/user/app
COPY --link --chown=1000 ultralytics /home/user/app/ultralytics
COPY --chown=1000 master /home/user/.cache/torch/hub/master.zip
COPY --chown=1000 --link fonts /home/user/app
ADD --chown=1000 model_download/yolov5 /home/user/app
CMD ["python", "app.py"]
app.py
@@ -140,11 +140,13 @@ def model_loading(model_name, device, opt=[]):
        # load model
        model = torch.hub.load(model_path,
                               model_name,
-                              force_reload=[True if "refresh_yolov5" in opt else False][0],
+                              force_reload=False,
                               source='local',
                               device=device,
                               _verbose=False)
    except Exception as e:
        print(e)
        return None
    else:
        print(f"🚀 welcome to {GYD_VERSION},{model_name} loaded successfully!")
@@ -664,6 +666,7 @@ def main(args):
            favicon_path="./icon/logo.ico",  # web icon
            show_error=True,  # Display error message in browser console
            quiet=True,  # Suppress most print statements
+           server_name="0.0.0.0"
        )
    else:
        gyd.launch(
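
The options passed to `gyd.launch(...)` are standard Gradio launch parameters. A minimal sketch showing the same flags on a self-contained app, assuming the gradio 3.x pin from the Dockerfile above (the echo function is illustrative):

```python
# Minimal sketch of the same launch flags (gradio 3.x, as pinned in the
# Dockerfile); the demo function itself is a placeholder.
import gradio as gr

def echo(text):
    return text

demo = gr.Interface(fn=echo, inputs="text", outputs="text")
demo.launch(
    show_error=True,        # surface errors in the browser console
    quiet=True,             # suppress most server-side prints
    server_name="0.0.0.0",  # listen on all interfaces, needed inside Docker
)
```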
@@ -0,0 +1,222 @@
# Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
.git
.cache
.idea
runs
output
coco
storage.googleapis.com

data/samples/*
**/results*.csv
*.jpg

# Neural Network weights -----------------------------------------------------------------------------------------------
**/*.pt
**/*.pth
**/*.onnx
**/*.engine
**/*.mlmodel
**/*.torchscript
**/*.torchscript.pt
**/*.tflite
**/*.h5
**/*.pb
*_saved_model/
*_web_model/
*_openvino_model/

# Below Copied From .gitignore -----------------------------------------------------------------------------------------
# Below Copied From .gitignore -----------------------------------------------------------------------------------------


# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
wandb/
.installed.cfg
*.egg

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# dotenv
.env

# virtualenv
.venv*
venv*/
ENV*/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/


# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------

# General
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon
Icon?

# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk


# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff:
.idea/*
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
.html  # Bokeh Plots
.pg  # TensorFlow Frozen Graphs
.avi  # videos

# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml

# Gradle:
.idea/**/gradle.xml
.idea/**/libraries

# CMake
cmake-build-debug/
cmake-build-release/

# Mongo Explorer plugin:
.idea/**/mongoSettings.xml

## File-based project format:
*.iws

## Plugin-specific files:

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
@@ -0,0 +1,2 @@
# this drops notebooks from GitHub language stats
*.ipynb linguist-vendored
@@ -0,0 +1,85 @@
name: 🐛 Bug Report
# title: " "
description: Problems with YOLOv5
labels: [bug, triage]
body:
  - type: markdown
    attributes:
      value: |
        Thank you for submitting a YOLOv5 🐛 Bug Report!

  - type: checkboxes
    attributes:
      label: Search before asking
      description: >
        Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar bug report already exists.
      options:
        - label: >
            I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report.
          required: true

  - type: dropdown
    attributes:
      label: YOLOv5 Component
      description: |
        Please select the part of YOLOv5 where you found the bug.
      multiple: true
      options:
        - "Training"
        - "Validation"
        - "Detection"
        - "Export"
        - "PyTorch Hub"
        - "Multi-GPU"
        - "Evolution"
        - "Integrations"
        - "Other"
    validations:
      required: false

  - type: textarea
    attributes:
      label: Bug
      description: Provide console output with error messages and/or screenshots of the bug.
      placeholder: |
        💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Environment
      description: Please specify the software and hardware you used to produce the bug.
      placeholder: |
        - YOLO: YOLOv5 🚀 v6.0-67-g60e42e1 torch 1.9.0+cu111 CUDA:0 (A100-SXM4-40GB, 40536MiB)
        - OS: Ubuntu 20.04
        - Python: 3.9.0
    validations:
      required: false

  - type: textarea
    attributes:
      label: Minimal Reproducible Example
      description: >
        When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem.
        This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
      placeholder: |
        ```
        # Code to reproduce your issue here
        ```
    validations:
      required: false

  - type: textarea
    attributes:
      label: Additional
      description: Anything else you would like to share?

  - type: checkboxes
    attributes:
      label: Are you willing to submit a PR?
      description: >
        (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature.
        See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started.
      options:
        - label: Yes I'd like to help by submitting a PR!
@@ -0,0 +1,8 @@
blank_issues_enabled: true
contact_links:
  - name: 💬 Forum
    url: https://community.ultralytics.com/
    about: Ask on Ultralytics Community Forum
  - name: Stack Overflow
    url: https://stackoverflow.com/search?q=YOLOv5
    about: Ask on Stack Overflow with 'YOLOv5' tag
@@ -0,0 +1,50 @@
name: 🚀 Feature Request
description: Suggest a YOLOv5 idea
# title: " "
labels: [enhancement]
body:
  - type: markdown
    attributes:
      value: |
        Thank you for submitting a YOLOv5 🚀 Feature Request!

  - type: checkboxes
    attributes:
      label: Search before asking
      description: >
        Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar feature request already exists.
      options:
        - label: >
            I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar feature requests.
          required: true

  - type: textarea
    attributes:
      label: Description
      description: A short description of your feature.
      placeholder: |
        What new feature would you like to see in YOLOv5?
    validations:
      required: true

  - type: textarea
    attributes:
      label: Use case
      description: |
        Describe the use case of your feature request. It will help us understand and prioritize the feature request.
      placeholder: |
        How would this feature be used, and who would use it?

  - type: textarea
    attributes:
      label: Additional
      description: Anything else you would like to share?

  - type: checkboxes
    attributes:
      label: Are you willing to submit a PR?
      description: >
        (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature.
        See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started.
      options:
        - label: Yes I'd like to help by submitting a PR!
@@ -0,0 +1,33 @@
name: ❓ Question
description: Ask a YOLOv5 question
# title: " "
labels: [question]
body:
  - type: markdown
    attributes:
      value: |
        Thank you for asking a YOLOv5 ❓ Question!

  - type: checkboxes
    attributes:
      label: Search before asking
      description: >
        Please search the [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) to see if a similar question already exists.
      options:
        - label: >
            I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) and found no similar questions.
          required: true

  - type: textarea
    attributes:
      label: Question
      description: What is your question?
      placeholder: |
        💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Additional
      description: Anything else you would like to share?
@@ -0,0 +1,9 @@
<!--
Thank you for submitting a YOLOv5 🚀 Pull Request! We want to make contributing to YOLOv5 as easy and transparent as possible. A few tips to get you started:

- Search existing YOLOv5 [PRs](https://github.com/ultralytics/yolov5/pull) to see if a similar PR already exists.
- Link this PR to a YOLOv5 [issue](https://github.com/ultralytics/yolov5/issues) to help us understand what bug fix or feature is being implemented.
- Provide before and after profiling/inference/training results to help us quantify the improvement your PR provides (if applicable).

Please see our ✅ [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) for more details.
-->
@@ -0,0 +1,23 @@
version: 2
updates:
  - package-ecosystem: pip
    directory: "/"
    schedule:
      interval: weekly
      time: "04:00"
    open-pull-requests-limit: 10
    reviewers:
      - glenn-jocher
    labels:
      - dependencies

  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: weekly
      time: "04:00"
    open-pull-requests-limit: 5
    reviewers:
      - glenn-jocher
    labels:
      - dependencies
@@ -0,0 +1,153 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# YOLOv5 Continuous Integration (CI) GitHub Actions tests

name: YOLOv5 CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
  schedule:
    - cron: '0 0 * * *'  # runs at 00:00 UTC every day

jobs:
  Benchmarks:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest ]
        python-version: [ '3.10' ]  # requires python<=3.10
        model: [ yolov5n ]
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'  # caching pip dependencies
      - name: Install requirements
        run: |
          python -m pip install --upgrade pip wheel
          pip install -r requirements.txt coremltools openvino-dev tensorflow-cpu --extra-index-url https://download.pytorch.org/whl/cpu
          python --version
          pip --version
          pip list
      - name: Benchmark DetectionModel
        run: |
          python benchmarks.py --data coco128.yaml --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29
      - name: Benchmark SegmentationModel
        run: |
          python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 --hard-fail 0.22
      - name: Test predictions
        run: |
          python export.py --weights ${{ matrix.model }}-cls.pt --include onnx --img 224
          python detect.py --weights ${{ matrix.model }}.onnx --img 320
          python segment/predict.py --weights ${{ matrix.model }}-seg.onnx --img 320
          python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224

  Tests:
    timeout-minutes: 60
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, windows-latest ]  # macos-latest bug https://github.com/ultralytics/yolov5/pull/9049
        python-version: [ '3.10' ]
        model: [ yolov5n ]
        include:
          - os: ubuntu-latest
            python-version: '3.7'  # '3.6.8' min
            model: yolov5n
          - os: ubuntu-latest
            python-version: '3.8'
            model: yolov5n
          - os: ubuntu-latest
            python-version: '3.9'
            model: yolov5n
          - os: ubuntu-latest
            python-version: '3.8'  # torch 1.7.0 requires python >=3.6, <=3.8
            model: yolov5n
            torch: '1.7.0'  # min torch version CI https://pypi.org/project/torchvision/
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'  # caching pip dependencies
      - name: Install requirements
        run: |
          python -m pip install --upgrade pip wheel
          if [ "${{ matrix.torch }}" == "1.7.0" ]; then
            pip install -r requirements.txt torch==1.7.0 torchvision==0.8.1 --extra-index-url https://download.pytorch.org/whl/cpu
          else
            pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu
          fi
        shell: bash  # for Windows compatibility
      - name: Check environment
        run: |
          python -c "import utils; utils.notebook_init()"
          echo "RUNNER_OS is ${{ runner.os }}"
          echo "GITHUB_EVENT_NAME is ${{ github.event_name }}"
          echo "GITHUB_WORKFLOW is ${{ github.workflow }}"
          echo "GITHUB_ACTOR is ${{ github.actor }}"
          echo "GITHUB_REPOSITORY is ${{ github.repository }}"
          echo "GITHUB_REPOSITORY_OWNER is ${{ github.repository_owner }}"
          python --version
          pip --version
          pip list
      - name: Test detection
        shell: bash  # for Windows compatibility
        run: |
          # export PYTHONPATH="$PWD"  # to run '$ python *.py' files in subdirectories
          m=${{ matrix.model }}  # official weights
          b=runs/train/exp/weights/best  # best.pt checkpoint
          python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu  # train
          for d in cpu; do  # devices
            for w in $m $b; do  # weights
              python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d  # val
              python detect.py --imgsz 64 --weights $w.pt --device $d  # detect
            done
          done
          python hubconf.py --model $m  # hub
          # python models/tf.py --weights $m.pt  # build TF model
          python models/yolo.py --cfg $m.yaml  # build PyTorch model
          python export.py --weights $m.pt --img 64 --include torchscript  # export
          python - <<EOF
          import torch
          im = torch.zeros([1, 3, 64, 64])
          for path in '$m', '$b':
              model = torch.hub.load('.', 'custom', path=path, source='local')
              print(model('data/images/bus.jpg'))
              model(im)  # warmup, build grids for trace
              torch.jit.trace(model, [im])
          EOF
      - name: Test segmentation
        shell: bash  # for Windows compatibility
        run: |
          m=${{ matrix.model }}-seg  # official weights
          b=runs/train-seg/exp/weights/best  # best.pt checkpoint
          python segment/train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu  # train
          python segment/train.py --imgsz 64 --batch 32 --weights '' --cfg $m.yaml --epochs 1 --device cpu  # train
          for d in cpu; do  # devices
            for w in $m $b; do  # weights
              python segment/val.py --imgsz 64 --batch 32 --weights $w.pt --device $d  # val
              python segment/predict.py --imgsz 64 --weights $w.pt --device $d  # predict
              python export.py --weights $w.pt --img 64 --include torchscript --device $d  # export
            done
          done
      - name: Test classification
        shell: bash  # for Windows compatibility
        run: |
          m=${{ matrix.model }}-cls.pt  # official weights
          b=runs/train-cls/exp/weights/best.pt  # best.pt checkpoint
          python classify/train.py --imgsz 32 --model $m --data mnist160 --epochs 1  # train
          python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist160  # val
          python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist160/test/7/60.png  # predict
          python classify/predict.py --imgsz 32 --weights $m --source data/images/bus.jpg  # predict
          python export.py --weights $b --img 64 --include torchscript  # export
          python - <<EOF
          import torch
          for path in '$m', '$b':
              model = torch.hub.load('.', 'custom', path=path, source='local')
          EOF
@@ -0,0 +1,54 @@
# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
# https://github.com/github/codeql-action

name: "CodeQL"

on:
  schedule:
    - cron: '0 0 1 * *'  # Runs at 00:00 UTC on the 1st of every month

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: ['python']
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v2

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language

      #- run: |
      #    make bootstrap
      #    make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2
@@ -0,0 +1,57 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5

name: Publish Docker Images

on:
  push:
    branches: [ master ]

jobs:
  docker:
    if: github.repository == 'ultralytics/yolov5'
    name: Push Docker image to Docker Hub
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push arm64 image
        uses: docker/build-push-action@v4
        continue-on-error: true
        with:
          context: .
          platforms: linux/arm64
          file: utils/docker/Dockerfile-arm64
          push: true
          tags: ultralytics/yolov5:latest-arm64

      - name: Build and push CPU image
        uses: docker/build-push-action@v4
        continue-on-error: true
        with:
          context: .
          file: utils/docker/Dockerfile-cpu
          push: true
          tags: ultralytics/yolov5:latest-cpu

      - name: Build and push GPU image
        uses: docker/build-push-action@v4
        continue-on-error: true
        with:
          context: .
          file: utils/docker/Dockerfile
          push: true
          tags: ultralytics/yolov5:latest
@@ -0,0 +1,65 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license

name: Greetings

on:
  pull_request_target:
    types: [opened]
  issues:
    types: [opened]

jobs:
  greeting:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/first-interaction@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          pr-message: |
            👋 Hello @${{ github.actor }}, thank you for submitting a YOLOv5 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to:

            - ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally.
            - ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**.
            - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee

          issue-message: |
            👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607).

            If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it.

            If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results).

            ## Requirements

            [**Python>=3.7.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started:

            ```bash
            git clone https://github.com/ultralytics/yolov5  # clone
            cd yolov5
            pip install -r requirements.txt  # install
            ```

            ## Environments

            YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):

            - **Notebooks** with free GPU: <a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a> <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
            - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
            - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
            - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>

            ## Status

            <a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI"></a>

            If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.

            ## Introducing YOLOv8 🚀

            We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) 🚀!

            Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects.

            Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with:

            ```bash
            pip install ultralytics
            ```
@@ -0,0 +1,40 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license

name: Close stale issues
on:
  schedule:
    - cron: '0 0 * * *'  # Runs at 00:00 UTC every day

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v7
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: |
            👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs.

            Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources:
            - **Wiki** – https://github.com/ultralytics/yolov5/wiki
            - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials
            - **Docs** – https://docs.ultralytics.com

            Access additional [Ultralytics](https://ultralytics.com) ⚡ resources:
            - **Ultralytics HUB** – https://ultralytics.com/hub
            - **Vision API** – https://ultralytics.com/yolov5
            - **About Us** – https://ultralytics.com/about
            - **Join Our Team** – https://ultralytics.com/work
            - **Contact Us** – https://ultralytics.com/contact

            Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed!

            Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐!

          stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐.'
          days-before-issue-stale: 30
          days-before-issue-close: 10
          days-before-pr-stale: 90
          days-before-pr-close: 30
          exempt-issue-labels: 'documentation,tutorial,TODO'
          operations-per-run: 300  # The maximum number of operations per run, used to control rate limiting.
@@ -0,0 +1,26 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md

name: Translate README

on:
  push:
    branches:
      - translate_readme  # replace with 'master' to enable action
    paths:
      - README.md

jobs:
  Translate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Setup Node.js
        uses: actions/setup-node@v3
        with:
          node-version: 16
      # ISO Language Codes: https://cloud.google.com/translate/docs/languages
      - name: Adding README - Chinese Simplified
        uses: dephraiim/translate-readme@main
        with:
          LANG: zh-CN
@@ -0,0 +1,257 @@
# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
*.jpg
*.jpeg
*.png
*.bmp
*.tif
*.tiff
*.heic
*.JPG
*.JPEG
*.PNG
*.BMP
*.TIF
*.TIFF
*.HEIC
*.mp4
*.mov
*.MOV
*.avi
*.data
*.json
*.cfg
!setup.cfg
!cfg/yolov3*.cfg

storage.googleapis.com
runs/*
data/*
data/images/*
!data/*.yaml
!data/hyps
!data/scripts
!data/images
!data/images/zidane.jpg
!data/images/bus.jpg
!data/*.sh

results*.csv

# Datasets -------------------------------------------------------------------------------------------------------------
coco/
coco128/
VOC/

# MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
*.m~
*.mat
!targets*.mat

# Neural Network weights -----------------------------------------------------------------------------------------------
*.weights
*.pt
*.pb
*.onnx
*.engine
*.mlmodel
*.torchscript
*.tflite
*.h5
*_saved_model/
*_web_model/
*_openvino_model/
*_paddle_model/
darknet53.conv.74
yolov3-tiny.conv.15

# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
/wandb/
.installed.cfg
*.egg


# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# dotenv
.env

# virtualenv
.venv*
venv*/
ENV*/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/


# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------

# General
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon
Icon?

# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk


# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff:
.idea/*
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
.html  # Bokeh Plots
.pg  # TensorFlow Frozen Graphs
.avi  # videos

# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml

# Gradle:
.idea/**/gradle.xml
.idea/**/libraries

# CMake
cmake-build-debug/
cmake-build-release/

# Mongo Explorer plugin:
.idea/**/mongoSettings.xml

## File-based project format:
*.iws

## Plugin-specific files:

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
@@ -0,0 +1,14 @@
cff-version: 1.2.0
preferred-citation:
  type: software
  message: If you use YOLOv5, please cite it as below.
  authors:
    - family-names: Jocher
      given-names: Glenn
      orcid: "https://orcid.org/0000-0001-5950-6979"
  title: "YOLOv5 by Ultralytics"
  version: 7.0
  doi: 10.5281/zenodo.3908559
  date-released: 2020-05-29
  license: GPL-3.0
  url: "https://github.com/ultralytics/yolov5"
@@ -0,0 +1,93 @@
## Contributing to YOLOv5 🚀

We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:

- Reporting a bug
- Discussing the current state of the code
- Submitting a fix
- Proposing a new feature
- Becoming a maintainer

YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be helping push the frontiers of what's possible in AI 😃!

## Submitting a Pull Request (PR) 🛠️

Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:

### 1. Select File to Update

Select `requirements.txt` to update by clicking on it in GitHub.

<p align="center"><img width="800" alt="PR_step1" src="https://user-images.githubusercontent.com/26833433/122260847-08be2600-ced4-11eb-828b-8287ace4136c.png"></p>

### 2. Click 'Edit this file'

The button is in the top-right corner.

<p align="center"><img width="800" alt="PR_step2" src="https://user-images.githubusercontent.com/26833433/122260844-06f46280-ced4-11eb-9eec-b8a24be519ca.png"></p>

### 3. Make Changes

Change the `matplotlib` version from `3.2.2` to `3.3`.

<p align="center"><img width="800" alt="PR_step3" src="https://user-images.githubusercontent.com/26833433/122260853-0a87e980-ced4-11eb-9fd2-3650fb6e0842.png"></p>

### 4. Preview Changes and Submit PR

Click on the **Preview changes** tab to verify your updates. At the bottom of the screen, select 'Create a **new branch** for this commit', assign your branch a descriptive name such as `fix/matplotlib_version`, and click the green **Propose changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!

<p align="center"><img width="800" alt="PR_step4" src="https://user-images.githubusercontent.com/26833433/122260856-0b208000-ced4-11eb-8e8e-77b6151cbcc3.png"></p>

### PR recommendations

To allow your work to be integrated as seamlessly as possible, we advise you to:

- ✅ Verify your PR is **up-to-date** with the `ultralytics/yolov5` `master` branch. If your PR is behind, you can update your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally.

<p align="center"><img width="751" alt="Screenshot 2022-08-29 at 22 47 15" src="https://user-images.githubusercontent.com/26833433/187295893-50ed9f44-b2c9-4138-a614-de69bd1753d7.png"></p>

- ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**.

<p align="center"><img width="751" alt="Screenshot 2022-08-29 at 22 47 03" src="https://user-images.githubusercontent.com/26833433/187296922-545c5498-f64a-4d8c-8300-5fa764360da6.png"></p>

- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee

## Submitting a Bug Report 🐛

If you spot a problem with YOLOv5, please submit a Bug Report!

For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need to get started.

When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example); a short sketch follows the checklists below. Your code that reproduces the problem should be:

- ✅ **Minimal** – Use as little code as possible that still produces the same problem
- ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
- ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem

In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be:

- ✅ **Current** – Verify that your code is up-to-date with the current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits.
- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
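
For illustration, a minimal reproducible example for a detection issue might look like the sketch below. The model name and image URL are assumptions chosen for the example; substitute whatever actually triggers your problem:

```python
# Hypothetical minimal reproducible example for a YOLOv5 bug report.
# Assumes requirements.txt is installed and network access is available
# for the first torch.hub download; 'yolov5n' and the image are examples.
import torch

model = torch.hub.load("ultralytics/yolov5", "yolov5n")       # official weights
results = model("https://ultralytics.com/images/zidane.jpg")  # sample image
results.print()  # include this console output in your report
```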

If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better understand and diagnose your problem.

## License

By contributing, you agree that your contributions will be licensed under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/).
@@ -0,0 +1,674 @@
                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.  We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors.  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights.  Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received.  You must make sure that they, too, receive
or can get the source code.  And you must show them these terms so they
know their rights.

  Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

  For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software.  For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

  Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so.  This is fundamentally incompatible with the aim of
protecting users' freedom to change the software.  The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable.  Therefore, we
have designed this version of the GPL to prohibit the practice for those
products.  If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

  Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary.  To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License.  Each licensee is addressed as "you".  "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy.  The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy.  Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies.  Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License.  If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it.  "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form.  A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities.  However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work.  For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met.  This License explicitly affirms your unlimited
permission to run the unmodified Program.  The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work.  This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force.  You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright.  Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below.  Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7.  This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy.  This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged.  This License gives no
    permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
|
@@ -0,0 +1,488 @@
<div align="center">
<p>
<a align="center" href="https://ultralytics.com/yolov5" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png"></a>
</p>

[English](README.md) | [简体中文](README.zh-CN.md)
<br>

<div>
<a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI"></a>
<a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="YOLOv5 Citation"></a>
<a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
<br>
<a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a>
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
<a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
</div>
<br>

YOLOv5 🚀 is the world's most loved vision AI, representing <a href="https://ultralytics.com">Ultralytics</a> open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

To request an Enterprise License please complete the form at <a href="https://ultralytics.com/license">Ultralytics Licensing</a>.

<div align="center">
<a href="https://github.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
<a href="https://www.linkedin.com/company/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
<a href="https://twitter.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
<a href="https://youtube.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
<a href="https://www.tiktok.com/@ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
<a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="" /></a>
</div>

</div>
<br>

## <div align="center">YOLOv8 🚀 NEW</div>

We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model
released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**.
YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of
object detection, image segmentation and image classification tasks.

See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with:

```commandline
pip install ultralytics
```
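
For quick orientation, here is a minimal Python sketch of the new package's interface; the `yolov8n.pt` checkpoint name and the sample image URL are illustrative assumptions rather than values taken from this README:

```python
from ultralytics import YOLO

# Load a pretrained YOLOv8 nano checkpoint (illustrative; downloads on first use)
model = YOLO("yolov8n.pt")

# Run inference on a sample image and print the detected boxes
results = model("https://ultralytics.com/images/bus.jpg")
for r in results:
    print(r.boxes)  # bounding boxes, confidences and class ids
```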

<div align="center">
<a href="https://ultralytics.com/yolov8" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/yolo-comparison-plots.png"></a>
</div>

## <div align="center">Documentation</div>

See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples.

<details open>
<summary>Install</summary>

Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a
[**Python>=3.7.0**](https://www.python.org/) environment, including
[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).

```bash
git clone https://github.com/ultralytics/yolov5  # clone
cd yolov5
pip install -r requirements.txt  # install
```

</details>

<details>
<summary>Inference</summary>

YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).

```python
import torch

# Model
model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # or yolov5n - yolov5x6, custom

# Images
img = "https://ultralytics.com/images/zidane.jpg"  # or file, Path, PIL, OpenCV, numpy, list

# Inference
results = model(img)

# Results
results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
```
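
The `results` object can also be inspected programmatically. As a short sketch, the detections can be pulled into a pandas DataFrame via the hub API (assumes pandas is installed; the confidence cutoff is an arbitrary illustration):

```python
import torch

# Same hub model as above
model = torch.hub.load("ultralytics/yolov5", "yolov5s")
results = model("https://ultralytics.com/images/zidane.jpg")

# One row per detection: xmin, ymin, xmax, ymax, confidence, class, name
df = results.pandas().xyxy[0]
print(df[df["confidence"] > 0.5])  # keep only higher-confidence boxes
```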

</details>

<details>
<summary>Inference with detect.py</summary>

`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from
the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.

```bash
python detect.py --weights yolov5s.pt --source 0                               # webcam
                                               img.jpg                         # image
                                               vid.mp4                         # video
                                               screen                          # screenshot
                                               path/                           # directory
                                               list.txt                        # list of images
                                               list.streams                    # list of streams
                                               'path/*.jpg'                    # glob
                                               'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                               'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
```
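
As a concrete illustration, the source can be combined with the script's usual options; the threshold and output directory below are arbitrary example values, not defaults stated in this README:

```bash
# Detect on a folder of images at a higher confidence threshold and
# save annotated results under runs/demo/exp1 (illustrative values)
python detect.py --weights yolov5s.pt --source path/to/images --conf-thres 0.4 --project runs/demo --name exp1
```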

</details>

<details>
<summary>Training</summary>

The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)
results. [Models](https://github.com/ultralytics/yolov5/tree/master/models)
and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest
YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are
1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) training is proportionally faster). Use the
largest `--batch-size` possible, or pass `--batch-size -1` for
YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown are for V100-16GB.

```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml  --batch-size 128
                                                                 yolov5s                    64
                                                                 yolov5m                    40
                                                                 yolov5l                    24
                                                                 yolov5x                    16
```
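
Multi-GPU training follows the same DDP pattern used in the segmentation and classification examples later in this README; a sketch for four GPUs (the device ids and batch size are illustrative):

```bash
# DDP training on 4 GPUs; see the Multi-GPU Training tutorial below
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5s.yaml --batch-size 256 --device 0,1,2,3
```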
|
||||
|
||||
<img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png">
|
||||
|
||||
</details>
|
||||
|
||||
<details open>
|
||||
<summary>Tutorials</summary>
|
||||
|
||||
- [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 RECOMMENDED
|
||||
- [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️
|
||||
RECOMMENDED
|
||||
- [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475)
|
||||
- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 NEW
|
||||
- [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀
|
||||
- [NVIDIA Jetson Nano Deployment](https://github.com/ultralytics/yolov5/issues/9627) 🌟 NEW
|
||||
- [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
|
||||
- [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318)
|
||||
- [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304)
|
||||
- [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607)
|
||||
- [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)
|
||||
- [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW
|
||||
- [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975) 🌟 NEW
|
||||
- [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW
|
||||
- [YOLOv5 with Neural Magic's Deepsparse](https://bit.ly/yolov5-neuralmagic) 🌟 NEW
|
||||
- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) 🌟 NEW
|
||||
|
||||
</details>
|
||||
|
||||
## <div align="center">Integrations</div>
|
||||
|
||||
<br>
|
||||
<a align="center" href="https://bit.ly/ultralytics_hub" target="_blank">
|
||||
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png"></a>
|
||||
<br>
|
||||
<br>
|
||||
|
||||
<div align="center">
|
||||
<a href="https://roboflow.com/?ref=ultralytics">
|
||||
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-roboflow.png" width="10%" /></a>
|
||||
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
|
||||
<a href="https://cutt.ly/yolov5-readme-clearml">
|
||||
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-clearml.png" width="10%" /></a>
|
||||
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
|
||||
<a href="https://bit.ly/yolov5-readme-comet2">
|
||||
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" /></a>
|
||||
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
|
||||
<a href="https://bit.ly/yolov5-neuralmagic">
|
||||
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" /></a>
|
||||
</div>
|
||||
|
||||
| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
|
||||
| :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
|
||||
| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
|
||||
|
||||
## <div align="center">Ultralytics HUB</div>
|
||||
|
||||
Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
|
||||
|
||||
<a align="center" href="https://bit.ly/ultralytics_hub" target="_blank">
|
||||
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png"></a>
|
||||
|
||||
## <div align="center">Why YOLOv5</div>
|
||||
|
||||
YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results.
|
||||
|
||||
<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040763-93c22a27-347c-4e3c-847a-8094621d3f4e.png"></p>
|
||||
<details>
|
||||
<summary>YOLOv5-P5 640 Figure</summary>
|
||||
|
||||
<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040757-ce0934a3-06a6-43dc-a979-2edbbd69ea0e.png"></p>
|
||||
</details>
|
||||
<details>
|
||||
<summary>Figure Notes</summary>
|
||||
|
||||
- **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
|
||||
- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
|
||||
- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
|
||||
- **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
|
||||
|
||||
</details>
|
||||
|
||||
### Pretrained Checkpoints
|
||||
|
||||
| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | mAP<sup>val<br>50 | Speed<br><sup>CPU b1<br>(ms) | Speed<br><sup>V100 b1<br>(ms) | Speed<br><sup>V100 b32<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
|
||||
| ----------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- |
|
||||
| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |
|
||||
| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
|
||||
| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
|
||||
| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
|
||||
| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
|
||||
| | | | | | | | | |
|
||||
| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
|
||||
| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
|
||||
| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
|
||||
| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
|
||||
| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+ [TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>- |
|
||||
|
||||
<details>
|
||||
<summary>Table Notes</summary>
|
||||
|
||||
- All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
|
||||
- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
|
||||
- **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
|
||||
- **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
|
||||
|
||||
</details>
|
||||
|
||||
## <div align="center">Segmentation</div>
|
||||
|
||||
Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials.
|
||||
|
||||
<details>
|
||||
<summary>Segmentation Checkpoints</summary>
|
||||
|
||||
<div align="center">
|
||||
<a align="center" href="https://ultralytics.com/yolov5" target="_blank">
|
||||
<img width="800" src="https://user-images.githubusercontent.com/61612323/204180385-84f3aca9-a5e9-43d8-a617-dda7ca12e54a.png"></a>
|
||||
</div>
|
||||
|
||||
We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility.
|
||||
|
||||
| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Train time<br><sup>300 epochs<br>A100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TRT A100<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
|
||||
| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- |
|
||||
| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** |
|
||||
| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 |
|
||||
| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 |
|
||||
| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 |
|
||||
| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 |
|
||||
|
||||
- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.<br>Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official
|
||||
- **Accuracy** values are for single-model single-scale on COCO dataset.<br>Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`
|
||||
- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image). <br>Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`
|
||||
- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`. <br>Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Segmentation Usage Examples <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>
|
||||
|
||||
### Train
|
||||
|
||||
YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`.
|
||||
|
||||
```bash
|
||||
# Single-GPU
|
||||
python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640
|
||||
|
||||
# Multi-GPU DDP
|
||||
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
|
||||
```
|
||||
|
||||
### Val
|
||||
|
||||
Validate YOLOv5s-seg mask mAP on COCO dataset:
|
||||
|
||||
```bash
|
||||
bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images)
|
||||
python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate
|
||||
```
|
||||
|
||||
### Predict
|
||||
|
||||
Use pretrained YOLOv5m-seg.pt to predict bus.jpg:
|
||||
|
||||
```bash
|
||||
python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg
|
||||
```
|
||||
|
||||
```python
|
||||
model = torch.hub.load(
|
||||
"ultralytics/yolov5", "custom", "yolov5m-seg.pt"
|
||||
) # load from PyTorch Hub (WARNING: inference not yet supported)
|
||||
```
|
||||
|
||||
|  |  |
|
||||
| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- |
|
||||
|
||||
### Export
|
||||
|
||||
Export YOLOv5s-seg model to ONNX and TensorRT:
|
||||
|
||||
```bash
|
||||
python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## <div align="center">Classification</div>
|
||||
|
||||
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials.

<details>
<summary>Classification Checkpoints</summary>

<br>

We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside them with the same default training settings for comparison. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility.

| Model                                                                                              | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Training<br><sup>90 epochs<br>4xA100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TensorRT V100<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@224 (B) |
| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- |
| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
| | | | | | | | | |
| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
| | | | | | | | | |
| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |

<details>
<summary>Table Notes (click to expand)</summary>

- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.<br>Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2
- **Accuracy** values are for single-model single-scale on the [ImageNet-1k](https://www.image-net.org/index.php) dataset.<br>Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224`
- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.<br>Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`
- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.<br>Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`

</details>
</details>

<details>
<summary>Classification Usage Examples <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>

### Train

YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. For example, to start training on MNIST use `--data mnist`.

```bash
# Single-GPU
python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128

# Multi-GPU DDP
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
```

### Val

Validate YOLOv5m-cls accuracy on the ImageNet-1k dataset:

```bash
bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate
```

### Predict

Use pretrained YOLOv5s-cls.pt to predict bus.jpg:

```bash
python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg
```

```python
model = torch.hub.load(
    "ultralytics/yolov5", "custom", "yolov5s-cls.pt"
)  # load from PyTorch Hub
```
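
The Hub model returns raw logits rather than probabilities. A minimal end-to-end sketch follows; the 224x224 resize and ImageNet mean/std normalization are our assumed preprocessing for the classifier, not an officially documented recipe:

```python
import torch
import torchvision.transforms as T
from PIL import Image

model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5s-cls.pt")

# Assumed preprocessing: 224x224 input with ImageNet mean/std normalization
transform = T.Compose([
    T.Resize(224),
    T.CenterCrop(224),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
im = transform(Image.open("bus.jpg")).unsqueeze(0)  # add batch dimension

with torch.no_grad():
    probs = model(im).softmax(dim=1)  # raw logits -> class probabilities
print(probs.argmax(dim=1))  # predicted class index
```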

### Export

Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT:

```bash
python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
```
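
Once exported, the ONNX file can be run without PyTorch. A minimal sketch using ONNX Runtime; the 1x3x224x224 input shape matches the export command above, and the random array stands in for a properly preprocessed image:

```python
import numpy as np
import onnxruntime as ort

# Load the classifier exported by the command above
session = ort.InferenceSession("yolov5s-cls.onnx", providers=["CPUExecutionProvider"])

x = np.random.rand(1, 3, 224, 224).astype(np.float32)  # stand-in for a preprocessed image
input_name = session.get_inputs()[0].name
logits = session.run(None, {input_name: x})[0]  # class logits
print(logits.argmax(axis=1))  # predicted class index
```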

</details>

## <div align="center">Environments</div>

Get started in seconds with our verified environments. Click each icon below for details.

<div align="center">
<a href="https://bit.ly/yolov5-paperspace-notebook">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gradient.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-colab-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://www.kaggle.com/ultralytics/yolov5">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-kaggle-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://hub.docker.com/r/ultralytics/yolov5">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-docker-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-aws-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gcp-small.png" width="10%" /></a>
</div>

## <div align="center">Contribute</div>

We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!

<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->

<a href="https://github.com/ultralytics/yolov5/graphs/contributors">
<img src="https://github.com/ultralytics/assets/raw/main/im/image-contributors.png" /></a>

## <div align="center">License</div>

YOLOv5 is available under two different licenses:

- **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license).

## <div align="center">Contact</div>

For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues) or the [Ultralytics Community Forum](https://community.ultralytics.com/).

<br>
<div align="center">
<a href="https://github.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
<a href="https://www.linkedin.com/company/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
<a href="https://twitter.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
<a href="https://youtube.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
<a href="https://www.tiktok.com/@ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
<a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="" /></a>
</div>

[tta]: https://github.com/ultralytics/yolov5/issues/303

@ -0,0 +1,482 @@
<div align="center">
<p>
<a align="center" href="https://ultralytics.com/yolov5" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png"></a>
</p>

[English](README.md) | [Simplified Chinese](README.zh-CN.md)<br>

<div>
<a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI"></a>
<a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="YOLOv5 Citation"></a>
<a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
<br>
<a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a>
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
<a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
</div>
<br>

YOLOv5 🚀 is the world's most popular vision AI, representing <a href="https://ultralytics.com">Ultralytics</a> open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

To request an Enterprise License please complete the form at <a href="https://ultralytics.com/license">Ultralytics Licensing</a>.

<div align="center">
<a href="https://github.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
<a href="https://www.linkedin.com/company/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
<a href="https://twitter.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
<a href="https://youtube.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
<a href="https://www.tiktok.com/@ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
<a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="" /></a>
</div>
</div>

## <div align="center">YOLOv8 🚀 NEW</div>

We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model
released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**.
YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of
object detection, image segmentation and image classification tasks.

See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with:

```bash
pip install ultralytics
```
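
Once installed, the package also exposes a Python API. A minimal sketch, assuming the `YOLO` class and the `yolov8n.pt` checkpoint name from the YOLOv8 docs:

```python
from ultralytics import YOLO

# Load a pretrained YOLOv8 nano detection model (downloads on first use)
model = YOLO("yolov8n.pt")

# Run inference on an image and print the detected boxes
results = model("https://ultralytics.com/images/zidane.jpg")
for r in results:
    print(r.boxes)  # xyxy coordinates, confidences and class ids
```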

<div align="center">
<a href="https://ultralytics.com/yolov8" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/yolo-comparison-plots.png"></a>
</div>

## <div align="center">Documentation</div>

See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See the quickstart examples below.

<details open>
<summary>Install</summary>

Clone the repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).

```bash
git clone https://github.com/ultralytics/yolov5  # clone
cd yolov5
pip install -r requirements.txt  # install
```

</details>

<details>
<summary>Inference</summary>

Run inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). The latest [models](https://github.com/ultralytics/yolov5/tree/master/models) are downloaded automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).

```python
import torch

# Model
model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # or yolov5n - yolov5x6, custom

# Images
img = "https://ultralytics.com/images/zidane.jpg"  # or file, Path, PIL, OpenCV, numpy, list

# Inference
results = model(img)

# Results
results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
```
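
As the final comment hints, `results.pandas()` exposes the detections as pandas DataFrames, which is convenient for filtering. A minimal sketch, with column names as produced by YOLOv5's pandas output:

```python
# Detections for the first image as a DataFrame:
# columns: xmin, ymin, xmax, ymax, confidence, class, name
df = results.pandas().xyxy[0]
print(df[df["confidence"] > 0.5])  # keep confident detections only
```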

</details>

<details>
<summary>Inference with detect.py</summary>

`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.

```bash
python detect.py --weights yolov5s.pt --source 0                               # webcam
                                               img.jpg                         # image
                                               vid.mp4                         # video
                                               screen                          # screenshot
                                               path/                           # directory
                                               list.txt                        # list of images
                                               list.streams                    # list of streams
                                               'path/*.jpg'                    # glob
                                               'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                               'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
```

</details>

<details>
<summary>Training</summary>

The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) results. The latest [models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) are downloaded automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) training is faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown are for V100-16GB.

```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml  --batch-size 128
                                                                 yolov5s                    64
                                                                 yolov5m                    40
                                                                 yolov5l                    24
                                                                 yolov5x                    16
```

<img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png">

</details>

<details open>
<summary>Tutorials</summary>

- [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 RECOMMENDED
- [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️ RECOMMENDED
- [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475)
- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 NEW
- [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀
- [NVIDIA Jetson Nano Deployment](https://github.com/ultralytics/yolov5/issues/9627) 🌟 NEW
- [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
- [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318)
- [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304)
- [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607)
- [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)
- [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW
- [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975) 🌟 NEW
- [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW
- [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 NEW
- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) 🌟 NEW

</details>

## <div align="center">Integrations</div>

<br>
<a align="center" href="https://bit.ly/ultralytics_hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png"></a>
<br>
<br>

<div align="center">
<a href="https://roboflow.com/?ref=ultralytics">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-roboflow.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
<a href="https://cutt.ly/yolov5-readme-clearml">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-clearml.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
<a href="https://bit.ly/yolov5-readme-comet2">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
<a href="https://bit.ly/yolov5-neuralmagic">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" /></a>
</div>

| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :--------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: |
| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualize and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |

## <div align="center">Ultralytics HUB</div>

[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **free** now!

<a align="center" href="https://bit.ly/ultralytics_hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png"></a>

## <div align="center">Why YOLOv5</div>

YOLOv5 is super easy to get started with and simple to learn. We prioritize real-world results.

<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040763-93c22a27-347c-4e3c-847a-8094621d3f4e.png"></p>
<details>
<summary>YOLOv5-P5 640 Figure</summary>

<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040757-ce0934a3-06a6-43dc-a979-2edbbd69ea0e.png"></p>
</details>
<details>
<summary>Figure Notes</summary>

- **COCO AP val** denotes the mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
- **GPU Speed** measures average inference time per image on the [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch size 32.
- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 32.
- **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`

</details>

### Pretrained Checkpoints

| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | mAP<sup>val<br>50 | Speed<br><sup>CPU b1<br>(ms) | Speed<br><sup>V100 b1<br>(ms) | Speed<br><sup>V100 b32<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
| ---------------------------------------------------------------------------------------------- | --------------- | -------------------- | ----------------- | --------------------------- | ---------------------------- | --------------------------- | --------------- | ---------------------- |
| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |
| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
| | | | | | | | | |
| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+[TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>- |

<details>
<summary>Table Notes</summary>

- All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
- **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (about 1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
- **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`

</details>

## <div align="center">Segmentation ⭐ NEW</div>

Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials.

<details>
<summary>Segmentation Checkpoints</summary>

<br>

<div align="center">
<a align="center" href="https://ultralytics.com/yolov5" target="_blank">
<img width="800" src="https://user-images.githubusercontent.com/61612323/204180385-84f3aca9-a5e9-43d8-a617-dda7ca12e54a.png"></a>
</div>

We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility.

| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Train time<br><sup>300 epochs<br>A100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TRT A100<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
| ------------------------------------------------------------------------------------------ | --------------- | -------------------- | --------------------- | --------------------------------------- | ----------------------------- | ----------------------------- | --------------- | ---------------------- |
| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** |
| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 |
| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 |
| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 |
| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 |

- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.<br>Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official
- **Accuracy** values are for single-model single-scale on the COCO dataset.<br>Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`
- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).<br>Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`
- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.<br>Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`

</details>

<details>
<summary>Segmentation Usage Examples <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>

### Train

YOLOv5 segmentation training supports auto-download of the COCO128-seg dataset with the `--data coco128-seg.yaml` argument. To download the COCO-segments dataset manually, use `bash data/scripts/get_coco.sh --train --val --segments` and then start training with `python train.py --data coco.yaml`.

```bash
# Single-GPU
python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640

# Multi-GPU DDP
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
```

### Val

Validate YOLOv5s-seg mask mAP on the COCO dataset:

```bash
bash data/scripts/get_coco.sh --val --segments  # download COCO val segments split (780MB, 5000 images)
python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate
```

### Predict

Use pretrained YOLOv5m-seg.pt to predict bus.jpg:

```bash
python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg
```

```python
model = torch.hub.load(
    "ultralytics/yolov5", "custom", "yolov5m-seg.pt"
)  # load from PyTorch Hub (WARNING: inference not yet supported)
```

| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) |
| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- |

### Export

Export a YOLOv5s-seg model to ONNX and TensorRT:

```bash
python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
```

</details>

## <div align="center">Classification ⭐ NEW</div>

YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials.

<details>
<summary>Classification Checkpoints</summary>

<br>

We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside them with the same default training settings for comparison. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility.

| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Training<br><sup>90 epochs<br>4xA100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TensorRT V100<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@224 (B) |
| -------------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ------------------------------------ | ----------------------------- | ---------------------------------- | -------------- | ---------------------- |
| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
| | | | | | | | | |
| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
| | | | | | | | | |
| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |

<details>
<summary>Table Notes (click to expand)</summary>

- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.<br>Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2
- **Accuracy** values are for single-model single-scale on the [ImageNet-1k](https://www.image-net.org/index.php) dataset.<br>Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224`
- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.<br>Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`
- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.<br>Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`

</details>
</details>

<details>
<summary>Classification Usage Examples <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>

### Train

YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. For example, start training on MNIST with `--data mnist`.

```bash
# Single-GPU
python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128

# Multi-GPU DDP
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
```

### Val

Validate YOLOv5m-cls accuracy on the ImageNet-1k dataset:

```bash
bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate
```

### Predict

Use pretrained YOLOv5s-cls.pt to predict bus.jpg:

```bash
python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg
```

```python
model = torch.hub.load(
    "ultralytics/yolov5", "custom", "yolov5s-cls.pt"
)  # load from PyTorch Hub
```
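
A minimal follow-up sketch for reading out top-5 predictions; it assumes `im` is a preprocessed `(1, 3, 224, 224)` tensor and that the loaded model exposes a `names` mapping, as YOLOv5 models typically do:

```python
import torch

# `model` loaded via torch.hub as above; `im` is a preprocessed image tensor
with torch.no_grad():
    probs = model(im).softmax(dim=1)  # raw logits -> class probabilities
top5 = probs[0].topk(5)  # five highest-probability classes
for p, j in zip(top5.values.tolist(), top5.indices.tolist()):
    print(f'{model.names[j]}: {p:.2f}')  # class name and probability
```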

### Export

Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT:

```bash
python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
```

</details>

## <div align="center">Environments</div>

Get started in seconds with our verified environments. Click each icon below for details.

<div align="center">
<a href="https://bit.ly/yolov5-paperspace-notebook">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gradient.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-colab-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://www.kaggle.com/ultralytics/yolov5">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-kaggle-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://hub.docker.com/r/ultralytics/yolov5">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-docker-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-aws-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gcp-small.png" width="10%" /></a>
</div>

## <div align="center">Contribute</div>

We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!

<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->

<a href="https://github.com/ultralytics/yolov5/graphs/contributors">
<img src="https://github.com/ultralytics/assets/raw/main/im/image-contributors.png" /></a>

## <div align="center">License</div>

YOLOv5 is available under two different licenses:

- **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license).

## <div align="center">Contact</div>

For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues) or the [Ultralytics Community Forum](https://community.ultralytics.com/).

<br>
<div align="center">
<a href="https://github.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
<a href="https://www.linkedin.com/company/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
<a href="https://twitter.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
<a href="https://youtube.com/ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
<a href="https://www.tiktok.com/@ultralytics" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
<a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="" /></a>
</div>

[tta]: https://github.com/ultralytics/yolov5/issues/303

@ -0,0 +1,169 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Run YOLOv5 benchmarks on all supported export formats

Format                      | `export.py --include`         | Model
---                         | ---                           | ---
PyTorch                     | -                             | yolov5s.pt
TorchScript                 | `torchscript`                 | yolov5s.torchscript
ONNX                        | `onnx`                        | yolov5s.onnx
OpenVINO                    | `openvino`                    | yolov5s_openvino_model/
TensorRT                    | `engine`                      | yolov5s.engine
CoreML                      | `coreml`                      | yolov5s.mlmodel
TensorFlow SavedModel       | `saved_model`                 | yolov5s_saved_model/
TensorFlow GraphDef         | `pb`                          | yolov5s.pb
TensorFlow Lite             | `tflite`                      | yolov5s.tflite
TensorFlow Edge TPU         | `edgetpu`                     | yolov5s_edgetpu.tflite
TensorFlow.js               | `tfjs`                        | yolov5s_web_model/

Requirements:
    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU
    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU
    $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT

Usage:
    $ python benchmarks.py --weights yolov5s.pt --img 640
"""

import argparse
import platform
import sys
import time
from pathlib import Path

import pandas as pd

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
# ROOT = ROOT.relative_to(Path.cwd())  # relative

import export
from models.experimental import attempt_load
from models.yolo import SegmentationModel
from segment.val import run as val_seg
from utils import notebook_init
from utils.general import LOGGER, check_yaml, file_size, print_args
from utils.torch_utils import select_device
from val import run as val_det


def run(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only
        pt_only=False,  # test PyTorch only
        hard_fail=False,  # throw error on benchmark failure
):
    y, t = [], time.time()
    device = select_device(device)
    model_type = type(attempt_load(weights, fuse=False))  # DetectionModel, SegmentationModel, etc.
    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
        try:
            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
            if 'cpu' in device.type:
                assert cpu, 'inference not supported on CPU'
            if 'cuda' in device.type:
                assert gpu, 'inference not supported on GPU'

            # Export
            if f == '-':
                w = weights  # PyTorch format
            else:
                w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # all others
            assert suffix in str(w), 'export failed'

            # Validate
            if model_type == SegmentationModel:
                result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
                metric = result[0][7]  # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))
            else:  # DetectionModel:
                result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
                metric = result[0][3]  # (p, r, map50, map, *loss(box, obj, cls))
            speed = result[2][1]  # times (preprocess, inference, postprocess)
            y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)])  # MB, mAP, t_inference
        except Exception as e:
            if hard_fail:
                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
            LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}')
            y.append([name, None, None, None])  # mAP, t_inference
        if pt_only and i == 0:
            break  # break after PyTorch

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']  # note: 'map' is the Python builtin (always truthy), so the full column set is always used
    py = pd.DataFrame(y, columns=c)
    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py if map else py.iloc[:, :2]))
    if hard_fail and isinstance(hard_fail, str):
        metrics = py['mAP50-95'].array  # values to compare to floor
        floor = eval(hard_fail)  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
        assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'
    return py


def test(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only
        pt_only=False,  # test PyTorch only
        hard_fail=False,  # throw error on benchmark failure
):
    y, t = [], time.time()
    device = select_device(device)
    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
        try:
            w = weights if f == '-' else \
                export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights
            assert suffix in str(w), 'export failed'
            y.append([name, True])
        except Exception:
            y.append([name, False])  # mAP, t_inference

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    py = pd.DataFrame(y, columns=['Format', 'Export'])
    LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py))
    return py


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--test', action='store_true', help='test exports only')
    parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
    parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    print_args(vars(opt))
    return opt


def main(opt):
    test(**vars(opt)) if opt.test else run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
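

# Programmatic usage (a minimal sketch, assuming this file is importable as
# `benchmarks` per the Usage string above, and that sys.argv carries no
# unknown flags since run() calls parse_opt() internally):
#   from benchmarks import run
#   df = run(weights='yolov5s.pt', imgsz=640, device='cpu', pt_only=True)  # skip exports, PyTorch only
#   print(df)  # columns: Format, Size (MB), mAP50-95, Inference time (ms)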

@ -0,0 +1,226 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.

Usage - sources:
    $ python classify/predict.py --weights yolov5s-cls.pt --source 0                               # webcam
                                                                   img.jpg                         # image
                                                                   vid.mp4                         # video
                                                                   screen                          # screenshot
                                                                   path/                           # directory
                                                                   list.txt                        # list of images
                                                                   list.streams                    # list of streams
                                                                   'path/*.jpg'                    # glob
                                                                   'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                                   'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python classify/predict.py --weights yolov5s-cls.pt             # PyTorch
                                           yolov5s-cls.torchscript    # TorchScript
                                           yolov5s-cls.onnx           # ONNX Runtime or OpenCV DNN with --dnn
                                           yolov5s-cls_openvino_model # OpenVINO
                                           yolov5s-cls.engine         # TensorRT
                                           yolov5s-cls.mlmodel        # CoreML (macOS-only)
                                           yolov5s-cls_saved_model    # TensorFlow SavedModel
                                           yolov5s-cls.pb             # TensorFlow GraphDef
                                           yolov5s-cls.tflite         # TensorFlow Lite
                                           yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
                                           yolov5s-cls_paddle_model   # PaddlePaddle
"""

import argparse
import os
import platform
import sys
from pathlib import Path

import torch
import torch.nn.functional as F

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.augmentations import classify_transforms
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, print_args, strip_optimizer)
from utils.plots import Annotator
from utils.torch_utils import select_device, smart_inference_mode


@smart_inference_mode()
def run(
        weights=ROOT / 'yolov5s-cls.pt',  # model.pt path(s)
        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        imgsz=(224, 224),  # inference size (height, width)
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False,  # show results
        save_txt=False,  # save results to *.txt
        nosave=False,  # do not save images/videos
        augment=False,  # augmented inference
        visualize=False,  # visualize features
        update=False,  # update all models
        project=ROOT / 'runs/predict-cls',  # save results to project/name
        name='exp',  # save results to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        vid_stride=1,  # video frame-rate stride
):
    source = str(source)
    save_img = not nosave and not source.endswith('.txt')  # save inference images
    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
    screenshot = source.lower().startswith('screen')
    if is_url and is_file:
        source = check_file(source)  # download

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Load model
    device = select_device(device)
    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_img_size(imgsz, s=stride)  # check image size

    # Dataloader
    bs = 1  # batch_size
    if webcam:
        view_img = check_imshow(warn=True)
        dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
        bs = len(dataset)
    elif screenshot:
        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
    else:
        dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
    vid_path, vid_writer = [None] * bs, [None] * bs

    # Run inference
    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup
    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
    for path, im, im0s, vid_cap, s in dataset:
        with dt[0]:
            im = torch.Tensor(im).to(model.device)
            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
            if len(im.shape) == 3:
                im = im[None]  # expand for batch dim

        # Inference
        with dt[1]:
            results = model(im)

        # Post-process
        with dt[2]:
            pred = F.softmax(results, dim=1)  # probabilities

        # Process predictions
        for i, prob in enumerate(pred):  # per image
            seen += 1
            if webcam:  # batch_size >= 1
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
                s += f'{i}: '
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # im.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt

            s += '%gx%g ' % im.shape[2:]  # print string
            annotator = Annotator(im0, example=str(names), pil=True)

            # Print results
            top5i = prob.argsort(0, descending=True)[:5].tolist()  # top 5 indices
            s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, "

            # Write results
            text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i)
            if save_img or view_img:  # Add bbox to image
                annotator.text((32, 32), text, txt_color=(255, 255, 255))
            if save_txt:  # Write to file
                with open(f'{txt_path}.txt', 'a') as f:
                    f.write(text + '\n')

            # Stream results
            im0 = annotator.result()
            if view_img:
                if platform.system() == 'Linux' and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer[i].write(im0)

        # Print time (inference-only)
        LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms')

    # Print results
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    if update:
        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)')
    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='show results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--visualize', action='store_true', help='visualize features')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
|
|
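Note: the top-5 readout assembled above (softmax probabilities indexed by top5i) can be reproduced outside the script. A minimal sketch, assuming a local yolov5s-cls.pt checkpoint, that the PyTorch Hub 'custom' entry point returns the raw classification model carrying a names mapping (as the training summary in classify/train.py suggests), and using the ImageNet mean/std that YOLOv5's classification transforms apply:

import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt')  # assumed local -cls checkpoint
model.eval()

tf = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),  # 224 matches the default --imgsz above
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])  # ImageNet stats
im = tf(Image.open('im.jpg')).unsqueeze(0)  # (1, 3, 224, 224)

with torch.no_grad():
    prob = F.softmax(model(im), dim=1)[0]  # class probabilities
top5i = prob.argsort(descending=True)[:5].tolist()  # same top-5 selection as the loop above
print('\n'.join(f'{prob[j]:.2f} {model.names[j]}' for j in top5i))
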
@ -0,0 +1,333 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Train a YOLOv5 classifier model on a classification dataset

Usage - Single-GPU training:
    $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224

Usage - Multi-GPU DDP training:
    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3

Datasets:           --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
YOLOv5-cls models:  --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html
"""

import argparse
import os
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path

import torch
import torch.distributed as dist
import torch.hub as hub
import torch.optim.lr_scheduler as lr_scheduler
import torchvision
from torch.cuda import amp
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from classify import val as validate
from models.experimental import attempt_load
from models.yolo import ClassificationModel, DetectionModel
from utils.dataloaders import create_classification_dataloader
from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status,
                           check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save)
from utils.loggers import GenericLogger
from utils.plots import imshow_cls
from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP,
                               smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first)

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
GIT_INFO = check_git_info()


def train(opt, device):
    init_seeds(opt.seed + 1 + RANK, deterministic=True)
    save_dir, data, bs, epochs, nw, imgsz, pretrained = \
        opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \
        opt.imgsz, str(opt.pretrained).lower() == 'true'
    cuda = device.type != 'cpu'

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last, best = wdir / 'last.pt', wdir / 'best.pt'

    # Save run settings
    yaml_save(save_dir / 'opt.yaml', vars(opt))

    # Logger
    logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None

    # Download Dataset
    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
        data_dir = data if data.is_dir() else (DATASETS_DIR / data)
        if not data_dir.is_dir():
            LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...')
            t = time.time()
            if str(data) == 'imagenet':
                subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True)
            else:
                url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip'
                download(url, dir=data_dir.parent)
            s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
            LOGGER.info(s)

    # Dataloaders
    nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()])  # number of classes
    trainloader = create_classification_dataloader(path=data_dir / 'train',
                                                   imgsz=imgsz,
                                                   batch_size=bs // WORLD_SIZE,
                                                   augment=True,
                                                   cache=opt.cache,
                                                   rank=LOCAL_RANK,
                                                   workers=nw)

    test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val'  # data/test or data/val
    if RANK in {-1, 0}:
        testloader = create_classification_dataloader(path=test_dir,
                                                      imgsz=imgsz,
                                                      batch_size=bs // WORLD_SIZE * 2,
                                                      augment=False,
                                                      cache=opt.cache,
                                                      rank=-1,
                                                      workers=nw)

    # Model
    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
        if Path(opt.model).is_file() or opt.model.endswith('.pt'):
            model = attempt_load(opt.model, device='cpu', fuse=False)
        elif opt.model in torchvision.models.__dict__:  # TorchVision models i.e. resnet50, efficientnet_b0
            model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None)
        else:
            m = hub.list('ultralytics/yolov5')  # + hub.list('pytorch/vision')  # models
            raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m))
        if isinstance(model, DetectionModel):
            LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'")
            model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10)  # convert to classification model
        reshape_classifier_output(model, nc)  # update class count
    for m in model.modules():
        if not pretrained and hasattr(m, 'reset_parameters'):
            m.reset_parameters()
        if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:
            m.p = opt.dropout  # set dropout
    for p in model.parameters():
        p.requires_grad = True  # for training
    model = model.to(device)

    # Info
    if RANK in {-1, 0}:
        model.names = trainloader.dataset.classes  # attach class names
        model.transforms = testloader.dataset.torch_transforms  # attach inference transforms
        model_info(model)
        if opt.verbose:
            LOGGER.info(model)
        images, labels = next(iter(trainloader))
        file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg')
        logger.log_images(file, name='Train Examples')
        logger.log_graph(model, imgsz)  # log model

    # Optimizer
    optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay)

    # Scheduler
    lrf = 0.01  # final lr (fraction of lr0)
    # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf  # cosine
    lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf  # linear
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
    #                                     final_div_factor=1 / 25 / lrf)

    # EMA
    ema = ModelEMA(model) if RANK in {-1, 0} else None

    # DDP mode
    if cuda and RANK != -1:
        model = smart_DDP(model)

    # Train
    t0 = time.time()
    criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing)  # loss function
    best_fitness = 0.0
    scaler = amp.GradScaler(enabled=cuda)
    val = test_dir.stem  # 'val' or 'test'
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n'
                f'Using {nw * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n'
                f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}")
    for epoch in range(epochs):  # loop over the dataset multiple times
        tloss, vloss, fitness = 0.0, 0.0, 0.0  # train loss, val loss, fitness
        model.train()
        if RANK != -1:
            trainloader.sampler.set_epoch(epoch)
        pbar = enumerate(trainloader)
        if RANK in {-1, 0}:
            pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT)
        for i, (images, labels) in pbar:  # progress bar
            images, labels = images.to(device, non_blocking=True), labels.to(device)

            # Forward
            with amp.autocast(enabled=cuda):  # stability issues when enabled
                loss = criterion(model(images), labels)

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            scaler.unscale_(optimizer)  # unscale gradients
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
            if ema:
                ema.update(model)

            if RANK in {-1, 0}:
                # Print
                tloss = (tloss * i + loss.item()) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36

                # Test
                if i == len(pbar) - 1:  # last batch
                    top1, top5, vloss = validate.run(model=ema.ema,
                                                     dataloader=testloader,
                                                     criterion=criterion,
                                                     pbar=pbar)  # test accuracy, loss
                    fitness = top1  # define fitness as top1 accuracy

        # Scheduler
        scheduler.step()

        # Log metrics
        if RANK in {-1, 0}:
            # Best fitness
            if fitness > best_fitness:
                best_fitness = fitness

            # Log
            metrics = {
                'train/loss': tloss,
                f'{val}/loss': vloss,
                'metrics/accuracy_top1': top1,
                'metrics/accuracy_top5': top5,
                'lr/0': optimizer.param_groups[0]['lr']}  # learning rate
            logger.log_metrics(metrics, epoch)

            # Save model
            final_epoch = epoch + 1 == epochs
            if (not opt.nosave) or final_epoch:
                ckpt = {
                    'epoch': epoch,
                    'best_fitness': best_fitness,
                    'model': deepcopy(ema.ema).half(),  # deepcopy(de_parallel(model)).half(),
                    'ema': None,  # deepcopy(ema.ema).half(),
                    'updates': ema.updates,
                    'optimizer': None,  # optimizer.state_dict(),
                    'opt': vars(opt),
                    'git': GIT_INFO,  # {remote, branch, commit} if a git repo
                    'date': datetime.now().isoformat()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fitness:
                    torch.save(ckpt, best)
                del ckpt

    # Train complete
    if RANK in {-1, 0} and final_epoch:
        LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)'
                    f"\nResults saved to {colorstr('bold', save_dir)}"
                    f'\nPredict: python classify/predict.py --weights {best} --source im.jpg'
                    f'\nValidate: python classify/val.py --weights {best} --data {data_dir}'
                    f'\nExport: python export.py --weights {best} --include onnx'
                    f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')"
                    f'\nVisualize: https://netron.app\n')

        # Plot examples
        images, labels = (x[:25] for x in next(iter(testloader)))  # first 25 images and labels
        pred = torch.max(ema.ema(images.to(device)), 1)[1]
        file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg')

        # Log results
        meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()}
        logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch)
        logger.log_model(best, epochs, metadata=meta)


def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path')
    parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...')
    parser.add_argument('--epochs', type=int, default=10, help='total training epochs')
    parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False')
    parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer')
    parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate')
    parser.add_argument('--decay', type=float, default=5e-5, help='weight decay')
    parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon')
    parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head')
    parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)')
    parser.add_argument('--verbose', action='store_true', help='Verbose mode')
    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
    return parser.parse_known_args()[0] if known else parser.parse_args()


def main(opt):
    # Checks
    if RANK in {-1, 0}:
        print_args(vars(opt))
        check_git_status()
        check_requirements()

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')

    # Parameters
    opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)  # increment run

    # Train
    train(opt, device)


def run(**kwargs):
    # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m')
    opt = parse_opt(True)
    for k, v in kwargs.items():
        setattr(opt, k, v)
    main(opt)
    return opt


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
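Note: run() above is the programmatic entry point its docstring comment advertises. A minimal sketch, assuming it is executed with the yolov5 repository root as the working directory so that classify/ and its utils imports resolve:

import sys

sys.path.append('.')  # yolov5 repo root, so `classify` and `utils` are importable

from classify import train

# Keyword equivalent of:
#   python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224
opt = train.run(model='yolov5s-cls.pt', data='imagenette160', epochs=5, imgsz=224)
print(opt.save_dir)  # incremented run dir, e.g. runs/train-cls/exp
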
File diff suppressed because it is too large
@ -0,0 +1,170 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 classification model on a classification dataset

Usage:
    $ bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
    $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate ImageNet

Usage - formats:
    $ python classify/val.py --weights yolov5s-cls.pt             # PyTorch
                                       yolov5s-cls.torchscript    # TorchScript
                                       yolov5s-cls.onnx           # ONNX Runtime or OpenCV DNN with --dnn
                                       yolov5s-cls_openvino_model # OpenVINO
                                       yolov5s-cls.engine         # TensorRT
                                       yolov5s-cls.mlmodel        # CoreML (macOS-only)
                                       yolov5s-cls_saved_model    # TensorFlow SavedModel
                                       yolov5s-cls.pb             # TensorFlow GraphDef
                                       yolov5s-cls.tflite         # TensorFlow Lite
                                       yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
                                       yolov5s-cls_paddle_model   # PaddlePaddle
"""

import argparse
import os
import sys
from pathlib import Path

import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.dataloaders import create_classification_dataloader
from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr,
                           increment_path, print_args)
from utils.torch_utils import select_device, smart_inference_mode


@smart_inference_mode()
def run(
        data=ROOT / '../datasets/mnist',  # dataset dir
        weights=ROOT / 'yolov5s-cls.pt',  # model.pt path(s)
        batch_size=128,  # batch size
        imgsz=224,  # inference size (pixels)
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        verbose=False,  # verbose output
        project=ROOT / 'runs/val-cls',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        criterion=None,
        pbar=None,
):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        save_dir.mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Dataloader
        data = Path(data)
        test_dir = data / 'test' if (data / 'test').exists() else data / 'val'  # data/test or data/val
        dataloader = create_classification_dataloader(path=test_dir,
                                                      imgsz=imgsz,
                                                      batch_size=batch_size,
                                                      augment=False,
                                                      rank=-1,
                                                      workers=workers)

    model.eval()
    pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile())
    n = len(dataloader)  # number of batches
    action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing'
    desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}'
    bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
    with torch.cuda.amp.autocast(enabled=device.type != 'cpu'):
        for images, labels in bar:
            with dt[0]:
                images, labels = images.to(device, non_blocking=True), labels.to(device)

            with dt[1]:
                y = model(images)

            with dt[2]:
                pred.append(y.argsort(1, descending=True)[:, :5])
                targets.append(labels)
                if criterion:
                    loss += criterion(y, labels)

    loss /= n
    pred, targets = torch.cat(pred), torch.cat(targets)
    correct = (targets[:, None] == pred).float()
    acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy
    top1, top5 = acc.mean(0).tolist()

    if pbar:
        pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}'
    if verbose:  # all classes
        LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
        LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
        for i, c in model.names.items():
            acc_i = acc[targets == i]
            top1i, top5i = acc_i.mean(0).tolist()
            LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}')

        # Print results
        t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt)  # speeds per image
        shape = (1, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t)
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")

    return top1, top5, loss


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)')
    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output')
    parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
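Note: the (top1, top5) bookkeeping above reduces to a few tensor operations. A self-contained sketch of the same computation on dummy logits (shapes are illustrative only):

import torch

torch.manual_seed(0)
logits = torch.randn(8, 10)           # 8 samples, 10 classes (stand-in for model output y)
targets = torch.randint(0, 10, (8,))  # ground-truth class ids

pred = logits.argsort(1, descending=True)[:, :5]                  # top-5 class ids per sample
correct = (targets[:, None] == pred).float()                      # 1.0 where a top-5 slot hits the label
acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # per-sample (top1, top5)
top1, top5 = acc.mean(0).tolist()
print(f'top1 {top1:.3f}, top5 {top5:.3f}')
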
@ -0,0 +1,74 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
# Example usage: python train.py --data Argoverse.yaml
# parent
# ├── yolov5
# └── datasets
#     └── Argoverse  ← downloads here (31.3 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/Argoverse  # dataset root dir
train: Argoverse-1.1/images/train/  # train images (relative to 'path') 39384 images
val: Argoverse-1.1/images/val/  # val images (relative to 'path') 15062 images
test: Argoverse-1.1/images/test/  # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: bus
  5: truck
  6: traffic_light
  7: stop_sign


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import json

  from tqdm import tqdm
  from utils.general import download, Path


  def argoverse2yolo(set):
      labels = {}
      a = json.load(open(set, "rb"))
      for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
          img_id = annot['image_id']
          img_name = a['images'][img_id]['name']
          img_label_name = f'{img_name[:-3]}txt'

          cls = annot['category_id']  # instance class id
          x_center, y_center, width, height = annot['bbox']
          x_center = (x_center + width / 2) / 1920.0  # offset and scale
          y_center = (y_center + height / 2) / 1200.0  # offset and scale
          width /= 1920.0  # scale
          height /= 1200.0  # scale

          img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
          if not img_dir.exists():
              img_dir.mkdir(parents=True, exist_ok=True)

          k = str(img_dir / img_label_name)
          if k not in labels:
              labels[k] = []
          labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")

      for k in labels:
          with open(k, "w") as f:
              f.writelines(labels[k])


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
  download(urls, dir=dir, delete=False)

  # Convert
  annotations_dir = 'Argoverse-HD/annotations/'
  (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images')  # rename 'tracking' to 'images'
  for d in "train.json", "val.json":
      argoverse2yolo(dir / annotations_dir / d)  # convert Argoverse annotations to YOLO labels
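Note: the coordinate math in argoverse2yolo above is an offset-and-scale from COCO-style [x, y, w, h] boxes (top-left corner, in pixels) to YOLO's normalized center format, with the 1920x1200 ring-front-center frame size hard-coded. A standalone sketch of the same arithmetic with a worked value:

def coco_to_yolo(x, y, w, h, img_w=1920.0, img_h=1200.0):
    # COCO bbox (top-left x, y, width, height in pixels) -> YOLO (cx, cy, w, h), all normalized
    return (x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h

# A 200x100 px box with its top-left corner at (860, 550) sits exactly at the frame center:
print(coco_to_yolo(860, 550, 200, 100))  # (0.5, 0.5, 0.1041..., 0.0833...)
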
@ -0,0 +1,54 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
# Example usage: python train.py --data GlobalWheat2020.yaml
# parent
# ├── yolov5
# └── datasets
#     └── GlobalWheat2020  ← downloads here (7.0 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/GlobalWheat2020  # dataset root dir
train: # train images (relative to 'path') 3422 images
  - images/arvalis_1
  - images/arvalis_2
  - images/arvalis_3
  - images/ethz_1
  - images/rres_1
  - images/inrae_1
  - images/usask_1
val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
  - images/ethz_1
test: # test images (optional) 1276 images
  - images/utokyo_1
  - images/utokyo_2
  - images/nau_1
  - images/uq_1

# Classes
names:
  0: wheat_head


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  from utils.general import download, Path


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
  download(urls, dir=dir)

  # Make Directories
  for p in 'annotations', 'images', 'labels':
      (dir / p).mkdir(parents=True, exist_ok=True)

  # Move
  for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
           'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
      (dir / p).rename(dir / 'images' / p)  # move to /images
      f = (dir / p).with_suffix('.json')  # json file
      if f.exists():
          f.rename((dir / 'annotations' / p).with_suffix('.json'))  # move to /annotations
File diff suppressed because it is too large
@ -0,0 +1,438 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Objects365 dataset https://www.objects365.org/ by Megvii
# Example usage: python train.py --data Objects365.yaml
# parent
# ├── yolov5
# └── datasets
#     └── Objects365  ← downloads here (712 GB = 367G data + 345G zips)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/Objects365  # dataset root dir
train: images/train  # train images (relative to 'path') 1742289 images
val: images/val  # val images (relative to 'path') 80000 images
test: # test images (optional)

# Classes
names:
  0: Person
  1: Sneakers
  2: Chair
  3: Other Shoes
  4: Hat
  5: Car
  6: Lamp
  7: Glasses
  8: Bottle
  9: Desk
  10: Cup
  11: Street Lights
  12: Cabinet/shelf
  13: Handbag/Satchel
  14: Bracelet
  15: Plate
  16: Picture/Frame
  17: Helmet
  18: Book
  19: Gloves
  20: Storage box
  21: Boat
  22: Leather Shoes
  23: Flower
  24: Bench
  25: Potted Plant
  26: Bowl/Basin
  27: Flag
  28: Pillow
  29: Boots
  30: Vase
  31: Microphone
  32: Necklace
  33: Ring
  34: SUV
  35: Wine Glass
  36: Belt
  37: Monitor/TV
  38: Backpack
  39: Umbrella
  40: Traffic Light
  41: Speaker
  42: Watch
  43: Tie
  44: Trash bin Can
  45: Slippers
  46: Bicycle
  47: Stool
  48: Barrel/bucket
  49: Van
  50: Couch
  51: Sandals
  52: Basket
  53: Drum
  54: Pen/Pencil
  55: Bus
  56: Wild Bird
  57: High Heels
  58: Motorcycle
  59: Guitar
  60: Carpet
  61: Cell Phone
  62: Bread
  63: Camera
  64: Canned
  65: Truck
  66: Traffic cone
  67: Cymbal
  68: Lifesaver
  69: Towel
  70: Stuffed Toy
  71: Candle
  72: Sailboat
  73: Laptop
  74: Awning
  75: Bed
  76: Faucet
  77: Tent
  78: Horse
  79: Mirror
  80: Power outlet
  81: Sink
  82: Apple
  83: Air Conditioner
  84: Knife
  85: Hockey Stick
  86: Paddle
  87: Pickup Truck
  88: Fork
  89: Traffic Sign
  90: Balloon
  91: Tripod
  92: Dog
  93: Spoon
  94: Clock
  95: Pot
  96: Cow
  97: Cake
  98: Dinning Table
  99: Sheep
  100: Hanger
  101: Blackboard/Whiteboard
  102: Napkin
  103: Other Fish
  104: Orange/Tangerine
  105: Toiletry
  106: Keyboard
  107: Tomato
  108: Lantern
  109: Machinery Vehicle
  110: Fan
  111: Green Vegetables
  112: Banana
  113: Baseball Glove
  114: Airplane
  115: Mouse
  116: Train
  117: Pumpkin
  118: Soccer
  119: Skiboard
  120: Luggage
  121: Nightstand
  122: Tea pot
  123: Telephone
  124: Trolley
  125: Head Phone
  126: Sports Car
  127: Stop Sign
  128: Dessert
  129: Scooter
  130: Stroller
  131: Crane
  132: Remote
  133: Refrigerator
  134: Oven
  135: Lemon
  136: Duck
  137: Baseball Bat
  138: Surveillance Camera
  139: Cat
  140: Jug
  141: Broccoli
  142: Piano
  143: Pizza
  144: Elephant
  145: Skateboard
  146: Surfboard
  147: Gun
  148: Skating and Skiing shoes
  149: Gas stove
  150: Donut
  151: Bow Tie
  152: Carrot
  153: Toilet
  154: Kite
  155: Strawberry
  156: Other Balls
  157: Shovel
  158: Pepper
  159: Computer Box
  160: Toilet Paper
  161: Cleaning Products
  162: Chopsticks
  163: Microwave
  164: Pigeon
  165: Baseball
  166: Cutting/chopping Board
  167: Coffee Table
  168: Side Table
  169: Scissors
  170: Marker
  171: Pie
  172: Ladder
  173: Snowboard
  174: Cookies
  175: Radiator
  176: Fire Hydrant
  177: Basketball
  178: Zebra
  179: Grape
  180: Giraffe
  181: Potato
  182: Sausage
  183: Tricycle
  184: Violin
  185: Egg
  186: Fire Extinguisher
  187: Candy
  188: Fire Truck
  189: Billiards
  190: Converter
  191: Bathtub
  192: Wheelchair
  193: Golf Club
  194: Briefcase
  195: Cucumber
  196: Cigar/Cigarette
  197: Paint Brush
  198: Pear
  199: Heavy Truck
  200: Hamburger
  201: Extractor
  202: Extension Cord
  203: Tong
  204: Tennis Racket
  205: Folder
  206: American Football
  207: earphone
  208: Mask
  209: Kettle
  210: Tennis
  211: Ship
  212: Swing
  213: Coffee Machine
  214: Slide
  215: Carriage
  216: Onion
  217: Green beans
  218: Projector
  219: Frisbee
  220: Washing Machine/Drying Machine
  221: Chicken
  222: Printer
  223: Watermelon
  224: Saxophone
  225: Tissue
  226: Toothbrush
  227: Ice cream
  228: Hot-air balloon
  229: Cello
  230: French Fries
  231: Scale
  232: Trophy
  233: Cabbage
  234: Hot dog
  235: Blender
  236: Peach
  237: Rice
  238: Wallet/Purse
  239: Volleyball
  240: Deer
  241: Goose
  242: Tape
  243: Tablet
  244: Cosmetics
  245: Trumpet
  246: Pineapple
  247: Golf Ball
  248: Ambulance
  249: Parking meter
  250: Mango
  251: Key
  252: Hurdle
  253: Fishing Rod
  254: Medal
  255: Flute
  256: Brush
  257: Penguin
  258: Megaphone
  259: Corn
  260: Lettuce
  261: Garlic
  262: Swan
  263: Helicopter
  264: Green Onion
  265: Sandwich
  266: Nuts
  267: Speed Limit Sign
  268: Induction Cooker
  269: Broom
  270: Trombone
  271: Plum
  272: Rickshaw
  273: Goldfish
  274: Kiwi fruit
  275: Router/modem
  276: Poker Card
  277: Toaster
  278: Shrimp
  279: Sushi
  280: Cheese
  281: Notepaper
  282: Cherry
  283: Pliers
  284: CD
  285: Pasta
  286: Hammer
  287: Cue
  288: Avocado
  289: Hamimelon
  290: Flask
  291: Mushroom
  292: Screwdriver
  293: Soap
  294: Recorder
  295: Bear
  296: Eggplant
  297: Board Eraser
  298: Coconut
  299: Tape Measure/Ruler
  300: Pig
  301: Showerhead
  302: Globe
  303: Chips
  304: Steak
  305: Crosswalk Sign
  306: Stapler
  307: Camel
  308: Formula 1
  309: Pomegranate
  310: Dishwasher
  311: Crab
  312: Hoverboard
  313: Meat ball
  314: Rice Cooker
  315: Tuba
  316: Calculator
  317: Papaya
  318: Antelope
  319: Parrot
  320: Seal
  321: Butterfly
  322: Dumbbell
  323: Donkey
  324: Lion
  325: Urinal
  326: Dolphin
  327: Electric Drill
  328: Hair Dryer
  329: Egg tart
  330: Jellyfish
  331: Treadmill
  332: Lighter
  333: Grapefruit
  334: Game board
  335: Mop
  336: Radish
  337: Baozi
  338: Target
  339: French
  340: Spring Rolls
  341: Monkey
  342: Rabbit
  343: Pencil Case
  344: Yak
  345: Red Cabbage
  346: Binoculars
  347: Asparagus
  348: Barbell
  349: Scallop
  350: Noddles
  351: Comb
  352: Dumpling
  353: Oyster
  354: Table Tennis paddle
  355: Cosmetics Brush/Eyeliner Pencil
  356: Chainsaw
  357: Eraser
  358: Lobster
  359: Durian
  360: Okra
  361: Lipstick
  362: Cosmetics Mirror
  363: Curling
  364: Table Tennis


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  from tqdm import tqdm

  from utils.general import Path, check_requirements, download, np, xyxy2xywhn

  check_requirements(('pycocotools>=2.0',))
  from pycocotools.coco import COCO

  # Make Directories
  dir = Path(yaml['path'])  # dataset root dir
  for p in 'images', 'labels':
      (dir / p).mkdir(parents=True, exist_ok=True)
      for q in 'train', 'val':
          (dir / p / q).mkdir(parents=True, exist_ok=True)

  # Train, Val Splits
  for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
      print(f"Processing {split} in {patches} patches ...")
      images, labels = dir / 'images' / split, dir / 'labels' / split

      # Download
      url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
      if split == 'train':
          download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False)  # annotations json
          download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8)
      elif split == 'val':
          download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False)  # annotations json
          download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)
          download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)

      # Move
      for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
          f.rename(images / f.name)  # move to /images/{split}

      # Labels
      coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
      names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
      for cid, cat in enumerate(names):
          catIds = coco.getCatIds(catNms=[cat])
          imgIds = coco.getImgIds(catIds=catIds)
          for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
              width, height = im["width"], im["height"]
              path = Path(im["file_name"])  # image filename
              try:
                  with open(labels / path.with_suffix('.txt').name, 'a') as file:
                      annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
                      for a in coco.loadAnns(annIds):
                          x, y, w, h = a['bbox']  # bounding box in xywh (xy top-left corner)
                          xyxy = np.array([x, y, x + w, y + h])[None]  # pixels(1,4)
                          x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0]  # normalized and clipped
                          file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
              except Exception as e:
                  print(e)
@ -0,0 +1,53 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
# Example usage: python train.py --data SKU-110K.yaml
# parent
# ├── yolov5
# └── datasets
#     └── SKU-110K  ← downloads here (13.6 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/SKU-110K  # dataset root dir
train: train.txt  # train images (relative to 'path') 8219 images
val: val.txt  # val images (relative to 'path') 588 images
test: test.txt  # test images (optional) 2936 images

# Classes
names:
  0: object


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import shutil
  from tqdm import tqdm
  from utils.general import np, pd, Path, download, xyxy2xywh


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  parent = Path(dir.parent)  # download dir
  urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
  download(urls, dir=parent, delete=False)

  # Rename directories
  if dir.exists():
      shutil.rmtree(dir)
  (parent / 'SKU110K_fixed').rename(dir)  # rename dir
  (dir / 'labels').mkdir(parents=True, exist_ok=True)  # create labels dir

  # Convert labels
  names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height'  # column names
  for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
      x = pd.read_csv(dir / 'annotations' / d, names=names).values  # annotations
      images, unique_images = x[:, 0], np.unique(x[:, 0])
      with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
          f.writelines(f'./images/{s}\n' for s in unique_images)
      for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
          cls = 0  # single-class dataset
          with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
              for r in x[images == im]:
                  w, h = r[6], r[7]  # image width, height
                  xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0]  # instance
                  f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n")  # write label
@ -0,0 +1,100 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
# Example usage: python train.py --data VOC.yaml
# parent
# ├── yolov5
# └── datasets
#     └── VOC  ← downloads here (2.8 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/VOC
train: # train images (relative to 'path') 16551 images
  - images/train2012
  - images/train2007
  - images/val2012
  - images/val2007
val: # val images (relative to 'path') 4952 images
  - images/test2007
test: # test images (optional)
  - images/test2007

# Classes
names:
  0: aeroplane
  1: bicycle
  2: bird
  3: boat
  4: bottle
  5: bus
  6: car
  7: cat
  8: chair
  9: cow
  10: diningtable
  11: dog
  12: horse
  13: motorbike
  14: person
  15: pottedplant
  16: sheep
  17: sofa
  18: train
  19: tvmonitor


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import xml.etree.ElementTree as ET

  from tqdm import tqdm
  from utils.general import download, Path


  def convert_label(path, lb_path, year, image_id):
      def convert_box(size, box):
          dw, dh = 1. / size[0], 1. / size[1]
          x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
          return x * dw, y * dh, w * dw, h * dh

      in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
      out_file = open(lb_path, 'w')
      tree = ET.parse(in_file)
      root = tree.getroot()
      size = root.find('size')
      w = int(size.find('width').text)
      h = int(size.find('height').text)

      names = list(yaml['names'].values())  # names list
      for obj in root.iter('object'):
          cls = obj.find('name').text
          if cls in names and int(obj.find('difficult').text) != 1:
              xmlbox = obj.find('bndbox')
              bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
              cls_id = names.index(cls)  # class id
              out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
  urls = [f'{url}VOCtrainval_06-Nov-2007.zip',  # 446MB, 5012 images
          f'{url}VOCtest_06-Nov-2007.zip',  # 438MB, 4953 images
          f'{url}VOCtrainval_11-May-2012.zip']  # 1.95GB, 17126 images
  download(urls, dir=dir / 'images', delete=False, curl=True, threads=3)

  # Convert
  path = dir / 'images/VOCdevkit'
  for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
      imgs_path = dir / 'images' / f'{image_set}{year}'
      lbs_path = dir / 'labels' / f'{image_set}{year}'
      imgs_path.mkdir(exist_ok=True, parents=True)
      lbs_path.mkdir(exist_ok=True, parents=True)

      with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
          image_ids = f.read().strip().split()
      for id in tqdm(image_ids, desc=f'{image_set}{year}'):
          f = path / f'VOC{year}/JPEGImages/{id}.jpg'  # old img path
          lb_path = (lbs_path / f.name).with_suffix('.txt')  # new label path
          f.rename(imgs_path / f.name)  # move image
          convert_label(path, lb_path, year, id)  # convert labels to YOLO format
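Note: convert_box above starts from VOC's (xmin, xmax, ymin, ymax) corner format and keeps a -1 shift for VOC's 1-based pixel coordinates before normalizing. A standalone sketch of the same arithmetic with a worked value:

def voc_to_yolo(size, box):
    # size: (image_w, image_h); box: (xmin, xmax, ymin, ymax) in 1-based VOC pixels
    dw, dh = 1. / size[0], 1. / size[1]
    x = (box[0] + box[1]) / 2.0 - 1  # box center x, shifted to 0-based pixels
    y = (box[2] + box[3]) / 2.0 - 1  # box center y
    w, h = box[1] - box[0], box[3] - box[2]
    return x * dw, y * dh, w * dw, h * dh

# A 100x200 px object centered in a 500x400 image:
print(voc_to_yolo((500, 400), (201, 301, 101, 301)))  # (0.5, 0.5, 0.2, 0.5)
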
@ -0,0 +1,70 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
# Example usage: python train.py --data VisDrone.yaml
# parent
# ├── yolov5
# └── datasets
#     └── VisDrone  ← downloads here (2.3 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/VisDrone  # dataset root dir
train: VisDrone2019-DET-train/images  # train images (relative to 'path') 6471 images
val: VisDrone2019-DET-val/images  # val images (relative to 'path') 548 images
test: VisDrone2019-DET-test-dev/images  # test images (optional) 1610 images

# Classes
names:
  0: pedestrian
  1: people
  2: bicycle
  3: car
  4: van
  5: truck
  6: tricycle
  7: awning-tricycle
  8: bus
  9: motor


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  from utils.general import download, os, Path

  def visdrone2yolo(dir):
      from PIL import Image
      from tqdm import tqdm

      def convert_box(size, box):
          # Convert VisDrone box to YOLO xywh box
          dw = 1. / size[0]
          dh = 1. / size[1]
          return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh

      (dir / 'labels').mkdir(parents=True, exist_ok=True)  # make labels directory
      pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
      for f in pbar:
          img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
          lines = []
          with open(f, 'r') as file:  # read annotation.txt
              for row in [x.split(',') for x in file.read().strip().splitlines()]:
                  if row[4] == '0':  # VisDrone 'ignored regions' class 0
                      continue
                  cls = int(row[5]) - 1
                  box = convert_box(img_size, tuple(map(int, row[:4])))
                  lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
              with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
                  fl.writelines(lines)  # write label.txt


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
  download(urls, dir=dir, curl=True, threads=4)

  # Convert
  for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
      visdrone2yolo(dir / d)  # convert VisDrone annotations to YOLO labels
@ -0,0 +1,116 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# COCO 2017 dataset http://cocodataset.org by Microsoft
# Example usage: python train.py --data coco.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco  ← downloads here (20.1 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco  # dataset root dir
train: train2017.txt  # train images (relative to 'path') 118287 images
val: val2017.txt  # val images (relative to 'path') 5000 images
test: test-dev2017.txt  # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: |
  from utils.general import download, Path


  # Download labels
  segments = False  # segment or box labels
  dir = Path(yaml['path'])  # dataset root dir
  url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
  urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')]  # labels
  download(urls, dir=dir.parent)

  # Download data
  urls = ['http://images.cocodataset.org/zips/train2017.zip',  # 19G, 118k images
          'http://images.cocodataset.org/zips/val2017.zip',  # 1G, 5k images
          'http://images.cocodataset.org/zips/test2017.zip']  # 7G, 41k images (optional)
  download(urls, dir=dir / 'images', threads=3)
@ -0,0 +1,101 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Example usage: python train.py --data coco128.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco128-seg  ← downloads here (7 MB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco128-seg  # dataset root dir
train: images/train2017  # train images (relative to 'path') 128 images
val: images/train2017  # val images (relative to 'path') 128 images
test: # test images (optional)

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: https://ultralytics.com/assets/coco128-seg.zip
@@ -0,0 +1,101 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Example usage: python train.py --data coco128.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco128  ← downloads here (7 MB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco128  # dataset root dir
train: images/train2017  # train images (relative to 'path') 128 images
val: images/train2017  # val images (relative to 'path') 128 images
test:  # test images (optional)

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: https://ultralytics.com/assets/coco128.zip
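Editor's note: the dataset YAMLs above all follow the same contract — `path` is the dataset root, `train`/`val`/`test` resolve relative to it, and `names` maps contiguous class indices to labels. A minimal sketch of how a consumer might resolve these fields, assuming PyYAML is installed and the file sits at data/coco128.yaml:

from pathlib import Path

import yaml  # PyYAML

with open('data/coco128.yaml', encoding='utf-8') as f:
    cfg = yaml.safe_load(f)

root = Path(cfg['path'])          # dataset root dir, e.g. ../datasets/coco128
train_dir = root / cfg['train']   # images/train2017 resolved against the root
val_dir = root / cfg['val']
names = cfg['names']              # {0: 'person', 1: 'bicycle', ...}

print(f'{len(names)} classes, train={train_dir}, val={val_dir}')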
Binary file not shown. (added image, 476 KiB)
Binary file not shown. (added image, 165 KiB)
@@ -0,0 +1,22 @@
#!/bin/bash
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Download latest models from https://github.com/ultralytics/yolov5/releases
# Example usage: bash data/scripts/download_weights.sh
# parent
# └── yolov5
#     ├── yolov5s.pt  ← downloads here
#     ├── yolov5m.pt
#     └── ...

python - <<EOF
from utils.downloads import attempt_download

p5 = list('nsmlx')  # P5 models
p6 = [f'{x}6' for x in p5]  # P6 models
cls = [f'{x}-cls' for x in p5]  # classification models
seg = [f'{x}-seg' for x in p5]  # segmentation models

for x in p5 + p6 + cls + seg:
    attempt_download(f'weights/yolov5{x}.pt')

EOF
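Editor's note: the heredoc above expands to twenty checkpoint names (five sizes × four model families). A standalone check of exactly which filenames it will request, in pure Python with no repo imports:

p5 = list('nsmlx')                   # P5 models: n, s, m, l, x
p6 = [f'{x}6' for x in p5]           # P6 models: n6, s6, ...
cls = [f'{x}-cls' for x in p5]       # classification models
seg = [f'{x}-seg' for x in p5]       # segmentation models

names = [f'yolov5{x}.pt' for x in p5 + p6 + cls + seg]
print(len(names), names)  # 20 ['yolov5n.pt', 'yolov5s.pt', ..., 'yolov5x-seg.pt']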
@@ -0,0 +1,56 @@
#!/bin/bash
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Download COCO 2017 dataset http://cocodataset.org
# Example usage: bash data/scripts/get_coco.sh
# parent
# ├── yolov5
# └── datasets
#     └── coco  ← downloads here

# Arguments (optional) Usage: bash data/scripts/get_coco.sh --train --val --test --segments
if [ "$#" -gt 0 ]; then
  for opt in "$@"; do
    case "${opt}" in
      --train) train=true ;;
      --val) val=true ;;
      --test) test=true ;;
      --segments) segments=true ;;
    esac
  done
else
  train=true
  val=true
  test=false
  segments=false
fi

# Download/unzip labels
d='../datasets' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
if [ "$segments" == "true" ]; then
  f='coco2017labels-segments.zip' # 168 MB
else
  f='coco2017labels.zip' # 46 MB
fi
echo 'Downloading' $url$f ' ...'
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &

# Download/unzip images
d='../datasets/coco/images' # unzip directory
url=http://images.cocodataset.org/zips/
if [ "$train" == "true" ]; then
  f='train2017.zip' # 19G, 118k images
  echo 'Downloading' $url$f '...'
  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
fi
if [ "$val" == "true" ]; then
  f='val2017.zip' # 1G, 5k images
  echo 'Downloading' $url$f '...'
  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
fi
if [ "$test" == "true" ]; then
  f='test2017.zip' # 7G, 41k images (optional)
  echo 'Downloading' $url$f '...'
  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
fi
wait # finish background tasks
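Editor's note: the `&` / `wait` pattern above runs each download-and-unzip pipeline as a background job and blocks until all complete. A sketch of the same idea in Python using concurrent.futures (the URL list and destination path are illustrative, not from the script):

import shutil
import urllib.request
import zipfile
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

def fetch_and_unzip(url: str, dest: Path) -> None:
    # One "background job": download to a temp zip, extract, then delete it
    dest.mkdir(parents=True, exist_ok=True)
    zip_path = dest / Path(url).name
    with urllib.request.urlopen(url) as r, open(zip_path, 'wb') as f:
        shutil.copyfileobj(r, f)
    with zipfile.ZipFile(zip_path) as z:
        z.extractall(dest)
    zip_path.unlink()

urls = ['http://images.cocodataset.org/zips/val2017.zip']  # illustrative subset
with ThreadPoolExecutor() as pool:  # exiting the block waits, like `wait`
    pool.map(fetch_and_unzip, urls, [Path('../datasets/coco/images')] * len(urls))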
@@ -0,0 +1,17 @@
#!/bin/bash
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
# Example usage: bash data/scripts/get_coco128.sh
# parent
# ├── yolov5
# └── datasets
#     └── coco128  ← downloads here

# Download/unzip images and labels
d='../datasets' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f='coco128.zip' # or 'coco128-segments.zip', 68 MB
echo 'Downloading' $url$f ' ...'
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &

wait # finish background tasks
@@ -0,0 +1,51 @@
#!/bin/bash
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Download ILSVRC2012 ImageNet dataset https://image-net.org
# Example usage: bash data/scripts/get_imagenet.sh
# parent
# ├── yolov5
# └── datasets
#     └── imagenet  ← downloads here

# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val
if [ "$#" -gt 0 ]; then
  for opt in "$@"; do
    case "${opt}" in
      --train) train=true ;;
      --val) val=true ;;
    esac
  done
else
  train=true
  val=true
fi

# Make dir
d='../datasets/imagenet' # unzip directory
mkdir -p $d && cd $d

# Download/unzip train
if [ "$train" == "true" ]; then
  wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_train.tar # download 138G, 1281167 images
  mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
  tar -xf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar
  find . -name "*.tar" | while read NAME; do
    mkdir -p "${NAME%.tar}"
    tar -xf "${NAME}" -C "${NAME%.tar}"
    rm -f "${NAME}"
  done
  cd ..
fi

# Download/unzip val
if [ "$val" == "true" ]; then
  wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar # download 6.3G, 50000 images
  mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xf ILSVRC2012_img_val.tar
  wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash # move into subdirs
fi

# Delete corrupted image (optional: PNG under JPEG name that may cause dataloaders to fail)
# rm train/n04266014/n04266014_10835.JPEG

# TFRecords (optional)
# wget https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_lsvrc_2015_synsets.txt
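Editor's note: ImageNet train ships as one tar containing 1,000 per-class tars; the `find ... | while read` loop above expands each inner tar into its own class directory. A Python equivalent of that inner loop, sketched with the standard tarfile module (the path is illustrative):

import tarfile
from pathlib import Path

train_dir = Path('../datasets/imagenet/train')  # illustrative location
for inner in sorted(train_dir.glob('*.tar')):   # one tar per WordNet class, e.g. n01440764.tar
    class_dir = inner.with_suffix('')           # strip .tar -> class directory name
    class_dir.mkdir(exist_ok=True)
    with tarfile.open(inner) as t:
        t.extractall(class_dir)                 # images land in train/<wnid>/
    inner.unlink()                              # remove the inner tar once extracted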
@@ -0,0 +1,153 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
# --------  DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command!  --------
# Example usage: python train.py --data xView.yaml
# parent
# ├── yolov5
# └── datasets
#     └── xView  ← downloads here (20.7 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/xView  # dataset root dir
train: images/autosplit_train.txt  # train images (relative to 'path') 90% of 847 train images
val: images/autosplit_val.txt  # val images (relative to 'path') 10% of 847 train images

# Classes
names:
  0: Fixed-wing Aircraft
  1: Small Aircraft
  2: Cargo Plane
  3: Helicopter
  4: Passenger Vehicle
  5: Small Car
  6: Bus
  7: Pickup Truck
  8: Utility Truck
  9: Truck
  10: Cargo Truck
  11: Truck w/Box
  12: Truck Tractor
  13: Trailer
  14: Truck w/Flatbed
  15: Truck w/Liquid
  16: Crane Truck
  17: Railway Vehicle
  18: Passenger Car
  19: Cargo Car
  20: Flat Car
  21: Tank car
  22: Locomotive
  23: Maritime Vessel
  24: Motorboat
  25: Sailboat
  26: Tugboat
  27: Barge
  28: Fishing Vessel
  29: Ferry
  30: Yacht
  31: Container Ship
  32: Oil Tanker
  33: Engineering Vehicle
  34: Tower crane
  35: Container Crane
  36: Reach Stacker
  37: Straddle Carrier
  38: Mobile Crane
  39: Dump Truck
  40: Haul Truck
  41: Scraper/Tractor
  42: Front loader/Bulldozer
  43: Excavator
  44: Cement Mixer
  45: Ground Grader
  46: Hut/Tent
  47: Shed
  48: Building
  49: Aircraft Hangar
  50: Damaged Building
  51: Facility
  52: Construction Site
  53: Vehicle Lot
  54: Helipad
  55: Storage Tank
  56: Shipping container lot
  57: Shipping Container
  58: Pylon
  59: Tower


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import json
  import os
  from pathlib import Path

  import numpy as np
  from PIL import Image
  from tqdm import tqdm

  from utils.dataloaders import autosplit
  from utils.general import download, xyxy2xywhn


  def convert_labels(fname=Path('xView/xView_train.geojson')):
      # Convert xView geoJSON labels to YOLO format
      path = fname.parent
      with open(fname) as f:
          print(f'Loading {fname}...')
          data = json.load(f)

      # Make dirs
      labels = Path(path / 'labels' / 'train')
      os.system(f'rm -rf {labels}')
      labels.mkdir(parents=True, exist_ok=True)

      # xView classes 11-94 to 0-59
      xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,
                           12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,
                           29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,
                           47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]

      shapes = {}
      for feature in tqdm(data['features'], desc=f'Converting {fname}'):
          p = feature['properties']
          if p['bounds_imcoords']:
              id = p['image_id']
              file = path / 'train_images' / id
              if file.exists():  # 1395.tif missing
                  try:
                      box = np.array([int(num) for num in p['bounds_imcoords'].split(",")])
                      assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'
                      cls = p['type_id']
                      cls = xview_class2index[int(cls)]  # xView class to 0-59
                      assert 59 >= cls >= 0, f'incorrect class index {cls}'

                      # Write YOLO label
                      if id not in shapes:
                          shapes[id] = Image.open(file).size
                      box = xyxy2xywhn(box[None].astype(float), w=shapes[id][0], h=shapes[id][1], clip=True)
                      with open((labels / id).with_suffix('.txt'), 'a') as f:
                          f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n")  # write label.txt
                  except Exception as e:
                      print(f'WARNING: skipping one label for {file}: {e}')


  # Download manually from https://challenge.xviewdataset.org
  dir = Path(yaml['path'])  # dataset root dir
  # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip',  # train labels
  #         'https://d307kc0mrhucc3.cloudfront.net/train_images.zip',  # 15G, 847 train images
  #         'https://d307kc0mrhucc3.cloudfront.net/val_images.zip']  # 5G, 282 val images (no labels)
  # download(urls, dir=dir, delete=False)

  # Convert labels
  convert_labels(dir / 'xView_train.geojson')

  # Move images
  images = Path(dir / 'images')
  images.mkdir(parents=True, exist_ok=True)
  Path(dir / 'train_images').rename(dir / 'images' / 'train')
  Path(dir / 'val_images').rename(dir / 'images' / 'val')

  # Split
  autosplit(dir / 'images' / 'train')
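Editor's note: the `xview_class2index` table above maps xView's sparse type IDs (11-94, with gaps) down to the dense 0-59 range YOLO expects, with -1 marking IDs that have no class. A tiny self-contained check of that remapping idea, using a short made-up table rather than the real 95-entry one:

# Dense remap of sparse class IDs, mirroring the xview_class2index technique
sparse2dense = [-1, -1, 0, -1, 1, 2]  # hypothetical source IDs 0-5; -1 = unused

def remap(source_id: int) -> int:
    dense = sparse2dense[source_id]
    assert dense >= 0, f'source id {source_id} has no target class'
    return dense

print(remap(2), remap(4), remap(5))  # -> 0 1 2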
@@ -0,0 +1,261 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.

Usage - sources:
    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
                                                     img.jpg                         # image
                                                     vid.mp4                         # video
                                                     screen                          # screenshot
                                                     path/                           # directory
                                                     list.txt                        # list of images
                                                     list.streams                    # list of streams
                                                     'path/*.jpg'                    # glob
                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python detect.py --weights yolov5s.pt                 # PyTorch
                                 yolov5s.torchscript        # TorchScript
                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                 yolov5s_openvino_model     # OpenVINO
                                 yolov5s.engine             # TensorRT
                                 yolov5s.mlmodel            # CoreML (macOS-only)
                                 yolov5s_saved_model        # TensorFlow SavedModel
                                 yolov5s.pb                 # TensorFlow GraphDef
                                 yolov5s.tflite             # TensorFlow Lite
                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
                                 yolov5s_paddle_model       # PaddlePaddle
"""

import argparse
import os
import platform
import sys
from pathlib import Path

import torch

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode


@smart_inference_mode()
def run(
        weights=ROOT / 'yolov5s.pt',  # model path or triton URL
        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        imgsz=(640, 640),  # inference size (height, width)
        conf_thres=0.25,  # confidence threshold
        iou_thres=0.45,  # NMS IOU threshold
        max_det=1000,  # maximum detections per image
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False,  # show results
        save_txt=False,  # save results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_crop=False,  # save cropped prediction boxes
        nosave=False,  # do not save images/videos
        classes=None,  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms=False,  # class-agnostic NMS
        augment=False,  # augmented inference
        visualize=False,  # visualize features
        update=False,  # update all models
        project=ROOT / 'runs/detect',  # save results to project/name
        name='exp',  # save results to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        line_thickness=3,  # bounding box thickness (pixels)
        hide_labels=False,  # hide labels
        hide_conf=False,  # hide confidences
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        vid_stride=1,  # video frame-rate stride
):
    source = str(source)
    save_img = not nosave and not source.endswith('.txt')  # save inference images
    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
    screenshot = source.lower().startswith('screen')
    if is_url and is_file:
        source = check_file(source)  # download

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Load model
    device = select_device(device)
    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_img_size(imgsz, s=stride)  # check image size

    # Dataloader
    bs = 1  # batch_size
    if webcam:
        view_img = check_imshow(warn=True)
        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
        bs = len(dataset)
    elif screenshot:
        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
    vid_path, vid_writer = [None] * bs, [None] * bs

    # Run inference
    model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup
    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
    for path, im, im0s, vid_cap, s in dataset:
        with dt[0]:
            im = torch.from_numpy(im).to(model.device)
            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            if len(im.shape) == 3:
                im = im[None]  # expand for batch dim

        # Inference
        with dt[1]:
            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
            pred = model(im, augment=augment, visualize=visualize)

        # NMS
        with dt[2]:
            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)

        # Second-stage classifier (optional)
        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)

        # Process predictions
        for i, det in enumerate(pred):  # per image
            seen += 1
            if webcam:  # batch_size >= 1
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
                s += f'{i}: '
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # im.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
            s += '%gx%g ' % im.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            imc = im0.copy() if save_crop else im0  # for save_crop
            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, 5].unique():
                    n = (det[:, 5] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                        with open(f'{txt_path}.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')

                    if save_img or save_crop or view_img:  # Add bbox to image
                        c = int(cls)  # integer class
                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                        annotator.box_label(xyxy, label, color=colors(c, True))
                    if save_crop:
                        save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)

            # Stream results
            im0 = annotator.result()
            if view_img:
                if platform.system() == 'Linux' and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer[i].write(im0)

        # Print time (inference-only)
        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")

    # Print results
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    if update:
        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='show results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--visualize', action='store_true', help='visualize features')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
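Editor's note: beyond the CLI, detect.py's run() is importable. A minimal programmatic call, assuming you execute it from the repository root so its relative imports resolve, and that a local yolov5s.pt exists (the paths are illustrative):

from detect import run

run(
    weights='yolov5s.pt',   # illustrative local checkpoint
    source='data/images',   # folder of sample images shipped with the repo
    conf_thres=0.4,         # keep only fairly confident boxes
    save_txt=True,          # also write YOLO-format label files
)
# Results land in runs/detect/exp*, as printed by the final LOGGER lines above.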
@@ -0,0 +1,672 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit

Format                  | `export.py --include` | Model
---                     | ---                   | ---
PyTorch                 | -                     | yolov5s.pt
TorchScript             | `torchscript`         | yolov5s.torchscript
ONNX                    | `onnx`                | yolov5s.onnx
OpenVINO                | `openvino`            | yolov5s_openvino_model/
TensorRT                | `engine`              | yolov5s.engine
CoreML                  | `coreml`              | yolov5s.mlmodel
TensorFlow SavedModel   | `saved_model`         | yolov5s_saved_model/
TensorFlow GraphDef     | `pb`                  | yolov5s.pb
TensorFlow Lite         | `tflite`              | yolov5s.tflite
TensorFlow Edge TPU     | `edgetpu`             | yolov5s_edgetpu.tflite
TensorFlow.js           | `tfjs`                | yolov5s_web_model/
PaddlePaddle            | `paddle`              | yolov5s_paddle_model/

Requirements:
    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU
    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU

Usage:
    $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...

Inference:
    $ python detect.py --weights yolov5s.pt                 # PyTorch
                                 yolov5s.torchscript        # TorchScript
                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                 yolov5s_openvino_model     # OpenVINO
                                 yolov5s.engine             # TensorRT
                                 yolov5s.mlmodel            # CoreML (macOS-only)
                                 yolov5s_saved_model        # TensorFlow SavedModel
                                 yolov5s.pb                 # TensorFlow GraphDef
                                 yolov5s.tflite             # TensorFlow Lite
                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
                                 yolov5s_paddle_model       # PaddlePaddle

TensorFlow.js:
    $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
    $ npm install
    $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
    $ npm start
"""

import argparse
import contextlib
import json
import os
import platform
import re
import subprocess
import sys
import time
import warnings
from pathlib import Path

import pandas as pd
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
if platform.system() != 'Windows':
    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.experimental import attempt_load
from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
from utils.dataloaders import LoadImages
from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version,
                           check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save)
from utils.torch_utils import select_device, smart_inference_mode

MACOS = platform.system() == 'Darwin'  # macOS environment


def export_formats():
    # YOLOv5 export formats
    x = [
        ['PyTorch', '-', '.pt', True, True],
        ['TorchScript', 'torchscript', '.torchscript', True, True],
        ['ONNX', 'onnx', '.onnx', True, True],
        ['OpenVINO', 'openvino', '_openvino_model', True, False],
        ['TensorRT', 'engine', '.engine', False, True],
        ['CoreML', 'coreml', '.mlmodel', True, False],
        ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
        ['TensorFlow GraphDef', 'pb', '.pb', True, True],
        ['TensorFlow Lite', 'tflite', '.tflite', True, False],
        ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
        ['TensorFlow.js', 'tfjs', '_web_model', False, False],
        ['PaddlePaddle', 'paddle', '_paddle_model', True, True],]
    return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])


def try_export(inner_func):
    # YOLOv5 export decorator, i.e. @try_export
    inner_args = get_default_args(inner_func)

    def outer_func(*args, **kwargs):
        prefix = inner_args['prefix']
        try:
            with Profile() as dt:
                f, model = inner_func(*args, **kwargs)
            LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
            return f, model
        except Exception as e:
            LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
            return None, None

    return outer_func


@try_export
def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
    # YOLOv5 TorchScript model export
    LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
    f = file.with_suffix('.torchscript')

    ts = torch.jit.trace(model, im, strict=False)
    d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names}
    extra_files = {'config.txt': json.dumps(d)}  # torch._C.ExtraFilesMap()
    if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
        optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
    else:
        ts.save(str(f), _extra_files=extra_files)
    return f, None


@try_export
def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
    # YOLOv5 ONNX export
    check_requirements('onnx>=1.12.0')
    import onnx

    LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
    f = file.with_suffix('.onnx')

    output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
    if dynamic:
        dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}}  # shape(1,3,640,640)
        if isinstance(model, SegmentationModel):
            dynamic['output0'] = {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)
            dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'}  # shape(1,32,160,160)
        elif isinstance(model, DetectionModel):
            dynamic['output0'] = {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)

    torch.onnx.export(
        model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
        im.cpu() if dynamic else im,
        f,
        verbose=False,
        opset_version=opset,
        do_constant_folding=True,  # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
        input_names=['images'],
        output_names=output_names,
        dynamic_axes=dynamic or None)

    # Checks
    model_onnx = onnx.load(f)  # load onnx model
    onnx.checker.check_model(model_onnx)  # check onnx model

    # Metadata
    d = {'stride': int(max(model.stride)), 'names': model.names}
    for k, v in d.items():
        meta = model_onnx.metadata_props.add()
        meta.key, meta.value = k, str(v)
    onnx.save(model_onnx, f)

    # Simplify
    if simplify:
        try:
            cuda = torch.cuda.is_available()
            check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
            import onnxsim

            LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
            model_onnx, check = onnxsim.simplify(model_onnx)
            assert check, 'assert check failed'
            onnx.save(model_onnx, f)
        except Exception as e:
            LOGGER.info(f'{prefix} simplifier failure: {e}')
    return f, model_onnx


@try_export
def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
    # YOLOv5 OpenVINO export
    check_requirements('openvino-dev')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
    import openvino.inference_engine as ie

    LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
    f = str(file).replace('.pt', f'_openvino_model{os.sep}')

    args = [
        'mo',
        '--input_model',
        str(file.with_suffix('.onnx')),
        '--output_dir',
        f,
        '--data_type',
        ('FP16' if half else 'FP32'),]
    subprocess.run(args, check=True, env=os.environ)  # export
    yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata)  # add metadata.yaml
    return f, None


@try_export
def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')):
    # YOLOv5 Paddle export
    check_requirements(('paddlepaddle', 'x2paddle'))
    import x2paddle
    from x2paddle.convert import pytorch2paddle

    LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...')
    f = str(file).replace('.pt', f'_paddle_model{os.sep}')

    pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im])  # export
    yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata)  # add metadata.yaml
    return f, None


@try_export
def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
    # YOLOv5 CoreML export
    check_requirements('coremltools')
    import coremltools as ct

    LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
    f = file.with_suffix('.mlmodel')

    ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
    ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
    bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
    if bits < 32:
        if MACOS:  # quantization only supported on macOS
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=DeprecationWarning)  # suppress numpy==1.20 float warning
                ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
        else:
            print(f'{prefix} quantization only supported on macOS, skipping...')
    ct_model.save(f)
    return f, ct_model


@try_export
def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
    # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
    assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
    try:
        import tensorrt as trt
    except Exception:
        if platform.system() == 'Linux':
            check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
        import tensorrt as trt

    if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
        grid = model.model[-1].anchor_grid
        model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
        export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12
        model.model[-1].anchor_grid = grid
    else:  # TensorRT >= 8
        check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
        export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12
    onnx = file.with_suffix('.onnx')

    LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
    assert onnx.exists(), f'failed to export ONNX file: {onnx}'
    f = file.with_suffix('.engine')  # TensorRT engine file
    logger = trt.Logger(trt.Logger.INFO)
    if verbose:
        logger.min_severity = trt.Logger.Severity.VERBOSE

    builder = trt.Builder(logger)
    config = builder.create_builder_config()
    config.max_workspace_size = workspace * 1 << 30
    # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)  # fix TRT 8.4 deprecation notice

    flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    network = builder.create_network(flag)
    parser = trt.OnnxParser(network, logger)
    if not parser.parse_from_file(str(onnx)):
        raise RuntimeError(f'failed to load ONNX file: {onnx}')

    inputs = [network.get_input(i) for i in range(network.num_inputs)]
    outputs = [network.get_output(i) for i in range(network.num_outputs)]
    for inp in inputs:
        LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
    for out in outputs:
        LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')

    if dynamic:
        if im.shape[0] <= 1:
            LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument')
        profile = builder.create_optimization_profile()
        for inp in inputs:
            profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
        config.add_optimization_profile(profile)

    LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}')
    if builder.platform_has_fast_fp16 and half:
        config.set_flag(trt.BuilderFlag.FP16)
    with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
        t.write(engine.serialize())
    return f, None


@try_export
def export_saved_model(model,
                       im,
                       file,
                       dynamic,
                       tf_nms=False,
                       agnostic_nms=False,
                       topk_per_class=100,
                       topk_all=100,
                       iou_thres=0.45,
                       conf_thres=0.25,
                       keras=False,
                       prefix=colorstr('TensorFlow SavedModel:')):
    # YOLOv5 TensorFlow SavedModel export
    try:
        import tensorflow as tf
    except Exception:
        check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}")
        import tensorflow as tf
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

    from models.tf import TFModel

    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
    f = str(file).replace('.pt', '_saved_model')
    batch_size, ch, *imgsz = list(im.shape)  # BCHW

    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
    im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow
    _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
    inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
    outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
    keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    keras_model.trainable = False
    keras_model.summary()
    if keras:
        keras_model.save(f, save_format='tf')
    else:
        spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
        m = tf.function(lambda x: keras_model(x))  # full model
        m = m.get_concrete_function(spec)
        frozen_func = convert_variables_to_constants_v2(m)
        tfm = tf.Module()
        tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])
        tfm.__call__(im)
        tf.saved_model.save(tfm,
                            f,
                            options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(
                                tf.__version__, '2.6') else tf.saved_model.SaveOptions())
    return f, keras_model


@try_export
def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
    # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
    import tensorflow as tf
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
    f = file.with_suffix('.pb')

    m = tf.function(lambda x: keras_model(x))  # full model
    m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
    frozen_func = convert_variables_to_constants_v2(m)
    frozen_func.graph.as_graph_def()
    tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
    return f, None


@try_export
def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
    # YOLOv5 TensorFlow Lite export
    import tensorflow as tf

    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
    batch_size, ch, *imgsz = list(im.shape)  # BCHW
    f = str(file).replace('.pt', '-fp16.tflite')

    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
    converter.target_spec.supported_types = [tf.float16]
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    if int8:
        from models.tf import representative_dataset_gen
        dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
        converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.target_spec.supported_types = []
        converter.inference_input_type = tf.uint8  # or tf.int8
        converter.inference_output_type = tf.uint8  # or tf.int8
        converter.experimental_new_quantizer = True
        f = str(file).replace('.pt', '-int8.tflite')
    if nms or agnostic_nms:
        converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)

    tflite_model = converter.convert()
    open(f, 'wb').write(tflite_model)
    return f, None


@try_export
def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
    # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
    cmd = 'edgetpu_compiler --version'
    help_url = 'https://coral.ai/docs/edgetpu/compiler/'
    assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
    if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0:
        LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
        sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0  # sudo installed on system
        for c in (
                'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
                'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
                'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
            subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
    ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]

    LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
    f = str(file).replace('.pt', '-int8_edgetpu.tflite')  # Edge TPU model
    f_tfl = str(file).replace('.pt', '-int8.tflite')  # TFLite model

    subprocess.run([
        'edgetpu_compiler',
        '-s',
        '-d',
        '-k',
        '10',
        '--out_dir',
        str(file.parent),
        f_tfl,], check=True)
    return f, None


@try_export
def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')):
    # YOLOv5 TensorFlow.js export
    check_requirements('tensorflowjs')
    import tensorflowjs as tfjs

    LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
    f = str(file).replace('.pt', '_web_model')  # js dir
    f_pb = file.with_suffix('.pb')  # *.pb path
    f_json = f'{f}/model.json'  # *.json path

    args = [
        'tensorflowjs_converter',
        '--input_format=tf_frozen_model',
        '--quantize_uint8' if int8 else '',
        '--output_node_names=Identity,Identity_1,Identity_2,Identity_3',
        str(f_pb),
        str(f),]
    subprocess.run([arg for arg in args if arg], check=True)

    json = Path(f_json).read_text()
    with open(f_json, 'w') as j:  # sort JSON Identity_* in ascending order
        subst = re.sub(
            r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
            r'"Identity.?.?": {"name": "Identity.?.?"}, '
            r'"Identity.?.?": {"name": "Identity.?.?"}, '
            r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
            r'"Identity_1": {"name": "Identity_1"}, '
            r'"Identity_2": {"name": "Identity_2"}, '
            r'"Identity_3": {"name": "Identity_3"}}}', json)
        j.write(subst)
    return f, None


def add_tflite_metadata(file, metadata, num_outputs):
    # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata
    with contextlib.suppress(ImportError):
        # check_requirements('tflite_support')
        from tflite_support import flatbuffers
        from tflite_support import metadata as _metadata
        from tflite_support import metadata_schema_py_generated as _metadata_fb

        tmp_file = Path('/tmp/meta.txt')
        with open(tmp_file, 'w') as meta_f:
            meta_f.write(str(metadata))

        model_meta = _metadata_fb.ModelMetadataT()
        label_file = _metadata_fb.AssociatedFileT()
        label_file.name = tmp_file.name
        model_meta.associatedFiles = [label_file]

        subgraph = _metadata_fb.SubGraphMetadataT()
        subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
        subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs
        model_meta.subgraphMetadata = [subgraph]

        b = flatbuffers.Builder(0)
        b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
        metadata_buf = b.Output()

        populator = _metadata.MetadataPopulator.with_model_file(file)
        populator.load_metadata_buffer(metadata_buf)
        populator.load_associated_files([str(tmp_file)])
        populator.populate()
        tmp_file.unlink()


@smart_inference_mode()
def run(
        data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=(640, 640),  # image (height, width)
        batch_size=1,  # batch size
        device='cpu',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        include=('torchscript', 'onnx'),  # include formats
        half=False,  # FP16 half-precision export
        inplace=False,  # set YOLOv5 Detect() inplace=True
        keras=False,  # use Keras
        optimize=False,  # TorchScript: optimize for mobile
        int8=False,  # CoreML/TF INT8 quantization
        dynamic=False,  # ONNX/TF/TensorRT: dynamic axes
        simplify=False,  # ONNX: simplify model
        opset=12,  # ONNX: opset version
        verbose=False,  # TensorRT: verbose log
        workspace=4,  # TensorRT: workspace size (GB)
        nms=False,  # TF: add NMS to model
        agnostic_nms=False,  # TF: add agnostic NMS to model
        topk_per_class=100,  # TF.js NMS: topk per class to keep
        topk_all=100,  # TF.js NMS: topk for all classes to keep
        iou_thres=0.45,  # TF.js NMS: IoU threshold
        conf_thres=0.25,  # TF.js NMS: confidence threshold
):
    t = time.time()
    include = [x.lower() for x in include]  # to lowercase
    fmts = tuple(export_formats()['Argument'][1:])  # --include arguments
    flags = [x in include for x in fmts]
    assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}'
    jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags  # export booleans
    file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights)  # PyTorch weights

    # Load PyTorch model
    device = select_device(device)
    if half:
        assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0'
        assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both'
    model = attempt_load(weights, device=device, inplace=True, fuse=True)  # load FP32 model

    # Checks
    imgsz *= 2 if len(imgsz) == 1 else 1  # expand
    if optimize:
        assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu'

    # Input
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz = [check_img_size(x, gs) for x in imgsz]  # verify img_size are gs-multiples
    im = torch.zeros(batch_size, 3, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection

    # Update model
    model.eval()
    for k, m in model.named_modules():
        if isinstance(m, Detect):
            m.inplace = inplace
            m.dynamic = dynamic
            m.export = True

    for _ in range(2):
        y = model(im)  # dry runs
    if half and not coreml:
        im, model = im.half(), model.half()  # to FP16
    shape = tuple((y[0] if isinstance(y, tuple) else y).shape)  # model output shape
    metadata = {'stride': int(max(model.stride)), 'names': model.names}  # model metadata
    LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")

    # Exports
    f = [''] * len(fmts)  # exported filenames
    warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning)  # suppress TracerWarning
    if jit:  # TorchScript
        f[0], _ = export_torchscript(model, im, file, optimize)
    if engine:  # TensorRT required before ONNX
        f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
    if onnx or xml:  # OpenVINO requires ONNX
        f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
    if xml:  # OpenVINO
        f[3], _ = export_openvino(file, metadata, half)
    if coreml:  # CoreML
        f[4], _ = export_coreml(model, im, file, int8, half)
    if any((saved_model, pb, tflite, edgetpu, tfjs)):  # TensorFlow formats
        assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
        assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.'
        f[5], s_model = export_saved_model(model.cpu(),
                                           im,
                                           file,
                                           dynamic,
                                           tf_nms=nms or agnostic_nms or tfjs,
                                           agnostic_nms=agnostic_nms or tfjs,
                                           topk_per_class=topk_per_class,
                                           topk_all=topk_all,
                                           iou_thres=iou_thres,
                                           conf_thres=conf_thres,
                                           keras=keras)
        if pb or tfjs:  # pb prerequisite to tfjs
            f[6], _ = export_pb(s_model, file)
        if tflite or edgetpu:
            f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
            if edgetpu:
                f[8], _ = export_edgetpu(file)
            add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs))
        if tfjs:
            f[9], _ = export_tfjs(file, int8)
    if paddle:  # PaddlePaddle
        f[10], _ = export_paddle(model, im, file, metadata)

    # Finish
    f = [str(x) for x in f if x]  # filter out '' and None
    if any(f):
        cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel))  # type
        det &= not seg  # segmentation models inherit from SegmentationModel(DetectionModel)
        dir = Path('segment' if seg else 'classify' if cls else '')
        h = '--half' if half else ''  # --half FP16 inference arg
        s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \
            '# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else ''
        LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
                    f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
                    f"\nDetect:          python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
                    f"\nValidate:        python {dir / 'val.py'} --weights {f[-1]} {h}"
                    f"\nPyTorch Hub:     model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')  {s}"
                    f'\nVisualize:       https://netron.app')
    return f  # return list of exported files/dirs


def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
    parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
    parser.add_argument('--keras', action='store_true', help='TF: use Keras')
    parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
    parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
    parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
    parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
    parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version')
    parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
    parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
    parser.add_argument('--nms', action='store_true', help='TF: add NMS to model')
    parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model')
    parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep')
    parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold')
    parser.add_argument(
        '--include',
        nargs='+',
        default=['torchscript'],
        help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle')
    opt = parser.parse_known_args()[0] if known else parser.parse_args()
    print_args(vars(opt))
    return opt


def main(opt):
    for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]):
        run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
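Editor's note: like detect.py, export.py exposes run() for programmatic use. A minimal sketch that exports a local checkpoint to two CPU-friendly formats (the weights path is illustrative, and the ONNX extras from the Requirements block above must be installed):

from export import run

# run() returns the list of exported files/dirs, per its final `return f`
files = run(
    weights='yolov5s.pt',              # illustrative local checkpoint
    include=('torchscript', 'onnx'),   # must match export_formats() arguments
    imgsz=(640, 640),
    device='cpu',
)
print(files)  # e.g. ['yolov5s.torchscript', 'yolov5s.onnx']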
@@ -0,0 +1,169 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5

Usage:
    import torch
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # official model
    model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s')  # from branch
    model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')  # custom/local model
    model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local')  # local repo
"""

import torch


def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """Creates or loads a YOLOv5 model

    Arguments:
        name (str): model name 'yolov5s' or path 'path/to/best.pt'
        pretrained (bool): load pretrained weights into the model
        channels (int): number of input channels
        classes (int): number of model classes
        autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
        verbose (bool): print all information to screen
        device (str, torch.device, None): device to use for model parameters

    Returns:
        YOLOv5 model
    """
    from pathlib import Path

    from models.common import AutoShape, DetectMultiBackend
    from models.experimental import attempt_load
    from models.yolo import ClassificationModel, DetectionModel, SegmentationModel
    from utils.downloads import attempt_download
    from utils.general import LOGGER, check_requirements, intersect_dicts, logging
    from utils.torch_utils import select_device

    if not verbose:
        LOGGER.setLevel(logging.WARNING)
    check_requirements(exclude=('opencv-python', 'tensorboard', 'thop'))
    name = Path(name)
    path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name  # checkpoint path
    try:
        device = select_device(device)
        if pretrained and channels == 3 and classes == 80:
            try:
                model = DetectMultiBackend(path, device=device, fuse=autoshape)  # detection model
                if autoshape:
                    if model.pt and isinstance(model.model, ClassificationModel):
                        LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. '
                                       'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).')
                    elif model.pt and isinstance(model.model, SegmentationModel):
                        LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. '
                                       'You will not be able to run inference with this model.')
                    else:
                        model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
            except Exception:
                model = attempt_load(path, device=device, fuse=False)  # arbitrary model
        else:
            cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0]  # model.yaml path
            model = DetectionModel(cfg, channels, classes)  # create model
            if pretrained:
                ckpt = torch.load(attempt_download(path), map_location=device)  # load
                csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
                csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors'])  # intersect
                model.load_state_dict(csd, strict=False)  # load
                if len(ckpt['model'].names) == classes:
                    model.names = ckpt['model'].names  # set class names attribute
        if not verbose:
            LOGGER.setLevel(logging.INFO)  # reset to default
        return model.to(device)

    except Exception as e:
        help_url = 'https://github.com/ultralytics/yolov5/issues/36'
        s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.'
        raise Exception(s) from e


def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None):
    # YOLOv5 custom or local model
    return _create(path, autoshape=autoshape, verbose=_verbose, device=device)


def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-nano model https://github.com/ultralytics/yolov5
    return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-small model https://github.com/ultralytics/yolov5
    return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-medium model https://github.com/ultralytics/yolov5
    return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-large model https://github.com/ultralytics/yolov5
    return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
    return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device)


if __name__ == '__main__':
    import argparse
    from pathlib import Path

    import numpy as np
    from PIL import Image

    from utils.general import cv2, print_args

    # Argparser
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='yolov5s', help='model name')
    opt = parser.parse_args()
    print_args(vars(opt))

    # Model
    model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)
    # model = custom(path='path/to/model.pt')  # custom

    # Images
    imgs = [
        'data/images/zidane.jpg',  # filename
        Path('data/images/zidane.jpg'),  # Path
        'https://ultralytics.com/images/zidane.jpg',  # URI
        cv2.imread('data/images/bus.jpg')[:, :, ::-1],  # OpenCV
        Image.open('data/images/bus.jpg'),  # PIL
        np.zeros((320, 640, 3))]  # numpy

    # Inference
    results = model(imgs, size=320)  # batched inference

    # Results
    results.print()
    results.save()
@@ -0,0 +1,870 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Common modules
"""

import ast
import contextlib
import json
import math
import platform
import warnings
import zipfile
from collections import OrderedDict, namedtuple
from copy import copy
from pathlib import Path
from urllib.parse import urlparse

import cv2
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp

from utils import TryExcept
from utils.dataloaders import exif_transpose, letterbox
from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
                           increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
                           xyxy2xywh, yaml_load)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import copy_attr, smart_inference_mode


def autopad(k, p=None, d=1):  # kernel, padding, dilation
    # Pad to 'same' shape outputs
    if d > 1:
        k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k]  # actual kernel-size
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
    return p
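
# Quick check of the 'same' padding rule in autopad() above: for k=3, d=2 the
# effective kernel is d*(k-1)+1 = 5, so the padding is 5//2 = 2 and a stride-1
# convolution preserves spatial size. Standalone sketch:
import torch
import torch.nn as nn

k, d = 3, 2
p = (d * (k - 1) + 1) // 2  # autopad(3, None, 2) -> 2
x = torch.randn(1, 8, 32, 32)
assert nn.Conv2d(8, 8, k, 1, p, dilation=d)(x).shape == x.shape  # 32x32 preserved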
class Conv(nn.Module):
    # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
    default_act = nn.SiLU()  # default activation

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def forward_fuse(self, x):
        return self.act(self.conv(x))
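
# Why forward_fuse() above skips self.bn: at inference time BatchNorm can be
# folded into the preceding convolution's weights and bias. A standalone
# equivalence check with torch's own fusion helper (YOLOv5's folding lives in
# utils/torch_utils.py as fuse_conv_and_bn):
import torch
import torch.nn as nn
from torch.nn.utils.fusion import fuse_conv_bn_eval

conv = nn.Conv2d(3, 16, 3, 1, 1, bias=False).eval()
bn = nn.BatchNorm2d(16).eval()
fused = fuse_conv_bn_eval(conv, bn)  # single Conv2d with BN folded in
x = torch.randn(1, 3, 32, 32)
assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)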
class DWConv(Conv):
    # Depth-wise convolution
    def __init__(self, c1, c2, k=1, s=1, d=1, act=True):  # ch_in, ch_out, kernel, stride, dilation, activation
        super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)


class DWConvTranspose2d(nn.ConvTranspose2d):
    # Depth-wise transpose convolution
    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):  # ch_in, ch_out, kernel, stride, padding, padding_out
        super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))


class TransformerLayer(nn.Module):
    # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
    def __init__(self, c, num_heads):
        super().__init__()
        self.q = nn.Linear(c, c, bias=False)
        self.k = nn.Linear(c, c, bias=False)
        self.v = nn.Linear(c, c, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
        self.fc1 = nn.Linear(c, c, bias=False)
        self.fc2 = nn.Linear(c, c, bias=False)

    def forward(self, x):
        x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
        x = self.fc2(self.fc1(x)) + x
        return x


class TransformerBlock(nn.Module):
    # Vision Transformer https://arxiv.org/abs/2010.11929
    def __init__(self, c1, c2, num_heads, num_layers):
        super().__init__()
        self.conv = None
        if c1 != c2:
            self.conv = Conv(c1, c2)
        self.linear = nn.Linear(c2, c2)  # learnable position embedding
        self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
        self.c2 = c2

    def forward(self, x):
        if self.conv is not None:
            x = self.conv(x)
        b, _, w, h = x.shape
        p = x.flatten(2).permute(2, 0, 1)
        return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)


class Bottleneck(nn.Module):
    # Standard bottleneck
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class BottleneckCSP(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))


class CrossConv(nn.Module):
    # Cross Convolution Downsample
    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, (1, k), (1, s))
        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class C3(nn.Module):
    # CSP Bottleneck with 3 convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))


class C3x(C3):
    # C3 module with cross-convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))


class C3TR(C3):
    # C3 module with TransformerBlock()
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = TransformerBlock(c_, c_, 4, n)


class C3SPP(C3):
    # C3 module with SPP()
    def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = SPP(c_, c_, k)


class C3Ghost(C3):
    # C3 module with GhostBottleneck()
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)  # hidden channels
        self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))


class SPP(nn.Module):
    # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
    def __init__(self, c1, c2, k=(5, 9, 13)):
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        x = self.cv1(x)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
            return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class SPPF(nn.Module):
    # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
    def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * 4, c2, 1, 1)
        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        x = self.cv1(x)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
            y1 = self.m(x)
            y2 = self.m(y1)
            return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
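
# Numeric check of the SPPF comment above: cascading one stride-1 5x5 max-pool
# reproduces SPP's 9x9 and 13x13 pools exactly (a max of maxes over nested
# windows), which is why SPPF only needs a single k=5 layer. Standalone:
import torch
import torch.nn as nn

x = torch.randn(1, 4, 32, 32)
m5, m9, m13 = (nn.MaxPool2d(k, 1, k // 2) for k in (5, 9, 13))
y1 = m5(x)
y2 = m5(y1)
assert torch.equal(m9(x), y2)       # two 5x5 pools == one 9x9 pool
assert torch.equal(m13(x), m5(y2))  # three 5x5 pools == one 13x13 pool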
class Focus(nn.Module):
    # Focus wh information into c-space
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
        # self.contract = Contract(gain=2)

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
        # return self.conv(self.contract(x))
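
# Shape check for the Focus slicing above: the four strided views re-pack
# every input pixel, turning (b, c, h, w) into (b, 4c, h/2, w/2) with no
# information loss. Standalone:
import torch

x = torch.arange(2 * 3 * 4 * 4, dtype=torch.float32).view(2, 3, 4, 4)
y = torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)
assert y.shape == (2, 12, 2, 2)
assert torch.equal(y.sum(), x.sum())  # same pixels, just rearranged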
class GhostConv(nn.Module):
    # Ghost Convolution https://github.com/huawei-noah/ghostnet
    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
        super().__init__()
        c_ = c2 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)

    def forward(self, x):
        y = self.cv1(x)
        return torch.cat((y, self.cv2(y)), 1)


class GhostBottleneck(nn.Module):
    # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
    def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride
        super().__init__()
        c_ = c2 // 2
        self.conv = nn.Sequential(
            GhostConv(c1, c_, 1, 1),  # pw
            DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
            GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
        self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1,
                                                                            act=False)) if s == 2 else nn.Identity()

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)


class Contract(nn.Module):
    # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        b, c, h, w = x.size()  # assert h % s == 0 and w % s == 0, 'Indivisible gain'
        s = self.gain
        x = x.view(b, c, h // s, s, w // s, s)  # x(1,64,40,2,40,2)
        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()  # x(1,2,2,64,40,40)
        return x.view(b, c * s * s, h // s, w // s)  # x(1,256,40,40)


class Expand(nn.Module):
    # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        b, c, h, w = x.size()  # assert c % s ** 2 == 0, 'Indivisible gain'
        s = self.gain
        x = x.view(b, s, s, c // s ** 2, h, w)  # x(1,2,2,16,80,80)
        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()  # x(1,16,80,2,80,2)
        return x.view(b, c // s ** 2, h * s, w * s)  # x(1,16,160,160)
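
# Contract and Expand above are exact inverses for the same gain; a round trip
# restores the input bit-for-bit. Standalone check of the commented shapes:
import torch

x = torch.randn(1, 64, 80, 80)
s = 2
c = x.view(1, 64, 40, s, 40, s).permute(0, 3, 5, 1, 2, 4).contiguous().view(1, 256, 40, 40)  # Contract
e = c.view(1, s, s, 64, 40, 40).permute(0, 3, 4, 1, 5, 2).contiguous().view(1, 64, 80, 80)  # Expand
assert torch.equal(x, e)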
class Concat(nn.Module):
    # Concatenate a list of tensors along dimension
    def __init__(self, dimension=1):
        super().__init__()
        self.d = dimension

    def forward(self, x):
        return torch.cat(x, self.d)


class DetectMultiBackend(nn.Module):
    # YOLOv5 MultiBackend class for python inference on various backends
    def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):
        # Usage:
        #   PyTorch:               weights = *.pt
        #   TorchScript:                     *.torchscript
        #   ONNX Runtime:                    *.onnx
        #   ONNX OpenCV DNN:                 *.onnx --dnn
        #   OpenVINO:                        *_openvino_model
        #   CoreML:                          *.mlmodel
        #   TensorRT:                        *.engine
        #   TensorFlow SavedModel:           *_saved_model
        #   TensorFlow GraphDef:             *.pb
        #   TensorFlow Lite:                 *.tflite
        #   TensorFlow Edge TPU:             *_edgetpu.tflite
        #   PaddlePaddle:                    *_paddle_model
        from models.experimental import attempt_download, attempt_load  # scoped to avoid circular import

        super().__init__()
        w = str(weights[0] if isinstance(weights, list) else weights)
        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
        fp16 &= pt or jit or onnx or engine  # FP16
        nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCHW)
        stride = 32  # default stride
        cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
        if not (pt or triton):
            w = attempt_download(w)  # download if not local

        if pt:  # PyTorch
            model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
            stride = max(int(model.stride.max()), 32)  # model stride
            names = model.module.names if hasattr(model, 'module') else model.names  # get class names
            model.half() if fp16 else model.float()
            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
        elif jit:  # TorchScript
            LOGGER.info(f'Loading {w} for TorchScript inference...')
            extra_files = {'config.txt': ''}  # model metadata
            model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
            model.half() if fp16 else model.float()
            if extra_files['config.txt']:  # load metadata dict
                d = json.loads(extra_files['config.txt'],
                               object_hook=lambda d: {int(k) if k.isdigit() else k: v
                                                      for k, v in d.items()})
                stride, names = int(d['stride']), d['names']
        elif dnn:  # ONNX OpenCV DNN
            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
            check_requirements('opencv-python>=4.5.4')
            net = cv2.dnn.readNetFromONNX(w)
        elif onnx:  # ONNX Runtime
            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
            import onnxruntime
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
            session = onnxruntime.InferenceSession(w, providers=providers)
            output_names = [x.name for x in session.get_outputs()]
            meta = session.get_modelmeta().custom_metadata_map  # metadata
            if 'stride' in meta:
                stride, names = int(meta['stride']), eval(meta['names'])
        elif xml:  # OpenVINO
            LOGGER.info(f'Loading {w} for OpenVINO inference...')
            check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
            from openvino.runtime import Core, Layout, get_batch
            ie = Core()
            if not Path(w).is_file():  # if not *.xml
                w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
            if network.get_parameters()[0].get_layout().empty:
                network.get_parameters()[0].set_layout(Layout('NCHW'))
            batch_dim = get_batch(network)
            if batch_dim.is_static:
                batch_size = batch_dim.get_length()
            executable_network = ie.compile_model(network, device_name='CPU')  # device_name="MYRIAD" for Intel NCS2
            stride, names = self._load_metadata(Path(w).with_suffix('.yaml'))  # load metadata
        elif engine:  # TensorRT
            LOGGER.info(f'Loading {w} for TensorRT inference...')
            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
            check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
            if device.type == 'cpu':
                device = torch.device('cuda:0')
            Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
            logger = trt.Logger(trt.Logger.INFO)
            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                model = runtime.deserialize_cuda_engine(f.read())
            context = model.create_execution_context()
            bindings = OrderedDict()
            output_names = []
            fp16 = False  # default updated below
            dynamic = False
            for i in range(model.num_bindings):
                name = model.get_binding_name(i)
                dtype = trt.nptype(model.get_binding_dtype(i))
                if model.binding_is_input(i):
                    if -1 in tuple(model.get_binding_shape(i)):  # dynamic
                        dynamic = True
                        context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
                    if dtype == np.float16:
                        fp16 = True
                else:  # output
                    output_names.append(name)
                shape = tuple(context.get_binding_shape(i))
                im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
                bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
            binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
            batch_size = bindings['images'].shape[0]  # if dynamic, this is instead max batch size
        elif coreml:  # CoreML
            LOGGER.info(f'Loading {w} for CoreML inference...')
            import coremltools as ct
            model = ct.models.MLModel(w)
        elif saved_model:  # TF SavedModel
            LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
            import tensorflow as tf
            keras = False  # assume TF1 saved_model
            model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
        elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
            LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
            import tensorflow as tf

            def wrap_frozen_graph(gd, inputs, outputs):
                x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), [])  # wrapped
                ge = x.graph.as_graph_element
                return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))

            def gd_outputs(gd):
                name_list, input_list = [], []
                for node in gd.node:  # tensorflow.core.framework.node_def_pb2.NodeDef
                    name_list.append(node.name)
                    input_list.extend(node.input)
                return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))

            gd = tf.Graph().as_graph_def()  # TF GraphDef
            with open(w, 'rb') as f:
                gd.ParseFromString(f.read())
            frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))
        elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
            try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
                from tflite_runtime.interpreter import Interpreter, load_delegate
            except ImportError:
                import tensorflow as tf
                Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
            if edgetpu:  # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
                LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
                delegate = {
                    'Linux': 'libedgetpu.so.1',
                    'Darwin': 'libedgetpu.1.dylib',
                    'Windows': 'edgetpu.dll'}[platform.system()]
                interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
            else:  # TFLite
                LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
                interpreter = Interpreter(model_path=w)  # load TFLite model
            interpreter.allocate_tensors()  # allocate
            input_details = interpreter.get_input_details()  # inputs
            output_details = interpreter.get_output_details()  # outputs
            # load metadata
            with contextlib.suppress(zipfile.BadZipFile):
                with zipfile.ZipFile(w, 'r') as model:
                    meta_file = model.namelist()[0]
                    meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))
                    stride, names = int(meta['stride']), meta['names']
        elif tfjs:  # TF.js
            raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
        elif paddle:  # PaddlePaddle
            LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
            check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
            import paddle.inference as pdi
            if not Path(w).is_file():  # if not *.pdmodel
                w = next(Path(w).rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
            weights = Path(w).with_suffix('.pdiparams')
            config = pdi.Config(str(w), str(weights))
            if cuda:
                config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
            predictor = pdi.create_predictor(config)
            input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
            output_names = predictor.get_output_names()
        elif triton:  # NVIDIA Triton Inference Server
            LOGGER.info(f'Using {w} as Triton Inference Server...')
            check_requirements('tritonclient[all]')
            from utils.triton import TritonRemoteModel
            model = TritonRemoteModel(url=w)
            nhwc = model.runtime.startswith('tensorflow')
        else:
            raise NotImplementedError(f'ERROR: {w} is not a supported format')

        # class names
        if 'names' not in locals():
            names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
        if names[0] == 'n01440764' and len(names) == 1000:  # ImageNet
            names = yaml_load(ROOT / 'data/ImageNet.yaml')['names']  # human-readable names

        self.__dict__.update(locals())  # assign all variables to self

    def forward(self, im, augment=False, visualize=False):
        # YOLOv5 MultiBackend inference
        b, ch, h, w = im.shape  # batch, channel, height, width
        if self.fp16 and im.dtype != torch.float16:
            im = im.half()  # to FP16
        if self.nhwc:
            im = im.permute(0, 2, 3, 1)  # torch BCHW to numpy BHWC shape(1,320,192,3)

        if self.pt:  # PyTorch
            y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
        elif self.jit:  # TorchScript
            y = self.model(im)
        elif self.dnn:  # ONNX OpenCV DNN
            im = im.cpu().numpy()  # torch to numpy
            self.net.setInput(im)
            y = self.net.forward()
        elif self.onnx:  # ONNX Runtime
            im = im.cpu().numpy()  # torch to numpy
            y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
        elif self.xml:  # OpenVINO
            im = im.cpu().numpy()  # FP32
            y = list(self.executable_network([im]).values())
        elif self.engine:  # TensorRT
            if self.dynamic and im.shape != self.bindings['images'].shape:
                i = self.model.get_binding_index('images')
                self.context.set_binding_shape(i, im.shape)  # reshape if dynamic
                self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
                for name in self.output_names:
                    i = self.model.get_binding_index(name)
                    self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
            s = self.bindings['images'].shape
            assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
            self.binding_addrs['images'] = int(im.data_ptr())
            self.context.execute_v2(list(self.binding_addrs.values()))
            y = [self.bindings[x].data for x in sorted(self.output_names)]
        elif self.coreml:  # CoreML
            im = im.cpu().numpy()
            im = Image.fromarray((im[0] * 255).astype('uint8'))
            # im = im.resize((192, 320), Image.ANTIALIAS)
            y = self.model.predict({'image': im})  # coordinates are xywh normalized
            if 'confidence' in y:
                box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
                conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float32)
                y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
            else:
                y = list(reversed(y.values()))  # reversed for segmentation models (pred, proto)
        elif self.paddle:  # PaddlePaddle
            im = im.cpu().numpy().astype(np.float32)
            self.input_handle.copy_from_cpu(im)
            self.predictor.run()
            y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
        elif self.triton:  # NVIDIA Triton Inference Server
            y = self.model(im)
        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
            im = im.cpu().numpy()
            if self.saved_model:  # SavedModel
                y = self.model(im, training=False) if self.keras else self.model(im)
            elif self.pb:  # GraphDef
                y = self.frozen_func(x=self.tf.constant(im))
            else:  # Lite or Edge TPU
                input = self.input_details[0]
                int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model
                if int8:
                    scale, zero_point = input['quantization']
                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale
                self.interpreter.set_tensor(input['index'], im)
                self.interpreter.invoke()
                y = []
                for output in self.output_details:
                    x = self.interpreter.get_tensor(output['index'])
                    if int8:
                        scale, zero_point = output['quantization']
                        x = (x.astype(np.float32) - zero_point) * scale  # re-scale
                    y.append(x)
            y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
            y[0][..., :4] *= [w, h, w, h]  # xywh normalized to pixels

        if isinstance(y, (list, tuple)):
            return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
        else:
            return self.from_numpy(y)

    def from_numpy(self, x):
        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x

    def warmup(self, imgsz=(1, 3, 640, 640)):
        # Warmup model by running inference once
        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
        if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
            im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
            for _ in range(2 if self.jit else 1):
                self.forward(im)  # warmup

    @staticmethod
    def _model_type(p='path/to/model.pt'):
        # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
        # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
        from export import export_formats
        from utils.downloads import is_url
        sf = list(export_formats().Suffix)  # export suffixes
        if not is_url(p, check=False):
            check_suffix(p, sf)  # checks
        url = urlparse(p)  # if url may be Triton inference server
        types = [s in Path(p).name for s in sf]
        types[8] &= not types[9]  # tflite &= not edgetpu
        triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])
        return types + [triton]

    @staticmethod
    def _load_metadata(f=Path('path/to/meta.yaml')):
        # Load metadata from meta.yaml if it exists
        if f.exists():
            d = yaml_load(f)
            return d['stride'], d['names']  # assign stride, names
        return None, None
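
# Minimal usage sketch for DetectMultiBackend (assumption: run inside a YOLOv5
# checkout with yolov5s.pt available; the path is illustrative). The backend
# is chosen from the weights suffix by _model_type() above:
#
#   import torch
#   from models.common import DetectMultiBackend
#
#   model = DetectMultiBackend('yolov5s.pt', device=torch.device('cpu'))
#   model.warmup(imgsz=(1, 3, 640, 640))   # no-op on CPU unless Triton
#   im = torch.zeros(1, 3, 640, 640)       # BCHW float, 0-1
#   pred = model(im)                       # raw predictions, NMS not applied
#   print(model.stride, len(model.names))  # e.g. 32, 80 for COCO weights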
class AutoShape(nn.Module):
    # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    agnostic = False  # NMS class-agnostic
    multi_label = False  # NMS multiple labels per box
    classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
    max_det = 1000  # maximum number of detections per image
    amp = False  # Automatic Mixed Precision (AMP) inference

    def __init__(self, model, verbose=True):
        super().__init__()
        if verbose:
            LOGGER.info('Adding AutoShape... ')
        copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=())  # copy attributes
        self.dmb = isinstance(model, DetectMultiBackend)  # DetectMultiBackend() instance
        self.pt = not self.dmb or model.pt  # PyTorch model
        self.model = model.eval()
        if self.pt:
            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
            m.inplace = False  # Detect.inplace=False for safe multithread inference
            m.export = True  # do not output loss values

    def _apply(self, fn):
        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
        self = super()._apply(fn)
        if self.pt:
            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
            m.stride = fn(m.stride)
            m.grid = list(map(fn, m.grid))
            if isinstance(m.anchor_grid, list):
                m.anchor_grid = list(map(fn, m.anchor_grid))
        return self

    @smart_inference_mode()
    def forward(self, ims, size=640, augment=False, profile=False):
        # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
        #   file:      ims = 'data/images/zidane.jpg'  # str or PosixPath
        #   URI:           = 'https://ultralytics.com/images/zidane.jpg'
        #   OpenCV:        = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
        #   PIL:           = Image.open('image.jpg') or ImageGrab.grab()  # HWC x(640,1280,3)
        #   numpy:         = np.zeros((640,1280,3))  # HWC
        #   torch:         = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
        #   multiple:      = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images

        dt = (Profile(), Profile(), Profile())
        with dt[0]:
            if isinstance(size, int):  # expand
                size = (size, size)
            p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device)  # param
            autocast = self.amp and (p.device.type != 'cpu')  # Automatic Mixed Precision (AMP) inference
            if isinstance(ims, torch.Tensor):  # torch
                with amp.autocast(autocast):
                    return self.model(ims.to(p.device).type_as(p), augment=augment)  # inference

            # Pre-process
            n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims])  # number, list of images
            shape0, shape1, files = [], [], []  # image and inference shapes, filenames
            for i, im in enumerate(ims):
                f = f'image{i}'  # filename
                if isinstance(im, (str, Path)):  # filename or uri
                    im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
                    im = np.asarray(exif_transpose(im))
                elif isinstance(im, Image.Image):  # PIL Image
                    im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
                files.append(Path(f).with_suffix('.jpg').name)
                if im.shape[0] < 5:  # image in CHW
                    im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
                im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)  # enforce 3ch input
                s = im.shape[:2]  # HWC
                shape0.append(s)  # image shape
                g = max(size) / max(s)  # gain
                shape1.append([int(y * g) for y in s])
                ims[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
            shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)]  # inf shape
            x = [letterbox(im, shape1, auto=False)[0] for im in ims]  # pad
            x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
            x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32

        with amp.autocast(autocast):
            # Inference
            with dt[1]:
                y = self.model(x, augment=augment)  # forward

            # Post-process
            with dt[2]:
                y = non_max_suppression(y if self.dmb else y[0],
                                        self.conf,
                                        self.iou,
                                        self.classes,
                                        self.agnostic,
                                        self.multi_label,
                                        max_det=self.max_det)  # NMS
                for i in range(n):
                    scale_boxes(shape1, y[i][:, :4], shape0[i])

        return Detections(ims, y, files, dt, self.names, x.shape)
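
# Worked example of the AutoShape pre-processing arithmetic above for a
# 720x1280 image with size=640 and stride=32 (standalone; mirrors the gain,
# shape1 and make_divisible steps):
import math

s = (720, 1280)  # input shape (h, w)
size = (640, 640)
g = max(size) / max(s)  # gain = 0.5
shape1 = [int(y * g) for y in s]  # -> [360, 640]
stride = 32
inf_shape = [math.ceil(x / stride) * stride for x in shape1]  # make_divisible -> [384, 640]
assert inf_shape == [384, 640]  # letterbox pads 360 up to 384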
class Detections:
    # YOLOv5 detections class for inference results
    def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
        super().__init__()
        d = pred[0].device  # device
        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims]  # normalizations
        self.ims = ims  # list of images as numpy arrays
        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
        self.names = names  # class names
        self.files = files  # image filenames
        self.times = times  # profiling times
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
        self.n = len(self.pred)  # number of images (batch size)
        self.t = tuple(x.t / self.n * 1E3 for x in times)  # timestamps (ms)
        self.s = tuple(shape)  # inference BCHW shape

    def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
        s, crops = '', []
        for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
            s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # string
            if pred.shape[0]:
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()  # detections per class
                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
                s = s.rstrip(', ')
                if show or save or render or crop:
                    annotator = Annotator(im, example=str(self.names))
                    for *box, conf, cls in reversed(pred):  # xyxy, confidence, class
                        label = f'{self.names[int(cls)]} {conf:.2f}'
                        if crop:
                            file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
                            crops.append({
                                'box': box,
                                'conf': conf,
                                'cls': cls,
                                'label': label,
                                'im': save_one_box(box, im, file=file, save=save)})
                        else:  # all others
                            annotator.box_label(box, label if labels else '', color=colors(cls))
                    im = annotator.im
            else:
                s += '(no detections)'

            im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im  # from np
            if show:
                if is_jupyter():
                    from IPython.display import display
                    display(im)
                else:
                    im.show(self.files[i])
            if save:
                f = self.files[i]
                im.save(save_dir / f)  # save
                if i == self.n - 1:
                    LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
            if render:
                self.ims[i] = np.asarray(im)
        if pprint:
            s = s.lstrip('\n')
            return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t
        if crop:
            if save:
                LOGGER.info(f'Saved results to {save_dir}\n')
            return crops

    @TryExcept('Showing images is not supported in this environment')
    def show(self, labels=True):
        self._run(show=True, labels=labels)  # show results

    def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
        save_dir = increment_path(save_dir, exist_ok, mkdir=True)  # increment save_dir
        self._run(save=True, labels=labels, save_dir=save_dir)  # save results

    def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
        save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
        return self._run(crop=True, save=save, save_dir=save_dir)  # crop results

    def render(self, labels=True):
        self._run(render=True, labels=labels)  # render results
        return self.ims

    def pandas(self):
        # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
        new = copy(self)  # return copy
        ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'  # xyxy columns
        cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name'  # xywh columns
        for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
            a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]  # update
            setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
        return new

    def tolist(self):
        # return a list of Detections objects, i.e. 'for result in results.tolist():'
        r = range(self.n)  # iterable
        x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
        # for d in x:
        #     for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
        #         setattr(d, k, getattr(d, k)[0])  # pop out of list
        return x

    def print(self):
        LOGGER.info(self.__str__())

    def __len__(self):  # override len(results)
        return self.n

    def __str__(self):  # override print(results)
        return self._run(pprint=True)  # print results

    def __repr__(self):
        return f'YOLOv5 {self.__class__} instance\n' + self.__str__()
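
# Typical consumption of a Detections object (assumption: `model` was loaded
# via torch.hub as in hubconf.py above; paths and class names illustrative):
#
#   results = model('data/images/zidane.jpg')
#   df = results.pandas().xyxy[0]        # one DataFrame per image
#   people = df[df['name'] == 'person']  # filter rows by the 'name' column
#   for r in results.tolist():           # per-image Detections objects
#       r.print()
#   crops = results.crop(save=False)     # list of dicts with an 'im' array each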
class Proto(nn.Module):
    # YOLOv5 mask Proto module for segmentation models
    def __init__(self, c1, c_=256, c2=32):  # ch_in, number of protos, number of masks
        super().__init__()
        self.cv1 = Conv(c1, c_, k=3)
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.cv2 = Conv(c_, c_, k=3)
        self.cv3 = Conv(c_, c2)

    def forward(self, x):
        return self.cv3(self.cv2(self.upsample(self.cv1(x))))


class Classify(nn.Module):
    # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
    def __init__(self,
                 c1,
                 c2,
                 k=1,
                 s=1,
                 p=None,
                 g=1,
                 dropout_p=0.0):  # ch_in, ch_out, kernel, stride, padding, groups, dropout probability
        super().__init__()
        c_ = 1280  # efficientnet_b0 size
        self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)
        self.drop = nn.Dropout(p=dropout_p, inplace=True)
        self.linear = nn.Linear(c_, c2)  # to x(b,c2)

    def forward(self, x):
        if isinstance(x, list):
            x = torch.cat(x, 1)
        return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
@@ -0,0 +1,111 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Experimental modules
"""
import math

import numpy as np
import torch
import torch.nn as nn

from utils.downloads import attempt_download


class Sum(nn.Module):
    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
    def __init__(self, n, weight=False):  # n: number of inputs
        super().__init__()
        self.weight = weight  # apply weights boolean
        self.iter = range(n - 1)  # iter object
        if weight:
            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights

    def forward(self, x):
        y = x[0]  # no weight
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            for i in self.iter:
                y = y + x[i + 1] * w[i]
        else:
            for i in self.iter:
                y = y + x[i + 1]
        return y


class MixConv2d(nn.Module):
    # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
        super().__init__()
        n = len(k)  # number of convolutions
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * n
            a = np.eye(n + 1, n, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b

        self.m = nn.ModuleList([
            nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU()

    def forward(self, x):
        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
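
# How MixConv2d splits its c2 output channels across kernel sizes when
# equal_ch=True: torch.linspace buckets the channel indices evenly. Standalone
# check for c2=64 over kernels (1, 3):
import torch

n, c2 = 2, 64
i = torch.linspace(0, n - 1E-6, c2).floor()
assert [(i == g).sum().item() for g in range(n)] == [32, 32]  # half per kernel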
class Ensemble(nn.ModuleList):
    # Ensemble of models
    def __init__(self):
        super().__init__()

    def forward(self, x, augment=False, profile=False, visualize=False):
        y = [module(x, augment, profile, visualize)[0] for module in self]
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.stack(y).mean(0)  # mean ensemble
        y = torch.cat(y, 1)  # nms ensemble
        return y, None  # inference, train output


def attempt_load(weights, device=None, inplace=True, fuse=True):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    from models.yolo import Detect, Model

    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model

        # Model compatibility updates
        if not hasattr(ckpt, 'stride'):
            ckpt.stride = torch.tensor([32.])
        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict

        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode

    # Module compatibility updates
    for m in model.modules():
        t = type(m)
        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
            m.inplace = inplace  # torch 1.7.0 compatibility
            if t is Detect and not isinstance(m.anchor_grid, list):
                delattr(m, 'anchor_grid')
                setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

    # Return model
    if len(model) == 1:
        return model[-1]

    # Return detection ensemble
    print(f'Ensemble created with {weights}\n')
    for k in 'names', 'nc', 'yaml':
        setattr(model, k, getattr(model[0], k))
    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
    return model
@@ -0,0 +1,608 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
TensorFlow, Keras and TFLite versions of YOLOv5
Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127

Usage:
    $ python models/tf.py --weights yolov5s.pt

Export:
    $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
"""

import argparse
import sys
from copy import deepcopy
from pathlib import Path

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
# ROOT = ROOT.relative_to(Path.cwd())  # relative

import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
from tensorflow import keras

from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv,
                           DWConvTranspose2d, Focus, autopad)
from models.experimental import MixConv2d, attempt_load
from models.yolo import Detect, Segment
from utils.activations import SiLU
from utils.general import LOGGER, make_divisible, print_args


class TFBN(keras.layers.Layer):
    # TensorFlow BatchNormalization wrapper
    def __init__(self, w=None):
        super().__init__()
        self.bn = keras.layers.BatchNormalization(
            beta_initializer=keras.initializers.Constant(w.bias.numpy()),
            gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
            moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
            moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
            epsilon=w.eps)

    def call(self, inputs):
        return self.bn(inputs)


class TFPad(keras.layers.Layer):
    # Pad inputs in spatial dimensions 1 and 2
    def __init__(self, pad):
        super().__init__()
        if isinstance(pad, int):
            self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
        else:  # tuple/list
            self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]])

    def call(self, inputs):
        return tf.pad(inputs, self.pad, mode='constant', constant_values=0)
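
# Why TFPad exists: for stride-2 convolutions TensorFlow's 'SAME' pads
# asymmetrically (extra row/column bottom-right), while PyTorch pads
# symmetrically, so TFConv below applies explicit symmetric padding plus
# 'VALID'. Standalone shape check (values are illustrative):
import tensorflow as tf

x = tf.random.normal((1, 8, 8, 3))  # BHWC input
pad = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])  # symmetric, like torch p=1
conv = tf.keras.layers.Conv2D(4, 3, strides=2, padding='VALID')
y = conv(tf.pad(x, pad))  # (8 + 2 - 3) // 2 + 1 = 4
assert y.shape == (1, 4, 4, 4)  # matches nn.Conv2d(3, 4, 3, 2, 1) output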
class TFConv(keras.layers.Layer):
|
||||
# Standard convolution
|
||||
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
|
||||
# ch_in, ch_out, weights, kernel, stride, padding, groups
|
||||
super().__init__()
|
||||
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
|
||||
# TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
|
||||
# see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
|
||||
conv = keras.layers.Conv2D(
|
||||
filters=c2,
|
||||
kernel_size=k,
|
||||
strides=s,
|
||||
padding='SAME' if s == 1 else 'VALID',
|
||||
use_bias=not hasattr(w, 'bn'),
|
||||
kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
|
||||
bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
|
||||
self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
|
||||
self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
|
||||
self.act = activations(w.act) if act else tf.identity
|
||||
|
||||
def call(self, inputs):
|
||||
return self.act(self.bn(self.conv(inputs)))
|
||||
|
||||
|
||||
class TFDWConv(keras.layers.Layer):
    # Depthwise convolution
    def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
        # ch_in, ch_out, weights, kernel, stride, padding, groups
        super().__init__()
        assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels'
        conv = keras.layers.DepthwiseConv2D(
            kernel_size=k,
            depth_multiplier=c2 // c1,
            strides=s,
            padding='SAME' if s == 1 else 'VALID',
            use_bias=not hasattr(w, 'bn'),
            depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
            bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
        self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
        self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
        self.act = activations(w.act) if act else tf.identity

    def call(self, inputs):
        return self.act(self.bn(self.conv(inputs)))


class TFDWConvTranspose2d(keras.layers.Layer):
    # Depthwise ConvTranspose2d
    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
        # ch_in, ch_out, weights, kernel, stride, padding, groups
        super().__init__()
        assert c1 == c2, f'TFDWConvTranspose2d() output={c2} must be equal to input={c1} channels'
        assert k == 4 and p1 == 1, 'TFDWConvTranspose2d() only valid for k=4 and p1=1'
        weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()
        self.c1 = c1
        self.conv = [
            keras.layers.Conv2DTranspose(filters=1,
                                         kernel_size=k,
                                         strides=s,
                                         padding='VALID',
                                         output_padding=p2,
                                         use_bias=True,
                                         kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]),
                                         bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)]

    def call(self, inputs):
        return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1]


class TFFocus(keras.layers.Layer):
    # Focus wh information into c-space
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
        # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)

    def call(self, inputs):  # x(b,w,h,c) -> y(b,w/2,h/2,4c)
        # inputs = inputs / 255  # normalize 0-255 to 0-1
        inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]]
        return self.conv(tf.concat(inputs, 3))


class TFBottleneck(keras.layers.Layer):
    # Standard bottleneck
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):  # ch_in, ch_out, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
        self.add = shortcut and c1 == c2

    def call(self, inputs):
        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))


class TFCrossConv(keras.layers.Layer):
    # Cross Convolution
    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)
        self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)
        self.add = shortcut and c1 == c2

    def call(self, inputs):
        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))


class TFConv2d(keras.layers.Layer):
    # Substitution for PyTorch nn.Conv2D
    def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
        super().__init__()
        assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
        self.conv = keras.layers.Conv2D(filters=c2,
                                        kernel_size=k,
                                        strides=s,
                                        padding='VALID',
                                        use_bias=bias,
                                        kernel_initializer=keras.initializers.Constant(
                                            w.weight.permute(2, 3, 1, 0).numpy()),
                                        bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None)

    def call(self, inputs):
        return self.conv(inputs)


class TFBottleneckCSP(keras.layers.Layer):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
        # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
        self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
        self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)
        self.bn = TFBN(w.bn)
        self.act = lambda x: keras.activations.swish(x)
        self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])

    def call(self, inputs):
        y1 = self.cv3(self.m(self.cv1(inputs)))
        y2 = self.cv2(inputs)
        return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))


class TFC3(keras.layers.Layer):
    # CSP Bottleneck with 3 convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
        # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
        self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
        self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])

    def call(self, inputs):
        return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))


class TFC3x(keras.layers.Layer):
    # C3 module with cross-convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
        # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
        self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
        self.m = keras.Sequential([
            TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)])

    def call(self, inputs):
        return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))


class TFSPP(keras.layers.Layer):
    # Spatial pyramid pooling layer used in YOLOv3-SPP
    def __init__(self, c1, c2, k=(5, 9, 13), w=None):
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
        self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]

    def call(self, inputs):
        x = self.cv1(inputs)
        return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))


class TFSPPF(keras.layers.Layer):
    # Spatial pyramid pooling-Fast layer
    def __init__(self, c1, c2, k=5, w=None):
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
        self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')

    def call(self, inputs):
        x = self.cv1(inputs)
        y1 = self.m(x)
        y2 = self.m(y1)
        return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))
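
TFSPPF chains one 5x5 max-pool three times instead of pooling at 5, 9 and 13 in parallel; the results match because two stacked 5x5 stride-1 pools cover a 9x9 window (and three cover 13x13). A quick hedged check with random data:

import tensorflow as tf
from tensorflow import keras

x = tf.random.normal((1, 16, 16, 4))
p5 = keras.layers.MaxPool2D(5, 1, 'same')
p9 = keras.layers.MaxPool2D(9, 1, 'same')
print(bool(tf.reduce_all(p5(p5(x)) == p9(x))))  # True: SPPF(k=5) reproduces SPP's k=9 branch
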
class TFDetect(keras.layers.Layer):
    # TF YOLOv5 Detect layer
    def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):  # detection layer
        super().__init__()
        self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [tf.zeros(1)] * self.nl  # init grid
        self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)
        self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2])
        self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]
        self.training = False  # set to False after building model
        self.imgsz = imgsz
        for i in range(self.nl):
            ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
            self.grid[i] = self._make_grid(nx, ny)

    def call(self, inputs):
        z = []  # inference output
        x = []
        for i in range(self.nl):
            x.append(self.m[i](inputs[i]))
            # x(bs,20,20,255) to x(bs,3,20,20,85)
            ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
            x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])

            if not self.training:  # inference
                y = x[i]
                grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5
                anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4
                xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i]  # xy
                wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid
                # Normalize xywh to 0-1 to reduce calibration error
                xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
                wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
                y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
                z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))

        return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
        xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
        return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)
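
The decode in call() above is easiest to sanity-check with scalars. A hedged sketch with made-up values: the grid carries a -0.5 offset and anchor_grid is pre-scaled by 4, so a zero logit decodes to the cell center and to exactly the anchor size.

import math

cell, stride, anchor_w = 9.0, 8.0, 16.0          # made-up grid index, stride and anchor width
sigmoid = lambda t: 1 / (1 + math.exp(-t))
xy = (sigmoid(0.0) * 2 + (cell - 0.5)) * stride  # (cell + 0.5) * stride = 76.0, the cell center
wh = sigmoid(0.0) ** 2 * (anchor_w * 4)          # 0.25 * 4 * anchor_w = 16.0, the anchor width
print(xy, wh)
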
class TFSegment(TFDetect):
    # YOLOv5 Segment head for segmentation models
    def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):
        super().__init__(nc, anchors, ch, imgsz, w)
        self.nm = nm  # number of masks
        self.npr = npr  # number of protos
        self.no = 5 + nc + self.nm  # number of outputs per anchor
        self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]  # output conv
        self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto)  # protos
        self.detect = TFDetect.call

    def call(self, x):
        p = self.proto(x[0])
        # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0]))  # (optional) full-size protos
        p = tf.transpose(p, [0, 3, 1, 2])  # from shape(1,160,160,32) to shape(1,32,160,160)
        x = self.detect(self, x)
        return (x, p) if self.training else (x[0], p)


class TFProto(keras.layers.Layer):

    def __init__(self, c1, c_=256, c2=32, w=None):
        super().__init__()
        self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)
        self.upsample = TFUpsample(None, scale_factor=2, mode='nearest')
        self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)
        self.cv3 = TFConv(c_, c2, w=w.cv3)

    def call(self, inputs):
        return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))


class TFUpsample(keras.layers.Layer):
    # TF version of torch.nn.Upsample()
    def __init__(self, size, scale_factor, mode, w=None):  # warning: all arguments needed including 'w'
        super().__init__()
        assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2'
        self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode)
        # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
        # with default arguments: align_corners=False, half_pixel_centers=False
        # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
        #                                                            size=(x.shape[1] * 2, x.shape[2] * 2))

    def call(self, inputs):
        return self.upsample(inputs)


class TFConcat(keras.layers.Layer):
    # TF version of torch.concat()
    def __init__(self, dimension=1, w=None):
        super().__init__()
        assert dimension == 1, 'convert only NCHW to NHWC concat'
        self.d = 3

    def call(self, inputs):
        return tf.concat(inputs, self.d)


def parse_model(d, ch, model, imgsz):  # model_dict, input_channels(3)
    LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}")
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m_str = m
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except NameError:
                pass

        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [
                nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv,
                BottleneckCSP, C3, C3x]:
            c1, c2 = ch[f], args[0]
            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2

            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3, C3x]:
                args.insert(2, n)
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
        elif m in [Detect, Segment]:
            args.append([ch[x + 1] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
            if m is Segment:
                args[3] = make_divisible(args[3] * gw, 8)
            args.append(imgsz)
        else:
            c2 = ch[f]

        tf_m = eval('TF' + m_str.replace('nn.', ''))
        m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
            else tf_m(*args, w=model.model[i])  # module

        torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum(x.numel() for x in torch_m_.parameters())  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10}  {t:<40}{str(args):<30}')  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        ch.append(c2)
    return keras.Sequential(layers), sorted(save)
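
For a feel of the depth/width gains applied in parse_model, here is a hedged numeric sketch using the yolov5s defaults (gd=0.33, gw=0.50 from yolov5s.yaml) and the same rounding formula as make_divisible in utils.general:

import math

make_divisible = lambda x, divisor: math.ceil(x / divisor) * divisor  # mirrors utils.general
n = max(round(3 * 0.33), 1)          # a block with 3 repeats shrinks to 1 under gd=0.33
c2 = make_divisible(128 * 0.50, 8)   # a 128-channel layer narrows to 64 under gw=0.50
print(n, c2)  # 1 64
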
class TFModel:
    # TF YOLOv5 model
    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)):  # model, channels, classes
        super().__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict

        # Define model
        if nc and nc != self.yaml['nc']:
            LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc  # override yaml value
        self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)

    def predict(self,
                inputs,
                tf_nms=False,
                agnostic_nms=False,
                topk_per_class=100,
                topk_all=100,
                iou_thres=0.45,
                conf_thres=0.25):
        y = []  # outputs
        x = inputs
        for m in self.model.layers:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers

            x = m(x)  # run
            y.append(x if m.i in self.savelist else None)  # save output

        # Add TensorFlow NMS
        if tf_nms:
            boxes = self._xywh2xyxy(x[0][..., :4])
            probs = x[0][:, :, 4:5]
            classes = x[0][:, :, 5:]
            scores = probs * classes
            if agnostic_nms:
                nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)
            else:
                boxes = tf.expand_dims(boxes, 2)
                nms = tf.image.combined_non_max_suppression(boxes,
                                                            scores,
                                                            topk_per_class,
                                                            topk_all,
                                                            iou_thres,
                                                            conf_thres,
                                                            clip_boxes=False)
            return (nms,)
        return x  # output [1,6300,85] = [xywh, conf, class0, class1, ...]
        # x = x[0]  # [x(1,6300,85), ...] to x(6300,85)
        # xywh = x[..., :4]  # x(6300,4) boxes
        # conf = x[..., 4:5]  # x(6300,1) confidences
        # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1))  # x(6300,1) classes
        # return tf.concat([conf, cls, xywh], 1)

    @staticmethod
    def _xywh2xyxy(xywh):
        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
        x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
        return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
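
A one-box check of _xywh2xyxy (a sketch assuming the module above is importable; the values are made up):

import tensorflow as tf

box = tf.constant([[50.0, 40.0, 20.0, 10.0]])  # center (50, 40), width 20, height 10
print(TFModel._xywh2xyxy(box).numpy())         # [[40. 35. 60. 45.]]
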
class AgnosticNMS(keras.layers.Layer):
    # TF Agnostic NMS
    def call(self, input, topk_all, iou_thres, conf_thres):
        # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
        return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres),
                         input,
                         fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
                         name='agnostic_nms')

    @staticmethod
    def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):  # agnostic NMS
        boxes, classes, scores = x
        class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
        scores_inp = tf.reduce_max(scores, -1)
        selected_inds = tf.image.non_max_suppression(boxes,
                                                     scores_inp,
                                                     max_output_size=topk_all,
                                                     iou_threshold=iou_thres,
                                                     score_threshold=conf_thres)
        selected_boxes = tf.gather(boxes, selected_inds)
        padded_boxes = tf.pad(selected_boxes,
                              paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
                              mode='CONSTANT',
                              constant_values=0.0)
        selected_scores = tf.gather(scores_inp, selected_inds)
        padded_scores = tf.pad(selected_scores,
                               paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
                               mode='CONSTANT',
                               constant_values=-1.0)
        selected_classes = tf.gather(class_inds, selected_inds)
        padded_classes = tf.pad(selected_classes,
                                paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
                                mode='CONSTANT',
                                constant_values=-1.0)
        valid_detections = tf.shape(selected_inds)[0]
        return padded_boxes, padded_scores, padded_classes, valid_detections
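
The padding in _nms keeps output shapes static for map_fn: boxes are zero-padded and scores/classes padded with -1 up to topk_all rows. A minimal hedged sketch of that convention:

import tensorflow as tf

topk_all = 3
sel = tf.constant([[1.0, 2.0, 3.0, 4.0]])                     # one surviving box
padded = tf.pad(sel, [[0, topk_all - sel.shape[0]], [0, 0]])  # zero rows appended
print(padded.shape)  # (3, 4), regardless of how many boxes survived NMS
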
def activations(act=nn.SiLU):
    # Returns TF activation from input PyTorch activation
    if isinstance(act, nn.LeakyReLU):
        return lambda x: keras.activations.relu(x, alpha=0.1)
    elif isinstance(act, nn.Hardswish):
        return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667
    elif isinstance(act, (nn.SiLU, SiLU)):
        return lambda x: keras.activations.swish(x)
    else:
        raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}')
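
Example usage (a hedged sketch in the context of the imports above, where tf, nn and activations are already in scope):

act = activations(nn.SiLU())            # -> wrapper around keras.activations.swish
y = act(tf.constant([-1.0, 0.0, 1.0]))  # silu(x) = x * sigmoid(x)
print(y.numpy())                        # approximately [-0.269  0.     0.731]
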
def representative_dataset_gen(dataset, ncalib=100):
    # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays
    for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
        im = np.transpose(img, [1, 2, 0])
        im = np.expand_dims(im, axis=0).astype(np.float32)
        im /= 255
        yield [im]
        if n >= ncalib:
            break
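
This generator is typically wired into TensorFlow Lite full-integer quantization. A hedged sketch using the standard tf.lite APIs; keras_model and dataset are assumed to exist as in run() below:

import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = lambda: representative_dataset_gen(dataset)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
tflite_model = converter.convert()
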
def run(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=(640, 640),  # inference size h,w
        batch_size=1,  # batch size
        dynamic=False,  # dynamic batch size
):
    # PyTorch model
    im = torch.zeros((batch_size, 3, *imgsz))  # BCHW image
    model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False)
    _ = model(im)  # inference
    model.info()

    # TensorFlow model
    im = tf.zeros((batch_size, *imgsz, 3))  # BHWC image
    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
    _ = tf_model.predict(im)  # inference

    # Keras model
    im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
    keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))
    keras_model.summary()

    LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.')


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
    print_args(vars(opt))
    return opt


def main(opt):
    run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
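
For reference, a typical invocation of the export-verification entry point above (mirrors the usage style of the other scripts in this commit):

$ python models/tf.py --weights yolov5s.pt
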
@@ -0,0 +1,391 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
YOLO-specific modules

Usage:
    $ python models/yolo.py --cfg yolov5s.yaml
"""

import argparse
import contextlib
import os
import platform
import sys
from copy import deepcopy
from pathlib import Path

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
if platform.system() != 'Windows':
    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
from utils.plots import feature_visualization
from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
                               time_sync)

try:
    import thop  # for FLOPs computation
except ImportError:
    thop = None


class Detect(nn.Module):
    # YOLOv5 Detect head for detection models
    stride = None  # strides computed during build
    dynamic = False  # force grid reconstruction
    export = False  # export mode

    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
        super().__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.empty(0) for _ in range(self.nl)]  # init grid
        self.anchor_grid = [torch.empty(0) for _ in range(self.nl)]  # init anchor grid
        self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
        self.inplace = inplace  # use inplace ops (e.g. slice assignment)

    def forward(self, x):
        z = []  # inference output
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)

                if isinstance(self, Segment):  # (boxes + masks)
                    xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
                    xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
                else:  # Detect (boxes only)
                    xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf), 4)
                z.append(y.view(bs, self.na * nx * ny, self.no))

        return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)

    def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
        d = self.anchors[i].device
        t = self.anchors[i].dtype
        shape = 1, self.na, ny, nx, 2  # grid shape
        y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
        yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x)  # torch>=0.7 compatibility
        grid = torch.stack((xv, yv), 2).expand(shape) - 0.5  # add grid offset, i.e. y = 2.0 * x - 0.5
        anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
        return grid, anchor_grid


class Segment(Detect):
    # YOLOv5 Segment head for segmentation models
    def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
        super().__init__(nc, anchors, ch, inplace)
        self.nm = nm  # number of masks
        self.npr = npr  # number of protos
        self.no = 5 + nc + self.nm  # number of outputs per anchor
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
        self.proto = Proto(ch[0], self.npr, self.nm)  # protos
        self.detect = Detect.forward

    def forward(self, x):
        p = self.proto(x[0])
        x = self.detect(self, x)
        return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])


class BaseModel(nn.Module):
    # YOLOv5 base model
    def forward(self, x, profile=False, visualize=False):
        return self._forward_once(x, profile, visualize)  # single-scale inference, train

    def _forward_once(self, x, profile=False, visualize=False):
        y, dt = [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                self._profile_one_layer(m, x, dt)
            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output
            if visualize:
                feature_visualization(x, m.type, m.i, save_dir=visualize)
        return x

    def _profile_one_layer(self, m, x, dt):
        c = m == self.model[-1]  # is final layer, copy input as inplace fix
        o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
        t = time_sync()
        for _ in range(10):
            m(x.copy() if c else x)
        dt.append((time_sync() - t) * 100)
        if m == self.model[0]:
            LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s}  module")
        LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f}  {m.type}')
        if c:
            LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s}  Total")

    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        LOGGER.info('Fusing layers... ')
        for m in self.model.modules():
            if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, 'bn')  # remove batchnorm
                m.forward = m.forward_fuse  # update forward
        self.info()
        return self

    def info(self, verbose=False, img_size=640):  # print model information
        model_info(self, verbose, img_size)

    def _apply(self, fn):
        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
        self = super()._apply(fn)
        m = self.model[-1]  # Detect()
        if isinstance(m, (Detect, Segment)):
            m.stride = fn(m.stride)
            m.grid = list(map(fn, m.grid))
            if isinstance(m.anchor_grid, list):
                m.anchor_grid = list(map(fn, m.anchor_grid))
        return self
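
The Conv+BatchNorm fusion used by fuse() folds the BN statistics into the conv weights. A hedged 1x1 scalar check of the identity (made-up numbers):

import math

w, b, gamma, beta, mean, var, eps = 2.0, 0.5, 1.5, 0.1, 0.3, 4.0, 1e-5
scale = gamma / math.sqrt(var + eps)
w_fused, b_fused = w * scale, beta + (b - mean) * scale
x = 3.0
bn_path = gamma * (w * x + b - mean) / math.sqrt(var + eps) + beta
print(bn_path, w_fused * x + b_fused)  # identical up to float rounding
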
class DetectionModel(BaseModel):
    # YOLOv5 detection model
    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
        super().__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg, encoding='ascii', errors='ignore') as f:
                self.yaml = yaml.safe_load(f)  # model dict

        # Define model
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc  # override yaml value
        if anchors:
            LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
            self.yaml['anchors'] = round(anchors)  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
        self.inplace = self.yaml.get('inplace', True)

        # Build strides, anchors
        m = self.model[-1]  # Detect()
        if isinstance(m, (Detect, Segment)):
            s = 256  # 2x min stride
            m.inplace = self.inplace
            forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
            m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
            check_anchor_order(m)
            m.anchors /= m.stride.view(-1, 1, 1)
            self.stride = m.stride
            self._initialize_biases()  # only run once

        # Init weights, biases
        initialize_weights(self)
        self.info()
        LOGGER.info('')

    def forward(self, x, augment=False, profile=False, visualize=False):
        if augment:
            return self._forward_augment(x)  # augmented inference, None
        return self._forward_once(x, profile, visualize)  # single-scale inference, train

    def _forward_augment(self, x):
        img_size = x.shape[-2:]  # height, width
        s = [1, 0.83, 0.67]  # scales
        f = [None, 3, None]  # flips (2-ud, 3-lr)
        y = []  # outputs
        for si, fi in zip(s, f):
            xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
            yi = self._forward_once(xi)[0]  # forward
            # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
            yi = self._descale_pred(yi, fi, si, img_size)
            y.append(yi)
        y = self._clip_augmented(y)  # clip augmented tails
        return torch.cat(y, 1), None  # augmented inference, train

    def _descale_pred(self, p, flips, scale, img_size):
        # de-scale predictions following augmented inference (inverse operation)
        if self.inplace:
            p[..., :4] /= scale  # de-scale
            if flips == 2:
                p[..., 1] = img_size[0] - p[..., 1]  # de-flip ud
            elif flips == 3:
                p[..., 0] = img_size[1] - p[..., 0]  # de-flip lr
        else:
            x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale  # de-scale
            if flips == 2:
                y = img_size[0] - y  # de-flip ud
            elif flips == 3:
                x = img_size[1] - x  # de-flip lr
            p = torch.cat((x, y, wh, p[..., 4:]), -1)
        return p

    def _clip_augmented(self, y):
        # Clip YOLOv5 augmented inference tails
        nl = self.model[-1].nl  # number of detection layers (P3-P5)
        g = sum(4 ** x for x in range(nl))  # grid points
        e = 1  # exclude layer count
        i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e))  # indices
        y[0] = y[0][:, :-i]  # large
        i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e))  # indices
        y[-1] = y[-1][:, i:]  # small
        return y

    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
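
The objectness prior set in _initialize_biases follows the focal-loss initialization idea: with stride 8 there are (640/8)**2 = 6400 cells per anchor, and the bias targets roughly 8 objects per image. A quick numeric check:

import math

s = 8
b_obj = math.log(8 / (640 / s) ** 2)  # bias added to the objectness logit, about -6.685
p = 1 / (1 + math.exp(-b_obj))        # initial objectness probability, about 8 / 6400
print(b_obj, p)
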
Model = DetectionModel  # retain YOLOv5 'Model' class for backwards compatibility


class SegmentationModel(DetectionModel):
    # YOLOv5 segmentation model
    def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
        super().__init__(cfg, ch, nc, anchors)


class ClassificationModel(BaseModel):
    # YOLOv5 classification model
    def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):  # yaml, model, number of classes, cutoff index
        super().__init__()
        self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)

    def _from_detection_model(self, model, nc=1000, cutoff=10):
        # Create a YOLOv5 classification model from a YOLOv5 detection model
        if isinstance(model, DetectMultiBackend):
            model = model.model  # unwrap DetectMultiBackend
        model.model = model.model[:cutoff]  # backbone
        m = model.model[-1]  # last layer
        ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels  # ch into module
        c = Classify(ch, nc)  # Classify()
        c.i, c.f, c.type = m.i, m.f, 'models.common.Classify'  # index, from, type
        model.model[-1] = c  # replace
        self.model = model.model
        self.stride = model.stride
        self.save = []
        self.nc = nc

    def _from_yaml(self, cfg):
        # Create a YOLOv5 classification model from a *.yaml file
        self.model = None


def parse_model(d, ch):  # model_dict, input_channels(3)
    # Parse a YOLOv5 model.yaml dictionary
    LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}")
    anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
    if act:
        Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            with contextlib.suppress(NameError):
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings

        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in {
                Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
                BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
            c1, c2 = ch[f], args[0]
            if c2 != no:  # if not output
                c2 = make_divisible(c2 * gw, 8)

            args = [c1, c2, *args[1:]]
            if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
                args.insert(2, n)  # number of repeats
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum(ch[x] for x in f)
        # TODO: channel, gw, gd
        elif m in {Detect, Segment}:
            args.append([ch[x] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
            if m is Segment:
                args[3] = make_divisible(args[3] * gw, 8)
        elif m is Contract:
            c2 = ch[f] * args[0] ** 2
        elif m is Expand:
            c2 = ch[f] // args[0] ** 2
        else:
            c2 = ch[f]

        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum(x.numel() for x in m_.parameters())  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f}  {t:<40}{str(args):<30}')  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        if i == 0:
            ch = []
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
    parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--profile', action='store_true', help='profile model speed')
    parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
    parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
    opt = parser.parse_args()
    opt.cfg = check_yaml(opt.cfg)  # check YAML
    print_args(vars(opt))
    device = select_device(opt.device)

    # Create model
    im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
    model = Model(opt.cfg).to(device)

    # Options
    if opt.line_profile:  # profile layer by layer
        model(im, profile=True)

    elif opt.profile:  # profile forward-backward
        results = profile(input=im, ops=[model], n=3)

    elif opt.test:  # test all models
        for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
            try:
                _ = Model(cfg)
            except Exception as e:
                print(f'Error in {cfg}: {e}')

    else:  # report fused model summary
        model.fuse()
@@ -0,0 +1,50 @@
# YOLOv5 requirements
# Usage: pip install -r requirements.txt

# Base ------------------------------------------------------------------------
gitpython>=3.1.30
matplotlib>=3.2.2
numpy>=1.18.5
opencv-python>=4.1.1
Pillow>=7.1.2
psutil  # system resources
PyYAML>=5.3.1
requests>=2.23.0
scipy>=1.4.1
thop>=0.1.1  # FLOPs computation
torch>=1.7.0  # see https://pytorch.org/get-started/locally (recommended)
torchvision>=0.8.1
tqdm>=4.64.0
# protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012

# Logging ---------------------------------------------------------------------
tensorboard>=2.4.1
# clearml>=1.2.0
# comet

# Plotting --------------------------------------------------------------------
pandas>=1.1.4
seaborn>=0.11.0

# Export ----------------------------------------------------------------------
# coremltools>=6.0  # CoreML export
# onnx>=1.12.0  # ONNX export
# onnx-simplifier>=0.4.1  # ONNX simplifier
# nvidia-pyindex  # TensorRT export
# nvidia-tensorrt  # TensorRT export
# scikit-learn<=1.1.2  # CoreML quantization
# tensorflow>=2.4.1  # TF exports (-cpu, -aarch64, -macos)
# tensorflowjs>=3.9.0  # TF.js export
# openvino-dev  # OpenVINO export

# Deploy ----------------------------------------------------------------------
setuptools>=65.5.1  # Snyk vulnerability fix
# tritonclient[all]~=2.24.0

# Extras ----------------------------------------------------------------------
# ipython  # interactive notebook
# mss  # screenshots
# albumentations>=1.0.3
# pycocotools>=2.0.6  # COCO mAP
# roboflow
# ultralytics  # HUB https://hub.ultralytics.com
@@ -0,0 +1,284 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.

Usage - sources:
    $ python segment/predict.py --weights yolov5s-seg.pt --source 0                               # webcam
                                                                  img.jpg                         # image
                                                                  vid.mp4                         # video
                                                                  screen                          # screenshot
                                                                  path/                           # directory
                                                                  list.txt                        # list of images
                                                                  list.streams                    # list of streams
                                                                  'path/*.jpg'                    # glob
                                                                  'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                                  'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python segment/predict.py --weights yolov5s-seg.pt                 # PyTorch
                                          yolov5s-seg.torchscript        # TorchScript
                                          yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                          yolov5s-seg_openvino_model     # OpenVINO
                                          yolov5s-seg.engine             # TensorRT
                                          yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                          yolov5s-seg_saved_model        # TensorFlow SavedModel
                                          yolov5s-seg.pb                 # TensorFlow GraphDef
                                          yolov5s-seg.tflite             # TensorFlow Lite
                                          yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                          yolov5s-seg_paddle_model       # PaddlePaddle
"""

import argparse
import os
import platform
import sys
from pathlib import Path

import torch

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
                           strip_optimizer)
from utils.plots import Annotator, colors, save_one_box
from utils.segment.general import masks2segments, process_mask, process_mask_native
from utils.torch_utils import select_device, smart_inference_mode


@smart_inference_mode()
def run(
        weights=ROOT / 'yolov5s-seg.pt',  # model.pt path(s)
        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        imgsz=(640, 640),  # inference size (height, width)
        conf_thres=0.25,  # confidence threshold
        iou_thres=0.45,  # NMS IOU threshold
        max_det=1000,  # maximum detections per image
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False,  # show results
        save_txt=False,  # save results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_crop=False,  # save cropped prediction boxes
        nosave=False,  # do not save images/videos
        classes=None,  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms=False,  # class-agnostic NMS
        augment=False,  # augmented inference
        visualize=False,  # visualize features
        update=False,  # update all models
        project=ROOT / 'runs/predict-seg',  # save results to project/name
        name='exp',  # save results to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        line_thickness=3,  # bounding box thickness (pixels)
        hide_labels=False,  # hide labels
        hide_conf=False,  # hide confidences
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        vid_stride=1,  # video frame-rate stride
        retina_masks=False,
):
    source = str(source)
    save_img = not nosave and not source.endswith('.txt')  # save inference images
    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
    screenshot = source.lower().startswith('screen')
    if is_url and is_file:
        source = check_file(source)  # download

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Load model
    device = select_device(device)
    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_img_size(imgsz, s=stride)  # check image size

    # Dataloader
    bs = 1  # batch_size
    if webcam:
        view_img = check_imshow(warn=True)
        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
        bs = len(dataset)
    elif screenshot:
        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
    vid_path, vid_writer = [None] * bs, [None] * bs

    # Run inference
    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup
    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
    for path, im, im0s, vid_cap, s in dataset:
        with dt[0]:
            im = torch.from_numpy(im).to(model.device)
            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            if len(im.shape) == 3:
                im = im[None]  # expand for batch dim

        # Inference
        with dt[1]:
            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
            pred, proto = model(im, augment=augment, visualize=visualize)[:2]

        # NMS
        with dt[2]:
            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32)

        # Second-stage classifier (optional)
        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)

        # Process predictions
        for i, det in enumerate(pred):  # per image
            seen += 1
            if webcam:  # batch_size >= 1
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
                s += f'{i}: '
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # im.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
            s += '%gx%g ' % im.shape[2:]  # print string
            imc = im0.copy() if save_crop else im0  # for save_crop
            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
            if len(det):
                if retina_masks:
                    # scale bbox first, then crop masks
                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size
                    masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2])  # HWC
                else:
                    masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True)  # HWC
                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size

                # Segments
                if save_txt:
                    segments = [
                        scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True)
                        for x in reversed(masks2segments(masks))]

                # Print results
                for c in det[:, 5].unique():
                    n = (det[:, 5] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                # Mask plotting
                annotator.masks(
                    masks,
                    colors=[colors(x, True) for x in det[:, 5]],
                    im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() /
                    255 if retina_masks else im[i])

                # Write results
                for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])):
                    if save_txt:  # Write to file
                        seg = segments[j].reshape(-1)  # (n,2) to (n*2)
                        line = (cls, *seg, conf) if save_conf else (cls, *seg)  # label format
                        with open(f'{txt_path}.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')

                    if save_img or save_crop or view_img:  # Add bbox to image
                        c = int(cls)  # integer class
                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                        annotator.box_label(xyxy, label, color=colors(c, True))
                        # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)
                    if save_crop:
                        save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)

            # Stream results
            im0 = annotator.result()
            if view_img:
                if platform.system() == 'Linux' and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
                cv2.imshow(str(p), im0)
                if cv2.waitKey(1) == ord('q'):  # 1 millisecond
                    exit()

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer[i].write(im0)

        # Print time (inference-only)
        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")

    # Print results
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    if update:
        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
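
scale_boxes above undoes the letterbox transform that mapped the original frame im0 into the network input im. A hedged numeric sketch of that inverse mapping (a made-up 720x1280 frame letterboxed to 384x640):

gain = min(384 / 720, 640 / 1280)                               # 0.5, the letterbox scale
pad_x, pad_y = (640 - 1280 * gain) / 2, (384 - 720 * gain) / 2  # 0.0 and 12.0 pixels of padding
x1_im, y1_im = 100.0, 60.0                                      # a box corner in network coords
x1_0, y1_0 = (x1_im - pad_x) / gain, (y1_im - pad_y) / gain
print(x1_0, y1_0)  # 200.0 96.0 in original-frame coords
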
def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='show results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--visualize', action='store_true', help='visualize features')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
    parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution')
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
@ -0,0 +1,664 @@
|
|||
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Train a YOLOv5 segmentation model on a segmentation dataset.
Models and datasets download automatically from the latest YOLOv5 release.

Usage - Single-GPU training:
    $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640  # from pretrained (recommended)
    $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640  # from scratch

Usage - Multi-GPU DDP training:
    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3

Models:     https://github.com/ultralytics/yolov5/tree/master/models
Datasets:   https://github.com/ultralytics/yolov5/tree/master/data
Tutorial:   https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data
"""

import argparse
import math
import os
import random
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.optim import lr_scheduler
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import segment.val as validate  # for end-of-epoch mAP
from models.experimental import attempt_load
from models.yolo import SegmentationModel
from utils.autoanchor import check_anchors
from utils.autobatch import check_train_batch_size
from utils.callbacks import Callbacks
from utils.downloads import attempt_download, is_url
from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
                           check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
                           get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
                           labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save)
from utils.loggers import GenericLogger
from utils.plots import plot_evolve, plot_labels
from utils.segment.dataloaders import create_dataloader
from utils.segment.loss import ComputeLoss
from utils.segment.metrics import KEYS, fitness
from utils.segment.plots import plot_images_and_masks, plot_results_with_masks
from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer,
                               smart_resume, torch_distributed_zero_first)

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
GIT_INFO = check_git_info()

def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio
    # callbacks.run('on_pretrain_routine_start')

    # Directories
    w = save_dir / 'weights'  # weights dir
    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
    last, best = w / 'last.pt', w / 'best.pt'

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    opt.hyp = hyp.copy()  # for saving hyps to checkpoints

    # Save run settings
    if not evolve:
        yaml_save(save_dir / 'hyp.yaml', hyp)
        yaml_save(save_dir / 'opt.yaml', vars(opt))

    # Loggers
    data_dict = None
    if RANK in {-1, 0}:
        logger = GenericLogger(opt=opt, console_logger=LOGGER)

    # Config
    plots = not evolve and not opt.noplots  # create plots
    overlap = not opt.no_overlap
    cuda = device.type != 'cpu'
    init_seeds(opt.seed + 1 + RANK, deterministic=True)
    with torch_distributed_zero_first(LOCAL_RANK):
        data_dict = data_dict or check_dataset(data)  # check if None
    train_path, val_path = data_dict['train'], data_dict['val']
    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
    names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt')  # COCO dataset

    # Model
    check_suffix(weights, '.pt')  # check weights
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(LOCAL_RANK):
            weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location='cpu')  # load checkpoint to CPU to avoid CUDA memory leak
        model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)
        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(csd, strict=False)  # load
        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
    else:
        model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    amp = check_amp(model)  # check AMP

    # Freeze
    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)
        if any(x in k for x in freeze):
            LOGGER.info(f'freezing {k}')
            v.requires_grad = False

    # Image size
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple

    # Batch size
    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
        batch_size = check_train_batch_size(model, imgsz, amp)
        logger.update_params({'batch_size': batch_size})
        # loggers.on_params_update({"batch_size": batch_size})

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
    optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])
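    # With nbs=64 and e.g. the default batch_size=16, accumulate=4: losses from 4
    # mini-batches are backpropagated before each optimizer step, emulating a 64-image
    # nominal batch, and weight_decay is rescaled above so regularization stays consistent.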

    # Scheduler
    if opt.cos_lr:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    else:
        lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
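    # Both schedules decay the LR multiplier from 1.0 at epoch 0 to hyp['lrf'] at the
    # final epoch: linearly by default, or along a cosine curve with --cos-lr.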

    # EMA
    ema = ModelEMA(model) if RANK in {-1, 0} else None
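    # The EMA keeps an exponential moving average of the model weights; validation and
    # the saved 'best.pt' selection below use ema.ema, which is typically smoother and
    # slightly more accurate than the raw training weights.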

    # Resume
    best_fitness, start_epoch = 0.0, 0
    if pretrained:
        if resume:
            best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
        del ckpt, csd

    # DP mode
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    # Trainloader
    train_loader, dataset = create_dataloader(
        train_path,
        imgsz,
        batch_size // WORLD_SIZE,
        gs,
        single_cls,
        hyp=hyp,
        augment=True,
        cache=None if opt.cache == 'val' else opt.cache,
        rect=opt.rect,
        rank=LOCAL_RANK,
        workers=workers,
        image_weights=opt.image_weights,
        quad=opt.quad,
        prefix=colorstr('train: '),
        shuffle=True,
        mask_downsample_ratio=mask_ratio,
        overlap_mask=overlap,
    )
    labels = np.concatenate(dataset.labels, 0)
    mlc = int(labels[:, 0].max())  # max label class
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'

    # Process 0
    if RANK in {-1, 0}:
        val_loader = create_dataloader(val_path,
                                       imgsz,
                                       batch_size // WORLD_SIZE * 2,
                                       gs,
                                       single_cls,
                                       hyp=hyp,
                                       cache=None if noval else opt.cache,
                                       rect=True,
                                       rank=-1,
                                       workers=workers * 2,
                                       pad=0.5,
                                       mask_downsample_ratio=mask_ratio,
                                       overlap_mask=overlap,
                                       prefix=colorstr('val: '))[0]

        if not resume:
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)  # run AutoAnchor
            model.half().float()  # pre-reduce anchor precision

        if plots:
            plot_labels(labels, names, save_dir)
        # callbacks.run('on_pretrain_routine_end', labels, names)

    # DDP mode
    if cuda and RANK != -1:
        model = smart_DDP(model)

    # Model attributes
    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
    hyp['box'] *= 3 / nl  # scale to layers
    hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nb = len(train_loader)  # number of batches
    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    last_opt_step = -1
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = torch.cuda.amp.GradScaler(enabled=amp)
    stopper, stop = EarlyStopping(patience=opt.patience), False
    compute_loss = ComputeLoss(model, overlap=overlap)  # init loss class
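    # overlap=True (the default, i.e. no --no-overlap) lets ground-truth instance masks
    # overlap in the loss encoding; per the CLI help, this trains faster at slightly
    # lower mask mAP.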
    # callbacks.run('on_train_start')
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        # callbacks.run('on_train_epoch_start')
        model.train()

        # Update image weights (optional, single-GPU only)
        if opt.image_weights:
            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx

        # Update mosaic border (optional)
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if RANK != -1:
            train_loader.sampler.set_epoch(epoch)
        pbar = enumerate(train_loader)
        LOGGER.info(('\n' + '%11s' * 8) %
                    ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size'))
        if RANK in {-1, 0}:
            pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _, masks) in pbar:  # batch ------------------------------------------------------
            # callbacks.run('on_train_batch_start')
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with torch.cuda.amp.autocast(amp):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
                if RANK != -1:
                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
            if ni - last_opt_step >= accumulate:
                scaler.unscale_(optimizer)  # unscale gradients
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
                last_opt_step = ni

            # Log
            if RANK in {-1, 0}:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                pbar.set_description(('%11s' * 2 + '%11.4g' * 6) %
                                     (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
                # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths)
                # if callbacks.stop_training:
                #    return

                # Mosaic plots
                if plots:
                    if ni < 3:
                        plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg')
                    if ni == 10:
                        files = sorted(save_dir.glob('train*.jpg'))
                        logger.log_images(files, 'Mosaics', epoch)
            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
        scheduler.step()

        if RANK in {-1, 0}:
            # mAP
            # callbacks.run('on_train_epoch_end', epoch=epoch)
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
            if not noval or final_epoch:  # Calculate mAP
                results, maps, _ = validate.run(data_dict,
                                                batch_size=batch_size // WORLD_SIZE * 2,
                                                imgsz=imgsz,
                                                half=amp,
                                                model=ema.ema,
                                                single_cls=single_cls,
                                                dataloader=val_loader,
                                                save_dir=save_dir,
                                                plots=False,
                                                callbacks=callbacks,
                                                compute_loss=compute_loss,
                                                mask_downsample_ratio=mask_ratio,
                                                overlap=overlap)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
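            # For segmentation, fitness reduces both the box and mask metric groups to a
            # single scalar, weighting mAP@.5:.95 most heavily, so 'best.pt' tracks overall mAP.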
            stop = stopper(epoch=epoch, fitness=fi)  # early stop check
            if fi > best_fitness:
                best_fitness = fi
            log_vals = list(mloss) + list(results) + lr
            # callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
            # Log val metrics and media
            metrics_dict = dict(zip(KEYS, log_vals))
            logger.log_metrics(metrics_dict, epoch)

            # Save model
            if (not nosave) or (final_epoch and not evolve):  # if save
                ckpt = {
                    'epoch': epoch,
                    'best_fitness': best_fitness,
                    'model': deepcopy(de_parallel(model)).half(),
                    'ema': deepcopy(ema.ema).half(),
                    'updates': ema.updates,
                    'optimizer': optimizer.state_dict(),
                    'opt': vars(opt),
                    'git': GIT_INFO,  # {remote, branch, commit} if a git repo
                    'date': datetime.now().isoformat()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                if opt.save_period > 0 and epoch % opt.save_period == 0:
                    torch.save(ckpt, w / f'epoch{epoch}.pt')
                    logger.log_model(w / f'epoch{epoch}.pt')
                del ckpt
                # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)

        # EarlyStopping
        if RANK != -1:  # if DDP training
            broadcast_list = [stop if RANK == 0 else None]
            dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
            if RANK != 0:
                stop = broadcast_list[0]
        if stop:
            break  # must break all DDP ranks

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training -----------------------------------------------------------------------------------------------------
    if RANK in {-1, 0}:
        LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
                if f is best:
                    LOGGER.info(f'\nValidating {f}...')
                    results, _, _ = validate.run(
                        data_dict,
                        batch_size=batch_size // WORLD_SIZE * 2,
                        imgsz=imgsz,
                        model=attempt_load(f, device).half(),
                        iou_thres=0.65 if is_coco else 0.60,  # best pycocotools at iou 0.65
                        single_cls=single_cls,
                        dataloader=val_loader,
                        save_dir=save_dir,
                        save_json=is_coco,
                        verbose=True,
                        plots=plots,
                        callbacks=callbacks,
                        compute_loss=compute_loss,
                        mask_downsample_ratio=mask_ratio,
                        overlap=overlap)  # val best model with plots
                    if is_coco:
                        # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
                        metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr))
                        logger.log_metrics(metrics_dict, epoch)

        # callbacks.run('on_train_end', last, best, epoch, results)
        # on train end callback using genericLogger
        logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs)
        if not opt.evolve:
            logger.log_model(best, epoch)
        if plots:
            plot_results_with_masks(file=save_dir / 'results.csv')  # save results.png
            files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
            files = [(save_dir / f) for f in files if (save_dir / f).exists()]  # filter
            LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
            logger.log_images(files, 'Results', epoch + 1)
            logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1)
    torch.cuda.empty_cache()
    return results


def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
    parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=100, help='total training epochs')
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
    parser.add_argument('--noplots', action='store_true', help='save no plot files')
    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')

    # Instance Segmentation Args
    parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to save memory')
    parser.add_argument('--no-overlap', action='store_true', help='do not allow overlapping masks (overlap trains faster at slightly lower mAP)')

    return parser.parse_known_args()[0] if known else parser.parse_args()


def main(opt, callbacks=Callbacks()):
    # Checks
    if RANK in {-1, 0}:
        print_args(vars(opt))
        check_git_status()
        check_requirements()

    # Resume
    if opt.resume and not opt.evolve:  # resume from specified or most recent last.pt
        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml
        opt_data = opt.data  # original dataset
        if opt_yaml.is_file():
            with open(opt_yaml, errors='ignore') as f:
                d = yaml.safe_load(f)
        else:
            d = torch.load(last, map_location='cpu')['opt']
        opt = argparse.Namespace(**d)  # replace
        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
        if is_url(opt_data):
            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
    else:
        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:
            if opt.project == str(ROOT / 'runs/train-seg'):  # if default project name, rename to runs/evolve-seg
                opt.project = str(ROOT / 'runs/evolve-seg')
            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
        if opt.name == 'cfg':
            opt.name = Path(opt.cfg).stem  # use model.yaml as name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
        assert not opt.image_weights, f'--image-weights {msg}'
        assert not opt.evolve, f'--evolve {msg}'
        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')

    # Train
    if not opt.evolve:
        train(opt.hyp, opt, device, callbacks)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)
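        # Each tuple is (mutation gain, lower limit, upper limit): a gain of 0 freezes the
        # hyperparameter, larger gains let it mutate further, and mutated values are
        # clipped to [lower, upper] in the 'Constrain to limits' step below.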

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3
        if opt.noautoanchor:
            del hyp['anchors'], meta['anchors']
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            # download evolve.csv if exists
            subprocess.run([
                'gsutil',
                'cp',
                f'gs://{opt.bucket}/evolve.csv',
                str(evolve_csv),])

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 12] * v[i])  # mutate
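                    # x[i + 12] skips the 12 result columns (KEYS[4:16], written by
                    # print_mutation below) that precede the hyp values in each evolve.csv row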

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, callbacks)
            callbacks = Callbacks()
            # Write mutation results
            print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        LOGGER.info(f'Hyperparameter evolution finished after {opt.evolve} generations\n'
                    f"Results saved to {colorstr('bold', save_dir)}\n"
                    f'Usage example: $ python train.py --hyp {evolve_yaml}')


def run(**kwargs):
    # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
    opt = parse_opt(True)
    for k, v in kwargs.items():
        setattr(opt, k, v)
    main(opt)
    return opt


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
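# Programmatic usage sketch, following the run() docstring above (an illustration, not
# part of the original file; it assumes segment/ is on the import path, and any
# parse_opt() option can be overridden as a keyword argument):
#   import segment.train as train
#   train.run(data='coco128-seg.yaml', weights='yolov5s-seg.pt', imgsz=320, epochs=3)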

segment/tutorial.ipynb
@ -0,0 +1,594 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "t6MPjfT5NrKQ"
   },
   "source": [
    "<div align=\"center\">\n",
    "\n",
    " <a href=\"https://ultralytics.com/yolov5\" target=\"_blank\">\n",
    " <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png\"></a>\n",
    "\n",
    "\n",
    "<br>\n",
    " <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a>\n",
    " <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
    " <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
    "<br>\n",
    "\n",
    "This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>See <a href=\"https://github.com/ultralytics/yolov5/issues/new/choose\">GitHub</a> for community support or <a href=\"https://ultralytics.com/contact\">contact us</a> for professional support.\n",
    "\n",
    "</div>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "7mGmQbAO5pQb"
   },
   "source": [
    "# Setup\n",
    "\n",
    "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "wbvMlHd_QwMG",
    "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n"
     ]
    },
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n"
     ]
    }
   ],
   "source": [
    "!git clone https://github.com/ultralytics/yolov5  # clone\n",
    "%cd yolov5\n",
    "%pip install -qr requirements.txt  # install\n",
    "\n",
    "import torch\n",
    "import utils\n",
    "display = utils.notebook_init()  # checks"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "4JnkELT0cIJg"
   },
   "source": [
    "# 1. Predict\n",
    "\n",
    "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-seg`. Example inference sources are:\n",
    "\n",
    "```shell\n",
    "python segment/predict.py --source 0  # webcam\n",
    "                                   img.jpg  # image\n",
    "                                   vid.mp4  # video\n",
    "                                   screen  # screenshot\n",
    "                                   path/  # directory\n",
    "                                   'path/*.jpg'  # glob\n",
    "                                   'https://youtu.be/Zgi9g1ksQHc'  # YouTube\n",
    "                                   'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "zR9ZbuQCH7FX",
    "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n",
      "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
      "\n",
      "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n",
      "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
      "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n",
      "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n",
      "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n",
    "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "hkAzDWJ7cWTr"
   },
   "source": [
    " \n",
    "<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/199030123-08c72f8d-6871-4116-8ed3-c373642cf28e.jpg\" width=\"600\">"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "0eq1SMWl6Sfn"
   },
   "source": [
    "# 2. Validate\n",
    "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "WQPtK1QYVaD_",
    "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip ...\n",
      "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n",
      "######################################################################## 100.0%\n",
      "######################################################################## 100.0%\n"
     ]
    }
   ],
   "source": [
    "# Download COCO val\n",
    "!bash data/scripts/get_coco.sh --val --segments  # download (780M - 5000 images)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "X58w8JLpMnjH",
    "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n",
      "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n",
      "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n",
      " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n",
      " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n",
      "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Validate YOLOv5s-seg on COCO val\n",
    "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ZY2VXXXu74w5"
   },
   "source": [
    "# 3. Train\n",
    "\n",
    "<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"1000\" src=\"https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png\"/></a></p>\n",
    "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n",
    "<br><br>\n",
    "\n",
    "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n",
    "\n",
    "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n",
    "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n",
    "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n",
    "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, e.g. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n",
    "<br><br>\n",
    "\n",
    "A **Mosaic Dataloader** is used for training, which combines 4 images into 1 mosaic.\n",
    "\n",
    "## Train on Custom Data with Roboflow 🌟 NEW\n",
    "\n",
    "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n",
    "\n",
    "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n",
    "- Custom Training Notebook: [Open In Colab](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n",
    "<br>\n",
    "\n",
    "<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"480\" src=\"https://robflow-public-assets.s3.amazonaws.com/how-to-train-yolov5-segmentation-annotation.gif\"/></a></p>Label images lightning fast (including with model-assisted labeling)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "i3oKtE4g-aNn"
   },
   "outputs": [],
   "source": [
    "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n",
    "logger = 'TensorBoard'  #@param ['TensorBoard', 'Comet', 'ClearML']\n",
    "\n",
    "if logger == 'TensorBoard':\n",
    "  %load_ext tensorboard\n",
    "  %tensorboard --logdir runs/train-seg\n",
    "elif logger == 'Comet':\n",
    "  %pip install -q comet_ml\n",
    "  import comet_ml; comet_ml.init()\n",
    "elif logger == 'ClearML':\n",
    "  import clearml; clearml.browser_login()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "1NcFxRcFdJ_O",
    "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
      "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n",
      "\n",
      "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n",
      "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n",
      "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n",
      "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n",
      "\n",
      " from n params module arguments\n",
      " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2]\n",
      " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2]\n",
      " 2 -1 1 18816 models.common.C3 [64, 64, 1]\n",
      " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2]\n",
      " 4 -1 2 115712 models.common.C3 [128, 128, 2]\n",
      " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2]\n",
      " 6 -1 3 625152 models.common.C3 [256, 256, 3]\n",
      " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2]\n",
      " 8 -1 1 1182720 models.common.C3 [512, 512, 1]\n",
      " 9 -1 1 656896 models.common.SPPF [512, 512, 5]\n",
      " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1]\n",
      " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']\n",
      " 12 [-1, 6] 1 0 models.common.Concat [1]\n",
      " 13 -1 1 361984 models.common.C3 [512, 256, 1, False]\n",
      " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1]\n",
      " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']\n",
      " 16 [-1, 4] 1 0 models.common.Concat [1]\n",
      " 17 -1 1 90880 models.common.C3 [256, 128, 1, False]\n",
      " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2]\n",
      " 19 [-1, 14] 1 0 models.common.Concat [1]\n",
      " 20 -1 1 296448 models.common.C3 [256, 256, 1, False]\n",
      " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2]\n",
      " 22 [-1, 10] 1 0 models.common.Concat [1]\n",
      " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False]\n",
      " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n",
      "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n",
      "\n",
      "Transferred 367/367 items from yolov5s-seg.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
      "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 98.90it/s]\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train-seg/exp/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 2 dataloader workers\n",
      "Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n",
      "Starting training for 3 epochs...\n",
      "\n",
      " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n",
      " 0/2 4.92G 0.0417 0.04646 0.06066 0.02126 192 640: 100% 8/8 [00:08<00:00, 1.10s/it]\n",
      " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.81it/s]\n",
      " all 128 929 0.737 0.649 0.715 0.492 0.719 0.617 0.658 0.408\n",
      "\n",
      " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n",
      " 1/2 6.29G 0.04157 0.04503 0.05772 0.01777 208 640: 100% 8/8 [00:09<00:00, 1.21s/it]\n",
      " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.87it/s]\n",
      " all 128 929 0.756 0.674 0.738 0.506 0.725 0.64 0.68 0.422\n",
      "\n",
      " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n",
      " 2/2 6.29G 0.0425 0.04793 0.06784 0.01863 161 640: 100% 8/8 [00:03<00:00, 2.02it/s]\n",
      " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.88it/s]\n",
      " all 128 929 0.736 0.694 0.747 0.522 0.769 0.622 0.683 0.427\n",
      "\n",
      "3 epochs completed in 0.009 hours.\n",
      "Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n",
      "Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n",
      "\n",
      "Validating runs/train-seg/exp/weights/best.pt...\n",
      "Fusing layers... \n",
      "Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
      " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:06<00:00, 1.59s/it]\n",
      " all 128 929 0.738 0.694 0.746 0.522 0.759 0.625 0.682 0.426\n",
      " person 128 254 0.845 0.756 0.836 0.55 0.861 0.669 0.759 0.407\n",
      " bicycle 128 6 0.475 0.333 0.549 0.341 0.711 0.333 0.526 0.322\n",
      " car 128 46 0.612 0.565 0.539 0.257 0.555 0.435 0.477 0.171\n",
      " motorcycle 128 5 0.73 0.8 0.752 0.571 0.747 0.8 0.752 0.42\n",
      " airplane 128 6 1 0.943 0.995 0.732 0.92 0.833 0.839 0.555\n",
      " bus 128 7 0.677 0.714 0.722 0.653 0.711 0.714 0.722 0.593\n",
      " train 128 3 1 0.951 0.995 0.551 1 0.884 0.995 0.781\n",
      " truck 128 12 0.555 0.417 0.457 0.285 0.624 0.417 0.397 0.277\n",
      " boat 128 6 0.624 0.5 0.584 0.186 1 0.326 0.412 0.133\n",
      " traffic light 128 14 0.513 0.302 0.411 0.247 0.435 0.214 0.376 0.251\n",
      " stop sign 128 2 0.824 1 0.995 0.796 0.906 1 0.995 0.747\n",
      " bench 128 9 0.75 0.667 0.763 0.367 0.724 0.585 0.698 0.209\n",
      " bird 128 16 0.961 1 0.995 0.686 0.918 0.938 0.91 0.525\n",
      " cat 128 4 0.771 0.857 0.945 0.752 0.76 0.8 0.945 0.728\n",
      " dog 128 9 0.987 0.778 0.963 0.681 1 0.705 0.89 0.574\n",
      " horse 128 2 0.703 1 0.995 0.697 0.759 1 0.995 0.249\n",
      " elephant 128 17 0.916 0.882 0.93 0.691 0.811 0.765 0.829 0.537\n",
      " bear 128 1 0.664 1 0.995 0.995 0.701 1 0.995 0.895\n",
      " zebra 128 4 0.864 1 0.995 0.921 0.879 1 0.995 0.804\n",
      " giraffe 128 9 0.883 0.889 0.94 0.683 0.845 0.778 0.78 0.463\n",
      " backpack 128 6 1 0.59 0.701 0.372 1 0.474 0.52 0.252\n",
      " umbrella 128 18 0.654 0.839 0.887 0.52 0.517 0.556 0.427 0.229\n",
      " handbag 128 19 0.54 0.211 0.408 0.221 0.796 0.206 0.396 0.196\n",
      " tie 128 7 0.864 0.857 0.857 0.577 0.925 0.857 0.857 0.534\n",
      " suitcase 128 4 0.716 1 0.945 0.647 0.767 1 0.945 0.634\n",
      " frisbee 128 5 0.708 0.8 0.761 0.643 0.737 0.8 0.761 0.501\n",
      " skis 128 1 0.691 1 0.995 0.796 0.761 1 0.995 0.199\n",
      " snowboard 128 7 0.918 0.857 0.904 0.604 0.32 0.286 0.235 0.137\n",
      " sports ball 128 6 0.902 0.667 0.701 0.466 0.727 0.5 0.497 0.471\n",
      " kite 128 10 0.586 0.4 0.511 0.231 0.663 0.394 0.417 0.139\n",
      " baseball bat 128 4 0.359 0.5 0.401 0.169 0.631 0.5 0.526 0.133\n",
      " baseball glove 128 7 1 0.519 0.58 0.327 0.687 0.286 0.455 0.328\n",
      " skateboard 128 5 0.729 0.8 0.862 0.631 0.599 0.6 0.604 0.379\n",
      " tennis racket 128 7 0.57 0.714 0.645 0.448 0.608 0.714 0.645 0.412\n",
      " bottle 128 18 0.469 0.393 0.537 0.357 0.661 0.389 0.543 0.349\n",
      " wine glass 128 16 0.677 0.938 0.866 0.441 0.53 0.625 0.67 0.334\n",
      " cup 128 36 0.777 0.722 0.812 0.466 0.725 0.583 0.762 0.467\n",
      " fork 128 6 0.948 0.333 0.425 0.27 0.527 0.167 0.18 0.102\n",
      " knife 128 16 0.757 0.587 0.669 0.458 0.79 0.5 0.552 0.34\n",
      " spoon 128 22 0.74 0.364 0.559 0.269 0.925 0.364 0.513 0.213\n",
      " bowl 128 28 0.766 0.714 0.725 0.559 0.803 0.584 0.665 0.353\n",
      " banana 128 1 0.408 1 0.995 0.398 0.539 1 0.995 0.497\n",
      " sandwich 128 2 1 0 0.695 0.536 1 0 0.498 0.448\n",
      " orange 128 4 0.467 1 0.995 0.693 0.518 1 0.995 0.663\n",
      " broccoli 128 11 0.462 0.455 0.383 0.259 0.548 0.455 0.384 0.256\n",
      " carrot 128 24 0.631 0.875 0.77 0.533 0.757 0.909 0.853 0.499\n",
      " hot dog 128 2 0.555 1 0.995 0.995 0.578 1 0.995 0.796\n",
      " pizza 128 5 0.89 0.8 0.962 0.796 1 0.778 0.962 0.766\n",
      " donut 128 14 0.695 1 0.893 0.772 0.704 1 0.893 0.696\n",
      " cake 128 4 0.826 1 0.995 0.92 0.862 1 0.995 0.846\n",
      " chair 128 35 0.53 0.571 0.613 0.336 0.67 0.6 0.538 0.271\n",
      " couch 128 6 0.972 0.667 0.833 0.627 1 0.62 0.696 0.394\n",
      " potted plant 128 14 0.7 0.857 0.883 0.552 0.836 0.857 0.883 0.473\n",
      " bed 128 3 0.979 0.667 0.83 0.366 1 0 0.83 0.373\n",
      " dining table 128 13 0.775 0.308 0.505 0.364 0.644 0.231 0.25 0.0804\n",
      " toilet 128 2 0.836 1 0.995 0.846 0.887 1 0.995 0.797\n",
      " tv 128 2 0.6 1 0.995 0.846 0.655 1 0.995 0.896\n",
      " laptop 128 3 0.822 0.333 0.445 0.307 1 0 0.392 0.12\n",
      " mouse 128 2 1 0 0 0 1 0 0 0\n",
      " remote 128 8 0.745 0.5 0.62 0.459 0.821 0.5 0.624 0.449\n",
      " cell phone 128 8 0.686 0.375 0.502 0.272 0.488 0.25 0.28 0.132\n",
      " microwave 128 3 0.831 1 0.995 0.722 0.867 1 0.995 0.592\n",
      " oven 128 5 0.439 0.4 0.435 0.294 0.823 0.6 0.645 0.418\n",
      " sink 128 6 0.677 0.5 0.565 0.448 0.722 0.5 0.46 0.362\n",
      " refrigerator 128 5 0.533 0.8 0.783 0.524 0.558 0.8 0.783 0.527\n",
      " book 128 29 0.732 0.379 0.423 0.196 0.69 0.207 0.38 0.131\n",
      " clock 128 9 0.889 0.778 0.917 0.677 0.908 0.778 0.875 0.604\n",
      " vase 128 2 0.375 1 0.995 0.995 0.455 1 0.995 0.796\n",
      " scissors 128 1 1 0 0.0166 0.00166 1 0 0 0\n",
      " teddy bear 128 21 0.813 0.829 0.841 0.457 0.826 0.678 0.786 0.422\n",
      " toothbrush 128 5 0.806 1 0.995 0.733 0.991 1 0.995 0.628\n",
      "Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Train YOLOv5s on COCO128 for 3 epochs\n",
    "!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "15glLzbQx5u0"
   },
   "source": [
    "# 4. Visualize"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "nWOsI5wJR1o3"
   },
   "source": [
    "## Comet Logging and Visualization 🌟 NEW\n",
    "\n",
    "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
    "\n",
    "Getting started is easy:\n",
    "```shell\n",
    "pip install comet_ml  # 1. install\n",
    "export COMET_API_KEY=<Your API Key>  # 2. paste API key\n",
    "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt  # 3. train\n",
    "```\n",
    "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
    "[Open In Colab](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
    "\n",
    "<a href=\"https://bit.ly/yolov5-readme-comet2\">\n",
    "<img alt=\"Comet Dashboard\" src=\"https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png\" width=\"1280\"/></a>"
   ]
  },
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "Lay2WsTjNJzP"
|
||||
},
|
||||
"source": [
|
||||
"## ClearML Logging and Automation 🌟 NEW\n",
|
||||
"\n",
|
||||
"[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n",
|
||||
"\n",
|
||||
"- `pip install clearml`\n",
|
||||
"- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
|
||||
"\n",
|
||||
"You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
|
||||
"\n",
|
||||
"You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n",
"\n",
"<a href=\"https://cutt.ly/yolov5-notebook-clearml\">\n",
"<img alt=\"ClearML Experiment Management UI\" src=\"https://github.com/thepycoder/clearml_screenshots/raw/main/scalars.jpg\" width=\"1280\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "-WPvRbS5Swl6"
},
"source": [
"## Local Logging\n",
"\n",
"Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
"\n",
"This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices.\n",
"\n",
"<img alt=\"Local logging results\" src=\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\" width=\"1280\"/>\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Zelyeqbyt3GD"
},
"source": [
"# Environments\n",
"\n",
"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
"\n",
"- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n",
"- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n",
"- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6Qu7Iesl0p54"
},
"source": [
"# Status\n",
"\n",
"\n",
"\n",
"If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "IEijrePND_2I"
},
"source": [
"# Appendix\n",
"\n",
"Additional content below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "GMusP4OAxFu6"
},
"outputs": [],
"source": [
"# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
"import torch\n",
"\n",
"model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n",
"im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n",
"results = model(im) # inference\n",
"results.print() # or .show(), .save(), .crop(), .pandas(), etc."
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"name": "YOLOv5 Segmentation Tutorial",
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.12"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

@ -0,0 +1,473 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 segment model on a segment dataset

Usage:
    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments

Usage - formats:
    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
                                      yolov5s-seg.torchscript        # TorchScript
                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                      yolov5s-seg_openvino_model     # OpenVINO
                                      yolov5s-seg.engine             # TensorRT
                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
                                      yolov5s-seg.pb                 # TensorFlow GraphDef
                                      yolov5s-seg.tflite             # TensorFlow Lite
                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                      yolov5s-seg_paddle_model       # PaddlePaddle
"""

import argparse
import json
import os
import subprocess
import sys
from multiprocessing.pool import ThreadPool
from pathlib import Path

import numpy as np
import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import torch.nn.functional as F

from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(line)).rstrip() % line + '\n')
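            # Illustrative note (not in the upstream file): with save_conf=True a written line looks
            # like '45 0.5 0.5 0.25 0.4 0.87' -> class x_center y_center width height conf,
            # with all box values normalized to 0-1 by image width and height.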


def save_one_json(predn, jdict, path, class_map, pred_masks):
    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    from pycocotools.mask import encode

    def single_encode(x):
        rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
        rle['counts'] = rle['counts'].decode('utf-8')
        return rle

    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    pred_masks = np.transpose(pred_masks, (2, 0, 1))
    with ThreadPool(NUM_THREADS) as pool:
        rles = pool.map(single_encode, pred_masks)
    for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(p[5])],
            'bbox': [round(x, 3) for x in b],
            'score': round(p[4], 5),
            'segmentation': rles[i]})


def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
    """
    Return correct prediction matrix for either boxes or masks
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
        pred_masks / gt_masks (optional), per-instance masks compared instead of boxes when masks=True
    Returns:
        correct (array[N, 10]), True where detection i matches a label at IoU threshold iouv[j]
    """
    if masks:
        if overlap:
            nl = len(labels)
            index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
            gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
            gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
        if gt_masks.shape[1:] != pred_masks.shape[1:]:
            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
            gt_masks = gt_masks.gt_(0.5)
        iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
    else:  # boxes
        iou = box_iou(labels[:, 1:], detections[:, :4])

    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    correct_class = labels[:, 0:1] == detections[:, 5]
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)


@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.6,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val-seg',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        overlap=False,
        mask_downsample_ratio=1,
        compute_loss=None,
        callbacks=Callbacks(),
):
    if save_json:
        check_requirements('pycocotools>=2.0.6')
        process = process_mask_native  # more accurate
    else:
        process = process_mask  # faster

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
        nm = de_parallel(model).model[-1].nm  # number of masks
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       prefix=colorstr(f'{task}: '),
                                       overlap_mask=overlap,
                                       mask_downsample_ratio=mask_downsample_ratio)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
                                  'mAP50', 'mAP50-95)')
    dt = Profile(), Profile(), Profile()
    metrics = Metrics()
    loss = torch.zeros(4, device=device)
    jdict, stats = [], []
    # callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
        # callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
                masks = masks.to(device)
            masks = masks.float()
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)

        # Loss
        if compute_loss:
            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det,
                                        nm=nm)
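        # Explanatory note (not upstream): each NMS output row is (x1, y1, x2, y2, conf, cls, *mask_coeffs),
        # which is why pred[:, 6:] later in this loop selects the nm mask coefficients per detection.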
        # Metrics
        plot_masks = []  # masks for plotting
        for si, (pred, proto) in enumerate(zip(preds, protos)):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Masks
            midx = [si] if overlap else targets[:, 0] == si
            gt_masks = masks[midx]
            pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct_bboxes = process_batch(predn, labelsn, iouv)
                correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0]))  # (conf, pcls, tcls)

            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
            if plots and batch_i < 3:
                plot_masks.append(pred_masks[:15])  # filter top 15 to plot

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                pred_masks = scale_image(im[si].shape[1:],
                                         pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
                save_one_json(predn, jdict, path, class_map, pred_masks)  # append to COCO-JSON dictionary
            # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            if len(plot_masks):
                plot_masks = torch.cat(plot_masks, dim=0)
            plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)
            plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths,
                                  save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred

        # callbacks.run('on_val_batch_end')

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)
        metrics.update(results)
    nt = np.bincount(stats[4].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = '%22s' + '%11i' * 2 + '%11.3g' * 8  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results()))
    if nt.sum() == 0:
        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(metrics.ap_class_index):
            LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))

    # Print speeds
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
    # callbacks.run('on_val_end')

    mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json'))  # annotations
        pred_json = str(save_dir / f'{w}_predictions.json')  # predictions
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            results = []
            for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'):
                if is_coco:
                    eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # img ID to evaluate
                eval.evaluate()
                eval.accumulate()
                eval.summarize()
                results.extend(eval.stats[:2])  # update results (mAP@0.5:0.95, mAP@0.5)
            map_bbox, map50_bbox, map_mask, map50_mask = results
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask
    return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    # opt.save_json |= opt.data.endswith('coco.yaml')
    opt.save_txt |= opt.save_hybrid
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
        if opt.save_hybrid:
            LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone')
        run(**vars(opt))

    else:
        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
        opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results
        if opt.task == 'speed':  # speed benchmarks
            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
            for opt.weights in weights:
                run(**vars(opt), plots=False)

        elif opt.task == 'study':  # speed vs mAP benchmarks
            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
            for opt.weights in weights:
                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
                for opt.imgsz in x:  # img-size
                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                    r, _, t = run(**vars(opt), plots=False)
                    y.append(r + t)  # results and times
                np.savetxt(f, y, fmt='%10.4g')  # save
            subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt'])
            plot_val_study(x=x)  # plot
        else:
            raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")')


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)

@ -0,0 +1,54 @@
# Project-wide configuration file, can be used for package metadata and other tool configurations
# Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments
# Local usage: pip install pre-commit, pre-commit run --all-files

[metadata]
license_file = LICENSE
description_file = README.md

[tool:pytest]
norecursedirs =
    .git
    dist
    build
addopts =
    --doctest-modules
    --durations=25
    --color=yes

[flake8]
max-line-length = 120
exclude = .tox,*.egg,build,temp
select = E,W,F
doctests = True
verbose = 2
# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
format = pylint
# see: https://www.flake8rules.com/
ignore = E731,F405,E402,F401,W504,E127,E231,E501,F403
# E731: Do not assign a lambda expression, use a def
# F405: name may be undefined, or defined from star imports: module
# E402: module level import not at top of file
# F401: module imported but unused
# W504: line break after binary operator
# E127: continuation line over-indented for visual indent
# E231: missing whitespace after ‘,’, ‘;’, or ‘:’
# E501: line too long
# F403: ‘from module import *’ used; unable to detect undefined names

[isort]
# https://pycqa.github.io/isort/docs/configuration/options.html
line_length = 120
# see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html
multi_line_output = 0

[yapf]
based_on_style = pep8
spaces_before_comment = 2
COLUMN_LIMIT = 120
COALESCE_BRACKETS = True
SPACES_AROUND_POWER_OPERATOR = True
SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = False
SPLIT_BEFORE_CLOSING_BRACKET = False
SPLIT_BEFORE_FIRST_ARGUMENT = False
# EACH_DICT_ENTRY_ON_SEPARATE_LINE = False

@ -0,0 +1,640 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Train a YOLOv5 model on a custom dataset.
Models and datasets download automatically from the latest YOLOv5 release.

Usage - Single-GPU training:
    $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640  # from pretrained (recommended)
    $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640  # from scratch

Usage - Multi-GPU DDP training:
    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights yolov5s.pt --img 640 --device 0,1,2,3

Models: https://github.com/ultralytics/yolov5/tree/master/models
Datasets: https://github.com/ultralytics/yolov5/tree/master/data
Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data
"""

import argparse
import math
import os
import random
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.optim import lr_scheduler
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import val as validate  # for end-of-epoch mAP
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.autobatch import check_train_batch_size
from utils.callbacks import Callbacks
from utils.dataloaders import create_dataloader
from utils.downloads import attempt_download, is_url
from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
                           check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
                           get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
                           labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer,
                           yaml_save)
from utils.loggers import Loggers
from utils.loggers.comet.comet_utils import check_comet_resume
from utils.loss import ComputeLoss
from utils.metrics import fitness
from utils.plots import plot_evolve
from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer,
                               smart_resume, torch_distributed_zero_first)

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
GIT_INFO = check_git_info()


def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
    callbacks.run('on_pretrain_routine_start')

    # Directories
    w = save_dir / 'weights'  # weights dir
    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
    last, best = w / 'last.pt', w / 'best.pt'

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    opt.hyp = hyp.copy()  # for saving hyps to checkpoints

    # Save run settings
    if not evolve:
        yaml_save(save_dir / 'hyp.yaml', hyp)
        yaml_save(save_dir / 'opt.yaml', vars(opt))

    # Loggers
    data_dict = None
    if RANK in {-1, 0}:
        loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance

        # Register actions
        for k in methods(loggers):
            callbacks.register_action(k, callback=getattr(loggers, k))

        # Process custom dataset artifact link
        data_dict = loggers.remote_dataset
        if resume:  # If resuming runs from remote artifact
            weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size

    # Config
    plots = not evolve and not opt.noplots  # create plots
    cuda = device.type != 'cpu'
    init_seeds(opt.seed + 1 + RANK, deterministic=True)
    with torch_distributed_zero_first(LOCAL_RANK):
        data_dict = data_dict or check_dataset(data)  # check if None
    train_path, val_path = data_dict['train'], data_dict['val']
    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
    names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt')  # COCO dataset

    # Model
    check_suffix(weights, '.pt')  # check weights
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(LOCAL_RANK):
            weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location='cpu')  # load checkpoint to CPU to avoid CUDA memory leak
        model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(csd, strict=False)  # load
        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
    else:
        model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    amp = check_amp(model)  # check AMP

    # Freeze
    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)
        if any(x in k for x in freeze):
            LOGGER.info(f'freezing {k}')
            v.requires_grad = False

    # Image size
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple

    # Batch size
    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
        batch_size = check_train_batch_size(model, imgsz, amp)
        loggers.on_params_update({'batch_size': batch_size})

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
    optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])
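    # Worked example (explanatory note, not upstream): with the default batch_size=16,
    # accumulate = round(64 / 16) = 4, so weight_decay is scaled by 16 * 4 / 64 = 1.0
    # and only changes for batch sizes that do not divide the nominal 64 evenly.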

    # Scheduler
    if opt.cos_lr:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    else:
        lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
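    # Explanatory note (not upstream): for the linear schedule, lf(0) == 1.0 and lf(epochs) == hyp['lrf'],
    # so the learning rate decays linearly from lr0 to lr0 * lrf over the course of training.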

    # EMA
    ema = ModelEMA(model) if RANK in {-1, 0} else None

    # Resume
    best_fitness, start_epoch = 0.0, 0
    if pretrained:
        if resume:
            best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
        del ckpt, csd

    # DP mode
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    # Trainloader
    train_loader, dataset = create_dataloader(train_path,
                                              imgsz,
                                              batch_size // WORLD_SIZE,
                                              gs,
                                              single_cls,
                                              hyp=hyp,
                                              augment=True,
                                              cache=None if opt.cache == 'val' else opt.cache,
                                              rect=opt.rect,
                                              rank=LOCAL_RANK,
                                              workers=workers,
                                              image_weights=opt.image_weights,
                                              quad=opt.quad,
                                              prefix=colorstr('train: '),
                                              shuffle=True,
                                              seed=opt.seed)
    labels = np.concatenate(dataset.labels, 0)
    mlc = int(labels[:, 0].max())  # max label class
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'

    # Process 0
    if RANK in {-1, 0}:
        val_loader = create_dataloader(val_path,
                                       imgsz,
                                       batch_size // WORLD_SIZE * 2,
                                       gs,
                                       single_cls,
                                       hyp=hyp,
                                       cache=None if noval else opt.cache,
                                       rect=True,
                                       rank=-1,
                                       workers=workers * 2,
                                       pad=0.5,
                                       prefix=colorstr('val: '))[0]

        if not resume:
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)  # run AutoAnchor
            model.half().float()  # pre-reduce anchor precision

        callbacks.run('on_pretrain_routine_end', labels, names)

    # DDP mode
    if cuda and RANK != -1:
        model = smart_DDP(model)

    # Model attributes
    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
    hyp['box'] *= 3 / nl  # scale to layers
    hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
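    # Explanatory note (not upstream): at the defaults (imgsz=640, nc=80, nl=3) all three factors are 1.0,
    # so the loss gains only shift for non-default image sizes, class counts or detection-head counts.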
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nb = len(train_loader)  # number of batches
    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    last_opt_step = -1
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = torch.cuda.amp.GradScaler(enabled=amp)
    stopper, stop = EarlyStopping(patience=opt.patience), False
    compute_loss = ComputeLoss(model)  # init loss class
    callbacks.run('on_train_start')
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        callbacks.run('on_train_epoch_start')
        model.train()

        # Update image weights (optional, single-GPU only)
        if opt.image_weights:
            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx

        # Update mosaic border (optional)
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(3, device=device)  # mean losses
        if RANK != -1:
            train_loader.sampler.set_epoch(epoch)
        pbar = enumerate(train_loader)
        LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size'))
        if RANK in {-1, 0}:
            pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            callbacks.run('on_train_batch_start')
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
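                # Explanatory note (not upstream): with nbs=64 and --batch-size 16, accumulate ramps
                # from 1 to 4 across the warmup, growing the effective accumulated batch toward 64.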

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
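                # Explanatory note (not upstream): for imgsz=640 and gs=32, sz is drawn from [320, 960]
                # rounded down to a multiple of the grid size, so every scale stays stride-compatible.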
|
||||
|
||||
# Forward
|
||||
with torch.cuda.amp.autocast(amp):
|
||||
pred = model(imgs) # forward
|
||||
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
|
||||
if RANK != -1:
|
||||
loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
|
||||
if opt.quad:
|
||||
loss *= 4.
|
||||
|
||||
# Backward
|
||||
scaler.scale(loss).backward()
|
||||
|
||||
# Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
|
||||
if ni - last_opt_step >= accumulate:
|
||||
scaler.unscale_(optimizer) # unscale gradients
|
||||
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
|
||||
scaler.step(optimizer) # optimizer.step
|
||||
scaler.update()
|
||||
optimizer.zero_grad()
|
||||
if ema:
|
||||
ema.update(model)
|
||||
last_opt_step = ni
|
||||
|
||||
# Log
|
||||
if RANK in {-1, 0}:
|
||||
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
|
||||
mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB)
|
||||
pbar.set_description(('%11s' * 2 + '%11.4g' * 5) %
|
||||
(f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
|
||||
callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss))
|
||||
if callbacks.stop_training:
|
||||
return
|
||||
# end batch ------------------------------------------------------------------------------------------------
|
||||
|
||||
# Scheduler
|
||||
lr = [x['lr'] for x in optimizer.param_groups] # for loggers
|
||||
scheduler.step()
|
||||
|
||||
if RANK in {-1, 0}:
|
||||
# mAP
|
||||
callbacks.run('on_train_epoch_end', epoch=epoch)
|
||||
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
|
||||
final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
|
||||
if not noval or final_epoch: # Calculate mAP
|
||||
results, maps, _ = validate.run(data_dict,
|
||||
batch_size=batch_size // WORLD_SIZE * 2,
|
||||
imgsz=imgsz,
|
||||
half=amp,
|
||||
model=ema.ema,
|
||||
single_cls=single_cls,
|
||||
dataloader=val_loader,
|
||||
save_dir=save_dir,
|
||||
plots=False,
|
||||
callbacks=callbacks,
|
||||
compute_loss=compute_loss)
|
||||
|
||||
# Update best mAP
|
||||
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
|
||||
stop = stopper(epoch=epoch, fitness=fi) # early stop check
|
||||
if fi > best_fitness:
|
||||
best_fitness = fi
|
||||
log_vals = list(mloss) + list(results) + lr
|
||||
callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
|
||||
|
||||
# Save model
|
||||
if (not nosave) or (final_epoch and not evolve): # if save
|
||||
ckpt = {
|
||||
'epoch': epoch,
|
||||
'best_fitness': best_fitness,
|
||||
'model': deepcopy(de_parallel(model)).half(),
|
||||
'ema': deepcopy(ema.ema).half(),
|
||||
'updates': ema.updates,
|
||||
'optimizer': optimizer.state_dict(),
|
||||
'opt': vars(opt),
|
||||
'git': GIT_INFO, # {remote, branch, commit} if a git repo
|
||||
'date': datetime.now().isoformat()}
|
||||
|
||||
# Save last, best and delete
|
||||
torch.save(ckpt, last)
|
||||
if best_fitness == fi:
|
||||
torch.save(ckpt, best)
|
||||
if opt.save_period > 0 and epoch % opt.save_period == 0:
|
||||
torch.save(ckpt, w / f'epoch{epoch}.pt')
|
||||
del ckpt
|
||||
callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
|
||||
|
||||
# EarlyStopping
|
||||
if RANK != -1: # if DDP training
|
||||
broadcast_list = [stop if RANK == 0 else None]
|
||||
dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
|
||||
if RANK != 0:
|
||||
stop = broadcast_list[0]
|
||||
if stop:
|
||||
break # must break all DDP ranks
|
||||
|
||||
# end epoch ----------------------------------------------------------------------------------------------------
|
||||
# end training -----------------------------------------------------------------------------------------------------
|
||||
if RANK in {-1, 0}:
|
||||
LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
|
||||
for f in last, best:
|
||||
if f.exists():
|
||||
strip_optimizer(f) # strip optimizers
|
||||
if f is best:
|
||||
LOGGER.info(f'\nValidating {f}...')
|
||||
results, _, _ = validate.run(
|
||||
data_dict,
|
||||
batch_size=batch_size // WORLD_SIZE * 2,
|
||||
imgsz=imgsz,
|
||||
model=attempt_load(f, device).half(),
|
||||
iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65
|
||||
single_cls=single_cls,
|
||||
dataloader=val_loader,
|
||||
save_dir=save_dir,
|
||||
save_json=is_coco,
|
||||
verbose=True,
|
||||
plots=plots,
|
||||
callbacks=callbacks,
|
||||
compute_loss=compute_loss) # val best model with plots
|
||||
if is_coco:
|
||||
callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
|
||||
|
||||
callbacks.run('on_train_end', last, best, epoch, results)
|
||||
|
||||
torch.cuda.empty_cache()
|
||||
return results
|
||||
|
||||
|
||||
def parse_opt(known=False):
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path')
|
||||
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
|
||||
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
|
||||
parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
|
||||
parser.add_argument('--epochs', type=int, default=100, help='total training epochs')
|
||||
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
|
||||
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
|
||||
parser.add_argument('--rect', action='store_true', help='rectangular training')
|
||||
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
|
||||
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
|
||||
parser.add_argument('--noval', action='store_true', help='only validate final epoch')
|
||||
parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
|
||||
parser.add_argument('--noplots', action='store_true', help='save no plot files')
|
||||
parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
|
||||
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
|
||||
parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
|
||||
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
|
||||
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
||||
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
|
||||
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
|
||||
parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
|
||||
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
|
||||
parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
|
||||
parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
|
||||
parser.add_argument('--name', default='exp', help='save to project/name')
|
||||
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
||||
parser.add_argument('--quad', action='store_true', help='quad dataloader')
|
||||
parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
|
||||
parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
|
||||
parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
|
||||
parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
|
||||
parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
|
||||
parser.add_argument('--seed', type=int, default=0, help='Global training seed')
|
||||
parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
|
||||
|
||||
# Logger arguments
|
||||
parser.add_argument('--entity', default=None, help='Entity')
|
||||
parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option')
|
||||
parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval')
|
||||
parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use')
|
||||
|
||||
return parser.parse_known_args()[0] if known else parser.parse_args()
|
||||
|
||||
|
||||
def main(opt, callbacks=Callbacks()):
    # Checks
    if RANK in {-1, 0}:
        print_args(vars(opt))
        check_git_status()
        check_requirements()

    # Resume (from specified or most recent last.pt)
    if opt.resume and not check_comet_resume(opt) and not opt.evolve:
        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml
        opt_data = opt.data  # original dataset
        if opt_yaml.is_file():
            with open(opt_yaml, errors='ignore') as f:
                d = yaml.safe_load(f)
        else:
            d = torch.load(last, map_location='cpu')['opt']
        opt = argparse.Namespace(**d)  # replace
        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
        if is_url(opt_data):
            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
    else:
        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:
            if opt.project == str(ROOT / 'runs/train'):  # if default project name, rename to runs/evolve
                opt.project = str(ROOT / 'runs/evolve')
            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
        if opt.name == 'cfg':
            opt.name = Path(opt.cfg).stem  # use model.yaml as name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
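    # LOCAL_RANK, RANK and WORLD_SIZE are typically set by torchrun/torch.distributed.launch;
    # a LOCAL_RANK of -1 means single-process (non-DDP) training.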
    if LOCAL_RANK != -1:
        msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
        assert not opt.image_weights, f'--image-weights {msg}'
        assert not opt.evolve, f'--evolve {msg}'
        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')

    # Train
    if not opt.evolve:
        train(opt.hyp, opt, device, callbacks)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3
        if opt.noautoanchor:
            del hyp['anchors'], meta['anchors']
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            # download evolve.csv if exists
            subprocess.run([
                'gsutil',
                'cp',
                f'gs://{opt.bucket}/evolve.csv',
                str(evolve_csv),])

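        # Evolution loop: each generation re-reads evolve.csv, selects high-fitness
        # parents, mutates their hyperparameters multiplicatively, retrains, and
        # appends the new result row so later generations build on earlier ones.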
        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
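                # Note: the first 7 columns of evolve.csv are result metrics (P, R, mAPs,
                # val losses), so hyperparameter values start at column index 7 below.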
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, callbacks)
            callbacks = Callbacks()
            # Write mutation results
            keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss',
                    'val/obj_loss', 'val/cls_loss')
            print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
                    f"Results saved to {colorstr('bold', save_dir)}\n"
                    f'Usage example: $ python train.py --hyp {evolve_yaml}')


def run(**kwargs):
    # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
    opt = parse_opt(True)
    for k, v in kwargs.items():
        setattr(opt, k, v)
    main(opt)
    return opt


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
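The evolve branch above is a simple genetic algorithm. As a minimal, self-contained sketch of its core step (weighted parent selection plus multiplicative mutation), with hypothetical gains, limits and fitness values standing in for the `meta` table and evolve.csv:

```python
import numpy as np

# Hypothetical stand-ins for the meta table: per-hyperparameter gain and limits
gains = np.array([1.0, 1.0, 0.3])           # mutation scale per hyperparameter
lo, hi = np.array([1e-5, 0.0, 0.6]), np.array([0.1, 0.001, 0.98])
parents = np.array([[0.01, 0.0005, 0.937],  # rows = previous generations' hyps
                    [0.02, 0.0004, 0.90]])
fitness = np.array([0.55, 0.48])            # higher is better

w = fitness - fitness.min() + 1e-6          # positive selection weights
parent = parents[np.random.choice(len(parents), p=w / w.sum())]

mp, s = 0.8, 0.2                            # mutation probability and sigma
v = np.ones_like(parent)
while (v == 1).all():                       # retry until something actually changes
    v = (gains * (np.random.random(3) < mp) * np.random.randn(3) *
         np.random.random() * s + 1).clip(0.3, 3.0)
child = np.clip(parent * v, lo, hi)         # mutate and constrain to limits
print(child)
```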
@ -0,0 +1,976 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "YOLOv5 Tutorial",
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU",
|
||||
"widgets": {
|
||||
"application/vnd.jupyter.widget-state+json": {
|
||||
"1f7df330663048998adcf8a45bc8f69b": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_name": "HBoxModel",
|
||||
"model_module_version": "1.5.0",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "HBoxModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "HBoxView",
|
||||
"box_style": "",
|
||||
"children": [
|
||||
"IPY_MODEL_e896e6096dd244c59d7955e2035cd729",
|
||||
"IPY_MODEL_a6ff238c29984b24bf6d0bd175c19430",
|
||||
"IPY_MODEL_3c085ba3f3fd4c3c8a6bb41b41ce1479"
|
||||
],
|
||||
"layout": "IPY_MODEL_16b0c8aa6e0f427e8a54d3791abb7504"
|
||||
}
|
||||
},
|
||||
"e896e6096dd244c59d7955e2035cd729": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_name": "HTMLModel",
|
||||
"model_module_version": "1.5.0",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "HTMLModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "HTMLView",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_c7b2dd0f78384cad8e400b282996cdf5",
|
||||
"placeholder": "",
|
||||
"style": "IPY_MODEL_6a27e43b0e434edd82ee63f0a91036ca",
|
||||
"value": "100%"
|
||||
}
|
||||
},
|
||||
"a6ff238c29984b24bf6d0bd175c19430": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_name": "FloatProgressModel",
|
||||
"model_module_version": "1.5.0",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "FloatProgressModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "ProgressView",
|
||||
"bar_style": "success",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_cce0e6c0c4ec442cb47e65c674e02e92",
|
||||
"max": 818322941,
|
||||
"min": 0,
|
||||
"orientation": "horizontal",
|
||||
"style": "IPY_MODEL_c5b9f38e2f0d4f9aa97fe87265263743",
|
||||
"value": 818322941
|
||||
}
|
||||
},
|
||||
"3c085ba3f3fd4c3c8a6bb41b41ce1479": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_name": "HTMLModel",
|
||||
"model_module_version": "1.5.0",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "HTMLModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "HTMLView",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_df554fb955c7454696beac5a82889386",
|
||||
"placeholder": "",
|
||||
"style": "IPY_MODEL_74e9112a87a242f4831b7d68c7da6333",
|
||||
"value": " 780M/780M [00:05<00:00, 126MB/s]"
|
||||
}
|
||||
},
|
||||
"16b0c8aa6e0f427e8a54d3791abb7504": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_name": "LayoutModel",
|
||||
"model_module_version": "1.2.0",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
},
|
||||
"c7b2dd0f78384cad8e400b282996cdf5": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_name": "LayoutModel",
|
||||
"model_module_version": "1.2.0",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
},
|
||||
"6a27e43b0e434edd82ee63f0a91036ca": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_name": "DescriptionStyleModel",
|
||||
"model_module_version": "1.5.0",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "DescriptionStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"description_width": ""
|
||||
}
|
||||
},
|
||||
"cce0e6c0c4ec442cb47e65c674e02e92": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_name": "LayoutModel",
|
||||
"model_module_version": "1.2.0",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
},
|
||||
"c5b9f38e2f0d4f9aa97fe87265263743": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_name": "ProgressStyleModel",
|
||||
"model_module_version": "1.5.0",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "ProgressStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"bar_color": null,
|
||||
"description_width": ""
|
||||
}
|
||||
},
|
||||
"df554fb955c7454696beac5a82889386": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_name": "LayoutModel",
|
||||
"model_module_version": "1.2.0",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
},
|
||||
"74e9112a87a242f4831b7d68c7da6333": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_name": "DescriptionStyleModel",
|
||||
"model_module_version": "1.5.0",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "DescriptionStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"description_width": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "t6MPjfT5NrKQ"
|
||||
},
|
||||
"source": [
|
||||
"<div align=\"center\">\n",
|
||||
"\n",
|
||||
" <a href=\"https://ultralytics.com/yolov5\" target=\"_blank\">\n",
|
||||
" <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png\"></a>\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"<br>\n",
|
||||
" <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a>\n",
|
||||
" <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
|
||||
" <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
|
||||
"<br>\n",
|
||||
"\n",
|
||||
"This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>See <a href=\"https://github.com/ultralytics/yolov5/issues/new/choose\">GitHub</a> for community support or <a href=\"https://ultralytics.com/contact\">contact us</a> for professional support.\n",
|
||||
"\n",
|
||||
"</div>"
|
||||
]
|
||||
},
|
||||
{
"cell_type": "markdown",
"metadata": {
"id": "7mGmQbAO5pQb"
},
"source": [
"# Setup\n",
"\n",
"Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU."
]
},
{
"cell_type": "code",
"metadata": {
"id": "wbvMlHd_QwMG",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "f9f016ad-3dcf-4bd2-e1c3-d5b79efc6f32"
},
"source": [
"!git clone https://github.com/ultralytics/yolov5 # clone\n",
"%cd yolov5\n",
"%pip install -qr requirements.txt # install\n",
"\n",
"import torch\n",
"import utils\n",
"display = utils.notebook_init() # checks"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n"
]
},
{
"output_type": "stream",
"name": "stdout",
"text": [
"Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "4JnkELT0cIJg"
},
"source": [
"# 1. Detect\n",
"\n",
"`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n",
"\n",
"```shell\n",
"python detect.py --source 0 # webcam\n",
" img.jpg # image \n",
" vid.mp4 # video\n",
" screen # screenshot\n",
" path/ # directory\n",
" 'path/*.jpg' # glob\n",
" 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n",
" 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n",
"```"
]
},
{
"cell_type": "code",
"metadata": {
"id": "zR9ZbuQCH7FX",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "b4db5c49-f501-4505-cf0d-a1d35236c485"
},
"source": [
"!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n",
"# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n",
"YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
"\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt to yolov5s.pt...\n",
"100% 14.1M/14.1M [00:00<00:00, 116MB/s] \n",
"\n",
"Fusing layers... \n",
"YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n",
"image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.0ms\n",
"image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 14.3ms\n",
"Speed: 0.5ms pre-process, 15.7ms inference, 18.6ms NMS per image at shape (1, 3, 640, 640)\n",
"Results saved to \u001b[1mruns/detect/exp\u001b[0m\n"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "hkAzDWJ7cWTr"
},
"source": [
" \n",
"<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/127574988-6a558aa1-d268-44b9-bf6b-62d4c605cc72.jpg\" width=\"600\">"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0eq1SMWl6Sfn"
},
"source": [
"# 2. Validate\n",
"Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag."
]
},
{
"cell_type": "code",
"metadata": {
"id": "WQPtK1QYVaD_",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 49
},
"outputId": "c7d0a0d2-abfb-44c3-d60d-f99d0e7aabad"
},
"source": [
"# Download COCO val\n",
"torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n",
"!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip"
],
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
" 0%| | 0.00/780M [00:00<?, ?B/s]"
]
},
"metadata": {}
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "X58w8JLpMnjH",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "5fc61358-7bc5-4310-a310-9059f66c6322"
},
"source": [
"# Validate YOLOv5s on COCO val\n",
"!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n",
"YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
"\n",
"Fusing layers... \n",
"YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 1977.30it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n",
" Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:12<00:00, 2.17it/s]\n",
" all 5000 36335 0.67 0.521 0.566 0.371\n",
"Speed: 0.1ms pre-process, 2.9ms inference, 2.0ms NMS per image at shape (32, 3, 640, 640)\n",
"\n",
"Evaluating pycocotools mAP... saving runs/val/exp/yolov5s_predictions.json...\n",
"loading annotations into memory...\n",
"Done (t=0.43s)\n",
"creating index...\n",
"index created!\n",
"Loading and preparing results...\n",
"DONE (t=5.85s)\n",
"creating index...\n",
"index created!\n",
"Running per image evaluation...\n",
"Evaluate annotation type *bbox*\n",
"DONE (t=82.22s).\n",
"Accumulating evaluation results...\n",
"DONE (t=14.92s).\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n",
" Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n",
" Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.211\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.423\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.489\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.311\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.516\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.566\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.378\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.625\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.723\n",
"Results saved to \u001b[1mruns/val/exp\u001b[0m\n"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ZY2VXXXu74w5"
},
"source": [
"# 3. Train\n",
"\n",
"<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"1000\" src=\"https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png\"/></a></p>\n",
"Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n",
"<br><br>\n",
"\n",
"Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n",
"\n",
"- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n",
"automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n",
"- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n",
"- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n",
"<br><br>\n",
"\n",
"A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n",
"\n",
"## Train on Custom Data with Roboflow 🌟 NEW\n",
"\n",
"[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n",
"\n",
"- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n",
"- Custom Training Notebook: [Open In Colab](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n",
"<br>\n",
"\n",
"<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"480\" src=\"https://uploads-ssl.webflow.com/5f6bc60e665f54545a1e52a5/6152a275ad4b4ac20cd2e21a_roboflow-annotate.gif\"/></a></p>Label images lightning fast (including with model-assisted labeling)"
]
},
{
"cell_type": "code",
"source": [
"#@title Select YOLOv5 🚀 logger {run: 'auto'}\n",
"logger = 'ClearML' #@param ['ClearML', 'Comet', 'TensorBoard']\n",
"\n",
"if logger == 'ClearML':\n",
"  %pip install -q clearml\n",
"  import clearml; clearml.browser_login()\n",
"elif logger == 'Comet':\n",
"  %pip install -q comet_ml\n",
"  import comet_ml; comet_ml.init()\n",
"elif logger == 'TensorBoard':\n",
"  %load_ext tensorboard\n",
"  %tensorboard --logdir runs/train"
],
"metadata": {
"id": "i3oKtE4g-aNn"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "1NcFxRcFdJ_O",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "721b9028-767f-4a05-c964-692c245f7398"
},
"source": [
"# Train YOLOv5s on COCO128 for 3 epochs\n",
"!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n",
"\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
"YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
"\n",
"\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
"\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n",
"\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
"\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
"\n",
"Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n",
"Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n",
"100% 6.66M/6.66M [00:00<00:00, 261MB/s]\n",
"Dataset download success ✅ (0.3s), saved to \u001b[1m/content/datasets\u001b[0m\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n",
" 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
" 2 -1 1 18816 models.common.C3 [64, 64, 1] \n",
" 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
" 4 -1 2 115712 models.common.C3 [128, 128, 2] \n",
" 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
" 6 -1 3 625152 models.common.C3 [256, 256, 3] \n",
" 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
" 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n",
" 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n",
" 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
" 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 12 [-1, 6] 1 0 models.common.Concat [1] \n",
" 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n",
" 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n",
" 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 16 [-1, 4] 1 0 models.common.Concat [1] \n",
" 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n",
" 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n",
" 19 [-1, 14] 1 0 models.common.Concat [1] \n",
" 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n",
" 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n",
" 22 [-1, 10] 1 0 models.common.Concat [1] \n",
" 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
"Model summary: 214 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n",
"\n",
"Transferred 349/349 items from yolov5s.pt\n",
"\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
"\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
"\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1911.57it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 229.69it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 97.70it/s] \n",
"\n",
"\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
"Plotting labels to runs/train/exp/labels.jpg... \n",
"Image sizes 640 train, 640 val\n",
"Using 2 dataloader workers\n",
"Logging results to \u001b[1mruns/train/exp\u001b[0m\n",
"Starting training for 3 epochs...\n",
"\n",
" Epoch GPU_mem box_loss obj_loss cls_loss Instances Size\n",
" 0/2 3.74G 0.04618 0.07207 0.017 232 640: 100% 8/8 [00:07<00:00, 1.10it/s]\n",
" Class Images Instances P R mAP50 mAP50-95: 100% 4/4 [00:01<00:00, 2.28it/s]\n",
" all 128 929 0.672 0.594 0.682 0.451\n",
"\n",
" Epoch GPU_mem box_loss obj_loss cls_loss Instances Size\n",
" 1/2 5.36G 0.04623 0.06888 0.01821 201 640: 100% 8/8 [00:02<00:00, 3.29it/s]\n",
" Class Images Instances P R mAP50 mAP50-95: 100% 4/4 [00:01<00:00, 3.17it/s]\n",
" all 128 929 0.721 0.639 0.724 0.48\n",
"\n",
" Epoch GPU_mem box_loss obj_loss cls_loss Instances Size\n",
" 2/2 5.36G 0.04361 0.06479 0.01698 227 640: 100% 8/8 [00:02<00:00, 3.46it/s]\n",
" Class Images Instances P R mAP50 mAP50-95: 100% 4/4 [00:01<00:00, 3.11it/s]\n",
" all 128 929 0.758 0.641 0.731 0.487\n",
"\n",
"3 epochs completed in 0.005 hours.\n",
"Optimizer stripped from runs/train/exp/weights/last.pt, 14.9MB\n",
"Optimizer stripped from runs/train/exp/weights/best.pt, 14.9MB\n",
"\n",
"Validating runs/train/exp/weights/best.pt...\n",
"Fusing layers... \n",
"Model summary: 157 layers, 7225885 parameters, 0 gradients, 16.4 GFLOPs\n",
" Class Images Instances P R mAP50 mAP50-95: 100% 4/4 [00:03<00:00, 1.05it/s]\n",
" all 128 929 0.757 0.641 0.732 0.487\n",
" person 128 254 0.86 0.705 0.804 0.528\n",
" bicycle 128 6 0.773 0.578 0.725 0.426\n",
" car 128 46 0.658 0.435 0.554 0.239\n",
" motorcycle 128 5 0.59 0.8 0.837 0.635\n",
" airplane 128 6 1 0.996 0.995 0.696\n",
" bus 128 7 0.635 0.714 0.756 0.666\n",
" train 128 3 0.691 0.333 0.753 0.511\n",
" truck 128 12 0.604 0.333 0.472 0.26\n",
" boat 128 6 0.941 0.333 0.46 0.183\n",
" traffic light 128 14 0.557 0.183 0.302 0.214\n",
" stop sign 128 2 0.827 1 0.995 0.846\n",
" bench 128 9 0.79 0.556 0.677 0.318\n",
" bird 128 16 0.962 1 0.995 0.663\n",
" cat 128 4 0.867 1 0.995 0.754\n",
" dog 128 9 1 0.649 0.903 0.654\n",
" horse 128 2 0.853 1 0.995 0.622\n",
" elephant 128 17 0.908 0.882 0.934 0.698\n",
" bear 128 1 0.697 1 0.995 0.995\n",
" zebra 128 4 0.867 1 0.995 0.905\n",
" giraffe 128 9 0.788 0.829 0.912 0.701\n",
" backpack 128 6 0.841 0.5 0.738 0.311\n",
" umbrella 128 18 0.786 0.815 0.859 0.48\n",
" handbag 128 19 0.772 0.263 0.366 0.216\n",
" tie 128 7 0.975 0.714 0.77 0.491\n",
" suitcase 128 4 0.643 0.75 0.912 0.563\n",
" frisbee 128 5 0.72 0.8 0.76 0.717\n",
" skis 128 1 0.748 1 0.995 0.3\n",
" snowboard 128 7 0.827 0.686 0.833 0.57\n",
" sports ball 128 6 0.637 0.667 0.602 0.311\n",
" kite 128 10 0.645 0.6 0.594 0.224\n",
" baseball bat 128 4 0.519 0.278 0.468 0.205\n",
" baseball glove 128 7 0.483 0.429 0.465 0.278\n",
" skateboard 128 5 0.923 0.6 0.687 0.493\n",
" tennis racket 128 7 0.774 0.429 0.544 0.333\n",
" bottle 128 18 0.577 0.379 0.551 0.275\n",
" wine glass 128 16 0.715 0.875 0.893 0.511\n",
" cup 128 36 0.843 0.667 0.833 0.531\n",
" fork 128 6 0.998 0.333 0.45 0.315\n",
" knife 128 16 0.77 0.688 0.695 0.399\n",
" spoon 128 22 0.839 0.473 0.638 0.383\n",
" bowl 128 28 0.765 0.583 0.715 0.512\n",
" banana 128 1 0.903 1 0.995 0.301\n",
" sandwich 128 2 1 0 0.359 0.301\n",
" orange 128 4 0.718 0.75 0.912 0.581\n",
" broccoli 128 11 0.545 0.364 0.43 0.319\n",
" carrot 128 24 0.62 0.625 0.724 0.495\n",
" hot dog 128 2 0.385 1 0.828 0.762\n",
" pizza 128 5 0.833 1 0.962 0.725\n",
" donut 128 14 0.631 1 0.96 0.833\n",
" cake 128 4 0.871 1 0.995 0.83\n",
" chair 128 35 0.583 0.6 0.608 0.318\n",
" couch 128 6 0.909 0.667 0.813 0.543\n",
" potted plant 128 14 0.745 0.786 0.822 0.48\n",
" bed 128 3 0.973 0.333 0.753 0.41\n",
" dining table 128 13 0.821 0.356 0.577 0.342\n",
" toilet 128 2 1 0.949 0.995 0.797\n",
" tv 128 2 0.566 1 0.995 0.796\n",
" laptop 128 3 1 0 0.59 0.311\n",
" mouse 128 2 1 0 0.105 0.0527\n",
" remote 128 8 1 0.623 0.634 0.538\n",
" cell phone 128 8 0.565 0.375 0.399 0.179\n",
" microwave 128 3 0.709 1 0.995 0.736\n",
" oven 128 5 0.328 0.4 0.43 0.282\n",
" sink 128 6 0.438 0.333 0.339 0.266\n",
" refrigerator 128 5 0.564 0.8 0.798 0.535\n",
" book 128 29 0.597 0.256 0.351 0.155\n",
" clock 128 9 0.763 0.889 0.934 0.737\n",
" vase 128 2 0.331 1 0.995 0.895\n",
" scissors 128 1 1 0 0.497 0.0552\n",
" teddy bear 128 21 0.857 0.57 0.837 0.544\n",
" toothbrush 128 5 0.799 1 0.928 0.556\n",
"Results saved to \u001b[1mruns/train/exp\u001b[0m\n"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "15glLzbQx5u0"
},
"source": [
"# 4. Visualize"
]
},
{
"cell_type": "markdown",
"source": [
"## Comet Logging and Visualization 🌟 NEW\n",
"\n",
"[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
"\n",
"Getting started is easy:\n",
"```shell\n",
"pip install comet_ml # 1. install\n",
"export COMET_API_KEY=<Your API Key> # 2. paste API key\n",
"python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n",
"```\n",
"To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
"[Open In Colab](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
"\n",
"<a href=\"https://bit.ly/yolov5-readme-comet2\">\n",
"<img alt=\"Comet Dashboard\" src=\"https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png\" width=\"1280\"/></a>"
],
"metadata": {
"id": "nWOsI5wJR1o3"
}
},
{
"cell_type": "markdown",
"source": [
"## ClearML Logging and Automation 🌟 NEW\n",
"\n",
"[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n",
"\n",
"- `pip install clearml`\n",
"- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
"\n",
"You'll get all the expected features of an experiment manager: live updates, model upload, experiment comparison, etc., and ClearML also tracks uncommitted changes and installed packages, for example. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
"\n",
"You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n",
"\n",
"<a href=\"https://cutt.ly/yolov5-notebook-clearml\">\n",
"<img alt=\"ClearML Experiment Management UI\" src=\"https://github.com/thepycoder/clearml_screenshots/raw/main/scalars.jpg\" width=\"1280\"/></a>"
],
"metadata": {
"id": "Lay2WsTjNJzP"
}
},
{
"cell_type": "markdown",
"metadata": {
"id": "-WPvRbS5Swl6"
},
"source": [
"## Local Logging\n",
"\n",
"Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
"\n",
"This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices.\n",
"\n",
"<img alt=\"Local logging results\" src=\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\" width=\"1280\"/>\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Zelyeqbyt3GD"
},
"source": [
"# Environments\n",
"\n",
"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
"\n",
"- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n",
"- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n",
"- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6Qu7Iesl0p54"
},
"source": [
"# Status\n",
"\n",
"\n",
"If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "IEijrePND_2I"
},
"source": [
"# Appendix\n",
"\n",
"Additional content below."
]
},
{
"cell_type": "code",
"metadata": {
"id": "GMusP4OAxFu6"
},
"source": [
"# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
"import torch\n",
"\n",
"model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True) # yolov5n - yolov5x6 or custom\n",
"im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n",
"results = model(im) # inference\n",
"results.print() # or .show(), .save(), .crop(), .pandas(), etc."
],
"execution_count": null,
"outputs": []
}
]
}
@ -0,0 +1,82 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
utils/initialization
"""

import contextlib
import platform
import threading


def emojis(str=''):
    # Return platform-dependent emoji-safe version of string
    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str


class TryExcept(contextlib.ContextDecorator):
    # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
    def __init__(self, msg=''):
        self.msg = msg

    def __enter__(self):
        pass

    def __exit__(self, exc_type, value, traceback):
        if value:
            print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
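        # returning True from __exit__ suppresses the exception: errors are printed, not raised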
        return True


def threaded(func):
    # Multi-threads a target function and returns thread. Usage: @threaded decorator
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
        thread.start()
        return thread

    return wrapper


def join_threads(verbose=False):
    # Join all daemon threads, i.e. atexit.register(lambda: join_threads())
    main_thread = threading.current_thread()
    for t in threading.enumerate():
        if t is not main_thread:
            if verbose:
                print(f'Joining thread {t.name}')
            t.join()

def notebook_init(verbose=True):
    # Check system software and hardware
    print('Checking setup...')

    import os
    import shutil

    from utils.general import check_font, check_requirements, is_colab
    from utils.torch_utils import select_device  # imports

    check_font()

    import psutil

    if is_colab():
        shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory

    # System info
    display = None
    if verbose:
        gb = 1 << 30  # bytes to GiB (1024 ** 3)
        ram = psutil.virtual_memory().total
        total, used, free = shutil.disk_usage('/')
        with contextlib.suppress(Exception):  # clear display if ipython is installed
            from IPython import display
            display.clear_output()
        s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
    else:
        s = ''

    select_device(newline=False)
    print(emojis(f'Setup complete ✅ {s}'))
    return display
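A quick usage sketch for the helpers above (the function names and messages are hypothetical, not from this diff; it assumes this file is importable as the `utils` package):

```python
from utils import TryExcept, threaded

@TryExcept('weights check failed')
def check_weights(path='yolov5s.pt'):
    # Any exception raised here is printed with the message above and swallowed
    assert path.endswith('.pt'), f'{path} is not a PyTorch weights file'

@threaded
def warmup():
    # Runs in a daemon thread; the decorated call returns the Thread immediately
    print('warming up...')

check_weights()
t = warmup()
t.join()
```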
@ -0,0 +1,103 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Activation functions
"""

import torch
import torch.nn as nn
import torch.nn.functional as F


class SiLU(nn.Module):
    # SiLU activation https://arxiv.org/pdf/1606.08415.pdf
    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)


class Hardswish(nn.Module):
    # Hard-SiLU activation
    @staticmethod
    def forward(x):
        # return x * F.hardsigmoid(x)  # for TorchScript and CoreML
        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX


class Mish(nn.Module):
    # Mish activation https://github.com/digantamisra98/Mish
    @staticmethod
    def forward(x):
        return x * F.softplus(x).tanh()


class MemoryEfficientMish(nn.Module):
    # Mish activation memory-efficient
    class F(torch.autograd.Function):

        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
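            # d/dx [x * tanh(softplus(x))] = tanh(sp(x)) + x * sigmoid(x) * (1 - tanh^2(sp(x)))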
return grad_output * (fx + x * sx * (1 - fx * fx))
|
||||
|
||||
def forward(self, x):
|
||||
return self.F.apply(x)
|
||||
|
||||
|
||||
class FReLU(nn.Module):
|
||||
# FReLU activation https://arxiv.org/abs/2007.11824
|
||||
def __init__(self, c1, k=3): # ch_in, kernel
|
||||
super().__init__()
|
||||
self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
|
||||
self.bn = nn.BatchNorm2d(c1)
|
||||
|
||||
def forward(self, x):
|
||||
return torch.max(x, self.bn(self.conv(x)))
|
||||
|
||||
|
||||
class AconC(nn.Module):
|
||||
r""" ACON activation (activate or not)
|
||||
AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
|
||||
according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
|
||||
"""
|
||||
|
||||
def __init__(self, c1):
|
||||
super().__init__()
|
||||
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
|
||||
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
|
||||
self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
|
||||
|
||||
def forward(self, x):
|
||||
dpx = (self.p1 - self.p2) * x
|
||||
return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
|
||||
|
||||
|
||||
class MetaAconC(nn.Module):
|
||||
r""" ACON activation (activate or not)
|
||||
MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
|
||||
according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
|
||||
"""
|
||||
|
||||
def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
|
||||
super().__init__()
|
||||
c2 = max(r, c1 // r)
|
||||
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
|
||||
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
|
||||
self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
|
||||
self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
|
||||
# self.bn1 = nn.BatchNorm2d(c2)
|
||||
# self.bn2 = nn.BatchNorm2d(c1)
|
||||
|
||||
def forward(self, x):
|
||||
y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
|
||||
# batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
|
||||
# beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
|
||||
beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
|
||||
dpx = (self.p1 - self.p2) * x
|
||||
return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
|
|
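# Usage sketch (shapes assumed for illustration): each module above is a drop-in nn.Module activation, e.g.
#   act = MetaAconC(c1=64)               # 64-channel input
#   y = act(torch.randn(2, 64, 32, 32))  # output keeps the input shape (2, 64, 32, 32)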

@@ -0,0 +1,397 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Image augmentation functions
"""

import math
import random

import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF

from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa

IMAGENET_MEAN = 0.485, 0.456, 0.406  # RGB mean
IMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation


class Albumentations:
    # YOLOv5 Albumentations class (optional, only used if package is installed)
    def __init__(self, size=640):
        self.transform = None
        prefix = colorstr('albumentations: ')
        try:
            import albumentations as A
            check_version(A.__version__, '1.0.3', hard=True)  # version requirement

            T = [
                A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0),
                A.Blur(p=0.01),
                A.MedianBlur(p=0.01),
                A.ToGray(p=0.01),
                A.CLAHE(p=0.01),
                A.RandomBrightnessContrast(p=0.0),
                A.RandomGamma(p=0.0),
                A.ImageCompression(quality_lower=75, p=0.0)]  # transforms
            self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))

            LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
        except ImportError:  # package not installed, skip
            pass
        except Exception as e:
            LOGGER.info(f'{prefix}{e}')

    def __call__(self, im, labels, p=1.0):
        if self.transform and random.random() < p:
            new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0])  # transformed
            im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
        return im, labels
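# Usage sketch (label layout assumed: n x 5 array of [cls, x, y, w, h] in normalized YOLO format):
#   aug = Albumentations(size=640)
#   im, labels = aug(im, labels, p=1.0)  # silently a no-op if albumentations is not installed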
def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
    # Normalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std
    return TF.normalize(x, mean, std, inplace=inplace)


def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
    # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean
    for i in range(3):
        x[:, i] = x[:, i] * std[i] + mean[i]
    return x


def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
    # HSV color-space augmentation
    if hgain or sgain or vgain:
        r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
        hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
        dtype = im.dtype  # uint8

        x = np.arange(0, 256, dtype=r.dtype)
        lut_hue = ((x * r[0]) % 180).astype(dtype)
        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

        im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
        cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im)  # no return needed


def hist_equalize(im, clahe=True, bgr=False):
    # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255
    yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
    if clahe:
        c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        yuv[:, :, 0] = c.apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert YUV image to RGB


def replicate(im, labels):
    # Replicate labels
    h, w = im.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)
    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y
        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
        im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b]  # im4[ymin:ymax, xmin:xmax]
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)

    return im, labels
def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im, ratio, (dw, dh)
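# Worked example (numbers assumed): a 720x1280 frame letterboxed to 640 with auto=True, stride=32
# gives r = 0.5, new_unpad = (640, 360) and dh = 280 % 32 = 24, so 12 px of gray padding is added
# on top and bottom for a final 384x640 image:
#   im_out, ratio, (dw, dh) = letterbox(im, 640)  # ratio == (0.5, 0.5), (dw, dh) == (0.0, 12.0)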
def random_perspective(im,
                       targets=(),
                       segments=(),
                       degrees=10,
                       translate=.1,
                       scale=.1,
                       shear=10,
                       perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = im.shape[0] + border[0] * 2  # shape(h,w,c)
    width = im.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(im[:, :, ::-1])  # base
    # ax[1].imshow(im2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        use_segments = any(x.any() for x in segments) and len(segments) == n
        new = np.zeros((n, 4))
        if use_segments:  # warp segments
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine

                # clip
                new[i] = segment2box(xy, width, height)

        else:  # warp boxes
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine

            # create new boxes
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        targets = targets[i]
        targets[:, 1:5] = new[i]

    return im, targets
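# Note (sketch): since M = T @ S @ R @ P @ C acts on column vectors, C recenters the image first,
# so shear/rotation/perspective pivot about the image center before the final translation T;
# with all gains at 0 the product collapses to T @ C, which cancels exactly when border=(0, 0).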
def copy_paste(im, labels, segments, p=0.5):
    # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
    n = len(segments)
    if p and n:
        h, w, c = im.shape  # height, width, channels
        im_new = np.zeros(im.shape, np.uint8)
        for j in random.sample(range(n), k=round(p * n)):
            l, s = labels[j], segments[j]
            box = w - l[3], l[2], w - l[1], l[4]
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            if (ioa < 0.30).all():  # allow 30% obscuration of existing labels
                labels = np.concatenate((labels, [[l[0], *box]]), 0)
                segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
                cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)

        result = cv2.flip(im, 1)  # augment segments (flip left-right)
        i = cv2.flip(im_new, 1).astype(bool)
        im[i] = result[i]  # cv2.imwrite('debug.jpg', im)  # debug

    return im, labels, segments


def cutout(im, labels, p=0.5):
    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
    if random.random() < p:
        h, w = im.shape[:2]
        scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
        for s in scales:
            mask_h = random.randint(1, int(h * s))  # create random masks
            mask_w = random.randint(1, int(w * s))

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # apply random color mask
            im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

            # return unobscured labels
            if len(labels) and s > 0.03:
                box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
                ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h))  # intersection over area
                labels = labels[ioa < 0.60]  # remove >60% obscured labels

    return labels


def mixup(im, labels, im2, labels2):
    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
    im = (im * r + im2 * (1 - r)).astype(np.uint8)
    labels = np.concatenate((labels, labels2), 0)
    return im, labels
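# Note (sketch): Beta(32, 32) is sharply peaked at 0.5, so mixup blends the two frames in
# near-equal proportions while concatenating the labels of both, e.g.
#   im, labels = mixup(im, labels, im2, labels2)  # all boxes from both inputs are kept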
def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates
def classify_albumentations(
        augment=True,
        size=224,
        scale=(0.08, 1.0),
        ratio=(0.75, 1.0 / 0.75),  # 0.75, 1.33
        hflip=0.5,
        vflip=0.0,
        jitter=0.4,
        mean=IMAGENET_MEAN,
        std=IMAGENET_STD,
        auto_aug=False):
    # YOLOv5 classification Albumentations (optional, only used if package is installed)
    prefix = colorstr('albumentations: ')
    try:
        import albumentations as A
        from albumentations.pytorch import ToTensorV2
        check_version(A.__version__, '1.0.3', hard=True)  # version requirement
        if augment:  # Resize and crop
            T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]
            if auto_aug:
                # TODO: implement AugMix, AutoAug & RandAug in albumentation
                LOGGER.info(f'{prefix}auto augmentations are currently not supported')
            else:
                if hflip > 0:
                    T += [A.HorizontalFlip(p=hflip)]
                if vflip > 0:
                    T += [A.VerticalFlip(p=vflip)]
                if jitter > 0:
                    color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue
                    T += [A.ColorJitter(*color_jitter, 0)]
        else:  # Use fixed crop for eval set (reproducibility)
            T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
        T += [A.Normalize(mean=mean, std=std), ToTensorV2()]  # Normalize and convert to Tensor
        LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
        return A.Compose(T)

    except ImportError:  # package not installed, skip
        LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)')
    except Exception as e:
        LOGGER.info(f'{prefix}{e}')


def classify_transforms(size=224):
    # Transforms to apply if albumentations not installed
    assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'
    # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
    return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
class LetterBox:
    # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
    def __init__(self, size=(640, 640), auto=False, stride=32):
        super().__init__()
        self.h, self.w = (size, size) if isinstance(size, int) else size
        self.auto = auto  # pass max size integer, automatically solve for short side using stride
        self.stride = stride  # used with auto

    def __call__(self, im):  # im = np.array HWC
        imh, imw = im.shape[:2]
        r = min(self.h / imh, self.w / imw)  # ratio of new/old
        h, w = round(imh * r), round(imw * r)  # resized image
        # parenthesized so both branches yield an (hs, ws) pair (the unparenthesized original
        # bound the conditional expression to hs only when auto=True)
        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
        top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
        im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)
        im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
        return im_out


class CenterCrop:
    # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
    def __init__(self, size=640):
        super().__init__()
        self.h, self.w = (size, size) if isinstance(size, int) else size

    def __call__(self, im):  # im = np.array HWC
        imh, imw = im.shape[:2]
        m = min(imh, imw)  # min dimension
        top, left = (imh - m) // 2, (imw - m) // 2
        return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)


class ToTensor:
    # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
    def __init__(self, half=False):
        super().__init__()
        self.half = half

    def __call__(self, im):  # im = np.array HWC in BGR order
        im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC to CHW -> BGR to RGB -> contiguous
        im = torch.from_numpy(im)  # to torch
        im = im.half() if self.half else im.float()  # uint8 to fp16/32
        im /= 255.0  # 0-255 to 0.0-1.0
        return im

@@ -0,0 +1,169 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
AutoAnchor utils
"""

import random

import numpy as np
import torch
import yaml
from tqdm import tqdm

from utils import TryExcept
from utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr

PREFIX = colorstr('AutoAnchor: ')


def check_anchor_order(m):
    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
    a = m.anchors.prod(-1).mean(-1).view(-1)  # mean anchor area per output layer
    da = a[-1] - a[0]  # delta a
    ds = m.stride[-1] - m.stride[0]  # delta s
    if da and (da.sign() != ds.sign()):  # anchor areas and strides are in opposite order
        LOGGER.info(f'{PREFIX}Reversing anchor order')
        m.anchors[:] = m.anchors.flip(0)
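# Sketch (typical values assumed): YOLOv5 Detect() strides are (8, 16, 32) for P3/P4/P5, so mean
# anchor areas should grow in the same direction; if areas shrink while strides grow, the two
# deltas have opposite signs and the anchor rows are flipped to match.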
@TryExcept(f'{PREFIX}ERROR')
def check_anchors(dataset, model, thr=4.0, imgsz=640):
    # Check anchor fit to data, recompute if necessary
    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh

    def metric(k):  # compute metric
        r = wh[:, None] / k[None]
        x = torch.min(r, 1 / r).min(2)[0]  # ratio metric
        best = x.max(1)[0]  # best_x
        aat = (x > 1 / thr).float().sum(1).mean()  # anchors above threshold
        bpr = (best > 1 / thr).float().mean()  # best possible recall
        return bpr, aat

    stride = m.stride.to(m.anchors.device).view(-1, 1, 1)  # model strides
    anchors = m.anchors.clone() * stride  # current anchors
    bpr, aat = metric(anchors.cpu().view(-1, 2))
    s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). '
    if bpr > 0.98:  # threshold to recompute
        LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅')
    else:
        LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')
        na = m.anchors.numel() // 2  # number of anchors
        anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
        new_bpr = metric(anchors)[0]
        if new_bpr > bpr:  # replace anchors
            anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
            m.anchors[:] = anchors.clone().view_as(m.anchors)
            check_anchor_order(m)  # must be in pixel-space (not grid-space)
            m.anchors /= stride
            s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)'
        else:
            s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)'
        LOGGER.info(s)
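# Worked example (numbers assumed): for a 50x100 label and a 60x90 anchor, r = (50/60, 100/90),
# min(r, 1/r) = (0.83, 0.90), and the per-pair metric is its minimum 0.83; with thr=4.0 the match
# counts toward BPR because 0.83 > 1/4, i.e. neither side is more than 4x off.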
def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
    """ Creates kmeans-evolved anchors from training dataset

        Arguments:
            dataset: path to data.yaml, or a loaded dataset
            n: number of anchors
            img_size: image size used for training
            thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
            gen: generations to evolve anchors using genetic algorithm
            verbose: print all results

        Return:
            k: kmeans evolved anchors

        Usage:
            from utils.autoanchor import *; _ = kmean_anchors()
    """
    from scipy.cluster.vq import kmeans

    npr = np.random
    thr = 1 / thr

    def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
        x = torch.min(r, 1 / r).min(2)[0]  # ratio metric
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x

    def anchor_fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness

    def print_results(k, verbose=True):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
        s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \
            f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \
            f'past_thr={x[x > thr].mean():.3f}-mean: '
        for x in k:
            s += '%i,%i, ' % (round(x[0]), round(x[1]))
        if verbose:
            LOGGER.info(s[:-2])
        return k

    if isinstance(dataset, str):  # *.yaml file
        with open(dataset, errors='ignore') as f:
            data_dict = yaml.safe_load(f)  # model dict
        from utils.dataloaders import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)

    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh

    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size')
    wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32)  # filter > 2 pixels
    # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1

    # Kmeans init
    try:
        LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...')
        assert n <= len(wh)  # apply overdetermined constraint
        s = wh.std(0)  # sigmas for whitening
        k = kmeans(wh / s, n, iter=30)[0] * s  # points
        assert n == len(k)  # kmeans may return fewer points than requested if wh is insufficient or too similar
    except Exception:
        LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init')
        k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size  # random init
    wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0))
    k = print_results(k, verbose=False)

    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, anchor shape, mutation prob, sigma
    pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT)  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = anchor_fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
        pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
        if verbose:
            print_results(k, verbose)

    return print_results(k).astype(np.float32)

@@ -0,0 +1,72 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Auto-batch utils
"""

from copy import deepcopy

import numpy as np
import torch

from utils.general import LOGGER, colorstr
from utils.torch_utils import profile


def check_train_batch_size(model, imgsz=640, amp=True):
    # Check YOLOv5 training batch size
    with torch.cuda.amp.autocast(amp):
        return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size


def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):
    # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory
    # Usage:
    #     import torch
    #     from utils.autobatch import autobatch
    #     model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
    #     print(autobatch(model))

    # Check device
    prefix = colorstr('AutoBatch: ')
    LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
    device = next(model.parameters()).device  # get model device
    if device.type == 'cpu':
        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
        return batch_size
    if torch.backends.cudnn.benchmark:
        LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}')
        return batch_size

    # Inspect CUDA memory
    gb = 1 << 30  # bytes to GiB (1024 ** 3)
    d = str(device).upper()  # 'CUDA:0'
    properties = torch.cuda.get_device_properties(device)  # device properties
    t = properties.total_memory / gb  # GiB total
    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
    f = t - (r + a)  # GiB free
    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')

    # Profile batch sizes
    batch_sizes = [1, 2, 4, 8, 16]
    try:
        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
        results = profile(img, model, n=3, device=device)
    except Exception as e:
        LOGGER.warning(f'{prefix}{e}')
        return batch_size  # profiling failed, fall back to default (results would be undefined below)

    # Fit a solution
    y = [x[2] for x in results if x]  # memory [2]
    p = np.polyfit(batch_sizes[:len(y)], y, deg=1)  # first degree polynomial fit
    b = int((f * fraction - p[1]) / p[0])  # y intercept (optimal batch size)
    if None in results:  # some sizes failed
        i = results.index(None)  # first fail index
        if b >= batch_sizes[i]:  # y intercept above failure point
            b = batch_sizes[max(i - 1, 0)]  # select prior safe point
    if b < 1 or b > 1024:  # b outside of safe range
        b = batch_size
        LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')

    fraction = (np.polyval(p, b) + r + a) / t  # actual fraction predicted
    LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
    return b
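# Worked example (numbers assumed): if profiling measures (0.5, 0.9, 1.7, 3.3, 6.5) GiB for batch
# sizes (1, 2, 4, 8, 16), the degree-1 fit gives p ≈ (0.4, 0.1) (GiB per image, GiB offset), so with
# f = 10 GiB free and fraction = 0.8 the estimate is b = int((10 * 0.8 - 0.1) / 0.4) = 19.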

@@ -0,0 +1,26 @@
# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
# This script will run on every instance restart, not only on first start
# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---

Content-Type: multipart/mixed; boundary="//"
MIME-Version: 1.0

--//
Content-Type: text/cloud-config; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="cloud-config.txt"

#cloud-config
cloud_final_modules:
- [scripts-user, always]

--//
Content-Type: text/x-shellscript; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="userdata.txt"

#!/bin/bash
# --- paste contents of userdata.sh here ---
--//

@@ -0,0 +1,40 @@
# Resume all interrupted trainings in yolov5/ dir including DDP trainings
# Usage: $ python utils/aws/resume.py

import os
import sys
from pathlib import Path

import torch
import yaml

FILE = Path(__file__).resolve()
ROOT = FILE.parents[2]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH

port = 0  # --master_port
path = Path('').resolve()
for last in path.rglob('*/**/last.pt'):
    ckpt = torch.load(last)
    if ckpt['optimizer'] is None:
        continue

    # Load opt.yaml
    with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
        opt = yaml.safe_load(f)

    # Get device count
    d = opt['device'].split(',')  # devices
    nd = len(d)  # number of devices
    ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel

    if ddp:  # multi-GPU
        port += 1
        cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
    else:  # single-GPU
        cmd = f'python train.py --resume {last}'

    cmd += ' > /dev/null 2>&1 &'  # redirect output to /dev/null and run in the background
    print(cmd)
    os.system(cmd)

@@ -0,0 +1,27 @@
#!/bin/bash
# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
# This script will run only once on first instance start (for a re-start script see mime.sh)
# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
# Use >300 GB SSD

cd /home/ubuntu
if [ ! -d yolov5 ]; then
  echo "Running first-time script."  # install dependencies, download COCO, pull Docker
  git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
  cd yolov5
  bash data/scripts/get_coco.sh && echo "COCO done." &
  sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
  python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
  wait && echo "All tasks done."  # finish background tasks
else
  echo "Running re-start script."  # resume interrupted runs
  i=0
  list=$(sudo docker ps -qa)  # container list i.e. $'one\ntwo\nthree\nfour'
  while IFS= read -r id; do
    ((i++))
    echo "restarting container $i: $id"
    sudo docker start $id
    # sudo docker exec -it $id python train.py --resume  # single-GPU
    sudo docker exec -d $id python utils/aws/resume.py  # multi-scenario
  done <<<"$list"
fi

@@ -0,0 +1,76 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Callback utils
"""

import threading


class Callbacks:
    """
    Handles all registered callbacks for YOLOv5 Hooks
    """

    def __init__(self):
        # Define the available callbacks
        self._callbacks = {
            'on_pretrain_routine_start': [],
            'on_pretrain_routine_end': [],
            'on_train_start': [],
            'on_train_epoch_start': [],
            'on_train_batch_start': [],
            'optimizer_step': [],
            'on_before_zero_grad': [],
            'on_train_batch_end': [],
            'on_train_epoch_end': [],
            'on_val_start': [],
            'on_val_batch_start': [],
            'on_val_image_end': [],
            'on_val_batch_end': [],
            'on_val_end': [],
            'on_fit_epoch_end': [],  # fit = train + val
            'on_model_save': [],
            'on_train_end': [],
            'on_params_update': [],
            'teardown': [],}
        self.stop_training = False  # set True to interrupt training

    def register_action(self, hook, name='', callback=None):
        """
        Register a new action to a callback hook

        Args:
            hook: The callback hook name to register the action to
            name: The name of the action for later reference
            callback: The callback to fire
        """
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        assert callable(callback), f"callback '{callback}' is not callable"
        self._callbacks[hook].append({'name': name, 'callback': callback})

    def get_registered_actions(self, hook=None):
        """
        Returns all the registered actions by callback hook

        Args:
            hook: The name of the hook to check, defaults to all
        """
        return self._callbacks[hook] if hook else self._callbacks

    def run(self, hook, *args, thread=False, **kwargs):
        """
        Loop through the registered actions and fire all callbacks on the main thread,
        or in a daemon thread if thread=True

        Args:
            hook: The name of the hook to fire
            args: Arguments to receive from YOLOv5
            thread: (boolean) Run callbacks in daemon thread
            kwargs: Keyword Arguments to receive from YOLOv5
        """

        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        for logger in self._callbacks[hook]:
            if thread:
                threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()
            else:
                logger['callback'](*args, **kwargs)
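# Usage sketch (action name assumed for illustration):
#   callbacks = Callbacks()
#   callbacks.register_action('on_train_end', name='notify', callback=lambda *a, **kw: print('done'))
#   callbacks.run('on_train_end')  # fires every action registered on that hook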
File diff suppressed because it is too large

@@ -0,0 +1,75 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference

# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
# FROM docker.io/pytorch/pytorch:latest
FROM pytorch/pytorch:latest

# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

# Install linux packages
ENV DEBIAN_FRONTEND noninteractive
RUN apt update
RUN TZ=Etc/UTC apt install -y tzdata
RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
# RUN alias python=python3

# Security updates
# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796
RUN apt upgrade --no-install-recommends -y openssl

# Create working directory
RUN rm -rf /usr/src/app && mkdir -p /usr/src/app
WORKDIR /usr/src/app

# Copy contents
# COPY . /usr/src/app (issues as not a .git directory)
RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app

# Install pip packages
COPY requirements.txt .
RUN python3 -m pip install --upgrade pip wheel
RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \
    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3'
    # tensorflow tensorflowjs \

# Set environment variables
ENV OMP_NUM_THREADS=1

# Cleanup
ENV DEBIAN_FRONTEND teletype


# Usage Examples -------------------------------------------------------------------------------------------------------

# Build and Push
# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t

# Pull and Run
# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t

# Pull and Run with local directory access
# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t

# Kill all
# sudo docker kill $(sudo docker ps -q)

# Kill all image-based
# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)

# DockerHub tag update
# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew

# Clean up
# sudo docker system prune -a --volumes

# Update Ubuntu drivers
# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/

# DDP test
# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3

# GCP VM from Image
# docker.io/ultralytics/yolov5:latest

@@ -0,0 +1,41 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi

# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
FROM arm64v8/ubuntu:rolling

# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

# Install linux packages
ENV DEBIAN_FRONTEND noninteractive
RUN apt update
RUN TZ=Etc/UTC apt install -y tzdata
RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev
# RUN alias python=python3

# Install pip packages
COPY requirements.txt .
RUN python3 -m pip install --upgrade pip wheel
RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
    coremltools onnx onnxruntime
    # tensorflow-aarch64 tensorflowjs \

# Create working directory
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app

# Copy contents
# COPY . /usr/src/app (issues as not a .git directory)
RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
ENV DEBIAN_FRONTEND teletype


# Usage Examples -------------------------------------------------------------------------------------------------------

# Build and Push
# t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t

# Pull and Run
# t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t

@@ -0,0 +1,42 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments

# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
FROM ubuntu:rolling

# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

# Install linux packages
ENV DEBIAN_FRONTEND noninteractive
RUN apt update
RUN TZ=Etc/UTC apt install -y tzdata
RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
# RUN alias python=python3

# Install pip packages
COPY requirements.txt .
RUN python3 -m pip install --upgrade pip wheel
RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \
    # tensorflow tensorflowjs \
    --extra-index-url https://download.pytorch.org/whl/cpu

# Create working directory
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app

# Copy contents
# COPY . /usr/src/app (issues as not a .git directory)
RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
ENV DEBIAN_FRONTEND teletype


# Usage Examples -------------------------------------------------------------------------------------------------------

# Build and Push
# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . && sudo docker push $t

# Pull and Run
# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t

@@ -0,0 +1,128 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Download utils
"""

import logging
import os
import subprocess
import urllib.parse
import urllib.request
from pathlib import Path

import requests
import torch


def is_url(url, check=True):
    # Check if string is URL and check if URL exists
    try:
        url = str(url)
        result = urllib.parse.urlparse(url)
        assert all([result.scheme, result.netloc])  # check if is url
        return (urllib.request.urlopen(url).getcode() == 200) if check else True  # check if exists online
    except (AssertionError, urllib.request.HTTPError):
        return False


def gsutil_getsize(url=''):
    # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
    output = subprocess.check_output(['gsutil', 'du', url], encoding='utf-8')  # shell=True dropped: it would ignore the list args
    if output:
        return int(output.split()[0])
    return 0


def url_getsize(url='https://ultralytics.com/images/bus.jpg'):
    # Return downloadable file size in bytes
    response = requests.head(url, allow_redirects=True)
    return int(response.headers.get('content-length', -1))


def curl_download(url, filename, *, silent: bool = False) -> bool:
    """
    Download a file from a url to a filename using curl.
    """
    silent_option = 'sS' if silent else ''  # silent
    proc = subprocess.run([
        'curl',
        '-#',
        f'-{silent_option}L',
        url,
        '--output',
        filename,
        '--retry',
        '9',
        '-C',
        '-',])
    return proc.returncode == 0


def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
    # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes
    from utils.general import LOGGER

    file = Path(file)
    assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}"
    try:  # url1
        LOGGER.info(f'Downloading {url} to {file}...')
        torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO)
        assert file.exists() and file.stat().st_size > min_bytes, assert_msg  # check
    except Exception as e:  # url2
        if file.exists():
            file.unlink()  # remove partial downloads
        LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...')
        # curl download, retry and resume on fail
        curl_download(url2 or url, file)
    finally:
        if not file.exists() or file.stat().st_size < min_bytes:  # check
            if file.exists():
                file.unlink()  # remove partial downloads
            LOGGER.info(f'ERROR: {assert_msg}\n{error_msg}')
        LOGGER.info('')


def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'):
    # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v7.0', etc.
    from utils.general import LOGGER

    def github_assets(repository, version='latest'):
        # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...])
        if version != 'latest':
            version = f'tags/{version}'  # i.e. tags/v7.0
        response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json()  # github api
        return response['tag_name'], [x['name'] for x in response['assets']]  # tag, assets

    file = Path(str(file).strip().replace("'", ''))
    if not file.exists():
        # URL specified
        name = Path(urllib.parse.unquote(str(file))).name  # decode '%2F' to '/' etc.
        if str(file).startswith(('http:/', 'https:/')):  # download
            url = str(file).replace(':/', '://')  # Pathlib turns :// -> :/
            file = name.split('?')[0]  # parse authentication https://url.com/file.txt?auth...
            if Path(file).is_file():
                LOGGER.info(f'Found {url} locally at {file}')  # file already exists
            else:
                safe_download(file=file, url=url, min_bytes=1E5)
            return file

        # GitHub assets
        assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')]  # default
        try:
            tag, assets = github_assets(repo, release)
        except Exception:
            try:
                tag, assets = github_assets(repo)  # latest release
            except Exception:
                try:
                    tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
                except Exception:
                    tag = release

        if name in assets:
            file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
            safe_download(file,
                          url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
                          min_bytes=1E5,
                          error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag}')

    return str(file)
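# Usage sketch (asset name assumed): attempt_download('yolov5s.pt') returns the local path, fetching
# the matching GitHub release asset only if the file is not already on disk.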

@@ -0,0 +1,73 @@
# Flask REST API

[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are
commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API
created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).

## Requirements

[Flask](https://palletsprojects.com/p/flask/) is required. Install with:

```shell
$ pip install Flask
```

## Run

After Flask installation run:

```shell
$ python3 restapi.py --port 5000
```

Then use [curl](https://curl.se/) to perform a request:

```shell
$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
```

The model inference results are returned as a JSON response:

```json
[
  {
    "class": 0,
    "confidence": 0.8900438547,
    "height": 0.9318675399,
    "name": "person",
    "width": 0.3264600933,
    "xcenter": 0.7438579798,
    "ycenter": 0.5207948685
  },
  {
    "class": 0,
    "confidence": 0.8440024257,
    "height": 0.7155083418,
    "name": "person",
    "width": 0.6546785235,
    "xcenter": 0.427829951,
    "ycenter": 0.6334488392
  },
  {
    "class": 27,
    "confidence": 0.3771208823,
    "height": 0.3902671337,
    "name": "tie",
    "width": 0.0696444362,
    "xcenter": 0.3675483763,
    "ycenter": 0.7991207838
  },
  {
    "class": 27,
    "confidence": 0.3527112305,
    "height": 0.1540903747,
    "name": "tie",
    "width": 0.0336618312,
    "xcenter": 0.7814827561,
    "ycenter": 0.5065554976
  }
]
```

An example Python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given
in `example_request.py`.

@@ -0,0 +1,19 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Perform test request
"""

import pprint

import requests

DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s'
IMAGE = 'zidane.jpg'

# Read image
with open(IMAGE, 'rb') as f:
    image_data = f.read()

response = requests.post(DETECTION_URL, files={'image': image_data}).json()

pprint.pprint(response)

@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Run a Flask REST API exposing one or more YOLOv5s models
"""

import argparse
import io

import torch
from flask import Flask, request
from PIL import Image

app = Flask(__name__)
models = {}

DETECTION_URL = '/v1/object-detection/<model>'


@app.route(DETECTION_URL, methods=['POST'])
def predict(model):
    if request.method != 'POST':
        return

    if request.files.get('image'):
        # Method 1
        # with request.files["image"] as f:
        #     im = Image.open(io.BytesIO(f.read()))

        # Method 2
        im_file = request.files['image']
        im_bytes = im_file.read()
        im = Image.open(io.BytesIO(im_bytes))

        if model in models:
            results = models[model](im, size=640)  # reduce size=320 for faster inference
            return results.pandas().xyxy[0].to_json(orient='records')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Flask API exposing YOLOv5 model')
    parser.add_argument('--port', default=5000, type=int, help='port number')
    parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s')
    opt = parser.parse_args()

    for m in opt.model:
        models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True)

    app.run(host='0.0.0.0', port=opt.port)  # debug=True causes Restarting with stat
File diff suppressed because it is too large

@@ -0,0 +1,25 @@
FROM gcr.io/google-appengine/python

# Create a virtualenv for dependencies. This isolates these packages from
# system-level packages.
# Use -p python3 or -p python3.7 to select python version. Default is version 2.
RUN virtualenv /env -p python3

# Setting these environment variables is the same as running
# source /env/bin/activate.
ENV VIRTUAL_ENV /env
ENV PATH /env/bin:$PATH

RUN apt-get update && apt-get install -y python-opencv

# Copy the application's requirements.txt and run pip to install all
# dependencies into the virtualenv.
ADD requirements.txt /app/requirements.txt
RUN pip install -r /app/requirements.txt

# Add the application source code.
ADD . /app

# Run a WSGI server to serve the application. gunicorn must be declared as
# a dependency in requirements.txt.
CMD gunicorn -b :$PORT main:app
@ -0,0 +1,401 @@
|
|||
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Logging utils
"""

import os
import warnings
from pathlib import Path

import pkg_resources as pkg
import torch
from torch.utils.tensorboard import SummaryWriter

from utils.general import LOGGER, colorstr, cv2
from utils.loggers.clearml.clearml_utils import ClearmlLogger
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_labels, plot_results
from utils.torch_utils import de_parallel

LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet')  # *.csv, TensorBoard, Weights & Biases, ClearML
RANK = int(os.getenv('RANK', -1))

try:
    import wandb

    assert hasattr(wandb, '__version__')  # verify package import not local dir
    if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}:
        try:
            wandb_login_success = wandb.login(timeout=30)
        except wandb.errors.UsageError:  # known non-TTY terminal issue
            wandb_login_success = False
        if not wandb_login_success:
            wandb = None
except (ImportError, AssertionError):
    wandb = None

try:
    import clearml

    assert hasattr(clearml, '__version__')  # verify package import not local dir
except (ImportError, AssertionError):
    clearml = None

try:
    if RANK not in [0, -1]:
        comet_ml = None
    else:
        import comet_ml

        assert hasattr(comet_ml, '__version__')  # verify package import not local dir
        from utils.loggers.comet import CometLogger

except (ModuleNotFoundError, ImportError, AssertionError):
    comet_ml = None


class Loggers():
    # YOLOv5 Loggers class
    def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
        self.save_dir = save_dir
        self.weights = weights
        self.opt = opt
        self.hyp = hyp
        self.plots = not opt.noplots  # plot results
        self.logger = logger  # for printing results to console
        self.include = include
        self.keys = [
            'train/box_loss',
            'train/obj_loss',
            'train/cls_loss',  # train loss
            'metrics/precision',
            'metrics/recall',
            'metrics/mAP_0.5',
            'metrics/mAP_0.5:0.95',  # metrics
            'val/box_loss',
            'val/obj_loss',
            'val/cls_loss',  # val loss
            'x/lr0',
            'x/lr1',
            'x/lr2']  # params
        self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95']
        for k in LOGGERS:
            setattr(self, k, None)  # init empty logger dictionary
        self.csv = True  # always log to csv

        # Messages
        if not clearml:
            prefix = colorstr('ClearML: ')
            s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML"
            self.logger.info(s)
        if not comet_ml:
            prefix = colorstr('Comet: ')
            s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet"
            self.logger.info(s)
        # TensorBoard
        s = self.save_dir
        if 'tb' in self.include and not self.opt.evolve:
            prefix = colorstr('TensorBoard: ')
            self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
            self.tb = SummaryWriter(str(s))

        # W&B
        if wandb and 'wandb' in self.include:
            self.opt.hyp = self.hyp  # add hyperparameters
            self.wandb = WandbLogger(self.opt)
        else:
            self.wandb = None

        # ClearML
        if clearml and 'clearml' in self.include:
            try:
                self.clearml = ClearmlLogger(self.opt, self.hyp)
            except Exception:
                self.clearml = None
                prefix = colorstr('ClearML: ')
                LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.'
                               f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme')

        else:
            self.clearml = None

        # Comet
        if comet_ml and 'comet' in self.include:
            if isinstance(self.opt.resume, str) and self.opt.resume.startswith('comet://'):
                run_id = self.opt.resume.split('/')[-1]
                self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id)

            else:
                self.comet_logger = CometLogger(self.opt, self.hyp)

        else:
            self.comet_logger = None

    @property
    def remote_dataset(self):
        # Get data_dict if custom dataset artifact link is provided
        data_dict = None
        if self.clearml:
            data_dict = self.clearml.data_dict
        if self.wandb:
            data_dict = self.wandb.data_dict
        if self.comet_logger:
            data_dict = self.comet_logger.data_dict

        return data_dict

    def on_train_start(self):
        if self.comet_logger:
            self.comet_logger.on_train_start()

    def on_pretrain_routine_start(self):
        if self.comet_logger:
            self.comet_logger.on_pretrain_routine_start()

    def on_pretrain_routine_end(self, labels, names):
        # Callback runs on pre-train routine end
        if self.plots:
            plot_labels(labels, names, self.save_dir)
            paths = self.save_dir.glob('*labels*.jpg')  # training labels
            if self.wandb:
                self.wandb.log({'Labels': [wandb.Image(str(x), caption=x.name) for x in paths]})
            # if self.clearml:
            #    pass  # ClearML saves these images automatically using hooks
            if self.comet_logger:
                self.comet_logger.on_pretrain_routine_end(paths)

    def on_train_batch_end(self, model, ni, imgs, targets, paths, vals):
        log_dict = dict(zip(self.keys[:3], vals))
        # Callback runs on train batch end
        # ni: number integrated batches (since train start)
        if self.plots:
            if ni < 3:
                f = self.save_dir / f'train_batch{ni}.jpg'  # filename
                plot_images(imgs, targets, paths, f)
                if ni == 0 and self.tb and not self.opt.sync_bn:
                    log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz))
            if ni == 10 and (self.wandb or self.clearml):
                files = sorted(self.save_dir.glob('train*.jpg'))
                if self.wandb:
                    self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
                if self.clearml:
                    self.clearml.log_debug_samples(files, title='Mosaics')

        if self.comet_logger:
            self.comet_logger.on_train_batch_end(log_dict, step=ni)

    def on_train_epoch_end(self, epoch):
        # Callback runs on train epoch end
        if self.wandb:
            self.wandb.current_epoch = epoch + 1

        if self.comet_logger:
            self.comet_logger.on_train_epoch_end(epoch)

    def on_val_start(self):
        if self.comet_logger:
            self.comet_logger.on_val_start()

    def on_val_image_end(self, pred, predn, path, names, im):
        # Callback runs on val image end
        if self.wandb:
            self.wandb.val_one_image(pred, predn, path, names, im)
        if self.clearml:
            self.clearml.log_image_with_boxes(path, pred, names, im)

    def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out):
        if self.comet_logger:
            self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out)

    def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
        # Callback runs on val end
        if self.wandb or self.clearml:
            files = sorted(self.save_dir.glob('val*.jpg'))
            if self.wandb:
                self.wandb.log({'Validation': [wandb.Image(str(f), caption=f.name) for f in files]})
            if self.clearml:
                self.clearml.log_debug_samples(files, title='Validation')

        if self.comet_logger:
            self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)

    def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
        # Callback runs at the end of each fit (train+val) epoch
        x = dict(zip(self.keys, vals))
        if self.csv:
            file = self.save_dir / 'results.csv'
            n = len(x) + 1  # number of cols
            s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n')  # add header
            with open(file, 'a') as f:
                f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')

        if self.tb:
            for k, v in x.items():
                self.tb.add_scalar(k, v, epoch)
        elif self.clearml:  # log to ClearML if TensorBoard not used
            for k, v in x.items():
                title, series = k.split('/')
                self.clearml.task.get_logger().report_scalar(title, series, v, epoch)

        if self.wandb:
            if best_fitness == fi:
                best_results = [epoch] + vals[3:7]
                for i, name in enumerate(self.best_keys):
                    self.wandb.wandb_run.summary[name] = best_results[i]  # log best results in the summary
            self.wandb.log(x)
            self.wandb.end_epoch()

        if self.clearml:
            self.clearml.current_epoch_logged_images = set()  # reset epoch image limit
            self.clearml.current_epoch += 1

        if self.comet_logger:
            self.comet_logger.on_fit_epoch_end(x, epoch=epoch)

    def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
        # Callback runs on model save event
        if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1:
            if self.wandb:
                self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
            if self.clearml:
                self.clearml.task.update_output_model(model_path=str(last),
                                                      model_name='Latest Model',
                                                      auto_delete_file=False)

        if self.comet_logger:
            self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi)

    def on_train_end(self, last, best, epoch, results):
        # Callback runs on training end, i.e. saving best model
        if self.plots:
            plot_results(file=self.save_dir / 'results.csv')  # save results.png
        files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
        files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()]  # filter
        self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}")

        if self.tb and not self.clearml:  # These images are already captured by ClearML by now, we don't want doubles
            for f in files:
                self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')

        if self.wandb:
            self.wandb.log(dict(zip(self.keys[3:10], results)))
            self.wandb.log({'Results': [wandb.Image(str(f), caption=f.name) for f in files]})
            # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
            if not self.opt.evolve:
                wandb.log_artifact(str(best if best.exists() else last),
                                   type='model',
                                   name=f'run_{self.wandb.wandb_run.id}_model',
                                   aliases=['latest', 'best', 'stripped'])
            self.wandb.finish_run()

        if self.clearml and not self.opt.evolve:
            self.clearml.task.update_output_model(model_path=str(best if best.exists() else last),
                                                  name='Best Model',
                                                  auto_delete_file=False)

        if self.comet_logger:
            final_results = dict(zip(self.keys[3:10], results))
            self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results)

    def on_params_update(self, params: dict):
        # Update hyperparams or configs of the experiment
        if self.wandb:
            self.wandb.wandb_run.config.update(params, allow_val_change=True)
        if self.comet_logger:
            self.comet_logger.on_params_update(params)


class GenericLogger:
    """
    YOLOv5 General purpose logger for non-task specific logging
    Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...)
    Arguments
        opt:             Run arguments
        console_logger:  Console logger
        include:         loggers to include
    """

    def __init__(self, opt, console_logger, include=('tb', 'wandb')):
        # init default loggers
        self.save_dir = Path(opt.save_dir)
        self.include = include
        self.console_logger = console_logger
        self.csv = self.save_dir / 'results.csv'  # CSV logger
        if 'tb' in self.include:
            prefix = colorstr('TensorBoard: ')
            self.console_logger.info(
                f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/")
            self.tb = SummaryWriter(str(self.save_dir))

        if wandb and 'wandb' in self.include:
            self.wandb = wandb.init(project=web_project_name(str(opt.project)),
                                    name=None if opt.name == 'exp' else opt.name,
                                    config=opt)
        else:
            self.wandb = None

    def log_metrics(self, metrics, epoch):
        # Log metrics dictionary to all loggers
        if self.csv:
            keys, vals = list(metrics.keys()), list(metrics.values())
            n = len(metrics) + 1  # number of cols
            s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n')  # header
            with open(self.csv, 'a') as f:
                f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')

        if self.tb:
            for k, v in metrics.items():
                self.tb.add_scalar(k, v, epoch)

        if self.wandb:
            self.wandb.log(metrics, step=epoch)

    def log_images(self, files, name='Images', epoch=0):
        # Log images to all loggers
        files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])]  # to Path
        files = [f for f in files if f.exists()]  # filter by exists

        if self.tb:
            for f in files:
                self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')

        if self.wandb:
            self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch)

    def log_graph(self, model, imgsz=(640, 640)):
        # Log model graph to all loggers
        if self.tb:
            log_tensorboard_graph(self.tb, model, imgsz)

    def log_model(self, model_path, epoch=0, metadata={}):
        # Log model to all loggers
        if self.wandb:
            art = wandb.Artifact(name=f'run_{wandb.run.id}_model', type='model', metadata=metadata)
            art.add_file(str(model_path))
            wandb.log_artifact(art)

    def update_params(self, params):
        # Update the parameters logged
        if self.wandb:
            wandb.run.config.update(params, allow_val_change=True)


def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
    # Log model graph to TensorBoard
    try:
        p = next(model.parameters())  # for device, type
        imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz  # expand
        im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p)  # input image (WARNING: must be zeros, not empty)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress jit trace warning
            tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), [])
    except Exception as e:
        LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}')


def web_project_name(project):
    # Convert local project name to web project name
    if not project.startswith('runs/train'):
        return project
    suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else ''
    return f'YOLOv5{suffix}'
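
# Illustrative usage sketch (not part of the upstream file): GenericLogger can be
# driven outside train.py; the attribute names below (save_dir, project, name)
# are the ones __init__ actually reads from `opt`:
#   from types import SimpleNamespace
#   opt = SimpleNamespace(save_dir='runs/demo/exp', project='runs/demo', name='exp')
#   glogger = GenericLogger(opt, console_logger=LOGGER, include=('tb',))
#   glogger.log_metrics({'metrics/accuracy': 0.9}, epoch=0)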
@@ -0,0 +1,237 @@
# ClearML Integration

<img align="center" src="https://github.com/thepycoder/clearml_screenshots/raw/main/logos_dark.png#gh-light-mode-only" alt="Clear|ML"><img align="center" src="https://github.com/thepycoder/clearml_screenshots/raw/main/logos_light.png#gh-dark-mode-only" alt="Clear|ML">

## About ClearML

[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️.

🔨 Track every YOLOv5 training run in the <b>experiment manager</b>

🔧 Version and easily access your custom training data with the integrated ClearML <b>Data Versioning Tool</b>

🔦 <b>Remotely train and monitor</b> your YOLOv5 training runs using ClearML Agent

🔬 Get the very best mAP using ClearML <b>Hyperparameter Optimization</b>

🔭 Turn your newly trained <b>YOLOv5 model into an API</b> with just a few commands using ClearML Serving

<br />
And so much more. It's up to you how many of these tools you want to use: you can stick to the experiment manager, or chain them all together into an impressive pipeline!
<br />
<br />



<br />
<br />

## 🦾 Setting Things Up

To keep track of your experiments and/or data, ClearML needs to communicate with a server. You have two options to get one:

Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml) or set up your own server ([see here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server)). Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go!

1. Install the `clearml` python package:

   ```bash
   pip install clearml
   ```

1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions:

   ```bash
   clearml-init
   ```

That's it! You're done 😎

<br />

## 🚀 Training YOLOv5 With ClearML

To enable ClearML experiment tracking, simply install the ClearML pip package.

```bash
pip install "clearml>=1.2.0"
```

This will enable integration with the YOLOv5 training script. Every training run from now on will be captured and stored by the ClearML experiment manager.

If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script; by default the project will be called `YOLOv5` and the task `Training`.
PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name!

```bash
python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
```

or with custom project and task name:

```bash
python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
```

This will capture:

- Source code + uncommitted changes
- Installed packages
- (Hyper)parameters
- Model files (use `--save-period n` to save a checkpoint every n epochs)
- Console output
- Scalars (mAP_0.5, mAP_0.5:0.95, precision, recall, losses, learning rates, ...)
- General info such as machine details, runtime, creation date etc.
- All produced plots such as label correlogram and confusion matrix
- Images with bounding boxes per epoch
- Mosaic per epoch
- Validation images per epoch
- ...

That's a lot, right? 🤯
Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (such as mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them!

There's even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works!

<br />

## 🔗 Dataset Version Management

Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment!



### Prepare Your Dataset

The YOLOv5 repository supports a number of different datasets by using yaml files containing their information. By default, datasets are downloaded to the `../datasets` folder relative to the repository root folder. So if you downloaded the `coco128` dataset using the link in the yaml or with the scripts provided by yolov5, you get this folder structure:

```
..
|_ yolov5
|_ datasets
    |_ coco128
        |_ images
        |_ labels
        |_ LICENSE
        |_ README.txt
```

But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure.

Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml file contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course; just follow the structure of the example yamls.

Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `names` (a minimal sketch follows the folder tree below).

```
..
|_ yolov5
|_ datasets
    |_ coco128
        |_ images
        |_ labels
        |_ coco128.yaml  # <---- HERE!
        |_ LICENSE
        |_ README.txt
```
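
A minimal sketch of what such a file could look like (illustrative values only; COCO128 really defines 80 classes, and the actual `coco128.yaml` ships with YOLOv5):

```
# contents of coco128.yaml (sketch)
path: ../datasets/coco128  # dataset root
train: images/train2017  # train images, relative to path
val: images/train2017  # val images, relative to path
test:  # optional test images
nc: 3  # number of classes
names: ['person', 'bicycle', 'car']  # one name per class index
```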

### Upload Your Dataset

To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command:

```bash
cd coco128
clearml-data sync --project YOLOv5 --name coco128 --folder .
```

The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other:

```bash
# Optionally add --parent <parent_dataset_id> if you want to base
# this version on another dataset version, so no duplicate files are uploaded!
clearml-data create --name coco128 --project YOLOv5
clearml-data add --files .
clearml-data close
```

### Run Training Using A ClearML Dataset

Now that you have a ClearML dataset, you can very simply use it to train custom YOLOv5 🚀 models!

```bash
python train.py --img 640 --batch 16 --epochs 3 --data clearml://<your_dataset_id> --weights yolov5s.pt --cache
```

<br />

## 👀 Hyperparameter Optimization

Now that we have our experiments and data versioned, it's time to take a look at what we can build on top!

Using the code information, installed packages and environment details, the experiment itself is now **completely reproducible**. In fact, ClearML allows you to clone an experiment and even change its parameters. We can then just rerun it with these new parameters automatically; this is basically what HPO does!

To **run hyperparameter optimization locally**, we've included a pre-made script for you. Just make sure a training task has been run at least once, so it is in the ClearML experiment manager; we will essentially clone it and change its hyperparameters.

You'll need to fill in the ID of this `template task` in the script found at `utils/loggers/clearml/hpo.py` and then just run it :) You can change `task.execute_locally()` to `task.execute()` to put it in a ClearML queue and have a remote agent work on it instead.

```bash
# To use optuna, install it first; otherwise you can change the optimizer to just be RandomSearch
pip install optuna
python utils/loggers/clearml/hpo.py
```



## 🤯 Remote Execution (advanced)

Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs.
This is where the ClearML Agent comes into play. Check out what the agent can do here:

- [YouTube video](https://youtu.be/MX3BrXnaULs)
- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent)

In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager.

You can turn any machine (a cloud VM, a local GPU machine, your own laptop ...) into a ClearML agent by simply running:

```bash
clearml-agent daemon --queue <queues_to_listen_to> [--docker]
```

### Cloning, Editing And Enqueuing

With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too!

🪄 Clone the experiment by right-clicking it

🎯 Edit the hyperparameters to what you wish them to be

⏳ Enqueue the task to any of the queues by right-clicking it



### Executing A Task Remotely

Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()`, and on execution it will be put into a queue for the agent to start working on!

To run the YOLOv5 training script remotely, all you have to do is add this line to the train.py script after the ClearML logger has been instantiated:

```python
# ...
# Loggers
data_dict = None
if RANK in {-1, 0}:
    loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
    if loggers.clearml:
        loggers.clearml.task.execute_remotely(queue="my_queue")  # <------ ADD THIS LINE
        # data_dict is either None if the user did not choose a ClearML dataset, or is filled in by ClearML
        data_dict = loggers.clearml.data_dict
# ...
```

When running the training script after this change, Python will run the script up until that line, after which it will package the code and send it to the queue instead!

### Autoscaling workers

ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines, and you stop paying!

Check out the autoscalers getting started video below.

[](https://youtu.be/j4XVMAaUt3E)
@@ -0,0 +1,164 @@
"""Main Logger class for ClearML experiment tracking."""
|
||||
import glob
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
import yaml
|
||||
|
||||
from utils.plots import Annotator, colors
|
||||
|
||||
try:
|
||||
import clearml
|
||||
from clearml import Dataset, Task
|
||||
|
||||
assert hasattr(clearml, '__version__') # verify package import not local dir
|
||||
except (ImportError, AssertionError):
|
||||
clearml = None
|
||||
|
||||
|
||||
def construct_dataset(clearml_info_string):
|
||||
"""Load in a clearml dataset and fill the internal data_dict with its contents.
|
||||
"""
|
||||
dataset_id = clearml_info_string.replace('clearml://', '')
|
||||
dataset = Dataset.get(dataset_id=dataset_id)
|
||||
dataset_root_path = Path(dataset.get_local_copy())
|
||||
|
||||
# We'll search for the yaml file definition in the dataset
|
||||
yaml_filenames = list(glob.glob(str(dataset_root_path / '*.yaml')) + glob.glob(str(dataset_root_path / '*.yml')))
|
||||
if len(yaml_filenames) > 1:
|
||||
raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains '
|
||||
'the dataset definition this way.')
|
||||
elif len(yaml_filenames) == 0:
|
||||
raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file '
|
||||
'inside the dataset root path.')
|
||||
with open(yaml_filenames[0]) as f:
|
||||
dataset_definition = yaml.safe_load(f)
|
||||
|
||||
assert set(dataset_definition.keys()).issuperset(
|
||||
{'train', 'test', 'val', 'nc', 'names'}
|
||||
), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')"
|
||||
|
||||
data_dict = dict()
|
||||
data_dict['train'] = str(
|
||||
(dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None
|
||||
data_dict['test'] = str(
|
||||
(dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None
|
||||
data_dict['val'] = str(
|
||||
(dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None
|
||||
data_dict['nc'] = dataset_definition['nc']
|
||||
data_dict['names'] = dataset_definition['names']
|
||||
|
||||
return data_dict
|
||||
|
||||
|
||||
class ClearmlLogger:
|
||||
"""Log training runs, datasets, models, and predictions to ClearML.
|
||||
|
||||
This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default,
|
||||
this information includes hyperparameters, system configuration and metrics, model metrics, code information and
|
||||
basic data metrics and analyses.
|
||||
|
||||
By providing additional command line arguments to train.py, datasets,
|
||||
models and predictions can also be logged.
|
||||
"""
|
||||
|
||||
def __init__(self, opt, hyp):
|
||||
"""
|
||||
- Initialize ClearML Task, this object will capture the experiment
|
||||
- Upload dataset version to ClearML Data if opt.upload_dataset is True
|
||||
|
||||
arguments:
|
||||
opt (namespace) -- Commandline arguments for this run
|
||||
hyp (dict) -- Hyperparameters for this run
|
||||
|
||||
"""
|
||||
self.current_epoch = 0
|
||||
# Keep tracked of amount of logged images to enforce a limit
|
||||
self.current_epoch_logged_images = set()
|
||||
# Maximum number of images to log to clearML per epoch
|
||||
self.max_imgs_to_log_per_epoch = 16
|
||||
# Get the interval of epochs when bounding box images should be logged
|
||||
self.bbox_interval = opt.bbox_interval
|
||||
self.clearml = clearml
|
||||
self.task = None
|
||||
self.data_dict = None
|
||||
if self.clearml:
|
||||
self.task = Task.init(
|
||||
project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5',
|
||||
task_name=opt.name if opt.name != 'exp' else 'Training',
|
||||
tags=['YOLOv5'],
|
||||
output_uri=True,
|
||||
reuse_last_task_id=opt.exist_ok,
|
||||
auto_connect_frameworks={'pytorch': False}
|
||||
# We disconnect pytorch auto-detection, because we added manual model save points in the code
|
||||
)
|
||||
# ClearML's hooks will already grab all general parameters
|
||||
# Only the hyperparameters coming from the yaml config file
|
||||
# will have to be added manually!
|
||||
self.task.connect(hyp, name='Hyperparameters')
|
||||
self.task.connect(opt, name='Args')
|
||||
|
||||
# Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent
|
||||
self.task.set_base_docker('ultralytics/yolov5:latest',
|
||||
docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"',
|
||||
docker_setup_bash_script='pip install clearml')
|
||||
|
||||
# Get ClearML Dataset Version if requested
|
||||
if opt.data.startswith('clearml://'):
|
||||
# data_dict should have the following keys:
|
||||
# names, nc (number of classes), test, train, val (all three relative paths to ../datasets)
|
||||
self.data_dict = construct_dataset(opt.data)
|
||||
# Set data to data_dict because wandb will crash without this information and opt is the best way
|
||||
# to give it to them
|
||||
opt.data = self.data_dict
|
||||
|
||||
def log_debug_samples(self, files, title='Debug Samples'):
|
||||
"""
|
||||
Log files (images) as debug samples in the ClearML task.
|
||||
|
||||
arguments:
|
||||
files (List(PosixPath)) a list of file paths in PosixPath format
|
||||
title (str) A title that groups together images with the same values
|
||||
"""
|
||||
for f in files:
|
||||
if f.exists():
|
||||
it = re.search(r'_batch(\d+)', f.name)
|
||||
iteration = int(it.groups()[0]) if it else 0
|
||||
self.task.get_logger().report_image(title=title,
|
||||
series=f.name.replace(it.group(), ''),
|
||||
local_path=str(f),
|
||||
iteration=iteration)
|
||||
|
||||
def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):
|
||||
"""
|
||||
Draw the bounding boxes on a single image and report the result as a ClearML debug sample.
|
||||
|
||||
arguments:
|
||||
image_path (PosixPath) the path the original image file
|
||||
boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
|
||||
class_names (dict): dict containing mapping of class int to class name
|
||||
image (Tensor): A torch tensor containing the actual image data
|
||||
"""
|
||||
if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:
|
||||
# Log every bbox_interval times and deduplicate for any intermittend extra eval runs
|
||||
if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:
|
||||
im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
|
||||
annotator = Annotator(im=im, pil=True)
|
||||
for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
|
||||
color = colors(i)
|
||||
|
||||
class_name = class_names[int(class_nr)]
|
||||
confidence_percentage = round(float(conf) * 100, 2)
|
||||
label = f'{class_name}: {confidence_percentage}%'
|
||||
|
||||
if conf > conf_threshold:
|
||||
annotator.rectangle(box.cpu().numpy(), outline=color)
|
||||
annotator.box_label(box.cpu().numpy(), label=label, color=color)
|
||||
|
||||
annotated_image = annotator.result()
|
||||
self.task.get_logger().report_image(title='Bounding Boxes',
|
||||
series=image_path.name,
|
||||
iteration=self.current_epoch,
|
||||
image=annotated_image)
|
||||
self.current_epoch_logged_images.add(image_path)
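
# Illustrative usage sketch (not part of the upstream file):
# construct_dataset() is what resolves a `--data clearml://<dataset_id>` argument:
#   data_dict = construct_dataset('clearml://<your_dataset_id>')  # fetches a local copy
#   print(data_dict['nc'], data_dict['names'])                    # class count and names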
@@ -0,0 +1,84 @@
from clearml import Task
# Connecting ClearML with the current process,
# from here on everything is logged automatically
from clearml.automation import HyperParameterOptimizer, UniformParameterRange
from clearml.automation.optuna import OptimizerOptuna

task = Task.init(project_name='Hyper-Parameter Optimization',
                 task_name='YOLOv5',
                 task_type=Task.TaskTypes.optimizer,
                 reuse_last_task_id=False)

# Example use case:
optimizer = HyperParameterOptimizer(
    # This is the experiment we want to optimize
    base_task_id='<your_template_task_id>',
    # here we define the hyper-parameters to optimize
    # Notice: The parameter name should exactly match what you see in the UI: <section_name>/<parameter>
    # For Example, here we see in the base experiment a section Named: "General"
    # under it a parameter named "batch_size", this becomes "General/batch_size"
    # If you have `argparse` for example, then arguments will appear under the "Args" section,
    # and you should instead pass "Args/batch_size"
    hyper_parameters=[
        UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1),
        UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0),
        UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98),
        UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001),
        UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0),
        UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95),
        UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2),
        UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2),
        UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0),
        UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0),
        UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0),
        UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0),
        UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7),
        UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0),
        UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0),
        UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1),
        UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9),
        UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9),
        UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0),
        UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9),
        UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9),
        UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0),
        UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001),
        UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0),
        UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0),
        UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0),
        UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0),
        UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)],
    # this is the objective metric we want to maximize/minimize
    objective_metric_title='metrics',
    objective_metric_series='mAP_0.5',
    # now we decide if we want to maximize it or minimize it (accuracy we maximize)
    objective_metric_sign='max',
    # let us limit the number of concurrent experiments;
    # this in turn will make sure we don't bombard the scheduler with experiments.
    # if we have an auto-scaler connected, this, by proxy, will limit the number of machines
    max_number_of_concurrent_tasks=1,
    # this is the optimizer class (actually doing the optimization)
    # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band)
    optimizer_class=OptimizerOptuna,
    # If specified only the top K performing Tasks will be kept, the others will be automatically archived
    save_top_k_tasks_only=5,
    compute_time_limit=None,
    total_max_jobs=20,
    min_iteration_per_job=None,
    max_iteration_per_job=None,
)

# report every 10 seconds, this is way too often, but we are testing here
optimizer.set_report_period(10 / 60)
# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent
# an_optimizer.start_locally(job_complete_callback=job_complete_callback)
# set the time limit for the optimization process (2 hours)
optimizer.set_time_limit(in_minutes=120.0)
# Start the optimization process in the local environment
optimizer.start_locally()
# wait until process is done (notice we are controlling the optimization process in the background)
optimizer.wait()
# make sure background optimization stopped
optimizer.stop()

print('We are done, good bye')
@@ -0,0 +1,258 @@
<img src="https://cdn.comet.ml/img/notebook_logo.png">
|
||||
|
||||
# YOLOv5 with Comet
|
||||
|
||||
This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet2)
|
||||
|
||||
# About Comet
|
||||
|
||||
Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models.
|
||||
|
||||
Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)!
|
||||
Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!
|
||||
|
||||
# Getting Started
|
||||
|
||||
## Install Comet
|
||||
|
||||
```shell
|
||||
pip install comet_ml
|
||||
```
|
||||
|
||||
## Configure Comet Credentials
|
||||
|
||||
There are two ways to configure Comet with YOLOv5.
|
||||
|
||||
You can either set your credentials through environment variables
|
||||
|
||||
**Environment Variables**
|
||||
|
||||
```shell
|
||||
export COMET_API_KEY=<Your Comet API Key>
|
||||
export COMET_PROJECT_NAME=<Your Comet Project Name> # This will default to 'yolov5'
|
||||
```
|
||||
|
||||
Or create a `.comet.config` file in your working directory and set your credentials there.
|
||||
|
||||
**Comet Configuration File**
|
||||
|
||||
```
|
||||
[comet]
|
||||
api_key=<Your Comet API Key>
|
||||
project_name=<Your Comet Project Name> # This will default to 'yolov5'
|
||||
```
|
||||
|
||||
## Run the Training Script
|
||||
|
||||
```shell
|
||||
# Train YOLOv5s on COCO128 for 5 epochs
|
||||
python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt
|
||||
```
|
||||
|
||||
That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI
|
||||
|
||||
<img width="1920" alt="yolo-ui" src="https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png">
|
||||
|
||||
# Try out an Example!
|
||||
|
||||
Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)
|
||||
|
||||
Or better yet, try it out yourself in this Colab Notebook
|
||||
|
||||
[](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)
|
||||
|
||||
# Log automatically
|
||||
|
||||
By default, Comet will log the following items
|
||||
|
||||
## Metrics
|
||||
|
||||
- Box Loss, Object Loss, Classification Loss for the training and validation data
|
||||
- mAP_0.5, mAP_0.5:0.95 metrics for the validation data.
|
||||
- Precision and Recall for the validation data
|
||||
|
||||
## Parameters
|
||||
|
||||
- Model Hyperparameters
|
||||
- All parameters passed through the command line options
|
||||
|
||||
## Visualizations
|
||||
|
||||
- Confusion Matrix of the model predictions on the validation data
|
||||
- Plots for the PR and F1 curves across all classes
|
||||
- Correlogram of the Class Labels
|
||||
|
||||
# Configure Comet Logging
|
||||
|
||||
Comet can be configured to log additional data either through command line flags passed to the training script
|
||||
or through environment variables.
|
||||
|
||||
```shell
|
||||
export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online
|
||||
export COMET_MODEL_NAME=<your model name> #Set the name for the saved model. Defaults to yolov5
|
||||
export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true
|
||||
export COMET_MAX_IMAGE_UPLOADS=<number of allowed images to upload to Comet> # Controls how many total image predictions to log to Comet. Defaults to 100.
|
||||
export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false
|
||||
export COMET_DEFAULT_CHECKPOINT_FILENAME=<your checkpoint filename> # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt'
|
||||
export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false.
|
||||
export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions
|
||||
```
|
||||
|
||||
## Logging Checkpoints with Comet
|
||||
|
||||
Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the
|
||||
logged checkpoints to Comet based on the interval value provided by `save-period`
|
||||
|
||||
```shell
|
||||
python train.py \
|
||||
--img 640 \
|
||||
--batch 16 \
|
||||
--epochs 5 \
|
||||
--data coco128.yaml \
|
||||
--weights yolov5s.pt \
|
||||
--save-period 1
|
||||
```
|
||||
|
||||
## Logging Model Predictions
|
||||
|
||||
By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet.
|
||||
|
||||
You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch.
|
||||
|
||||
**Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly.
|
||||
|
||||
Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)
|
||||
|
||||
```shell
|
||||
python train.py \
|
||||
--img 640 \
|
||||
--batch 16 \
|
||||
--epochs 5 \
|
||||
--data coco128.yaml \
|
||||
--weights yolov5s.pt \
|
||||
--bbox_interval 2
|
||||
```
|
||||
|
||||
### Controlling the number of Prediction Images logged to Comet
|
||||
|
||||
When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable.
|
||||
|
||||
```shell
|
||||
env COMET_MAX_IMAGE_UPLOADS=200 python train.py \
|
||||
--img 640 \
|
||||
--batch 16 \
|
||||
--epochs 5 \
|
||||
--data coco128.yaml \
|
||||
--weights yolov5s.pt \
|
||||
--bbox_interval 1
|
||||
```
|
||||
|
||||
### Logging Class Level Metrics
|
||||
|
||||
Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, f1 for each class.
|
||||
|
||||
```shell
|
||||
env COMET_LOG_PER_CLASS_METRICS=true python train.py \
|
||||
--img 640 \
|
||||
--batch 16 \
|
||||
--epochs 5 \
|
||||
--data coco128.yaml \
|
||||
--weights yolov5s.pt
|
||||
```
|
||||
|
||||
## Uploading a Dataset to Comet Artifacts
|
||||
|
||||
If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag.
|
||||
|
||||
The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file.
|
||||
|
||||
```shell
|
||||
python train.py \
|
||||
--img 640 \
|
||||
--batch 16 \
|
||||
--epochs 5 \
|
||||
--data coco128.yaml \
|
||||
--weights yolov5s.pt \
|
||||
--upload_dataset
|
||||
```
|
||||
|
||||
You can find the uploaded dataset in the Artifacts tab in your Comet Workspace
|
||||
<img width="1073" alt="artifact-1" src="https://user-images.githubusercontent.com/7529846/186929193-162718bf-ec7b-4eb9-8c3b-86b3763ef8ea.png">
|
||||
|
||||
You can preview the data directly in the Comet UI.
|
||||
<img width="1082" alt="artifact-2" src="https://user-images.githubusercontent.com/7529846/186929215-432c36a9-c109-4eb0-944b-84c2786590d6.png">
|
||||
|
||||
Artifacts are versioned and also support adding metadata about the dataset. Comet will automatically log the metadata from your dataset `yaml` file
|
||||
<img width="963" alt="artifact-3" src="https://user-images.githubusercontent.com/7529846/186929256-9d44d6eb-1a19-42de-889a-bcbca3018f2e.png">
|
||||
|
||||
### Using a saved Artifact
|
||||
|
||||
If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL.
|
||||
|
||||
```
|
||||
# contents of artifact.yaml file
|
||||
path: "comet://<workspace name>/<artifact name>:<artifact version or alias>"
|
||||
```
|
||||
|
||||
Then pass this file to your training script in the following way
|
||||
|
||||
```shell
|
||||
python train.py \
|
||||
--img 640 \
|
||||
--batch 16 \
|
||||
--epochs 5 \
|
||||
--data artifact.yaml \
|
||||
--weights yolov5s.pt
|
||||
```
|
||||
|
||||
Artifacts also allow you to track the lineage of data as it flows through your Experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset.
|
||||
<img width="1391" alt="artifact-4" src="https://user-images.githubusercontent.com/7529846/186929264-4c4014fa-fe51-4f3c-a5c5-f6d24649b1b4.png">
|
||||
|
||||
## Resuming a Training Run
|
||||
|
||||
If your training run is interrupted for any reason, e.g. disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path.
|
||||
|
||||
The Run Path has the following format `comet://<your workspace name>/<your project name>/<experiment id>`.
|
||||
|
||||
This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments and downloading Comet dataset Artifacts if they were used in the original run. The resumed run will continue logging to the existing Experiment in the Comet UI
|
||||
|
||||
```shell
|
||||
python train.py \
|
||||
--resume "comet://<your run path>"
|
||||
```
|
||||
|
||||
## Hyperparameter Search with the Comet Optimizer
|
||||
|
||||
YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualize hyperparameter sweeps in the Comet UI.
|
||||
|
||||
### Configuring an Optimizer Sweep
|
||||
|
||||
To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json`
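
The bundled file is the authoritative reference; as an illustrative sketch only (the field names follow Comet's Optimizer config format, and the single parameter shown is made up for brevity), such a file looks roughly like:

```
# shape of optimizer_config.json (illustrative, heavily abridged)
{
  "algorithm": "random",
  "spec": {"metric": "metrics/mAP_0.5", "objective": "maximize"},
  "parameters": {
    "lr0": {"type": "float", "min": 1e-5, "max": 1e-1}
  },
  "trials": 1
}
```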

```shell
python utils/loggers/comet/hpo.py \
--comet_optimizer_config "utils/loggers/comet/optimizer_config.json"
```

The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep, simply add them after
the script.

```shell
python utils/loggers/comet/hpo.py \
--comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \
--save-period 1 \
--bbox_interval 1
```

### Running a Sweep in Parallel

```shell
comet optimizer -j <set number of workers> utils/loggers/comet/hpo.py \
utils/loggers/comet/optimizer_config.json
```

### Visualizing Results

Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)

<img width="1626" alt="hyperparameter-yolo" src="https://user-images.githubusercontent.com/7529846/186914869-7dc1de14-583f-4323-967b-c9a66a29e495.png">
@@ -0,0 +1,508 @@
import glob
import json
import logging
import os
import sys
from pathlib import Path

logger = logging.getLogger(__name__)

FILE = Path(__file__).resolve()
ROOT = FILE.parents[3]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH

try:
    import comet_ml

    # Project Configuration
    config = comet_ml.config.get_config()
    COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5')
except (ModuleNotFoundError, ImportError):
    comet_ml = None
    COMET_PROJECT_NAME = None

import PIL
import torch
import torchvision.transforms as T
import yaml

from utils.dataloaders import img2label_paths
from utils.general import check_dataset, scale_boxes, xywh2xyxy
from utils.metrics import box_iou

COMET_PREFIX = 'comet://'

COMET_MODE = os.getenv('COMET_MODE', 'online')

# Model Saving Settings
COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5')

# Dataset Artifact Settings
COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true'

# Evaluation Settings
COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true'
COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true'
COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100))

# Confusion Matrix Settings
CONF_THRES = float(os.getenv('CONF_THRES', 0.001))
IOU_THRES = float(os.getenv('IOU_THRES', 0.6))

# Batch Logging Settings
COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true'
COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1)
COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1)
COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true'

RANK = int(os.getenv('RANK', -1))

to_pil = T.ToPILImage()


class CometLogger:
    """Log metrics, parameters, source code, models and much more
    with Comet
    """

    def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwargs) -> None:
        self.job_type = job_type
        self.opt = opt
        self.hyp = hyp

        # Comet Flags
        self.comet_mode = COMET_MODE

        self.save_model = opt.save_period > -1
        self.model_name = COMET_MODEL_NAME

        # Batch Logging Settings
        self.log_batch_metrics = COMET_LOG_BATCH_METRICS
        self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL

        # Dataset Artifact Settings
        self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET
        self.resume = self.opt.resume

        # Default parameters to pass to Experiment objects
        self.default_experiment_kwargs = {
            'log_code': False,
            'log_env_gpu': True,
            'log_env_cpu': True,
            'project_name': COMET_PROJECT_NAME,}
        self.default_experiment_kwargs.update(experiment_kwargs)
        self.experiment = self._get_experiment(self.comet_mode, run_id)

        self.data_dict = self.check_dataset(self.opt.data)
        self.class_names = self.data_dict['names']
        self.num_classes = self.data_dict['nc']

        self.logged_images_count = 0
        self.max_images = COMET_MAX_IMAGE_UPLOADS

        if run_id is None:
            self.experiment.log_other('Created from', 'YOLOv5')
            if not isinstance(self.experiment, comet_ml.OfflineExperiment):
                workspace, project_name, experiment_id = self.experiment.url.split('/')[-3:]
                self.experiment.log_other(
                    'Run Path',
                    f'{workspace}/{project_name}/{experiment_id}',
                )
            self.log_parameters(vars(opt))
            self.log_parameters(self.opt.hyp)
            self.log_asset_data(
                self.opt.hyp,
                name='hyperparameters.json',
                metadata={'type': 'hyp-config-file'},
            )
            self.log_asset(
                f'{self.opt.save_dir}/opt.yaml',
                metadata={'type': 'opt-config-file'},
            )

        self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX

        if hasattr(self.opt, 'conf_thres'):
            self.conf_thres = self.opt.conf_thres
        else:
            self.conf_thres = CONF_THRES
        if hasattr(self.opt, 'iou_thres'):
            self.iou_thres = self.opt.iou_thres
        else:
            self.iou_thres = IOU_THRES

        self.log_parameters({'val_iou_threshold': self.iou_thres, 'val_conf_threshold': self.conf_thres})

        self.comet_log_predictions = COMET_LOG_PREDICTIONS
        if self.opt.bbox_interval == -1:
            self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10
        else:
            self.comet_log_prediction_interval = self.opt.bbox_interval

        if self.comet_log_predictions:
            self.metadata_dict = {}
            self.logged_image_names = []

        self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS

        self.experiment.log_others({
            'comet_mode': COMET_MODE,
            'comet_max_image_uploads': COMET_MAX_IMAGE_UPLOADS,
            'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS,
            'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS,
            'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX,
            'comet_model_name': COMET_MODEL_NAME,})

        # Check if running the Experiment with the Comet Optimizer
        if hasattr(self.opt, 'comet_optimizer_id'):
            self.experiment.log_other('optimizer_id', self.opt.comet_optimizer_id)
            self.experiment.log_other('optimizer_objective', self.opt.comet_optimizer_objective)
            self.experiment.log_other('optimizer_metric', self.opt.comet_optimizer_metric)
            self.experiment.log_other('optimizer_parameters', json.dumps(self.hyp))

    def _get_experiment(self, mode, experiment_id=None):
        if mode == 'offline':
            if experiment_id is not None:
                return comet_ml.ExistingOfflineExperiment(
                    previous_experiment=experiment_id,
                    **self.default_experiment_kwargs,
                )

            return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,)

        else:
            try:
                if experiment_id is not None:
                    return comet_ml.ExistingExperiment(
                        previous_experiment=experiment_id,
                        **self.default_experiment_kwargs,
                    )

                return comet_ml.Experiment(**self.default_experiment_kwargs)

            except ValueError:
                logger.warning('COMET WARNING: '
                               'Comet credentials have not been set. '
                               'Comet will default to offline logging. '
                               'Please set your credentials to enable online logging.')
                return self._get_experiment('offline', experiment_id)

        return

    def log_metrics(self, log_dict, **kwargs):
        self.experiment.log_metrics(log_dict, **kwargs)

    def log_parameters(self, log_dict, **kwargs):
        self.experiment.log_parameters(log_dict, **kwargs)

    def log_asset(self, asset_path, **kwargs):
        self.experiment.log_asset(asset_path, **kwargs)

    def log_asset_data(self, asset, **kwargs):
        self.experiment.log_asset_data(asset, **kwargs)

    def log_image(self, img, **kwargs):
        self.experiment.log_image(img, **kwargs)

    def log_model(self, path, opt, epoch, fitness_score, best_model=False):
        if not self.save_model:
            return

        model_metadata = {
            'fitness_score': fitness_score[-1],
            'epochs_trained': epoch + 1,
            'save_period': opt.save_period,
            'total_epochs': opt.epochs,}

        model_files = glob.glob(f'{path}/*.pt')
        for model_path in model_files:
            name = Path(model_path).name

            self.experiment.log_model(
                self.model_name,
                file_or_folder=model_path,
                file_name=name,
                metadata=model_metadata,
                overwrite=True,
            )

    def check_dataset(self, data_file):
        with open(data_file) as f:
            data_config = yaml.safe_load(f)

        if data_config['path'].startswith(COMET_PREFIX):
            path = data_config['path'].replace(COMET_PREFIX, '')
            data_dict = self.download_dataset_artifact(path)

            return data_dict

        self.log_asset(self.opt.data, metadata={'type': 'data-config-file'})

        return check_dataset(data_file)

    def log_predictions(self, image, labelsn, path, shape, predn):
        if self.logged_images_count >= self.max_images:
            return
        detections = predn[predn[:, 4] > self.conf_thres]
        iou = box_iou(labelsn[:, 1:], detections[:, :4])
        mask, _ = torch.where(iou > self.iou_thres)
        if len(mask) == 0:
            return

        filtered_detections = detections[mask]
        filtered_labels = labelsn[mask]

        image_id = path.split('/')[-1].split('.')[0]
        image_name = f'{image_id}_curr_epoch_{self.experiment.curr_epoch}'
        if image_name not in self.logged_image_names:
            native_scale_image = PIL.Image.open(path)
            self.log_image(native_scale_image, name=image_name)
            self.logged_image_names.append(image_name)

        metadata = []
        for cls, *xyxy in filtered_labels.tolist():
            metadata.append({
                'label': f'{self.class_names[int(cls)]}-gt',
                'score': 100,
                'box': {
                    'x': xyxy[0],
                    'y': xyxy[1],
                    'x2': xyxy[2],
                    'y2': xyxy[3]},})
        for *xyxy, conf, cls in filtered_detections.tolist():
            metadata.append({
                'label': f'{self.class_names[int(cls)]}',
                'score': conf * 100,
                'box': {
                    'x': xyxy[0],
                    'y': xyxy[1],
                    'x2': xyxy[2],
                    'y2': xyxy[3]},})

        self.metadata_dict[image_name] = metadata
        self.logged_images_count += 1
|
||||
return
|
||||
|
||||
def preprocess_prediction(self, image, labels, shape, pred):
|
||||
nl, _ = labels.shape[0], pred.shape[0]
|
||||
|
||||
# Predictions
|
||||
if self.opt.single_cls:
|
||||
pred[:, 5] = 0
|
||||
|
||||
predn = pred.clone()
|
||||
scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1])
|
||||
|
||||
labelsn = None
|
||||
if nl:
|
||||
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
|
||||
scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels
|
||||
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
|
||||
scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred
|
||||
|
||||
return predn, labelsn
|
||||
|
||||
def add_assets_to_artifact(self, artifact, path, asset_path, split):
|
||||
img_paths = sorted(glob.glob(f'{asset_path}/*'))
|
||||
label_paths = img2label_paths(img_paths)
|
||||
|
||||
for image_file, label_file in zip(img_paths, label_paths):
|
||||
image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])
|
||||
|
||||
try:
|
||||
artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split})
|
||||
artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split})
|
||||
except ValueError as e:
|
||||
logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.')
|
||||
logger.error(f'COMET ERROR: {e}')
|
||||
continue
|
||||
|
||||
return artifact
|
||||
|
||||
def upload_dataset_artifact(self):
|
||||
dataset_name = self.data_dict.get('dataset_name', 'yolov5-dataset')
|
||||
path = str((ROOT / Path(self.data_dict['path'])).resolve())
|
||||
|
||||
metadata = self.data_dict.copy()
|
||||
for key in ['train', 'val', 'test']:
|
||||
split_path = metadata.get(key)
|
||||
if split_path is not None:
|
||||
metadata[key] = split_path.replace(path, '')
|
||||
|
||||
artifact = comet_ml.Artifact(name=dataset_name, artifact_type='dataset', metadata=metadata)
|
||||
for key in metadata.keys():
|
||||
if key in ['train', 'val', 'test']:
|
||||
if isinstance(self.upload_dataset, str) and (key != self.upload_dataset):
|
||||
continue
|
||||
|
||||
asset_path = self.data_dict.get(key)
|
||||
if asset_path is not None:
|
||||
artifact = self.add_assets_to_artifact(artifact, path, asset_path, key)
|
||||
|
||||
self.experiment.log_artifact(artifact)
|
||||
|
||||
return
|
||||
|
||||
def download_dataset_artifact(self, artifact_path):
|
||||
logged_artifact = self.experiment.get_artifact(artifact_path)
|
||||
artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name)
|
||||
logged_artifact.download(artifact_save_dir)
|
||||
|
||||
metadata = logged_artifact.metadata
|
||||
data_dict = metadata.copy()
|
||||
data_dict['path'] = artifact_save_dir
|
||||
|
||||
metadata_names = metadata.get('names')
|
||||
if type(metadata_names) == dict:
|
||||
data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()}
|
||||
elif type(metadata_names) == list:
|
||||
data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}
|
||||
else:
|
||||
raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"
|
||||
|
||||
data_dict = self.update_data_paths(data_dict)
|
||||
return data_dict
|
||||
|
||||
def update_data_paths(self, data_dict):
|
||||
path = data_dict.get('path', '')
|
||||
|
||||
for split in ['train', 'val', 'test']:
|
||||
if data_dict.get(split):
|
||||
split_path = data_dict.get(split)
|
||||
data_dict[split] = (f'{path}/{split_path}' if isinstance(split, str) else [
|
||||
f'{path}/{x}' for x in split_path])
|
||||
|
||||
return data_dict
|
||||
|
||||
def on_pretrain_routine_end(self, paths):
|
||||
if self.opt.resume:
|
||||
return
|
||||
|
||||
for path in paths:
|
||||
self.log_asset(str(path))
|
||||
|
||||
if self.upload_dataset:
|
||||
if not self.resume:
|
||||
self.upload_dataset_artifact()
|
||||
|
||||
return
|
||||
|
||||
def on_train_start(self):
|
||||
self.log_parameters(self.hyp)
|
||||
|
||||
def on_train_epoch_start(self):
|
||||
return
|
||||
|
||||
def on_train_epoch_end(self, epoch):
|
||||
self.experiment.curr_epoch = epoch
|
||||
|
||||
return
|
||||
|
||||
def on_train_batch_start(self):
|
||||
return
|
||||
|
||||
def on_train_batch_end(self, log_dict, step):
|
||||
self.experiment.curr_step = step
|
||||
if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0):
|
||||
self.log_metrics(log_dict, step=step)
|
||||
|
||||
return
|
||||
|
||||
def on_train_end(self, files, save_dir, last, best, epoch, results):
|
||||
if self.comet_log_predictions:
|
||||
curr_epoch = self.experiment.curr_epoch
|
||||
self.experiment.log_asset_data(self.metadata_dict, 'image-metadata.json', epoch=curr_epoch)
|
||||
|
||||
for f in files:
|
||||
self.log_asset(f, metadata={'epoch': epoch})
|
||||
self.log_asset(f'{save_dir}/results.csv', metadata={'epoch': epoch})
|
||||
|
||||
if not self.opt.evolve:
|
||||
model_path = str(best if best.exists() else last)
|
||||
name = Path(model_path).name
|
||||
if self.save_model:
|
||||
self.experiment.log_model(
|
||||
self.model_name,
|
||||
file_or_folder=model_path,
|
||||
file_name=name,
|
||||
overwrite=True,
|
||||
)
|
||||
|
||||
# Check if running Experiment with Comet Optimizer
|
||||
if hasattr(self.opt, 'comet_optimizer_id'):
|
||||
metric = results.get(self.opt.comet_optimizer_metric)
|
||||
self.experiment.log_other('optimizer_metric_value', metric)
|
||||
|
||||
self.finish_run()
|
||||
|
||||
def on_val_start(self):
|
||||
return
|
||||
|
||||
def on_val_batch_start(self):
|
||||
return
|
||||
|
||||
def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs):
|
||||
if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)):
|
||||
return
|
||||
|
||||
for si, pred in enumerate(outputs):
|
||||
if len(pred) == 0:
|
||||
continue
|
||||
|
||||
image = images[si]
|
||||
labels = targets[targets[:, 0] == si, 1:]
|
||||
shape = shapes[si]
|
||||
path = paths[si]
|
||||
predn, labelsn = self.preprocess_prediction(image, labels, shape, pred)
|
||||
if labelsn is not None:
|
||||
self.log_predictions(image, labelsn, path, shape, predn)
|
||||
|
||||
return
|
||||
|
||||
def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
|
||||
if self.comet_log_per_class_metrics:
|
||||
if self.num_classes > 1:
|
||||
for i, c in enumerate(ap_class):
|
||||
class_name = self.class_names[c]
|
||||
self.experiment.log_metrics(
|
||||
{
|
||||
'mAP@.5': ap50[i],
|
||||
'mAP@.5:.95': ap[i],
|
||||
'precision': p[i],
|
||||
'recall': r[i],
|
||||
'f1': f1[i],
|
||||
'true_positives': tp[i],
|
||||
'false_positives': fp[i],
|
||||
'support': nt[c]},
|
||||
prefix=class_name)
|
||||
|
||||
if self.comet_log_confusion_matrix:
|
||||
epoch = self.experiment.curr_epoch
|
||||
class_names = list(self.class_names.values())
|
||||
class_names.append('background')
|
||||
num_classes = len(class_names)
|
||||
|
||||
self.experiment.log_confusion_matrix(
|
||||
matrix=confusion_matrix.matrix,
|
||||
max_categories=num_classes,
|
||||
labels=class_names,
|
||||
epoch=epoch,
|
||||
column_label='Actual Category',
|
||||
row_label='Predicted Category',
|
||||
file_name=f'confusion-matrix-epoch-{epoch}.json',
|
||||
)
|
||||
|
||||
def on_fit_epoch_end(self, result, epoch):
|
||||
self.log_metrics(result, epoch=epoch)
|
||||
|
||||
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
|
||||
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
|
||||
self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
|
||||
|
||||
def on_params_update(self, params):
|
||||
self.log_parameters(params)
|
||||
|
||||
def finish_run(self):
|
||||
self.experiment.end()
|
|
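Note on the prediction-logging cadence set in CometLogger.__init__ above: with bbox_interval left at -1, the logger targets roughly ten prediction uploads per run. A minimal standalone sketch of that arithmetic (the function name is ours, for illustration only):

def prediction_interval(bbox_interval, epochs):
    # Mirrors the branch in __init__: -1 means "pick a sensible default".
    if bbox_interval == -1:
        return 1 if epochs < 10 else epochs // 10
    return bbox_interval

assert prediction_interval(-1, 8) == 1     # short run: log every epoch
assert prediction_interval(-1, 300) == 30  # long run: ~10 upload points
assert prediction_interval(5, 300) == 5    # an explicit interval is used as-is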
@@ -0,0 +1,150 @@
import logging
import os
from urllib.parse import urlparse

try:
    import comet_ml
except (ModuleNotFoundError, ImportError):
    comet_ml = None

import yaml

logger = logging.getLogger(__name__)

COMET_PREFIX = 'comet://'
COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5')
COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt')


def download_model_checkpoint(opt, experiment):
    model_dir = f'{opt.project}/{experiment.name}'
    os.makedirs(model_dir, exist_ok=True)

    model_name = COMET_MODEL_NAME
    model_asset_list = experiment.get_model_asset_list(model_name)

    if len(model_asset_list) == 0:
        logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}')
        return

    model_asset_list = sorted(
        model_asset_list,
        key=lambda x: x['step'],
        reverse=True,
    )
    logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list}

    resource_url = urlparse(opt.weights)
    checkpoint_filename = resource_url.query

    if checkpoint_filename:
        asset_id = logged_checkpoint_map.get(checkpoint_filename)
    else:
        asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
        checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME

    if asset_id is None:
        logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment')
        return

    try:
        logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}')
        asset_filename = checkpoint_filename

        model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False)
        model_download_path = f'{model_dir}/{asset_filename}'
        with open(model_download_path, 'wb') as f:
            f.write(model_binary)

        opt.weights = model_download_path

    except Exception as e:
        logger.warning('COMET WARNING: Unable to download checkpoint from Comet')
        logger.exception(e)


def set_opt_parameters(opt, experiment):
    """Update the opts Namespace with parameters
    from Comet's ExistingExperiment when resuming a run

    Args:
        opt (argparse.Namespace): Namespace of command line options
        experiment (comet_ml.APIExperiment): Comet API Experiment object
    """
    asset_list = experiment.get_asset_list()
    resume_string = opt.resume

    for asset in asset_list:
        if asset['fileName'] == 'opt.yaml':
            asset_id = asset['assetId']
            asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False)
            opt_dict = yaml.safe_load(asset_binary)
            for key, value in opt_dict.items():
                setattr(opt, key, value)
            opt.resume = resume_string

    # Save hyperparameters to YAML file
    # Necessary to pass checks in training script
    save_dir = f'{opt.project}/{experiment.name}'
    os.makedirs(save_dir, exist_ok=True)

    hyp_yaml_path = f'{save_dir}/hyp.yaml'
    with open(hyp_yaml_path, 'w') as f:
        yaml.dump(opt.hyp, f)
    opt.hyp = hyp_yaml_path


def check_comet_weights(opt):
    """Downloads model weights from Comet and updates the
    weights path to point to saved weights location

    Args:
        opt (argparse.Namespace): Command Line arguments passed
            to YOLOv5 training script

    Returns:
        None/bool: Return True if weights are successfully downloaded
            else return None
    """
    if comet_ml is None:
        return

    if isinstance(opt.weights, str):
        if opt.weights.startswith(COMET_PREFIX):
            api = comet_ml.API()
            resource = urlparse(opt.weights)
            experiment_path = f'{resource.netloc}{resource.path}'
            experiment = api.get(experiment_path)
            download_model_checkpoint(opt, experiment)
            return True

    return None


def check_comet_resume(opt):
    """Restores run parameters to their original state based on the model checkpoint
    and logged Experiment parameters.

    Args:
        opt (argparse.Namespace): Command Line arguments passed
            to YOLOv5 training script

    Returns:
        None/bool: Return True if the run is restored successfully
            else return None
    """
    if comet_ml is None:
        return

    if isinstance(opt.resume, str):
        if opt.resume.startswith(COMET_PREFIX):
            api = comet_ml.API()
            resource = urlparse(opt.resume)
            experiment_path = f'{resource.netloc}{resource.path}'
            experiment = api.get(experiment_path)
            set_opt_parameters(opt, experiment)
            download_model_checkpoint(opt, experiment)

            return True

    return None
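Note: both helpers above accept weights/resume strings of the form comet://<workspace>/<project>/<experiment_id>[?checkpoint.pt] and split them with urlparse. A quick illustration (workspace, project, and experiment values are hypothetical):

from urllib.parse import urlparse

resource = urlparse('comet://my-workspace/yolov5/abc123?best.pt')
experiment_path = f'{resource.netloc}{resource.path}'  # -> 'my-workspace/yolov5/abc123'
checkpoint = resource.query or 'last.pt'               # empty query falls back to COMET_DEFAULT_CHECKPOINT_FILENAME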
@@ -0,0 +1,118 @@
import argparse
import json
import logging
import os
import sys
from pathlib import Path

import comet_ml

logger = logging.getLogger(__name__)

FILE = Path(__file__).resolve()
ROOT = FILE.parents[3]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH

from train import train
from utils.callbacks import Callbacks
from utils.general import increment_path
from utils.torch_utils import select_device

# Project Configuration
config = comet_ml.config.get_config()
COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5')


def get_args(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=300, help='total training epochs')
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
    parser.add_argument('--noplots', action='store_true', help='save no plot files')
    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')

    # Weights & Biases arguments
    parser.add_argument('--entity', default=None, help='W&B: Entity')
    parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
    parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
    parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')

    # Comet Arguments
    parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.')
    parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.')
    parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.")
    parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric to Optimize.')
    parser.add_argument('--comet_optimizer_workers',
                        type=int,
                        default=1,
                        help='Comet: Number of Parallel Workers to use with the Comet Optimizer.')

    return parser.parse_known_args()[0] if known else parser.parse_args()


def run(parameters, opt):
    hyp_dict = {k: v for k, v in parameters.items() if k not in ['epochs', 'batch_size']}

    opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
    opt.batch_size = parameters.get('batch_size')
    opt.epochs = parameters.get('epochs')

    device = select_device(opt.device, batch_size=opt.batch_size)
    train(hyp_dict, opt, device, callbacks=Callbacks())


if __name__ == '__main__':
    opt = get_args(known=True)

    opt.weights = str(opt.weights)
    opt.cfg = str(opt.cfg)
    opt.data = str(opt.data)
    opt.project = str(opt.project)

    optimizer_id = os.getenv('COMET_OPTIMIZER_ID')
    if optimizer_id is None:
        with open(opt.comet_optimizer_config) as f:
            optimizer_config = json.load(f)
        optimizer = comet_ml.Optimizer(optimizer_config)
    else:
        optimizer = comet_ml.Optimizer(optimizer_id)

    opt.comet_optimizer_id = optimizer.id
    status = optimizer.status()

    opt.comet_optimizer_objective = status['spec']['objective']
    opt.comet_optimizer_metric = status['spec']['metric']

    logger.info('COMET INFO: Starting Hyperparameter Sweep')
    for parameter in optimizer.get_parameters():
        run(parameter['parameters'], opt)
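Note: when COMET_OPTIMIZER_ID is unset, the script above expects --comet_optimizer_config to point to a JSON file; the only fields the script itself relies on are spec.objective and spec.metric. A sketch of a config with that shape (all values illustrative; consult Comet's Optimizer documentation for the full schema):

optimizer_config = {
    'algorithm': 'bayes',
    'spec': {'objective': 'minimize', 'metric': 'loss', 'maxCombo': 10},
    'parameters': {
        'lr0': {'type': 'float', 'min': 1e-5, 'max': 1e-1, 'scalingType': 'loguniform'},
        'batch_size': {'type': 'discrete', 'values': [16, 32, 64]},
        'epochs': {'type': 'discrete', 'values': [100]},
    },
}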
@@ -0,0 +1,193 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license

# WARNING ⚠️ wandb is deprecated and will be removed in future release.
# See supported integrations at https://github.com/ultralytics/yolov5#integrations

import logging
import os
import sys
from contextlib import contextmanager
from pathlib import Path

from utils.general import LOGGER, colorstr

FILE = Path(__file__).resolve()
ROOT = FILE.parents[3]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
RANK = int(os.getenv('RANK', -1))
DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \
                      f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.'

try:
    import wandb

    assert hasattr(wandb, '__version__')  # verify package import not local dir
    LOGGER.warning(DEPRECATION_WARNING)
except (ImportError, AssertionError):
    wandb = None


class WandbLogger():
    """Log training runs, datasets, models, and predictions to Weights & Biases.

    This logger sends information to W&B at wandb.ai. By default, this information
    includes hyperparameters, system configuration and metrics, model metrics,
    and basic data metrics and analyses.

    By providing additional command line arguments to train.py, datasets,
    models and predictions can also be logged.

    For more on how this logger is used, see the Weights & Biases documentation:
    https://docs.wandb.com/guides/integrations/yolov5
    """

    def __init__(self, opt, run_id=None, job_type='Training'):
        """
        - Initialize WandbLogger instance
        - Upload dataset if opt.upload_dataset is True
        - Setup training processes if job_type is 'Training'

        arguments:
        opt (namespace) -- Commandline arguments for this run
        run_id (str) -- Run ID of W&B run to be resumed
        job_type (str) -- To set the job_type for this run

        """
        # Pre-training routine --
        self.job_type = job_type
        self.wandb, self.wandb_run = wandb, wandb.run if wandb else None
        self.val_artifact, self.train_artifact = None, None
        self.train_artifact_path, self.val_artifact_path = None, None
        self.result_artifact = None
        self.val_table, self.result_table = None, None
        self.max_imgs_to_log = 16
        self.data_dict = None
        if self.wandb:
            self.wandb_run = wandb.init(config=opt,
                                        resume='allow',
                                        project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                                        entity=opt.entity,
                                        name=opt.name if opt.name != 'exp' else None,
                                        job_type=job_type,
                                        id=run_id,
                                        allow_val_change=True) if not wandb.run else wandb.run

        if self.wandb_run:
            if self.job_type == 'Training':
                if isinstance(opt.data, dict):
                    # This means another dataset manager has already processed the dataset info (e.g. ClearML)
                    # and they will have stored the already processed dict in opt.data
                    self.data_dict = opt.data
                self.setup_training(opt)

    def setup_training(self, opt):
        """
        Setup the necessary processes for training YOLO models:
          - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
          - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
          - Setup log_dict, initialize bbox_interval

        arguments:
        opt (namespace) -- commandline arguments for this run

        """
        self.log_dict, self.current_epoch = {}, 0
        self.bbox_interval = opt.bbox_interval
        if isinstance(opt.resume, str):
            model_dir, _ = self.download_model_artifact(opt)
            if model_dir:
                self.weights = Path(model_dir) / 'last.pt'
                config = self.wandb_run.config
                opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str(
                    self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \
                    config.hyp, config.imgsz

        if opt.bbox_interval == -1:
            self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
            if opt.evolve or opt.noplots:
                self.bbox_interval = opt.bbox_interval = opt.epochs + 1  # disable bbox_interval

    def log_model(self, path, opt, epoch, fitness_score, best_model=False):
        """
        Log the model checkpoint as W&B artifact

        arguments:
        path (Path) -- Path of directory containing the checkpoints
        opt (namespace) -- Command line arguments for this run
        epoch (int) -- Current epoch number
        fitness_score (float) -- fitness score for current epoch
        best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
        """
        model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model',
                                        type='model',
                                        metadata={
                                            'original_url': str(path),
                                            'epochs_trained': epoch + 1,
                                            'save period': opt.save_period,
                                            'project': opt.project,
                                            'total_epochs': opt.epochs,
                                            'fitness_score': fitness_score})
        model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
        wandb.log_artifact(model_artifact,
                           aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
        LOGGER.info(f'Saving model artifact on epoch {epoch + 1}')

    def val_one_image(self, pred, predn, path, names, im):
        pass

    def log(self, log_dict):
        """
        save the metrics to the logging dictionary

        arguments:
        log_dict (Dict) -- metrics/media to be logged in current step
        """
        if self.wandb_run:
            for key, value in log_dict.items():
                self.log_dict[key] = value

    def end_epoch(self):
        """
        commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
        """
        if self.wandb_run:
            with all_logging_disabled():
                try:
                    wandb.log(self.log_dict)
                except BaseException as e:
                    LOGGER.info(
                        f'An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}'
                    )
                    self.wandb_run.finish()
                    self.wandb_run = None
                self.log_dict = {}

    def finish_run(self):
        """
        Log metrics if any and finish the current W&B run
        """
        if self.wandb_run:
            if self.log_dict:
                with all_logging_disabled():
                    wandb.log(self.log_dict)
            wandb.run.finish()
            LOGGER.warning(DEPRECATION_WARNING)


@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
    """ source - https://gist.github.com/simon-weber/7853144
    A context manager that will prevent any logging messages triggered during the body from being processed.
    :param highest_level: the maximum logging level in use.
      This would only need to be changed if a custom level greater than CRITICAL is defined.
    """
    previous_level = logging.root.manager.disable
    logging.disable(highest_level)
    try:
        yield
    finally:
        logging.disable(previous_level)
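Note: all_logging_disabled is what lets end_epoch() above mute wandb's own log output while still surfacing failures through LOGGER. Usage is the standard context-manager pattern:

import logging

with all_logging_disabled():
    logging.getLogger(__name__).critical('suppressed')  # nothing is emitted inside the block
logging.getLogger(__name__).warning('visible again')    # logging is restored on exit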
@@ -0,0 +1,234 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Loss functions
"""

import torch
import torch.nn as nn

from utils.metrics import bbox_iou
from utils.torch_utils import de_parallel


def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps


class BCEBlurWithLogitsLoss(nn.Module):
    # BCEwithLogitLoss() with reduced missing label effects.
    def __init__(self, alpha=0.05):
        super().__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        loss *= alpha_factor
        return loss.mean()


class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super().__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss


class QFocalLoss(nn.Module):
    # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super().__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)

        pred_prob = torch.sigmoid(pred)  # prob from logits
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = torch.abs(true - pred_prob) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss


class ComputeLoss:
    sort_obj_iou = False

    # Compute losses
    def __init__(self, model, autobalance=False):
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        m = de_parallel(model).model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7
        self.ssi = list(m.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
        self.na = m.na  # number of anchors
        self.nc = m.nc  # number of classes
        self.nl = m.nl  # number of layers
        self.anchors = m.anchors
        self.device = device

    def __call__(self, p, targets):  # predictions, targets
        lcls = torch.zeros(1, device=self.device)  # class loss
        lbox = torch.zeros(1, device=self.device)  # box loss
        lobj = torch.zeros(1, device=self.device)  # object loss
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1)  # faster, requires torch 1.8.0
                pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1)  # target-subset of predictions

                # Regression
                pxy = pxy.sigmoid() * 2 - 0.5
                pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze()  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness
                iou = iou.detach().clamp(0).type(tobj.dtype)
                if self.sort_obj_iou:
                    j = iou.argsort()
                    b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]
                if self.gr < 1:
                    iou = (1.0 - self.gr) + self.gr * iou
                tobj[b, a, gj, gi] = iou  # iou ratio

                # Classification
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(pcls, self.cn, device=self.device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    lcls += self.BCEcls(pcls, t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()

    def build_targets(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=self.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor(
            [
                [0, 0],
                [1, 0],
                [0, 1],
                [-1, 0],
                [0, -1],  # j,k,l,m
                # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
            ],
            device=self.device).float() * g  # offsets

        for i in range(self.nl):
            anchors, shape = self.anchors[i], p[i].shape
            gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain  # shape(3,n,7)
            if nt:
                # Matches
                r = t[..., 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1 < g) & (gxy > 1)).T
                l, m = ((gxi % 1 < g) & (gxi > 1)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            bc, gxy, gwh, a = t.chunk(4, 1)  # (image, class), grid xy, grid wh, anchors
            a, (b, c) = a.long().view(-1), bc.long().T  # anchors, image, class
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid indices

            # Append
            indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1)))  # image, anchor, grid
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class

        return tcls, tbox, indices, anch
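Note: two of the formulas above are easy to sanity-check numerically. smooth_BCE(eps=0.1) yields (0.95, 0.05) as the positive/negative targets, and the box decode in __call__ bounds the center offset to (-0.5, 1.5) grid cells and the width/height to (0, 4) anchor multiples:

import torch

cp, cn = 1.0 - 0.5 * 0.1, 0.5 * 0.1  # smooth_BCE(eps=0.1)
print(cp, cn)                         # 0.95 0.05

t = torch.tensor([-10.0, 0.0, 10.0])  # extreme and neutral logits
print(t.sigmoid() * 2 - 0.5)          # pxy decode -> approx [-0.5, 0.5, 1.5]
print((t.sigmoid() * 2) ** 2)         # pwh decode (per anchor unit) -> approx [0.0, 1.0, 4.0]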
@@ -0,0 +1,360 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Model validation metrics
"""

import math
import warnings
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import torch

from utils import TryExcept, threaded


def fitness(x):
    # Model fitness as a weighted combination of metrics
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)


def smooth(y, f=0.05):
    # Box filter of fraction f
    nf = round(len(y) * f * 2) // 2 + 1  # number of filter elements (must be odd)
    p = np.ones(nf // 2)  # ones padding
    yp = np.concatenate((p * y[0], y, p * y[-1]), 0)  # y padded
    return np.convolve(yp, np.ones(nf) / nf, mode='valid')  # y-smoothed


def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=''):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:  True positives (nparray, nx1 or nx10).
        conf:  Objectness value from 0-1 (nparray).
        pred_cls:  Predicted object classes (nparray).
        target_cls:  True object classes (nparray).
        plot:  Plot precision-recall curve at mAP@0.5
        save_dir:  Plot save directory
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes, nt = np.unique(target_cls, return_counts=True)
    nc = unique_classes.shape[0]  # number of classes, number of detections

    # Create Precision-Recall curve and compute AP for each class
    px, py = np.linspace(0, 1, 1000), []  # for plotting
    ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = nt[ci]  # number of labels
        n_p = i.sum()  # number of predictions
        if n_p == 0 or n_l == 0:
            continue

        # Accumulate FPs and TPs
        fpc = (1 - tp[i]).cumsum(0)
        tpc = tp[i].cumsum(0)

        # Recall
        recall = tpc / (n_l + eps)  # recall curve
        r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases

        # Precision
        precision = tpc / (tpc + fpc)  # precision curve
        p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score

        # AP from recall-precision curve
        for j in range(tp.shape[1]):
            ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
            if plot and j == 0:
                py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5

    # Compute F1 (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + eps)
    names = [v for k, v in names.items() if k in unique_classes]  # list: only classes that have data
    names = dict(enumerate(names))  # to dict
    if plot:
        plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names)
        plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1')
        plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision')
        plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall')

    i = smooth(f1.mean(0), 0.1).argmax()  # max F1 index
    p, r, f1 = p[:, i], r[:, i], f1[:, i]
    tp = (r * nt).round()  # true positives
    fp = (tp / (p + eps) - tp).round()  # false positives
    return tp, fp, p, r, f1, ap, unique_classes.astype(int)


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves
    # Arguments
        recall:    The recall curve (list)
        precision: The precision curve (list)
    # Returns
        Average precision, precision curve, recall curve
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([1.0], precision, [0.0]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap, mpre, mrec


class ConfusionMatrix:
    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
    def __init__(self, nc, conf=0.25, iou_thres=0.45):
        self.matrix = np.zeros((nc + 1, nc + 1))
        self.nc = nc  # number of classes
        self.conf = conf
        self.iou_thres = iou_thres

    def process_batch(self, detections, labels):
        """
        Return intersection-over-union (Jaccard index) of boxes.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
            labels (Array[M, 5]), class, x1, y1, x2, y2
        Returns:
            None, updates confusion matrix accordingly
        """
        if detections is None:
            gt_classes = labels.int()
            for gc in gt_classes:
                self.matrix[self.nc, gc] += 1  # background FN
            return

        detections = detections[detections[:, 4] > self.conf]
        gt_classes = labels[:, 0].int()
        detection_classes = detections[:, 5].int()
        iou = box_iou(labels[:, 1:], detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(int)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[detection_classes[m1[j]], gc] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # true background

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[dc, self.nc] += 1  # predicted background

    def tp_fp(self):
        tp = self.matrix.diagonal()  # true positives
        fp = self.matrix.sum(1) - tp  # false positives
        # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)
        return tp[:-1], fp[:-1]  # remove background class

    @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')
    def plot(self, normalize=True, save_dir='', names=()):
        import seaborn as sn

        array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1)  # normalize columns
        array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

        fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
        nc, nn = self.nc, len(names)  # number of classes, names
        sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
        labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
        ticklabels = (names + ['background']) if labels else 'auto'
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
            sn.heatmap(array,
                       ax=ax,
                       annot=nc < 30,
                       annot_kws={
                           'size': 8},
                       cmap='Blues',
                       fmt='.2f',
                       square=True,
                       vmin=0.0,
                       xticklabels=ticklabels,
                       yticklabels=ticklabels).set_facecolor((1, 1, 1))
        ax.set_xlabel('True')
        ax.set_ylabel('Predicted')
        ax.set_title('Confusion Matrix')
        fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
        plt.close(fig)

    def print(self):
        for i in range(self.nc + 1):
            print(' '.join(map(str, self.matrix[i])))


def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
    # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4)

    # Get the coordinates of bounding boxes
    if xywh:  # transform from xywh to xyxy
        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
        w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
        b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
        b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
    else:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
        w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps)
        w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps)

    # Intersection area
    inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \
            (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0)

    # Union Area
    union = w1 * h1 + w2 * h2 - inter + eps

    # IoU
    iou = inter / union
    if CIoU or DIoU or GIoU:
        cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1)  # convex (smallest enclosing box) width
        ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center dist ** 2
            if CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)
                with torch.no_grad():
                    alpha = v / (v - iou + (1 + eps))
                return iou - (rho2 / c2 + v * alpha)  # CIoU
            return iou - rho2 / c2  # DIoU
        c_area = cw * ch + eps  # convex area
        return iou - (c_area - union) / c_area  # GIoU https://arxiv.org/pdf/1902.09630.pdf
    return iou  # IoU


def box_iou(box1, box2, eps=1e-7):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
    inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)

    # IoU = inter / (area1 + area2 - inter)
    return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)


def bbox_ioa(box1, box2, eps=1e-7):
    """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2
    box1:       np.array of shape(4)
    box2:       np.array of shape(nx4)
    returns:    np.array of shape(n)
    """

    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1
    b2_x1, b2_y1, b2_x2, b2_y2 = box2.T

    # Intersection area
    inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                 (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)

    # box2 area
    box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps

    # Intersection over box2 area
    return inter_area / box2_area


def wh_iou(wh1, wh2, eps=1e-7):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps)  # iou = inter / (area1 + area2 - inter)


# Plots ----------------------------------------------------------------------------------------------------------------


@threaded
def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
    # Precision-recall curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}')  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')
    ax.set_title('Precision-Recall Curve')
    fig.savefig(save_dir, dpi=250)
    plt.close(fig)


@threaded
def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'):
    # Metric-confidence curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py):
            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
    else:
        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)

    y = smooth(py.mean(0), 0.05)
    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')
    ax.set_title(f'{ylabel}-Confidence Curve')
    fig.savefig(save_dir, dpi=250)
    plt.close(fig)
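Note: box_iou above broadcasts an N x M IoU matrix. A small worked example: boxes [0, 0, 2, 2] and [1, 1, 3, 3] intersect in a 1 x 1 square, so IoU = 1 / (4 + 4 - 1) ≈ 0.143:

import torch

a = torch.tensor([[0., 0., 2., 2.]])
b = torch.tensor([[1., 1., 3., 3.]])
print(box_iou(a, b))  # tensor([[0.1429]])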
@@ -0,0 +1,560 @@
|
|||
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
||||
"""
|
||||
Plotting utils
|
||||
"""
|
||||
|
||||
import contextlib
|
||||
import math
|
||||
import os
|
||||
from copy import copy
|
||||
from pathlib import Path
|
||||
from urllib.error import URLError
|
||||
|
||||
import cv2
|
||||
import matplotlib
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import seaborn as sn
|
||||
import torch
|
||||
from PIL import Image, ImageDraw, ImageFont
|
||||
|
||||
from utils import TryExcept, threaded
|
||||
from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path,
|
||||
is_ascii, xywh2xyxy, xyxy2xywh)
|
||||
from utils.metrics import fitness
|
||||
from utils.segment.general import scale_image
|
||||
|
||||
# Settings
|
||||
RANK = int(os.getenv('RANK', -1))
|
||||
matplotlib.rc('font', **{'size': 11})
|
||||
matplotlib.use('Agg') # for writing to files only
|
||||
|
||||
|
class Colors:
    # Ultralytics color palette https://ultralytics.com/
    def __init__(self):
        # hex = matplotlib.colors.TABLEAU_COLORS.values()
        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
                '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


colors = Colors()  # create instance for 'from utils.plots import colors'
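
# Editor's note, usage sketch (not part of the upstream file): the instance
# cycles through the 20-color palette by class index.
#     colors(0)            # -> (255, 56, 56), RGB for class 0
#     colors(23)           # wraps around: same as colors(3)
#     colors(0, bgr=True)  # -> (56, 56, 255) for cv2 drawing
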
def check_pil_font(font=FONT, size=10):
    # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
    font = Path(font)
    font = font if font.exists() else (CONFIG_DIR / font.name)
    try:
        return ImageFont.truetype(str(font) if font.exists() else font.name, size)
    except Exception:  # download if missing
        try:
            check_font(font)
            return ImageFont.truetype(str(font), size)
        except TypeError:
            check_requirements('Pillow>=8.4.0')  # known issue https://github.com/ultralytics/yolov5/issues/5374
        except URLError:  # not online
            return ImageFont.load_default()


class Annotator:
    # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
    def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
        assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
        non_ascii = not is_ascii(example)  # non-latin labels, i.e. asian, arabic, cyrillic
        self.pil = pil or non_ascii
        if self.pil:  # use PIL
            self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
            self.draw = ImageDraw.Draw(self.im)
            self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font,
                                       size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
        else:  # use cv2
            self.im = im
        self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width (needed by both branches)

    def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
        # Add one xyxy box to image with label
        if self.pil or not is_ascii(label):
            self.draw.rectangle(box, width=self.lw, outline=color)  # box
            if label:
                w, h = self.font.getsize(label)  # text width, height (WARNING: deprecated in Pillow 9.2.0)
                # _, _, w, h = self.font.getbbox(label)  # text width, height (new)
                outside = box[1] - h >= 0  # label fits outside box
                self.draw.rectangle(
                    (box[0], box[1] - h if outside else box[1], box[0] + w + 1,
                     box[1] + 1 if outside else box[1] + h + 1),
                    fill=color,
                )
                # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0
                self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
        else:  # cv2
            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
            cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
            if label:
                tf = max(self.lw - 1, 1)  # font thickness
                w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
                outside = p1[1] - h >= 3
                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
                cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
                cv2.putText(self.im,
                            label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
                            0,
                            self.lw / 3,
                            txt_color,
                            thickness=tf,
                            lineType=cv2.LINE_AA)

    def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):
        """Plot masks at once.
        Args:
            masks (tensor): predicted masks on cuda, shape: [n, h, w]
            colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n]
            im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1]
            alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque
        """
        if self.pil:
            # convert to numpy first
            self.im = np.asarray(self.im).copy()
        if len(masks) == 0:
            self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
        colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0
        colors = colors[:, None, None]  # shape(n,1,1,3)
        masks = masks.unsqueeze(3)  # shape(n,h,w,1)
        masks_color = masks * (colors * alpha)  # shape(n,h,w,3)

        inv_alph_masks = (1 - masks * alpha).cumprod(0)  # shape(n,h,w,1)
        mcs = (masks_color * inv_alph_masks).sum(0) * 2  # mask color summand shape(n,h,w,3)

        im_gpu = im_gpu.flip(dims=[0])  # flip channel
        im_gpu = im_gpu.permute(1, 2, 0).contiguous()  # shape(h,w,3)
        im_gpu = im_gpu * inv_alph_masks[-1] + mcs
        im_mask = (im_gpu * 255).byte().cpu().numpy()
        self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape)
        if self.pil:
            # convert im back to PIL and update draw
            self.fromarray(self.im)

    def rectangle(self, xy, fill=None, outline=None, width=1):
        # Add rectangle to image (PIL-only)
        self.draw.rectangle(xy, fill, outline, width)

    def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):
        # Add text to image (PIL-only)
        if anchor == 'bottom':  # start y from font bottom
            w, h = self.font.getsize(text)  # text width, height
            xy[1] += 1 - h
        self.draw.text(xy, text, fill=txt_color, font=self.font)

    def fromarray(self, im):
        # Update self.im from a numpy array
        self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
        self.draw = ImageDraw.Draw(self.im)

    def result(self):
        # Return annotated image as array
        return np.asarray(self.im)
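
# Editor's note, usage sketch (not part of the upstream file; values are made up):
# draw one labeled box on a dummy image via the cv2 path.
#     im = np.zeros((640, 640, 3), dtype=np.uint8)
#     ann = Annotator(im, line_width=2, example='person')
#     ann.box_label([100, 100, 300, 400], 'person 0.91', color=colors(0, bgr=True))
#     annotated = ann.result()  # numpy array with the box and label drawn
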
def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
    """
    x: Features to be visualized
    module_type: Module type
    stage: Module stage within model
    n: Maximum number of feature maps to plot
    save_dir: Directory to save results
    """
    if 'Detect' not in module_type:
        batch, channels, height, width = x.shape  # batch, channels, height, width
        if height > 1 and width > 1:
            f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png"  # filename

            blocks = torch.chunk(x[0].cpu(), channels, dim=0)  # select batch index 0, block by channels
            n = min(n, channels)  # number of plots
            fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)  # n/8 rows x 8 cols
            ax = ax.ravel()
            plt.subplots_adjust(wspace=0.05, hspace=0.05)
            for i in range(n):
                ax[i].imshow(blocks[i].squeeze())  # cmap='gray'
                ax[i].axis('off')

            LOGGER.info(f'Saving {f}... ({n}/{channels})')
            plt.savefig(f, dpi=300, bbox_inches='tight')
            plt.close()
            np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy())  # npy save


def hist2d(x, y, n=100):
    # 2d histogram used in labels.png and evolve.png
    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
    return np.log(hist[xidx, yidx])


def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    from scipy.signal import butter, filtfilt

    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        return butter(order, normal_cutoff, btype='low', analog=False)

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter
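
# Editor's note, usage sketch (not part of the upstream file; requires scipy,
# and the signal below is synthetic): zero-phase smoothing of a noisy 1-D signal.
#     t = np.linspace(0, 1, 50000)
#     noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
#     smoothed = butter_lowpass_filtfilt(noisy)  # defaults: cutoff=1500 Hz, fs=50 kHz
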
def output_to_target(output, max_det=300):
    # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting
    targets = []
    for i, o in enumerate(output):
        box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)
        j = torch.full((conf.shape[0], 1), i)
        targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))
    return torch.cat(targets, 0).numpy()


@threaded
def plot_images(images, targets, paths=None, fname='images.jpg', names=None):
    # Plot image grid with labels
    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    max_size = 1920  # max image size
    max_subplots = 16  # max image subplots, i.e. 4x4
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)
    if np.max(images[0]) <= 1:
        images *= 255  # de-normalise (optional)

    # Build Image
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
    for i, im in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        im = im.transpose(1, 2, 0)
        mosaic[y:y + h, x:x + w, :] = im

    # Resize (optional)
    scale = max_size / ns / max(h, w)
    if scale < 1:
        h = math.ceil(scale * h)
        w = math.ceil(scale * w)
        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))

    # Annotate
    fs = int((h + w) * ns * 0.01)  # font size
    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)
    for i in range(i + 1):
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
        if paths:
            annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
        if len(targets) > 0:
            ti = targets[targets[:, 0] == i]  # image targets
            boxes = xywh2xyxy(ti[:, 2:6]).T
            classes = ti[:, 1].astype('int')
            labels = ti.shape[1] == 6  # labels if no conf column
            conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)

            if boxes.shape[1]:
                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
                    boxes[[0, 2]] *= w  # scale to pixels
                    boxes[[1, 3]] *= h
                elif scale < 1:  # absolute coords need scale if image scales
                    boxes *= scale
            boxes[[0, 2]] += x
            boxes[[1, 3]] += y
            for j, box in enumerate(boxes.T.tolist()):
                cls = classes[j]
                color = colors(cls)
                cls = names[cls] if names else cls
                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                    label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
                    annotator.box_label(box, label, color=color)
    annotator.im.save(fname)  # save
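
# Editor's note, usage sketch (not part of the upstream file; the tensors are
# synthetic and `names` is a made-up class map): visualize one training batch.
#     imgs = torch.rand(4, 3, 640, 640)                  # 4-image batch in [0, 1]
#     tgts = np.array([[0, 0, 0.5, 0.5, 0.2, 0.3]])      # [img_idx, cls, x, y, w, h] normalized
#     plot_images(imgs, tgts, fname='train_batch0.jpg', names={0: 'person'})
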
def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
    plt.close()

def plot_val_txt():  # from utils.plots import *; plot_val()
    # Plot val.txt histograms
    x = np.loadtxt('val.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)

def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)

def plot_val_study(file='', dir='', x=None):  # from utils.plots import *; plot_val_study()
    # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
    save_dir = Path(file).parent if file else Path(dir)
    plot2 = False  # plot additional results
    if plot2:
        ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
    for f in sorted(save_dir.glob('study*.txt')):
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        if plot2:
            s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
            for i in range(7):
                ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
                ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[5, 1:j],
                 y[3, 1:j] * 1E2,
                 '.-',
                 linewidth=2,
                 markersize=8,
                 label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
             'k.-',
             linewidth=2,
             markersize=8,
             alpha=.25,
             label='EfficientDet')

    ax2.grid(alpha=0.2)
    ax2.set_yticks(np.arange(20, 60, 5))
    ax2.set_xlim(0, 57)
    ax2.set_ylim(25, 55)
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    f = save_dir / 'study.png'
    print(f'Saving {f}...')
    plt.savefig(f, dpi=300)

@TryExcept()  # known issue https://github.com/ultralytics/yolov5/issues/5395
def plot_labels(labels, names=(), save_dir=Path('')):
    # plot dataset labels
    LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
    nc = int(c.max() + 1)  # number of classes
    x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])

    # seaborn correlogram
    sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
    plt.close()

    # matplotlib labels
    matplotlib.use('svg')  # faster
    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
    y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    with contextlib.suppress(Exception):  # color histogram bars by class
        [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # known issue #3195
    ax[0].set_ylabel('instances')
    if 0 < len(names) < 30:
        ax[0].set_xticks(range(len(names)))
        ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10)
    else:
        ax[0].set_xlabel('classes')
    sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
    sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)

    # rectangles
    labels[:, 1:3] = 0.5  # center
    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
    for cls, *box in labels[:1000]:
        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot
    ax[1].imshow(img)
    ax[1].axis('off')

    for a in [0, 1, 2, 3]:
        for s in ['top', 'right', 'left', 'bottom']:
            ax[a].spines[s].set_visible(False)

    plt.savefig(save_dir / 'labels.jpg', dpi=200)
    matplotlib.use('Agg')
    plt.close()

def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')):
    # Show classification image grid with labels (optional) and predictions (optional)
    from utils.augmentations import denormalize

    names = names or [f'class{i}' for i in range(1000)]
    blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im),
                         dim=0)  # select batch index 0, block by channels
    n = min(len(blocks), nmax)  # number of plots
    m = min(8, round(n ** 0.5))  # 8 x 8 default
    fig, ax = plt.subplots(math.ceil(n / m), m)  # n/m rows x m cols
    ax = ax.ravel() if m > 1 else [ax]
    # plt.subplots_adjust(wspace=0.05, hspace=0.05)
    for i in range(n):
        ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0))
        ax[i].axis('off')
        if labels is not None:
            s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '')
            ax[i].set_title(s, fontsize=8, verticalalignment='top')
    plt.savefig(f, dpi=300, bbox_inches='tight')
    plt.close()
    if verbose:
        LOGGER.info(f'Saving {f}')
        if labels is not None:
            LOGGER.info('True:     ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax]))
        if pred is not None:
            LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax]))
    return f

def plot_evolve(evolve_csv='path/to/evolve.csv'):  # from utils.plots import *; plot_evolve()
    # Plot evolve.csv hyp evolution results
    evolve_csv = Path(evolve_csv)
    data = pd.read_csv(evolve_csv)
    keys = [x.strip() for x in data.columns]
    x = data.values
    f = fitness(x)
    j = np.argmax(f)  # max fitness index
    plt.figure(figsize=(10, 12), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    print(f'Best results from row {j} of {evolve_csv}:')
    for i, k in enumerate(keys[7:]):
        v = x[:, 7 + i]
        mu = v[j]  # best single result
        plt.subplot(6, 5, i + 1)
        plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(mu, f.max(), 'k+', markersize=15)
        plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9})  # limit to 40 characters
        if i % 5 != 0:
            plt.yticks([])
        print(f'{k:>15}: {mu:.3g}')
    f = evolve_csv.with_suffix('.png')  # filename
    plt.savefig(f, dpi=200)
    plt.close()
    print(f'Saved {f}')

def plot_results(file='path/to/results.csv', dir=''):
    # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
    save_dir = Path(file).parent if file else Path(dir)
    fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
    ax = ax.ravel()
    files = list(save_dir.glob('results*.csv'))
    assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
    for f in files:
        try:
            data = pd.read_csv(f)
            s = [x.strip() for x in data.columns]
            x = data.values[:, 0]
            for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
                y = data.values[:, j].astype('float')
                # y[y == 0] = np.nan  # don't show zero values
                ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
                ax[i].set_title(s[j], fontsize=12)
                # if j in [8, 9, 10]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except Exception as e:
            LOGGER.info(f'Warning: Plotting error for {f}: {e}')
    ax[1].legend()
    fig.savefig(save_dir / 'results.png', dpi=200)
    plt.close()

def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
    # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
    s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
    files = list(Path(save_dir).glob('frames*.txt'))
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows
            n = results.shape[1]  # number of rows
            x = np.arange(start, min(stop, n) if stop else n)
            results = results[:, x]
            t = (results[0] - results[0].min())  # set t0=0s
            results[0] = x
            for i, a in enumerate(ax):
                if i < len(results):
                    label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
                    a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
                    a.set_title(s[i])
                    a.set_xlabel('time (s)')
                    # if fi == len(files) - 1:
                    #     a.set_ylim(bottom=0)
                    for side in ['top', 'right']:
                        a.spines[side].set_visible(False)
                else:
                    a.remove()
        except Exception as e:
            print(f'Warning: Plotting error for {f}; {e}')
    ax[1].legend()
    plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)

def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):
    # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
    xyxy = torch.tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy)  # boxes
    if square:
        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
    b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad
    xyxy = xywh2xyxy(b).long()
    clip_boxes(xyxy, im.shape)
    crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
    if save:
        file.parent.mkdir(parents=True, exist_ok=True)  # make directory
        f = str(increment_path(file).with_suffix('.jpg'))
        # cv2.imwrite(f, crop)  # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue
        Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0)  # save RGB
    return crop
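
# Editor's note, usage sketch (not part of the upstream file; the box
# coordinates are made up): crop a detection from a BGR frame read with cv2.
#     frame = cv2.imread('image.jpg')  # BGR HWC
#     crop = save_one_box([50, 60, 200, 240], frame,
#                         file=Path('crops/person.jpg'), BGR=True)
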
@@ -0,0 +1,104 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Image augmentation functions
"""

import math
import random

import cv2
import numpy as np

from ..augmentations import box_candidates
from ..general import resample_segments, segment2box

def mixup(im, labels, segments, im2, labels2, segments2):
    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
    im = (im * r + im2 * (1 - r)).astype(np.uint8)
    labels = np.concatenate((labels, labels2), 0)
    segments = np.concatenate((segments, segments2), 0)
    return im, labels, segments
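
# Editor's note, usage sketch (not part of the upstream file; shapes are
# arbitrary): blend two dummy images and merge their labels/segments.
#     im1 = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
#     im2 = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
#     l1 = np.array([[0, 10., 10., 100., 100.]]); l2 = np.array([[1, 5., 5., 50., 50.]])
#     s1 = [np.random.rand(50, 2) * 640]; s2 = [np.random.rand(50, 2) * 640]
#     im, labels, segments = mixup(im1, l1, s1, im2, l2, s2)
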
def random_perspective(im,
                       targets=(),
                       segments=(),
                       degrees=10,
                       translate=.1,
                       scale=.1,
                       shear=10,
                       perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = im.shape[0] + border[0] * 2  # shape(h,w,c)
    width = im.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width)  # x translation (pixels)
    T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height)  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(im[:, :, ::-1])  # base
    # ax[1].imshow(im2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    new_segments = []
    if n:
        new = np.zeros((n, 4))
        segments = resample_segments(segments)  # upsample
        for i, segment in enumerate(segments):
            xy = np.ones((len(segment), 3))
            xy[:, :2] = segment
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2])  # perspective rescale or affine

            # clip
            new[i] = segment2box(xy, width, height)
            new_segments.append(xy)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)
        targets = targets[i]
        targets[:, 1:5] = new[i]
        new_segments = np.array(new_segments)[i]

    return im, targets, new_segments
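
# Editor's note, usage sketch (not part of the upstream file; all values are
# illustrative): random affine warp of a dummy image with one box and its polygon.
#     im = np.full((640, 640, 3), 114, dtype=np.uint8)
#     targets = np.array([[0, 100., 100., 300., 300.]])  # [cls, x1, y1, x2, y2]
#     segs = [np.array([[100., 100.], [300., 100.], [300., 300.], [100., 300.]])]
#     im2, t2, s2 = random_perspective(im, targets, segs, degrees=5, scale=0.2)
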
@@ -0,0 +1,332 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders
"""

import os
import random

import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader, distributed

from ..augmentations import augment_hsv, copy_paste, letterbox
from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker
from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn
from ..torch_utils import torch_distributed_zero_first
from .augmentations import mixup, random_perspective

RANK = int(os.getenv('RANK', -1))

def create_dataloader(path,
                      imgsz,
                      batch_size,
                      stride,
                      single_cls=False,
                      hyp=None,
                      augment=False,
                      cache=False,
                      pad=0.0,
                      rect=False,
                      rank=-1,
                      workers=8,
                      image_weights=False,
                      quad=False,
                      prefix='',
                      shuffle=False,
                      mask_downsample_ratio=1,
                      overlap_mask=False,
                      seed=0):
    if rect and shuffle:
        LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
        shuffle = False
    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
        dataset = LoadImagesAndLabelsAndMasks(
            path,
            imgsz,
            batch_size,
            augment=augment,  # augmentation
            hyp=hyp,  # hyperparameters
            rect=rect,  # rectangular batches
            cache_images=cache,
            single_cls=single_cls,
            stride=int(stride),
            pad=pad,
            image_weights=image_weights,
            prefix=prefix,
            downsample_ratio=mask_downsample_ratio,
            overlap=overlap_mask)

    batch_size = min(batch_size, len(dataset))
    nd = torch.cuda.device_count()  # number of CUDA devices
    nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
    loader = DataLoader if image_weights else InfiniteDataLoader  # only DataLoader allows for attribute updates
    generator = torch.Generator()
    generator.manual_seed(6148914691236517205 + seed + RANK)
    return loader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle and sampler is None,
        num_workers=nw,
        sampler=sampler,
        pin_memory=True,
        collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,
        worker_init_fn=seed_worker,
        generator=generator,
    ), dataset
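
# Editor's note, usage sketch (not part of the upstream file; the dataset path
# is a placeholder and the hyp dict lists only the keys this module reads):
#     hyp = {'mosaic': 1.0, 'mixup': 0.0, 'degrees': 0.0, 'translate': 0.1,
#            'scale': 0.5, 'shear': 0.0, 'perspective': 0.0, 'copy_paste': 0.0,
#            'hsv_h': 0.015, 'hsv_s': 0.7, 'hsv_v': 0.4, 'flipud': 0.0, 'fliplr': 0.5}
#     loader, dataset = create_dataloader('data/coco128-seg/images/train2017', 640, 16,
#                                         stride=32, hyp=hyp, augment=True, shuffle=True)
#     imgs, labels, paths, shapes, masks = next(iter(loader))
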
class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels):  # for training/testing

    def __init__(
        self,
        path,
        img_size=640,
        batch_size=16,
        augment=False,
        hyp=None,
        rect=False,
        image_weights=False,
        cache_images=False,
        single_cls=False,
        stride=32,
        pad=0,
        min_items=0,
        prefix='',
        downsample_ratio=1,
        overlap=False,
    ):
        super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls,
                         stride, pad, min_items, prefix)
        self.downsample_ratio = downsample_ratio
        self.overlap = overlap

    def __getitem__(self, index):
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        masks = []
        if mosaic:
            # Load mosaic
            img, labels, segments = self.load_mosaic(index)
            shapes = None

            # MixUp augmentation
            if random.random() < hyp['mixup']:
                img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1)))

        else:
            # Load image
            img, (h0, w0), (h, w) = self.load_image(index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy
            segments = self.segments[index].copy()
            if len(segments):
                for i_s in range(len(segments)):
                    segments[i_s] = xyn2xy(
                        segments[i_s],
                        ratio[0] * w,
                        ratio[1] * h,
                        padw=pad[0],
                        padh=pad[1],
                    )
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

            if self.augment:
                img, labels, segments = random_perspective(img,
                                                           labels,
                                                           segments=segments,
                                                           degrees=hyp['degrees'],
                                                           translate=hyp['translate'],
                                                           scale=hyp['scale'],
                                                           shear=hyp['shear'],
                                                           perspective=hyp['perspective'])

        nl = len(labels)  # number of labels
        if nl:
            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3)
            if self.overlap:
                masks, sorted_idx = polygons2masks_overlap(img.shape[:2],
                                                           segments,
                                                           downsample_ratio=self.downsample_ratio)
                masks = masks[None]  # (640, 640) -> (1, 640, 640)
                labels = labels[sorted_idx]
            else:
                masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio)

        masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] //
                                                                        self.downsample_ratio, img.shape[1] //
                                                                        self.downsample_ratio))
        # TODO: albumentations support
        if self.augment:
            # Albumentations
            # some augmentations do not change boxes or masks, so they are applied as-is for now
            img, labels = self.albumentations(img, labels)
            nl = len(labels)  # update after albumentations

            # HSV color-space
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nl:
                    labels[:, 2] = 1 - labels[:, 2]
                    masks = torch.flip(masks, dims=[1])

            # Flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nl:
                    labels[:, 1] = 1 - labels[:, 1]
                    masks = torch.flip(masks, dims=[2])

            # Cutouts  # labels = cutout(img, labels, p=0.5)

        labels_out = torch.zeros((nl, 6))
        if nl:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks)

    def load_mosaic(self, index):
        # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
        labels4, segments4 = [], []
        s = self.img_size
        yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border)  # mosaic center x, y

        # 3 additional image indices
        indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
        for i, index in enumerate(indices):
            # Load image
            img, _, (h, w) = self.load_image(index)

            # place img in img4
            if i == 0:  # top left
                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
            elif i == 1:  # top right
                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
            elif i == 2:  # bottom left
                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
            elif i == 3:  # bottom right
                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
            padw = x1a - x1b
            padh = y1a - y1b

            labels, segments = self.labels[index].copy(), self.segments[index].copy()

            if labels.size:
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
                segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
            labels4.append(labels)
            segments4.extend(segments)

        # Concat/clip labels
        labels4 = np.concatenate(labels4, 0)
        for x in (labels4[:, 1:], *segments4):
            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
        # img4, labels4 = replicate(img4, labels4)  # replicate

        # Augment
        img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
        img4, labels4, segments4 = random_perspective(img4,
                                                      labels4,
                                                      segments4,
                                                      degrees=self.hyp['degrees'],
                                                      translate=self.hyp['translate'],
                                                      scale=self.hyp['scale'],
                                                      shear=self.hyp['shear'],
                                                      perspective=self.hyp['perspective'],
                                                      border=self.mosaic_border)  # border to remove
        return img4, labels4, segments4

    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes, masks = zip(*batch)  # transposed
        batched_masks = torch.cat(masks, 0)
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks

def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
    """
    Args:
        img_size (tuple): The image size.
        polygons (np.ndarray): [N, M], N is the number of polygons,
            M is the number of point coordinates (must be divisible by 2).
    """
    mask = np.zeros(img_size, dtype=np.uint8)
    polygons = np.asarray(polygons)
    polygons = polygons.astype(np.int32)
    shape = polygons.shape
    polygons = polygons.reshape(shape[0], -1, 2)
    cv2.fillPoly(mask, polygons, color=color)
    nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio)
    # NOTE: fillPoly first, then resize, to keep the loss calculation
    # consistent with the mask-ratio=1 case.
    mask = cv2.resize(mask, (nw, nh))
    return mask
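
# Editor's note, usage sketch (not part of the upstream file; coordinates are
# made up): rasterize one square polygon into a binary mask.
#     poly = np.array([[10., 10., 100., 10., 100., 100., 10., 100.]])  # [1, 8] = 4 points
#     m = polygon2mask((160, 160), poly, color=1, downsample_ratio=2)
#     m.shape, m.max()  # -> (80, 80), 1
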
def polygons2masks(img_size, polygons, color, downsample_ratio=1):
    """
    Args:
        img_size (tuple): The image size.
        polygons (list[np.ndarray]): each polygon is [N, M],
            N is the number of polygons,
            M is the number of point coordinates (must be divisible by 2).
    """
    masks = []
    for si in range(len(polygons)):
        mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio)
        masks.append(mask)
    return np.array(masks)

def polygons2masks_overlap(img_size, segments, downsample_ratio=1):
    """Return a (640, 640) overlap mask."""
    masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio),
                     dtype=np.int32 if len(segments) > 255 else np.uint8)
    areas = []
    ms = []
    for si in range(len(segments)):
        mask = polygon2mask(
            img_size,
            [segments[si].reshape(-1)],
            downsample_ratio=downsample_ratio,
            color=1,
        )
        ms.append(mask)
        areas.append(mask.sum())
    areas = np.asarray(areas)
    index = np.argsort(-areas)
    ms = np.array(ms)[index]
    for i in range(len(segments)):
        mask = ms[i] * (i + 1)
        masks = masks + mask
        masks = np.clip(masks, a_min=0, a_max=i + 1)
    return masks, index
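
# Editor's note, usage sketch (not part of the upstream file; coordinates are
# made up): two overlapping squares encoded in one index mask. Masks are sorted
# by descending area, so smaller instances overwrite larger ones where they overlap.
#     segs = [np.array([[10., 10.], [120., 10.], [120., 120.], [10., 120.]]),
#             np.array([[60., 60.], [140., 60.], [140., 140.], [60., 140.]])]
#     overlap, order = polygons2masks_overlap((160, 160), segs)
#     np.unique(overlap)  # -> array([0, 1, 2], dtype=uint8)
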
Some files were not shown because too many files have changed in this diff.