diff --git a/.github/workflows/CIlinux.yml b/.github/workflows/CIlinux.yml index 0290f13..0b766ad 100644 --- a/.github/workflows/CIlinux.yml +++ b/.github/workflows/CIlinux.yml @@ -69,9 +69,10 @@ jobs: flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics if: success() - name: Upload coverage to Codecov - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 with: name: ${{ matrix.python-version }} + token: ${{ secrets.CODECOV_TOKEN }} fail_ci_if_error: true verbose: true if: ${{ env.EXIT_CODE != '124' }} \ No newline at end of file diff --git a/README.md b/README.md index 81211b9..6676c6e 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@
- - ![DeFFcode](docs/overrides/assets/images/deffcode.png#gh-light-mode-only) - ![DeFFcode](docs/overrides/assets/images/deffcode-dark.png#gh-dark-mode-only) + + ![DeFFcode](https://user-images.githubusercontent.com/34266896/211494454-a16fe5b1-e7d2-44dd-84ba-67b0578611cb.png#gh-light-mode-only) + ![DeFFcode](https://user-images.githubusercontent.com/34266896/211494463-a63a03d2-72a6-46bb-b40a-ab485a63674b.png#gh-dark-mode-only)
-[![Build Status][github-cli]][github-flow] [![Codecov branch][codecov]][code] [![Azure DevOps builds (branch)][azure-badge]][azure-pipeline] +[![Build Status][github-cli]][github-flow] [![Codecov branch][codecov]][code] [![Azure DevOps builds (branch)][azure-badge]][azure-pipeline] -[![Glitter chat][gitter-bagde]][gitter] [![Build Status][appveyor]][app] [![PyPi version][pypi-badge]][pypi] +[![Gitter chat][gitter-bagde]][gitter] [![Build Status][appveyor]][app] [![PyPi version][pypi-badge]][pypi] [![Code Style][black-badge]][black] - ----- +--- [Releases][release]   |   [Recipes][recipes]   |   [Documentation][docs]   |   [Installation][installation-notes]   |   [License](#copyright) ----- +---
@@ -73,7 +72,7 @@ Here are some key features that stand out: - Effortless [**Metadata Extraction**][extracting-video-metadata] from all streams available in the source. - Maintains the standard easy to learn [**OpenCV-Python**](https://docs.opencv.org/4.x/d6/d00/tutorial_py_root.html) coding syntax. - Out-of-the-box support for all prominent Computer Vision libraries. -- Cross-platform, runs on Python 3.7+, and easy to install. +- Cross-platform, runs on Python 3.7+, and easy to install. -[appveyor]:https://img.shields.io/appveyor/ci/abhitronix/deffcode.svg?style=for-the-badge&logo=appveyor -[codecov]:https://img.shields.io/codecov/c/gh/abhiTronix/deffcode?logo=codecov&style=for-the-badge&token=zrES4mwVKe -[github-cli]:https://img.shields.io/github/workflow/status/abhiTronix/deffcode/GitHub%20Action%20workflow%20for%20Linux?style=for-the-badge&logo=data:image/svg%2bxml;base64,PHN2ZyB3aWR0aD0iNDgiIGhlaWdodD0iNDgiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PHBhdGggY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNMTAgMWE5IDkgMCAwMTkgOSA5IDkgMCAwMS05IDkgOSA5IDAgMDEtOS05IDkgOSAwIDAxOS05ek0yMyAxOWE2IDYgMCAxMTAgMTIgNiA2IDAgMDEwLTEyek0yMyAzNWE2IDYgMCAxMTAgMTIgNiA2IDAgMDEwLTEyeiIgc3Ryb2tlPSJ2YXIoLS1jb2xvci1tYXJrZXRpbmctaWNvbi1wcmltYXJ5LCAjMjA4OEZGKSIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiIHN0cm9rZS1saW5lam9pbj0icm91bmQiLz48cGF0aCBjbGlwLXJ1bGU9ImV2ZW5vZGQiIGQ9Ik00MSAzNWE2IDYgMCAxMTAgMTIgNiA2IDAgMDEwLTEyeiIgc3Ryb2tlPSJ2YXIoLS1jb2xvci1tYXJrZXRpbmctaWNvbi1zZWNvbmRhcnksICM3OUI4RkYpIiBzdHJva2Utd2lkdGg9IjIiIHN0cm9rZS1saW5lY2FwPSJyb3VuZCIgc3Ryb2tlLWxpbmVqb2luPSJyb3VuZCIvPjxwYXRoIGQ9Ik0yNS4wMzcgMjMuNjA3bC0zLjA3IDMuMDY1LTEuNDktMS40ODUiIHN0cm9rZT0idmFyKC0tY29sb3ItbWFya2V0aW5nLWljb24tcHJpbWFyeSwgIzIwODhGRikiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIi8+PHBhdGggY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNNDEgMTlhNiA2IDAgMTEwIDEyIDYgNiAwIDAxMC0xMnoiIHN0cm9rZT0idmFyKC0tY29sb3ItbWFya2V0aW5nLWljb24tcHJpbWFyeSwgIzIwODhGRikiIHN0cm9rZS13aWR0aD0iMiI
gc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIi8+PHBhdGggZD0iTTQzLjAzNiAyMy42MDdsLTMuMDY5IDMuMDY1LTEuNDktMS40ODVNNyA2LjgxMmExIDEgMCAwMTEuNTMzLS44NDZsNS4xMTMgMy4yMmExIDEgMCAwMS0uMDA2IDEuNjk3bC01LjExMyAzLjE3QTEgMSAwIDAxNyAxMy4yMDNWNi44MTN6TTkgMTl2MTVjMCAzLjg2NiAzLjE3NyA3IDcgN2gxIiBzdHJva2U9InZhcigtLWNvbG9yLW1hcmtldGluZy1pY29uLXByaW1hcnksICMyMDg4RkYpIiBzdHJva2Utd2lkdGg9IjIiIHN0cm9rZS1saW5lY2FwPSJyb3VuZCIgc3Ryb2tlLWxpbmVqb2luPSJyb3VuZCIvPjxwYXRoIGQ9Ik0xNi45NDkgMjZhMSAxIDAgMTAwLTJ2MnpNOCAxOS4wMzVBNi45NjUgNi45NjUgMCAwMDE0Ljk2NSAyNnYtMkE0Ljk2NSA0Ljk2NSAwIDAxMTAgMTkuMDM1SDh6TTE0Ljk2NSAyNmgxLjk4NHYtMmgtMS45ODR2MnoiIGZpbGw9InZhcigtLWNvbG9yLW1hcmtldGluZy1pY29uLXByaW1hcnksICMyMDg4RkYpIi8+PHBhdGggZD0iTTI5LjA1NSAyNWg1Ljk0NCIgc3Ryb2tlPSJ2YXIoLS1jb2xvci1tYXJrZXRpbmctaWNvbi1wcmltYXJ5LCAjMjA4OEZGKSIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiIHN0cm9rZS1saW5lam9pbj0icm91bmQiLz48cGF0aCBmaWxsLXJ1bGU9ImV2ZW5vZGQiIGNsaXAtcnVsZT0iZXZlbm9kZCIgZD0iTTIxIDQwYTEgMSAwIDExLS4wMDEgMi4wMDFBMSAxIDAgMDEyMSA0MHpNMjUgNDBhMSAxIDAgMTEtLjAwMSAyLjAwMUExIDEgMCAwMTI1IDQweiIgZmlsbD0idmFyKC0tY29sb3ItbWFya2V0aW5nLWljb24tc2Vjb25kYXJ5LCAjNzlCOEZGKSIvPjxwYXRoIGQ9Ik0zNC4wMDUgNDEuMDA3bC0xLjAxMy4wMzMiIHN0cm9rZT0idmFyKC0tY29sb3ItbWFya2V0aW5nLWljb24tc2Vjb25kYXJ5LCAjNzlCOEZGKSIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiLz48L3N2Zz4= -[prs-badge]:https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=for-the-badge&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAABC0lEQVRYhdWVPQoCMRCFX6HY2ghaiZUXsLW0EDyBrbWtN/EUHsHTWFnYyCL4gxibVZZlZzKTnWz0QZpk5r0vIdkF/kBPAMOKeddE+CQPKoc5Yt5cTjBMdQSwDQToWgBJAn3jmhqgltapAV6E6b5U17MGGAUaUj07TficMfIBZDV6vxowBm1BP9WbSQE4o5h9IjPJmy73TEPDDxVmoZdQrQ5jRhly9Q8tgMUXkIIWn0oG4GYQfAXQzz1PGoCiQndM7b4RgJay/h7zBLT3hASgoKjamQJMreKf0gfuAGyYtXEIAKcL/Dss15iq6ohXghozLYiAMxPuACwtIT4yeQUxAaLrZwAoqGRKGk7qDSYTfYQ8LuYnAAAAAElFTkSuQmCC 
-[azure-badge]:https://img.shields.io/azure-devops/build/abhiuna12/942b3b13-d745-49e9-8d7d-b3918ff43ac2/3/master?logo=azure-pipelines&style=for-the-badge -[pypi-badge]:https://img.shields.io/pypi/v/deffcode.svg?style=for-the-badge&logo=pypi -[gitter-bagde]:https://img.shields.io/badge/Chat-Gitter-blueviolet.svg?style=for-the-badge&logo=gitter -[Coffee-badge]:https://abhitronix.github.io/img/deffcode/orange_img.png -[kofi-badge]:https://www.ko-fi.com/img/githubbutton_sm.svg -[black-badge]:https://img.shields.io/badge/code%20style-black-000000.svg?style=for-the-badge&logo=github + +[appveyor]: https://img.shields.io/appveyor/ci/abhitronix/deffcode.svg?style=for-the-badge&logo=appveyor +[codecov]: https://img.shields.io/codecov/c/gh/abhiTronix/deffcode?logo=codecov&style=for-the-badge&token=zrES4mwVKe +[github-cli]: https://img.shields.io/github/actions/workflow/status/abhiTronix/deffcode/.github/workflows/CIlinux.yml?style=for-the-badge&logo=data:image/svg%2bxml;base64,PHN2ZyB3aWR0aD0iNDgiIGhlaWdodD0iNDgiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PHBhdGggY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNMTAgMWE5IDkgMCAwMTkgOSA5IDkgMCAwMS05IDkgOSA5IDAgMDEtOS05IDkgOSAwIDAxOS05ek0yMyAxOWE2IDYgMCAxMTAgMTIgNiA2IDAgMDEwLTEyek0yMyAzNWE2IDYgMCAxMTAgMTIgNiA2IDAgMDEwLTEyeiIgc3Ryb2tlPSJ2YXIoLS1jb2xvci1tYXJrZXRpbmctaWNvbi1wcmltYXJ5LCAjMjA4OEZGKSIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiIHN0cm9rZS1saW5lam9pbj0icm91bmQiLz48cGF0aCBjbGlwLXJ1bGU9ImV2ZW5vZGQiIGQ9Ik00MSAzNWE2IDYgMCAxMTAgMTIgNiA2IDAgMDEwLTEyeiIgc3Ryb2tlPSJ2YXIoLS1jb2xvci1tYXJrZXRpbmctaWNvbi1zZWNvbmRhcnksICM3OUI4RkYpIiBzdHJva2Utd2lkdGg9IjIiIHN0cm9rZS1saW5lY2FwPSJyb3VuZCIgc3Ryb2tlLWxpbmVqb2luPSJyb3VuZCIvPjxwYXRoIGQ9Ik0yNS4wMzcgMjMuNjA3bC0zLjA3IDMuMDY1LTEuNDktMS40ODUiIHN0cm9rZT0idmFyKC0tY29sb3ItbWFya2V0aW5nLWljb24tcHJpbWFyeSwgIzIwODhGRikiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIi8+PHBhdGggY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNNDEgMTlhNiA2IDAgMTEwIDEyIDYgNiAwIDAxMC0xMnoiIH
N0cm9rZT0idmFyKC0tY29sb3ItbWFya2V0aW5nLWljb24tcHJpbWFyeSwgIzIwODhGRikiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIi8+PHBhdGggZD0iTTQzLjAzNiAyMy42MDdsLTMuMDY5IDMuMDY1LTEuNDktMS40ODVNNyA2LjgxMmExIDEgMCAwMTEuNTMzLS44NDZsNS4xMTMgMy4yMmExIDEgMCAwMS0uMDA2IDEuNjk3bC01LjExMyAzLjE3QTEgMSAwIDAxNyAxMy4yMDNWNi44MTN6TTkgMTl2MTVjMCAzLjg2NiAzLjE3NyA3IDcgN2gxIiBzdHJva2U9InZhcigtLWNvbG9yLW1hcmtldGluZy1pY29uLXByaW1hcnksICMyMDg4RkYpIiBzdHJva2Utd2lkdGg9IjIiIHN0cm9rZS1saW5lY2FwPSJyb3VuZCIgc3Ryb2tlLWxpbmVqb2luPSJyb3VuZCIvPjxwYXRoIGQ9Ik0xNi45NDkgMjZhMSAxIDAgMTAwLTJ2MnpNOCAxOS4wMzVBNi45NjUgNi45NjUgMCAwMDE0Ljk2NSAyNnYtMkE0Ljk2NSA0Ljk2NSAwIDAxMTAgMTkuMDM1SDh6TTE0Ljk2NSAyNmgxLjk4NHYtMmgtMS45ODR2MnoiIGZpbGw9InZhcigtLWNvbG9yLW1hcmtldGluZy1pY29uLXByaW1hcnksICMyMDg4RkYpIi8+PHBhdGggZD0iTTI5LjA1NSAyNWg1Ljk0NCIgc3Ryb2tlPSJ2YXIoLS1jb2xvci1tYXJrZXRpbmctaWNvbi1wcmltYXJ5LCAjMjA4OEZGKSIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiIHN0cm9rZS1saW5lam9pbj0icm91bmQiLz48cGF0aCBmaWxsLXJ1bGU9ImV2ZW5vZGQiIGNsaXAtcnVsZT0iZXZlbm9kZCIgZD0iTTIxIDQwYTEgMSAwIDExLS4wMDEgMi4wMDFBMSAxIDAgMDEyMSA0MHpNMjUgNDBhMSAxIDAgMTEtLjAwMSAyLjAwMUExIDEgMCAwMTI1IDQweiIgZmlsbD0idmFyKC0tY29sb3ItbWFya2V0aW5nLWljb24tc2Vjb25kYXJ5LCAjNzlCOEZGKSIvPjxwYXRoIGQ9Ik0zNC4wMDUgNDEuMDA3bC0xLjAxMy4wMzMiIHN0cm9rZT0idmFyKC0tY29sb3ItbWFya2V0aW5nLWljb24tc2Vjb25kYXJ5LCAjNzlCOEZGKSIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiLz48L3N2Zz4= +[prs-badge]: https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=for-the-badge&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAABC0lEQVRYhdWVPQoCMRCFX6HY2ghaiZUXsLW0EDyBrbWtN/EUHsHTWFnYyCL4gxibVZZlZzKTnWz0QZpk5r0vIdkF/kBPAMOKeddE+CQPKoc5Yt5cTjBMdQSwDQToWgBJAn3jmhqgltapAV6E6b5U17MGGAUaUj07TficMfIBZDV6vxowBm1BP9WbSQE4o5h9IjPJmy73TEPDDxVmoZdQrQ5jRhly9Q8tgMUXkIIWn0oG4GYQfAXQzz1PGoCiQndM7b4RgJay/h7zBLT3hASgoKjamQJMreKf0gfuAGyYtXEIAKcL/Dss15iq6ohXghozLYiAMxPuACwtIT4yeQUxAaLrZwAoqGRKGk7qDSYTfYQ8LuYnAAAAAElFTkSuQmCC +[azure-badge]: 
https://img.shields.io/azure-devops/build/abhiuna12/942b3b13-d745-49e9-8d7d-b3918ff43ac2/3/master?logo=azure-pipelines&style=for-the-badge +[pypi-badge]: https://img.shields.io/pypi/v/deffcode.svg?style=for-the-badge&logo=pypi +[gitter-bagde]: https://img.shields.io/badge/Chat-Gitter-blueviolet.svg?style=for-the-badge&logo=gitter +[coffee-badge]: https://abhitronix.github.io/img/deffcode/orange_img.png +[kofi-badge]: https://www.ko-fi.com/img/githubbutton_sm.svg +[black-badge]: https://img.shields.io/badge/code%20style-black-000000.svg?style=for-the-badge&logo=github -[docs]:https://abhitronix.github.io/deffcode/latest/ -[release]:https://github.com/abhiTronix/deffcode/releases/latest -[recipes]:https://abhitronix.github.io/deffcode/latest/recipes/basic/ -[license]:https://github.com/abhiTronix/deffcode/blob/master/LICENSE -[help]:https://abhitronix.github.io/deffcode/latest/help/get_help -[installation-notes]:https://abhitronix.github.io/deffcode/latest/installation/#installation-notes -[ffdecoder-api]:https://abhitronix.github.io/deffcode/latest/reference/ffdecoder/#ffdecoder-api -[sourcer-api]:https://abhitronix.github.io/deffcode/latest/reference/sourcer/#sourcer-api -[contribute]:https://abhitronix.github.io/deffcode/latest/contribution/ + +[docs]: https://abhitronix.github.io/deffcode/latest/ +[release]: https://github.com/abhiTronix/deffcode/releases/latest +[recipes]: https://abhitronix.github.io/deffcode/latest/recipes/basic/ +[license]: https://github.com/abhiTronix/deffcode/blob/master/LICENSE +[help]: https://abhitronix.github.io/deffcode/latest/help/get_help +[installation-notes]: https://abhitronix.github.io/deffcode/latest/installation/#installation-notes +[ffdecoder-api]: https://abhitronix.github.io/deffcode/latest/reference/ffdecoder/#ffdecoder-api +[sourcer-api]: https://abhitronix.github.io/deffcode/latest/reference/sourcer/#sourcer-api +[contribute]: https://abhitronix.github.io/deffcode/latest/contribution/ 
-[basic-recipes]:https://abhitronix.github.io/deffcode/latest/recipes/basic/ -[decoding-video-files]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-video-files/#decoding-video-files -[decoding-camera-devices-using-indexes]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-camera-devices/#decoding-camera-devices-using-indexes -[decoding-network-streams]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-network-streams/#decoding-network-streams -[decoding-image-sequences]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-image-sequences/#decoding-image-sequences -[transcode-live-frames]:https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames/ -[transcoding-live-simple-filtergraphs]:https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-live-simple-filtergraphs -[saving-key-frames-as-image]:https://abhitronix.github.io/deffcode/latest/recipes/basic/save-keyframe-image/#saving-key-frames-as-image -[extracting-video-metadata]:https://abhitronix.github.io/deffcode/latest/recipes/basic/extract-video-metadata/#extracting-video-metadata - -[accessing-rgb-frames-from-a-video-file]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-video-files/#accessing-rgb-frames-from-a-video-file -[capturing-and-previewing-bgr-frames-from-a-video-file]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-video-files/#capturing-and-previewing-bgr-frames-from-a-video-file -[capturing-and-previewing-bgr-frames-from-a-video-file]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-video-files/#capturing-and-previewing-bgr-frames-from-a-video-file -[enumerating-all-camera-devices-with-indexes]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-camera-devices/#enumerating-all-camera-devices-with-indexes 
-[capturing-and-previewing-frames-from-a-camera-using-indexes]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-camera-devices/#capturing-and-previewing-frames-from-a-camera-using-indexes -[capturing-and-previewing-frames-from-a-https-stream]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-https-stream -[capturing-and-previewing-frames-from-a-rtsprtp-stream]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-rtsprtp-stream -[capturing-and-previewing-frames-from-sequence-of-images]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-sequence-of-images -[capturing-and-previewing-frames-from-single-looping-image]:https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-single-looping-image -[transcoding-video-using-opencv-videowriter-api]:https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames/#transcoding-video-using-opencv-videowriter-api -[transcoding-lossless-video-using-writegear-api]:https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames/#transcoding-lossless-video-using-writegear-api -[transcoding-trimmed-and-reversed-video]:https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-trimmed-and-reversed-video -[transcoding-cropped-video]:https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-cropped-video -[transcoding-rotated-video-with-rotate-filter]:https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-rotate-filter 
-[transcoding-rotated-video-with-transpose-filter]:https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-transpose-filter -[transcoding-horizontally-flipped-and-scaled-video]:https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-horizontally-flipped-and-scaled-video -[extracting-key-frames-as-png-image]:https://abhitronix.github.io/deffcode/latest/recipes/basic/save-keyframe-image/#extracting-key-frames-as-png-image -[generating-thumbnail-with-a-fancy-filter]:https://abhitronix.github.io/deffcode/latest/recipes/basic/save-keyframe-image/#generating-thumbnail-with-a-fancy-filter -[extracting-video-metadata-using-sourcer-api]:https://abhitronix.github.io/deffcode/latest/recipes/basic/extract-video-metadata/#extracting-video-metadata-using-sourcer-api -[extracting-video-metadata-using-ffdecoder-api]:https://abhitronix.github.io/deffcode/latest/recipes/basic/extract-video-metadata/#extracting-video-metadata-using-ffdecoder-api + +[basic-recipes]: https://abhitronix.github.io/deffcode/latest/recipes/basic/ +[decoding-video-files]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-video-files/#decoding-video-files +[decoding-camera-devices-using-indexes]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-camera-devices/#decoding-camera-devices-using-indexes +[decoding-network-streams]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-network-streams/#decoding-network-streams +[decoding-image-sequences]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-image-sequences/#decoding-image-sequences +[transcode-live-frames]: https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames/ +[transcoding-live-simple-filtergraphs]: https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-live-simple-filtergraphs 
+[saving-key-frames-as-image]: https://abhitronix.github.io/deffcode/latest/recipes/basic/save-keyframe-image/#saving-key-frames-as-image +[extracting-video-metadata]: https://abhitronix.github.io/deffcode/latest/recipes/basic/extract-video-metadata/#extracting-video-metadata +[accessing-rgb-frames-from-a-video-file]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-video-files/#accessing-rgb-frames-from-a-video-file +[capturing-and-previewing-bgr-frames-from-a-video-file]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-video-files/#capturing-and-previewing-bgr-frames-from-a-video-file +[playing-with-any-other-ffmpeg-pixel-formats]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-video-files/#playing-with-any-other-ffmpeg-pixel-formats +[capturing-and-previewing-frames-from-a-looping-video]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-video-files/#capturing-and-previewing-frames-from-a-looping-video +[enumerating-all-camera-devices-with-indexes]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-camera-devices/#enumerating-all-camera-devices-with-indexes +[capturing-and-previewing-frames-from-a-camera-using-indexes]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-camera-devices/#capturing-and-previewing-frames-from-a-camera-using-indexes +[capturing-and-previewing-frames-from-a-https-stream]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-https-stream +[capturing-and-previewing-frames-from-a-rtsprtp-stream]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-rtsprtp-stream +[capturing-and-previewing-frames-from-sequence-of-images]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-sequence-of-images 
+[capturing-and-previewing-frames-from-single-looping-image]: https://abhitronix.github.io/deffcode/latest/recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-single-looping-image +[transcoding-video-using-opencv-videowriter-api]: https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames/#transcoding-video-using-opencv-videowriter-api +[transcoding-lossless-video-using-writegear-api]: https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames/#transcoding-lossless-video-using-writegear-api +[transcoding-trimmed-and-reversed-video]: https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-trimmed-and-reversed-video +[transcoding-cropped-video]: https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-cropped-video +[transcoding-rotated-video-with-rotate-filter]: https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-rotate-filter +[transcoding-rotated-video-with-transpose-filter]: https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-transpose-filter +[transcoding-horizontally-flipped-and-scaled-video]: https://abhitronix.github.io/deffcode/latest/recipes/basic/transcode-live-frames-simplegraphs/#transcoding-horizontally-flipped-and-scaled-video +[extracting-key-frames-as-png-image]: https://abhitronix.github.io/deffcode/latest/recipes/basic/save-keyframe-image/#extracting-key-frames-as-png-image +[generating-thumbnail-with-a-fancy-filter]: https://abhitronix.github.io/deffcode/latest/recipes/basic/save-keyframe-image/#generating-thumbnail-with-a-fancy-filter +[extracting-video-metadata-using-sourcer-api]: https://abhitronix.github.io/deffcode/latest/recipes/basic/extract-video-metadata/#extracting-video-metadata-using-sourcer-api 
+[extracting-video-metadata-using-ffdecoder-api]: https://abhitronix.github.io/deffcode/latest/recipes/basic/extract-video-metadata/#extracting-video-metadata-using-ffdecoder-api -[advanced-recipes]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/ -[decoding-live-virtual-sources]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#decoding-live-virtual-sources -[decoding-live-feed-devices]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-feed-devices/#decoding-live-feed-devices -[hardware-accelerated-video-decoding]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-hw-acceleration/#hardware-accelerated-video-decoding -[transcoding-live-complex-filtergraphs]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-live-complex-filtergraphs -[transcoding-video-art-with-filtergraphs]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-filtergraphs -[hardware-accelerated-video-transcoding]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-hw-acceleration/#hardware-accelerated-video-transcoding -[updating-video-metadata]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/update-metadata/#updating-video-metadata - - -[generate-and-decode-frames-from-sierpinski-pattern]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-sierpinski-pattern -[generate-and-decode-frames-from-test-source-pattern]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-test-source-pattern -[generate-and-decode-frames-from-gradients-with-custom-text-effect]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-gradients-with-custom-text-effect 
-[generate-and-decode-frames-from-mandelbrot-test-pattern-with-vectorscope-waveforms]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-mandelbrot-test-pattern-with-vectorscope-waveforms -[generate-and-decode-frames-from-game-of-life-visualization]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-game-of-life-visualization -[gpu-accelerated-hardware-based-video-decoding]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-hw-acceleration/#gpu-accelerated-hardware-based-video-decoding -[transcoding-video-with-live-custom-watermark-image-overlay]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-with-live-custom-watermark-image-overlay -[transcoding-video-from-sequence-of-images-with-additional-filtering]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-from-sequence-of-images-with-additional-filtering -[transcoding-video-art-with-jetcolor-effect]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-jetcolor-effect -[transcoding-video-art-with-yuv-bitplane-visualization]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-yuv-bitplane-visualization -[transcoding-video-art-with-ghosting-effect]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-ghosting-effect -[transcoding-video-art-with-pixelation-effect]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-pixelation-effect 
-[capturing-and-previewing-frames-from-a-webcam-using-custom-demuxer]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-a-webcam-using-custom-demuxer -[capturing-and-previewing-frames-from-your-desktop]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-your-desktop -[gpu-accelerated-hardware-based-video-transcoding-with-writegear-api]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-hw-acceleration/#gpu-accelerated-hardware-based-video-transcoding-with-writegear-api -[added-new-attributes-to-metadata-in-ffdecoder-api]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/update-metadata/#added-new-attributes-to-metadata-in-ffdecoder-api -[overriding-source-video-metadata-in-ffdecoder-api]:https://abhitronix.github.io/deffcode/latest/recipes/advanced/update-metadata/#overriding-source-video-metadata-in-ffdecoder-api + +[advanced-recipes]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/ +[decoding-live-virtual-sources]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#decoding-live-virtual-sources +[decoding-live-feed-devices]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-feed-devices/#decoding-live-feed-devices +[hardware-accelerated-video-decoding]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-hw-acceleration/#hardware-accelerated-video-decoding +[transcoding-live-complex-filtergraphs]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-live-complex-filtergraphs +[transcoding-video-art-with-filtergraphs]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-filtergraphs +[hardware-accelerated-video-transcoding]: 
https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-hw-acceleration/#hardware-accelerated-video-transcoding +[updating-video-metadata]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/update-metadata/#updating-video-metadata +[generate-and-decode-frames-from-sierpinski-pattern]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-sierpinski-pattern +[generate-and-decode-frames-from-test-source-pattern]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-test-source-pattern +[generate-and-decode-frames-from-gradients-with-custom-text-effect]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-gradients-with-custom-text-effect +[generate-and-decode-frames-from-mandelbrot-test-pattern-with-vectorscope-waveforms]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-mandelbrot-test-pattern-with-vectorscope-waveforms +[generate-and-decode-frames-from-game-of-life-visualization]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-game-of-life-visualization +[cuvid-accelerated-hardware-based-video-decoding-and-previewing]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-hw-acceleration/#cuvid-accelerated-hardware-based-video-decoding-and-previewing +[cuda-accelerated-hardware-based-video-decoding-and-previewing]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-hw-acceleration/#cuda-accelerated-hardware-based-video-decoding-and-previewing +[transcoding-video-with-live-custom-watermark-image-overlay]: 
https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-with-live-custom-watermark-image-overlay +[transcoding-video-from-sequence-of-images-with-additional-filtering]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-from-sequence-of-images-with-additional-filtering +[transcoding-video-art-with-jetcolor-effect]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-jetcolor-effect +[transcoding-video-art-with-yuv-bitplane-visualization]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-yuv-bitplane-visualization +[transcoding-video-art-with-ghosting-effect]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-ghosting-effect +[transcoding-video-art-with-pixelation-effect]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-pixelation-effect +[capturing-and-previewing-frames-from-a-webcam-using-custom-demuxer]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-a-webcam-using-custom-demuxer +[capturing-and-previewing-frames-from-your-desktop]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-your-desktop +[cuda-accelerated-video-transcoding-with-opencvs-videowriter-api]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-hw-acceleration/#cuda-accelerated-video-transcoding-with-opencvs-videowriter-api +[cuda-nvenc-accelerated-video-transcoding-with-writegear-api]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-video-transcoding-with-writegear-api 
+[cuda-nvenc-accelerated-end-to-end-lossless-video-transcoding-with-writegear-api]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-end-to-end-lossless-video-transcoding-with-writegear-api +[added-new-attributes-to-metadata-in-ffdecoder-api]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/update-metadata/#added-new-attributes-to-metadata-in-ffdecoder-api +[overriding-source-video-metadata-in-ffdecoder-api]: https://abhitronix.github.io/deffcode/latest/recipes/advanced/update-metadata/#overriding-source-video-metadata-in-ffdecoder-api -[github-flow]:https://github.com/abhiTronix/deffcode/actions/workflows/CIlinux.yml -[azure-pipeline]:https://dev.azure.com/abhiuna12/public/_build?definitionId=3 -[app]:https://ci.appveyor.com/project/abhiTronix/deffcode -[code]:https://codecov.io/gh/abhiTronix/deffcode + +[github-flow]: https://github.com/abhiTronix/deffcode/actions/workflows/CIlinux.yml +[azure-pipeline]: https://dev.azure.com/abhiuna12/public/_build?definitionId=3 +[app]: https://ci.appveyor.com/project/abhiTronix/deffcode +[code]: https://codecov.io/gh/abhiTronix/deffcode [black]: https://github.com/psf/black -[opencv-py]:https://docs.opencv.org/4.x/d6/d00/tutorial_py_root.html -[ffmpeg]:https://www.ffmpeg.org/ -[pypi]:https://pypi.org/project/deffcode/ -[gitter]:https://gitter.im/deffcode-python/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge -[coffee]:https://www.buymeacoffee.com/2twOXFvlA + +[opencv-py]: https://docs.opencv.org/4.x/d6/d00/tutorial_py_root.html +[ffmpeg]: https://www.ffmpeg.org/ +[pypi]: https://pypi.org/project/deffcode/ +[gitter]: https://gitter.im/deffcode-python/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge +[coffee]: https://www.buymeacoffee.com/2twOXFvlA [kofi]: https://ko-fi.com/W7W8WTYO diff --git a/deffcode/ffdecoder.py b/deffcode/ffdecoder.py index 2d259ef..e11207e 100644 --- a/deffcode/ffdecoder.py +++ 
b/deffcode/ffdecoder.py @@ -199,6 +199,15 @@ def __init__( # handle valid FFmpeg assets location self.__ffmpeg = self.__sourcer_metadata["ffmpeg_binary_path"] + # handle YUV pixel formats(such as `yuv420p`, `yuv444p`, `nv12`, `nv21` etc.) + # patch for compatibility with OpenCV APIs. + self.__cv_patch = self.__extra_params.pop("-enforce_cv_patch", False) + if not (isinstance(self.__cv_patch, bool)): + self.__cv_patch = False + self.__verbose_logs and logger.critical( + "Enforcing OpenCV compatibility patch for YUV/NV frames." + ) + # handle pass-through audio mode works in conjunction with WriteGear [TODO] self.__passthrough_mode = self.__extra_params.pop("-passthrough_audio", False) if not (isinstance(self.__passthrough_mode, bool)): @@ -428,7 +437,9 @@ def formulate(self): for x in self.__ff_pixfmt_metadata if x[0] == rawframe_pixfmt ][0] - raw_bit_per_component = rawframesbpp // self.__raw_frame_depth + raw_bit_per_component = ( + rawframesbpp // self.__raw_frame_depth if self.__raw_frame_depth else 0 + ) if 4 <= raw_bit_per_component <= 8: self.__raw_frame_dtype = np.dtype("u1") elif 8 < raw_bit_per_component <= 16 and rawframe_pixfmt.endswith( @@ -598,7 +609,7 @@ def formulate(self): self.__raw_frame_num = None # log that number of frames are unknown self.__verbose_logs and logger.info( - "Live/Network Stream detected! Number of frames in given source are not known." + "Number of frames in given source are unknown. Live/Network/Looping stream detected!" ) # log Mode of Operation @@ -626,11 +637,15 @@ def __fetchNextfromPipeline(self): self.__process is None ), "Pipeline is not running! You must call `formulate()` method first." 
- # formulated raw frame size + # formulated raw frame size and apply YUV pixel formats patch(if applicable) raw_frame_size = ( - self.__raw_frame_depth - * self.__raw_frame_resolution[0] - * self.__raw_frame_resolution[1] + (self.__raw_frame_resolution[0] * (self.__raw_frame_resolution[1] * 3 // 2)) + if self.__raw_frame_pixfmt.startswith(("yuv", "nv")) and self.__cv_patch + else ( + self.__raw_frame_depth + * self.__raw_frame_resolution[0] + * self.__raw_frame_resolution[1] + ) ) # next dataframe as numpy ndarray nparray = None @@ -668,6 +683,12 @@ def __fetchNextFrame(self): self.__raw_frame_depth, ) )[:, :, 0] + elif self.__raw_frame_pixfmt.startswith(("yuv", "nv")) and self.__cv_patch: + # reconstruct exclusive YUV formats frames for OpenCV APIs + frame = frame.reshape( + self.__raw_frame_resolution[1] * 3 // 2, + self.__raw_frame_resolution[0], + ) else: # reconstruct default frames frame = frame.reshape( diff --git a/deffcode/ffhelper.py b/deffcode/ffhelper.py index 10cb5ec..0611a57 100644 --- a/deffcode/ffhelper.py +++ b/deffcode/ffhelper.py @@ -27,8 +27,7 @@ from tqdm import tqdm from pathlib import Path -from requests.adapters import HTTPAdapter -from requests.packages.urllib3.util.retry import Retry +from requests.adapters import HTTPAdapter, Retry # import utils packages from .utils import logger_handler, delete_file_safe @@ -48,7 +47,7 @@ class TimeoutHTTPAdapter(HTTPAdapter): A custom Transport Adapter with default timeouts """ - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs): self.timeout = DEFAULT_TIMEOUT if "timeout" in kwargs: self.timeout = kwargs["timeout"] @@ -215,7 +214,11 @@ def download_ffmpeg_binaries(path, os_windows=False, os_bit=""): http.mount("https://", adapter) response = http.get(file_url, stream=True) response.raise_for_status() - total_length = response.headers.get("content-length") + total_length = ( + response.headers.get("content-length") + if "content-length" in response.headers + else 
len(response.content) + ) assert not ( total_length is None ), "[Helper:ERROR] :: Failed to retrieve files, check your Internet connectivity!" diff --git a/deffcode/sourcer.py b/deffcode/sourcer.py index b22b166..1dfe1cf 100644 --- a/deffcode/sourcer.py +++ b/deffcode/sourcer.py @@ -362,6 +362,12 @@ def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False): "approx_video_nframes": ( int(self.__approx_video_nframes) if self.__approx_video_nframes + and not any( + "loop" in x for x in self.__ffmpeg_prefixes + ) # check if any loops in prefix + and not any( + "loop" in x for x in dict2Args(self.__sourcer_params) + ) # check if any loops in filters else None ), "source_video_bitrate": self.__default_video_bitrate, diff --git a/deffcode/version.py b/deffcode/version.py index 788da1f..fe404ae 100644 --- a/deffcode/version.py +++ b/deffcode/version.py @@ -1 +1 @@ -__version__ = "0.2.4" +__version__ = "0.2.5" diff --git a/docs/changelog.md b/docs/changelog.md index eaeb68c..f25e5dc 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -20,7 +20,83 @@ limitations under the License. # Release Notes -## v0.2.4 (2022-10-07) :material-new-box: +## v0.2.5 (2023-01-11) :material-new-box: + +??? new "New Features" + - [x] **FFdecoder:** + * Added OpenCV compatibility patch for YUV pixel-formats. + * Implemented new patch for handling YUV pixel-formats(such as `YUV420p`, `yuv444p`, `NV12`, `NV21` etc.) for exclusive compatibility with OpenCV APIs. + * **Note:** Only YUV pixel-formats starting with `YUV` and `NV` are currently supported. + * Added new `-enforce_cv_patch` boolean attribute for enabling OpenCV compatibility patch. + - [x] **Sourcer:** + * Added Looping Video support. + * Now raw-frame numbers revert to null(`None`) whenever any looping is defined through filter(such as `-filter_complex "loop=loop=3:size=75:start=25"`) or prefix(`"-ffprefixes":["-stream_loop", "3"]`). 
+ - [x] **Docs:** + * Added YUV frames example code for `Capturing and Previewing BGR frames from a video file` recipe. + * Added YUV frames example code for `Transcoding video using OpenCV VideoWriter API` recipe. + * Added YUV frames example code for `Transcoding lossless video using WriteGear API` recipe. + * Added new CUVID-accelerated Hardware-based Video Decoding and Previewing recipe. + * Added new CUDA-accelerated Hardware-based Video Decoding and Previewing recipe. + * Added new CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API recipe. + * Added new CUDA-NVENC-accelerated Video Transcoding with WriteGear API recipe both for consuming BGR and NV12 frames. + * Added new CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API recipe which is still WIP(💬confirmed with a GIF from tenor). + * Added new Capturing and Previewing frames from a Looping Video recipe using `-stream_loop` option and `loop` filter. + * Added docs for `-enforce_cv_patch` boolean attribute in `ffparam` dictionary parameter. + * Added new python dependency block for recipes. + * Reflected new OpenCV compatibility patch for YUV pixel-formats in code. + * Added new `content.code.copy` and `content.code.link` features. + +??? success "Updates/Improvements" + - [x] FFhelper: + * Replaced deprecated `Retry` API from `requests.packages` with `requests.adapters`. + - [x] Maintenance: + * Replaced `raw.github.com` links with GitLab and GH links. + * Removed unused code. + * Updated log message. + - [x] CI: + * Updated `test_FFdecoder_params` unittest to include `with` statement access method. + * Updated `test_frame_format` test to include `-enforce_cv_patch` boolean attribute. + * Updated `test_source` to test looping video support. + - [x] Setup: + * Removed unused imports and patches. + * Bumped version to `0.2.5`. + - [x] Docs: + * Updated Limitation: Bottleneck in Hardware-Accelerated Video Transcoding performance with Real-time Frame processing passage. 
+ * Updated and corrected docs hyperlinks in index.md and ReadMe.md + * Updated Zenodo Badge and BibTeX entry. + * Updated `Readme.md` banner image URLs. + * Updated md-typeset text font size to `.75rem`. + * Updated text and admonitions. + * Updated recipe assumptions. + * Updated `Readme.md` GIF URLs. + * Updated abstract text in recipes. + * Updated `changelog.md`. + * Updated recipe code. + * Removed old recipes. + +??? bug "Bug-fixes" + - [x] FFdecoder API: + * Fixed Zero division bug while calculating `raw_bit_per_component`. + - [x] FFhelper: + * Fixed response.headers returning `content-length` as NoneType since it may not necessarily have the Content-Length header set. + * **Reason:** The response from gitlab.com contains a Transfer-Encoding field as `'Transfer-Encoding': 'chunked'`, which means data is sent in a series of chunks, so the Content-Length header is omitted. More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Transfer-Encoding#Directives + - [x] Docs: + * Fixed https://github.com/badges/shields/issues/8671 badge issue in README.md + * Removed deprecated text. + * Fixed several typos in docs. + - [x] CI: + * Added fix for codecov upload bug (https://github.com/codecov/codecov-action/issues/598). + * Updated `codecov-action` workflow to `v3`. + * Added new `CODECOV_TOKEN` GitHub secret. + +??? question "Pull Requests" + * PR #37 + +   + +   + +## v0.2.4 (2022-10-07) ??? new "New Features" - [x] **FFdecoder API:** diff --git a/docs/index.md b/docs/index.md index 972b98c..eaba80d 100644 --- a/docs/index.md +++ b/docs/index.md @@ -197,18 +197,18 @@ It is something I am doing with my own free time. 
But so much more needs to be d Here is a Bibtex entry you can use to cite this project in a publication: -[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.6984364.svg)](https://doi.org/10.5281/zenodo.6984364) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7155399.svg)](https://doi.org/10.5281/zenodo.7155399) ```BibTeX @software{deffcode, - author = {Abhishek Thakur}, - title = {abhiTronix/deffcode: v0.2.3}, - month = aug, + author = {Abhishek Singh Thakur}, + title = {abhiTronix/deffcode: v0.2.4}, + month = oct, year = 2022, publisher = {Zenodo}, - version = {v0.2.3}, - doi = {10.5281/zenodo.6984364}, - url = {https://doi.org/10.5281/zenodo.6984364} + version = {v0.2.4}, + doi = {10.5281/zenodo.7155399}, + url = {https://doi.org/10.5281/zenodo.7155399} } ``` diff --git a/docs/overrides/assets/stylesheets/custom.css b/docs/overrides/assets/stylesheets/custom.css index b28dbc6..dfdd29d 100644 --- a/docs/overrides/assets/stylesheets/custom.css +++ b/docs/overrides/assets/stylesheets/custom.css @@ -42,18 +42,18 @@ limitations under the License. 
border-color: rgb(0, 0, 139); } -.md-typeset .advance > .admonition-title, -.md-typeset .advance > summary, -.md-typeset .experiment > .admonition-title, -.md-typeset .experiment > summary { +.md-typeset .advance>.admonition-title, +.md-typeset .advance>summary, +.md-typeset .experiment>.admonition-title, +.md-typeset .experiment>summary { background-color: rgb(0, 0, 139, 0.1); border-color: rgb(0, 0, 139); } -.md-typeset .advance > .admonition-title::before, -.md-typeset .advance > summary::before, -.md-typeset .experiment > .admonition-title::before, -.md-typeset .experiment > summary::before { +.md-typeset .advance>.admonition-title::before, +.md-typeset .advance>summary::before, +.md-typeset .experiment>.admonition-title::before, +.md-typeset .experiment>summary::before { background-color: rgb(0, 0, 139); -webkit-mask-image: var(--md-admonition-icon--xadvance); mask-image: var(--md-admonition-icon--xadvance); @@ -65,14 +65,14 @@ limitations under the License. border-color: rgb(255, 195, 0); } -.md-typeset .new > .admonition-title, -.md-typeset .new > summary { - background-color: rgb(255, 195, 0,0.1); +.md-typeset .new>.admonition-title, +.md-typeset .new>summary { + background-color: rgb(255, 195, 0, 0.1); border-color: rgb(255, 195, 0); } -.md-typeset .new > .admonition-title::before, -.md-typeset .new > summary::before { +.md-typeset .new>.admonition-title::before, +.md-typeset .new>summary::before { background-color: rgb(255, 195, 0); -webkit-mask-image: var(--md-admonition-icon--new); mask-image: var(--md-admonition-icon--new); @@ -85,144 +85,146 @@ limitations under the License. 
border-color: rgb(255, 0, 127); } -.md-typeset .alert > .admonition-title, -.md-typeset .alert > summary { - background-color: rgba(255, 0, 127 , 0.1); +.md-typeset .alert>.admonition-title, +.md-typeset .alert>summary { + background-color: rgba(255, 0, 127, 0.1); border-color: rgb(255, 0, 127); } -.md-typeset .alert > .admonition-title::before, -.md-typeset .alert > summary::before { +.md-typeset .alert>.admonition-title::before, +.md-typeset .alert>summary::before { background-color: rgb(255, 0, 127); -webkit-mask-image: var(--md-admonition-icon--alert); mask-image: var(--md-admonition-icon--alert); } /* Custom "Warning" admonition*/ -.md-typeset .attention > .admonition-title::before, -.md-typeset .attention > summary::before, -.md-typeset .caution > .admonition-title::before, -.md-typeset .caution > summary::before, -.md-typeset .warning > .admonition-title::before, -.md-typeset .warning > summary::before { +.md-typeset .attention>.admonition-title::before, +.md-typeset .attention>summary::before, +.md-typeset .caution>.admonition-title::before, +.md-typeset .caution>summary::before, +.md-typeset .warning>.admonition-title::before, +.md-typeset .warning>summary::before { -webkit-mask-image: var(--md-admonition-icon--xwarning); mask-image: var(--md-admonition-icon--xwarning); } /* Custom "Tip" admonition*/ -.md-typeset .hint > .admonition-title::before, -.md-typeset .hint > summary::before, -.md-typeset .important > .admonition-title::before, -.md-typeset .important > summary::before, -.md-typeset .tip > .admonition-title::before, -.md-typeset .tip > summary::before { +.md-typeset .hint>.admonition-title::before, +.md-typeset .hint>summary::before, +.md-typeset .important>.admonition-title::before, +.md-typeset .important>summary::before, +.md-typeset .tip>.admonition-title::before, +.md-typeset .tip>summary::before { -webkit-mask-image: var(--md-admonition-icon--xtip) !important; mask-image: var(--md-admonition-icon--xtip) !important; } /* Custom "Info" 
admonition*/ -.md-typeset .info > .admonition-title::before, -.md-typeset .info > summary::before, -.md-typeset .todo > .admonition-title::before, -.md-typeset .todo > summary::before { +.md-typeset .info>.admonition-title::before, +.md-typeset .info>summary::before, +.md-typeset .todo>.admonition-title::before, +.md-typeset .todo>summary::before { -webkit-mask-image: var(--md-admonition-icon--xinfo) !important; mask-image: var(--md-admonition-icon--xinfo) !important; } /* Custom "Danger" admonition*/ -.md-typeset .danger > .admonition-title::before, -.md-typeset .danger > summary::before, -.md-typeset .error > .admonition-title::before, -.md-typeset .error > summary::before { +.md-typeset .danger>.admonition-title::before, +.md-typeset .danger>summary::before, +.md-typeset .error>.admonition-title::before, +.md-typeset .error>summary::before { -webkit-mask-image: var(--md-admonition-icon--xdanger) !important; mask-image: var(--md-admonition-icon--xdanger) !important; } /* Custom "Note" admonition*/ -.md-typeset .note > .admonition-title::before, -.md-typeset .note > summary::before { +.md-typeset .note>.admonition-title::before, +.md-typeset .note>summary::before { -webkit-mask-image: var(--md-admonition-icon--xnote); mask-image: var(--md-admonition-icon--xnote); } /* Custom "Abstract" admonition*/ -.md-typeset .abstract > .admonition-title::before, -.md-typeset .abstract > summary::before, -.md-typeset .summary > .admonition-title::before, -.md-typeset .summary > summary::before, -.md-typeset .tldr > .admonition-title::before, -.md-typeset .tldr > summary::before { +.md-typeset .abstract>.admonition-title::before, +.md-typeset .abstract>summary::before, +.md-typeset .summary>.admonition-title::before, +.md-typeset .summary>summary::before, +.md-typeset .tldr>.admonition-title::before, +.md-typeset .tldr>summary::before { -webkit-mask-image: var(--md-admonition-icon--xabstract); mask-image: var(--md-admonition-icon--xabstract); } /* Custom "Question" admonition*/ 
-.md-typeset .faq > .admonition-title::before, -.md-typeset .faq > summary::before, -.md-typeset .help > .admonition-title::before, -.md-typeset .help > summary::before, -.md-typeset .question > .admonition-title::before, -.md-typeset .question > summary::before { +.md-typeset .faq>.admonition-title::before, +.md-typeset .faq>summary::before, +.md-typeset .help>.admonition-title::before, +.md-typeset .help>summary::before, +.md-typeset .question>.admonition-title::before, +.md-typeset .question>summary::before { -webkit-mask-image: var(--md-admonition-icon--xquestion); mask-image: var(--md-admonition-icon--xquestion); } /* Custom "Success" admonition*/ -.md-typeset .check > .admonition-title::before, -.md-typeset .check > summary::before, -.md-typeset .done > .admonition-title::before, -.md-typeset .done > summary::before, -.md-typeset .success > .admonition-title::before, -.md-typeset .success > summary::before { +.md-typeset .check>.admonition-title::before, +.md-typeset .check>summary::before, +.md-typeset .done>.admonition-title::before, +.md-typeset .done>summary::before, +.md-typeset .success>.admonition-title::before, +.md-typeset .success>summary::before { -webkit-mask-image: var(--md-admonition-icon--xsuccess) !important; mask-image: var(--md-admonition-icon--xsuccess) !important; } /* Custom "Fail" admonition*/ -.md-typeset .fail > .admonition-title::before, -.md-typeset .fail > summary::before, -.md-typeset .failure > .admonition-title::before, -.md-typeset .failure > summary::before, -.md-typeset .missing > .admonition-title::before, -.md-typeset .missing > summary::before { +.md-typeset .fail>.admonition-title::before, +.md-typeset .fail>summary::before, +.md-typeset .failure>.admonition-title::before, +.md-typeset .failure>summary::before, +.md-typeset .missing>.admonition-title::before, +.md-typeset .missing>summary::before { -webkit-mask-image: var(--md-admonition-icon--xfail); mask-image: var(--md-admonition-icon--xfail); } /* Custom "bug" 
admonition*/ -.md-typeset .bug > .admonition-title::before, -.md-typeset .bug > summary::before { +.md-typeset .bug>.admonition-title::before, +.md-typeset .bug>summary::before { -webkit-mask-image: var(--md-admonition-icon--xbug) !important; mask-image: var(--md-admonition-icon--xbug) !important; } /* Custom "Example" admonition*/ -.md-typeset .example > .admonition-title::before, -.md-typeset .example > summary::before { +.md-typeset .example>.admonition-title::before, +.md-typeset .example>summary::before { -webkit-mask-image: var(--md-admonition-icon--xexample); mask-image: var(--md-admonition-icon--xexample); } /* Handles Gitter Sidecard UI */ .gitter-open-chat-button { - background-color: var(--md-primary-fg-color) !important; - font-family: inherit !important; - font-size: 12px; - -webkit-filter: none !important; - filter: none !important; + background-color: var(--md-primary-fg-color) !important; + font-family: inherit !important; + font-size: 12px; + -webkit-filter: none !important; + filter: none !important; } /* Handles DeFFcode UI */ -.md-nav__item--active > .md-nav__link { +.md-nav__item--active>.md-nav__link { font-weight: bold; } + .center { display: block; margin-left: auto; margin-right: auto; width: 80%; } + .doc-heading { padding-top: 50px; } @@ -261,7 +263,7 @@ limitations under the License. 
/* Custom Blockquotes */ blockquote { padding: 0.5em 10px; - quotes: "\201C""\201D""\2018""\2019"; + quotes: "\201C" "\201D" "\2018" "\2019"; } blockquote:before { @@ -306,15 +308,22 @@ footer.sponsorship:not(:hover) .twemoji.heart-throb-hover svg { /* Heart Animation */ @keyframes heart { - 0%, 40%, 80%, 100% { - transform: scale(1); - } - 20%, 60% { - transform: scale(1.15); - } + + 0%, + 40%, + 80%, + 100% { + transform: scale(1); + } + + 20%, + 60% { + transform: scale(1.15); + } } + .heart { - animation: heart 1000ms infinite; + animation: heart 1000ms infinite; } /* Custom Button UI */ @@ -363,36 +372,41 @@ footer.sponsorship:not(:hover) .twemoji.heart-throb-hover svg { /* Dark Theme Changes */ -body[data-md-color-scheme="slate"] img[class="shadow"]{ - -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); - filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); +body[data-md-color-scheme="slate"] img[class="shadow"] { + -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); + filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); } -body[data-md-color-scheme="slate"] div[class="btn-container"]{ - -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); - filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); +body[data-md-color-scheme="slate"] div[class="btn-container"] { + -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); + filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); } -body[data-md-color-scheme="slate"] div[class="highlight"]{ - -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); - filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); +body[data-md-color-scheme="slate"] div[class="highlight"] { + -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); + filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); } -body[data-md-color-scheme="slate"] div[class^="admonition"]{ - -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); - filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); 
+body[data-md-color-scheme="slate"] div[class^="admonition"] { + -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); + filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); } -body[data-md-color-scheme="slate"] img[class="shadow2"]{ - -webkit-filter: drop-shadow(1px 1px 0 black); - filter: drop-shadow(1px 1px 0 black); +body[data-md-color-scheme="slate"] img[class="shadow2"] { + -webkit-filter: drop-shadow(1px 1px 0 black); + filter: drop-shadow(1px 1px 0 black); } + [data-md-color-scheme="slate"] { - --md-hue: 285; + --md-hue: 285; +} + +.md-typeset { + font-size: .75rem !important; } /* Custom Spacing*/ .spacer { - height: 1px; + height: 1px; } \ No newline at end of file diff --git a/docs/recipes/advanced/decode-hw-acceleration.md b/docs/recipes/advanced/decode-hw-acceleration.md index e38c62f..e84e5fd 100644 --- a/docs/recipes/advanced/decode-hw-acceleration.md +++ b/docs/recipes/advanced/decode-hw-acceleration.md @@ -22,9 +22,7 @@ limitations under the License. !!! abstract "FFmpeg offer access to dedicated GPU hardware with varying support on different platforms for performing a range of video-related tasks to be completed faster or using less of other resources (particularly CPU)." -> By default, DeFFcode's FFdecoder API uses the Input Source's video-decoder _(extracted using Sourcer API)_ itself for decoding its input. However, you could easily change the video-decoder to your desired specific **supported Video-Decoder** using FFmpeg options by way of its [`ffparams`](../../reference/ffdecoder/params/#ffparams) dictionary parameter. This means easy access to GPU Accelerated Hardware Decoder to get better playback and accelerated video decoding on GPUs that will generate equivalent output to software decoders, but may use less power and CPU to do so. - -!!! tip "Use `#!sh ffmpeg -decoders` terminal command to lists all FFmpeg supported decoders." 
+> By default, DeFFcode's FFdecoder API uses the Input Source's video-decoder _(extracted using Sourcer API)_ itself for decoding its input. However, you could easily change the video-decoder to your desired specific **supported Video-Decoder** using FFmpeg options by way of its [`ffparams`](../../reference/ffdecoder/params/#ffparams) dictionary parameter. This feature provides easy access to GPU Accelerated Hardware Decoder in FFdecoder API that will generate faster video frames:zap: while using little to no CPU power, as opposed to CPU intensive Software Decoders. We'll discuss its Hardware-Accelerated Video Decoding capabilities briefly in the following recipes: @@ -34,6 +32,26 @@ We'll discuss its Hardware-Accelerated Video Decoding capabilities briefly in th ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. +??? info "Additional Python Dependencies for following recipes" + + Following recipes requires additional python dependencies which can be installed easily as below: + + - [x] **OpenCV:** OpenCV is required for previewing video frames. You can easily install it directly via [`pip`](https://pypi.org/project/opencv-python/): + + ??? tip "OpenCV installation from source" + + You can also follow online tutorials for building & installing OpenCV on [Windows](https://www.learnopencv.com/install-opencv3-on-windows/), [Linux](https://www.pyimagesearch.com/2018/05/28/ubuntu-18-04-how-to-install-opencv/), [MacOS](https://www.pyimagesearch.com/2018/08/17/install-opencv-4-on-macos/) and [Raspberry Pi](https://www.pyimagesearch.com/2018/09/26/install-opencv-4-on-your-raspberry-pi/) machines manually from its source. + + :warning: Make sure not to install both *pip* and *source* version together. Otherwise installation will fail to work! + + ??? 
info "Other OpenCV binaries" + + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + + + ```sh + pip install opencv-python + ``` !!! note "Always use FFdecoder API's [`terminate()`](../../reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.terminate) method at the end to avoid undesired behavior." @@ -43,62 +61,200 @@ We'll discuss its Hardware-Accelerated Video Decoding capabilities briefly in th   -## GPU-accelerated Hardware-based Video Decoding +## CUVID-accelerated Hardware-based Video Decoding and Previewing ???+ alert "Example Assumptions" **Please note that following recipe explicitly assumes:** - - You're running :fontawesome-brands-windows: Windows operating system with a [**supported NVIDIA GPU**](https://developer.nvidia.com/nvidia-video-codec-sdk). - - You're using FFmpeg 4.4 or newer, configured with atleast `--enable-nonfree --enable-libx264 --enable-cuda --enable-cuvid --enable-cuda-nvcc` options during compilation. For manual compilation follow [these instructions ➶](https://docs.nvidia.com/video-technologies/video-codec-sdk/ffmpeg-with-nvidia-gpu/#prerequisites) + - You're running :fontawesome-brands-linux: Linux operating system with a [**supported NVIDIA GPU**](https://developer.nvidia.com/nvidia-video-codec-sdk). + - You're using FFmpeg 4.4 or newer, configured with at least ` --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc` configuration flags during compilation. 
For compilation follow [these instructions ➶](https://docs.nvidia.com/video-technologies/video-codec-sdk/ffmpeg-with-nvidia-gpu/#prerequisites) + + - [x] **Using `h264_cuvid` decoder**: Remember to check if your FFmpeg compiled with H.264 CUVID decoder support by executing following one-liner command in your terminal, and observing if output contains something similar as follows: + + ??? danger "Verifying H.264 CUVID decoder support in FFmpeg" + ```sh + $ ffmpeg -hide_banner -decoders | grep cuvid + + V..... av1_cuvid Nvidia CUVID AV1 decoder (codec av1) + V..... h264_cuvid Nvidia CUVID H264 decoder (codec h264) + V..... hevc_cuvid Nvidia CUVID HEVC decoder (codec hevc) + V..... mjpeg_cuvid Nvidia CUVID MJPEG decoder (codec mjpeg) + V..... mpeg1_cuvid Nvidia CUVID MPEG1VIDEO decoder (codec mpeg1video) + V..... mpeg2_cuvid Nvidia CUVID MPEG2VIDEO decoder (codec mpeg2video) + V..... mpeg4_cuvid Nvidia CUVID MPEG4 decoder (codec mpeg4) + V..... vc1_cuvid Nvidia CUVID VC1 decoder (codec vc1) + V..... vp8_cuvid Nvidia CUVID VP8 decoder (codec vp8) + V..... vp9_cuvid Nvidia CUVID VP9 decoder (codec vp9) + ``` + + !!! note "You can also use any of above decoder in the similar way, if supported." + !!! tip "Use `#!sh ffmpeg -decoders` terminal command to lists all FFmpeg supported decoders." + - You already have appropriate Nvidia video drivers and related softwares installed on your machine. + - If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable. These assumptions **MAY/MAY NOT** suit your current setup. Kindly use suitable parameters based your system platform and hardware settings only. 
+In this example, we will be using Nvidia's **H.264 CUVID Video decoder** in FFdecoder API to achieve GPU-accelerated hardware video decoding of **YUV420p** frames from a given Video file _(say `foo.mp4`)_, and preview them using OpenCV Library's `cv2.imshow()` method. -In this example, we will be using Nvidia's Hardware Accerlated **CUDA Video-decoder(`cuda`)** in FFdecoder API to automatically detect NV-accelerated video codec and achieve GPU-accelerated hardware video decoding of **YUV420p** frames from a given Video file _(say `foo.mp4`)_ on :fontawesome-brands-windows: Windows Machine. +!!! note "With FFdecoder API, frames extracted with YUV pixel formats _(`yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)_ are generally incompatible with OpenCV APIs such as `imshow()`. But you can make them easily compatible by using exclusive [`-enforce_cv_patch`](../../reference/ffdecoder/params/#b-exclusive-parameters) boolean attribute of its `ffparam` dictionary parameter." !!! info "More information on Nvidia's CUVID can be found [here ➶](https://developer.nvidia.com/blog/nvidia-ffmpeg-transcoding-guide/)" -!!! warning "YUV video-frames decoded with DeFFcode APIs are not yet supported by OpenCV methods." - Currently, there's no way to use DeFFcode APIs decoded YUV video-frames in OpenCV methods, and also you cannot change pixel format to any other due to NV-accelerated video codec supporting only few pixel-formats. 
+```python +# import the necessary packages +from deffcode import FFdecoder +import cv2 + +# define suitable FFmpeg parameter +ffparams = { + "-vcodec": "h264_cuvid", # use H.264 CUVID Video-decoder + "-enforce_cv_patch": True # enable OpenCV patch for YUV(YUV420p) frames +} + +# initialize and formulate the decoder with `foo.mp4` source +decoder = FFdecoder( + "foo.mp4", + frame_format="yuv420p", # use YUV420p frame pixel format + verbose=True, # enable verbose output + **ffparams # apply various params and custom filters +).formulate() + +# grab the YUV420p frame from the decoder +for frame in decoder.generateFrame(): + + # check if frame is None + if frame is None: + break + + # convert it to `BGR` pixel format, + # since imshow() method only accepts `BGR` frames + frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) + + # {do something with the BGR frame here} + + # Show output window + cv2.imshow("Output", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + +# close output window +cv2.destroyAllWindows() + +# terminate the decoder +decoder.terminate() +``` + +  + +## CUDA-accelerated Hardware-based Video Decoding and Previewing + +???+ alert "Example Assumptions" + + **Please note that following recipe explicitly assumes:** + + - You're running :fontawesome-brands-linux: Linux operating system with a [**supported NVIDIA GPU**](https://developer.nvidia.com/nvidia-video-codec-sdk). + - You're using FFmpeg 4.4 or newer, configured with at least ` --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc` configuration flags during compilation. For compilation follow [these instructions ➶](https://docs.nvidia.com/video-technologies/video-codec-sdk/ffmpeg-with-nvidia-gpu/#prerequisites) + + ??? 
danger "Verifying NVDEC/CUDA support in FFmpeg" + + To use CUDA Video-decoder(`cuda`), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows: + + ```sh + $ ffmpeg -hide_banner -pix_fmts | grep cuda + ..H.. cuda 0 0 0 + + $ ffmpeg -hide_banner -filters | egrep "cuda|npp" + ... bilateral_cuda V->V GPU accelerated bilateral filter + ... chromakey_cuda V->V GPU accelerated chromakey filter + ... colorspace_cuda V->V CUDA accelerated video color converter + ... hwupload_cuda V->V Upload a system memory frame to a CUDA device. + ... overlay_cuda VV->V Overlay one video on top of another using CUDA + ... scale_cuda V->V GPU accelerated video resizer + ... scale_npp V->V NVIDIA Performance Primitives video scaling and format conversion + ... scale2ref_npp VV->VV NVIDIA Performance Primitives video scaling and format conversion to the given reference. + ... sharpen_npp V->V NVIDIA Performance Primitives video sharpening filter. + ... thumbnail_cuda V->V Select the most representative frame in a given sequence of consecutive frames. + ... transpose_npp V->V NVIDIA Performance Primitives video transpose + T.. yadif_cuda V->V Deinterlace CUDA frames + ``` + + - You already have appropriate Nvidia video drivers and related software installed on your machine. + - If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable. + + These assumptions **MAY/MAY NOT** suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only. 
+ + +In this example, we will be using Nvidia's **CUDA Internal hwaccel Video decoder(`cuda`)** in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory _(for applying hardware filters)_, thereby achieving GPU-accelerated decoding of **NV12** pixel-format frames from a given video file _(say `foo.mp4`)_, and preview them using OpenCV Library's `cv2.imshow()` method. + +??? warning "`NV12`(for `4:2:0` input) and `NV21`(for `4:4:4` input) are the only supported pixel formats. You cannot change pixel format to any other since NV-accelerated video codec supports only them." + + NV12 is a biplanar format with a full sized Y plane followed by a single chroma plane with weaved U and V values. NV21 is the same but with weaved V and U values. The 12 in NV12 refers to 12 bits per pixel. NV12 has a half width and half height chroma channel, and therefore is a 420 subsampling. NV16 is 16 bits per pixel, with half width and full height. aka 422. NV24 is 24 bits per pixel with full sized chroma channel. aka 444. Most NV12 functions allow the destination Y pointer to be NULL. -!!! note "To learn about exclusive `-ffprefixes` parameter. See [Exclusive Parameters ➶](../../reference/ffdecoder/params/#b-exclusive-parameters)" +!!! info "With FFdecoder API, frames extracted with YUV pixel formats _(`yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)_ are generally incompatible with OpenCV APIs such as `imshow()`. But you can make them easily compatible by using exclusive [`-enforce_cv_patch`](../../reference/ffdecoder/params/#b-exclusive-parameters) boolean attribute of its `ffparams` dictionary parameter." + +!!! 
note "More information on Nvidia's GPU Accelerated Decoding can be found [here ➶](https://developer.nvidia.com/blog/nvidia-ffmpeg-transcoding-guide/)" ```python # import the necessary packages from deffcode import FFdecoder -import json +import cv2 # define suitable FFmpeg parameter ffparams = { - "-vcodec": None, # skip any decoder and let FFmpeg chose + "-vcodec": None, # skip source decoder and let FFmpeg chose + "-enforce_cv_patch": True # enable OpenCV patch for YUV(NV12) frames "-ffprefixes": [ "-vsync", - "0", - "-hwaccel", # chooses appropriate HW accelerator - "cuda", - "-hwaccel_output_format", # keeps the decoded frames in GPU memory - "cuda", + "0", # prevent duplicate frames + "-hwaccel", + "cuda", # accelerator + "-hwaccel_output_format", + "cuda", # output accelerator ], - "-custom_resolution": "null", # discard `-custom_resolution` - "-framerate": "null", # discard `-framerate` - "-vf": "scale_npp=format=yuv420p,hwdownload,format=yuv420p,fps=30.0", # define your filters + "-custom_resolution": "null", # discard source `-custom_resolution` + "-framerate": "null", # discard source `-framerate` + "-vf": "scale_cuda=640:360," # scale to 640x360 in GPU memory + + "fps=60.0," # framerate 60.0fps in GPU memory + + "hwdownload," # download hardware frames to system memory + + "format=nv12", # convert downloaded frames to NV12 pixel format } -# initialize and formulate the decoder with params and custom filters +# initialize and formulate the decoder with `foo.mp4` source decoder = FFdecoder( - "foo.mp4", frame_format="null", verbose=True, **ffparams # discard frame_format + "foo.mp4", + frame_format="null", # discard source frame pixel format + verbose=True, # enable verbose output + **ffparams # apply various params and custom filters ).formulate() -# grab the YUV420 frame from the decoder +# grab the NV12 frame from the decoder for frame in decoder.generateFrame(): # check if frame is None if frame is None: break - # {do something with the frame here} + # 
convert it to `BGR` pixel format, + # since imshow() method only accepts `BGR` frames + frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12) + + # {do something with the BGR frame here} + + # Show output window + cv2.imshow("Output", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + +# close output window +cv2.destroyAllWindows() # terminate the decoder decoder.terminate() @@ -118,15 +274,6 @@ decoder.terminate() !!! tip "You can also use optimized HEVC CUVID Video-decoder(`hevc_cuvid`) in the similar way, if supported." --> -   \ No newline at end of file diff --git a/docs/recipes/advanced/decode-live-feed-devices.md b/docs/recipes/advanced/decode-live-feed-devices.md index 196c3b5..850d8b1 100644 --- a/docs/recipes/advanced/decode-live-feed-devices.md +++ b/docs/recipes/advanced/decode-live-feed-devices.md @@ -30,7 +30,7 @@ We'll discuss the Live Feed Devices support using both these parameters briefly ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -44,7 +44,7 @@ We'll discuss the Live Feed Devices support using both these parameters briefly ??? 
info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh diff --git a/docs/recipes/advanced/decode-live-virtual-sources.md b/docs/recipes/advanced/decode-live-virtual-sources.md index b1adddd..cce37c3 100644 --- a/docs/recipes/advanced/decode-live-virtual-sources.md +++ b/docs/recipes/advanced/decode-live-virtual-sources.md @@ -30,7 +30,7 @@ We'll discuss the recipies for generating Live Fake Sources briefly below: ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? 
info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -44,7 +44,7 @@ We'll discuss the recipies for generating Live Fake Sources briefly below: ??? info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). 
```sh diff --git a/docs/recipes/advanced/index.md b/docs/recipes/advanced/index.md index b97c3b8..bafccc7 100644 --- a/docs/recipes/advanced/index.md +++ b/docs/recipes/advanced/index.md @@ -55,7 +55,8 @@ The following challenging recipes will take your skills to the next level and wi - [Capturing and Previewing frames from a Webcam using Custom Demuxer](../advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-a-webcam-using-custom-demuxer) - [Capturing and Previewing frames from your Desktop](../advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-your-desktop) _(Screen Recording)_ - [x] **[:octicons-cpu-16: Hardware-Accelerated Video Decoding](../advanced/decode-hw-acceleration/#hardware-accelerated-video-decoding)** - - [GPU-accelerated Hardware-based Video Decoding](../advanced/decode-hw-acceleration/#gpu-accelerated-hardware-based-video-decoding) + - [CUVID-accelerated Hardware-based Video Decoding and Previewing](../advanced/decode-hw-acceleration/#cuvid-accelerated-hardware-based-video-decoding-and-previewing) + - [CUDA-accelerated Hardware-based Video Decoding and Previewing](../advanced/decode-hw-acceleration/#cuda-accelerated-hardware-based-video-decoding-and-previewing)
@@ -70,7 +71,9 @@ The following challenging recipes will take your skills to the next level and wi - [Transcoding video art with Ghosting effect](../advanced/transcode-art-filtergraphs/#transcoding-video-art-with-ghosting-effect) - [Transcoding video art with Pixelation effect](../advanced/transcode-art-filtergraphs/#transcoding-video-art-with-pixelation-effect) - [x] **[:fontawesome-solid-microchip: Hardware-Accelerated Video Transcoding](../advanced/transcode-hw-acceleration/#hardware-accelerated-video-transcoding)** - - [GPU-accelerated Hardware-based Video Transcoding with WriteGear API](../advanced/transcode-hw-acceleration/#gpu-accelerated-hardware-based-video-transcoding-with-writegear-api) + - [CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API](../advanced/transcode-hw-acceleration/#cuda-accelerated-video-transcoding-with-opencvs-videowriter-api) + - [CUDA-NVENC-accelerated Video Transcoding with WriteGear API](../advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-video-transcoding-with-writegear-api) + - [CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API](../advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-end-to-end-lossless-video-transcoding-with-writegear-api)
diff --git a/docs/recipes/advanced/transcode-art-filtergraphs.md b/docs/recipes/advanced/transcode-art-filtergraphs.md index e30c5e0..d7232ea 100644 --- a/docs/recipes/advanced/transcode-art-filtergraphs.md +++ b/docs/recipes/advanced/transcode-art-filtergraphs.md @@ -44,7 +44,7 @@ We'll discuss the Transcoding Video Art with Filtergraphs in the following recip ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -58,7 +58,7 @@ We'll discuss the Transcoding Video Art with Filtergraphs in the following recip ??? info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). 
You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh diff --git a/docs/recipes/advanced/transcode-hw-acceleration.md b/docs/recipes/advanced/transcode-hw-acceleration.md index 7a3f0df..2b42edb 100644 --- a/docs/recipes/advanced/transcode-hw-acceleration.md +++ b/docs/recipes/advanced/transcode-hw-acceleration.md @@ -20,10 +20,8 @@ limitations under the License. # :fontawesome-solid-microchip: Hardware-Accelerated Video Transcoding -??? abstract "What exactly is Transcoding?" +???+ abstract "What exactly is Transcoding?" - Before heading directly into recipes we have to talk about Transcoding: - > Transcoding is the technique of transforming one media encoding format into another. This is typically done for compatibility purposes, such as when a media source provides a format that the intended target is not able to process; an in-between adaptation step is required: @@ -31,7 +29,27 @@ limitations under the License. - **Decode** media from its originally encoded state into raw, uncompressed information. - **Encode** the raw data back, using a different codec that is supported by end user. -> DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API creates a high-level **High-performance Lossless FFmpeg Transcoding _(Decoding & Encoding respectively)_ Pipeline :fire:** that is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allow us to manipulate the real-time video frames with immense flexibility. Both these APIs are capable of utilizing the potential of GPU supported fully-accelerated **Hardware based video Decoding(FFdecoder API with hardware decoder) and Encoding (WriteGear API with hardware encoder)**, thus dramatically improving the performance of the end-to-end transcoding. 
+> DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while **allowing us to process real-time video frames** with immense flexibility. Both these APIs are capable of utilizing the potential of GPU backed fully-accelerated **Hardware based video Decoding(FFdecoder API with hardware decoder) and Encoding (WriteGear API with hardware encoder)**, thus dramatically improving the transcoding performance. At same time, FFdecoder API Hardware-decoded frames are **fully compatible with OpenCV's VideoWriter API** for producing high-quality output video in real-time. + +??? danger "Limitation: Bottleneck in Hardware-Accelerated Video Transcoding performance with Real-time Frame processing" + + As we know, using the `–hwaccel cuda -hwaccel_output_format cuda` flags in FFmpeg pipeline will keep video frames in GPU memory, and this ensures that the memory transfers (system memory to video memory and vice versa) are eliminated, and that transcoding is performed with the highest possible performance on the available GPU hardware. + +
+ ![HW Acceleration](../../../assets/images/hw_accel.png){ width="350" } +
General Memory Flow with Hardware Acceleration
+
+ + But unfortunately, for processing real-time frames in our python script with FFdecoder and WriteGear APIs, we're bound to sacrifice this performance gain by explicitly copying raw decoded frames between System and GPU memory _(via the PCIe bus)_, thereby creating self-made latency in transfer time and increasing PCIe bandwidth occupancy due to overheads in communication over the bus. Moreover, given PCIe bandwidth limits, copying uncompressed image data would quickly saturate the PCIe bus. + +
+ ![HW Acceleration Limitation](../../../assets/images/hw_accel_limitation.png){ width="350" } +
Memory Flow with Hardware Acceleration
and Real-time Processing
+
+ + On the bright side, however, GPU enabled Hardware based encoding/decoding is inherently faster and more efficient _(do not use much CPU resources when frames in GPU)_ thus freeing up the CPU for other tasks, as compared to Software based encoding/decoding that is known to be completely CPU intensive. Plus scaling, de-interlacing, filtering, etc. tasks will be way faster and efficient than usual using these Hardware based decoders/encoders as oppose to Software ones. + + !!! summary "As you can see the pros definitely outweigh the cons and you're getting to process video frames in the real-time with immense speed and flexibility, which is impossible to do otherwise." We'll discuss its Hardware-Accelerated Video Transcoding capabilities using these APIs briefly in the following recipes: @@ -42,10 +60,27 @@ We'll discuss its Hardware-Accelerated Video Transcoding capabilities using thes ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: + - [x] **OpenCV:** OpenCV is required for previewing video frames. You can easily install it directly via [`pip`](https://pypi.org/project/opencv-python/): + + ??? 
tip "OpenCV installation from source" + + You can also follow online tutorials for building & installing OpenCV on [Windows](https://www.learnopencv.com/install-opencv3-on-windows/), [Linux](https://www.pyimagesearch.com/2018/05/28/ubuntu-18-04-how-to-install-opencv/), [MacOS](https://www.pyimagesearch.com/2018/08/17/install-opencv-4-on-macos/) and [Raspberry Pi](https://www.pyimagesearch.com/2018/09/26/install-opencv-4-on-your-raspberry-pi/) machines manually from its source. + + :warning: Make sure not to install both *pip* and *source* version together. Otherwise installation will fail to work! + + ??? info "Other OpenCV binaries" + + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + + + ```sh + pip install opencv-python + ``` + - [x] **VidGear:** VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via [`pip`](https://pypi.org/project/opencv-python/): ```sh @@ -54,122 +89,141 @@ We'll discuss its Hardware-Accelerated Video Transcoding capabilities using thes !!! note "Always use FFdecoder API's [`terminate()`](../../reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.terminate) method at the end to avoid undesired behavior." -!!! 
danger "==WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!=="   -## GPU-accelerated Hardware-based Video Transcoding with WriteGear API + +## CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API ???+ alert "Example Assumptions" **Please note that following recipe explicitly assumes:** - - You're running :fontawesome-brands-windows: Windows operating system with a [**supported NVIDIA GPU**](https://developer.nvidia.com/nvidia-video-codec-sdk). - - You're using FFmpeg 4.4 or newer, configured with atleast `--enable-nonfree --enable-libx264 --enable-cuda --enable-nvenc --enable-nvdec --enable-cuda-nvcc --enable-libnpp` options during compilation. For manual compilation follow [these instructions ➶](https://docs.nvidia.com/video-technologies/video-codec-sdk/ffmpeg-with-nvidia-gpu/#prerequisites) - - You already have appropriate Nvidia video drivers and related softwares installed on your machine. + - You're running :fontawesome-brands-linux: Linux operating system with a [**supported NVIDIA GPU**](https://developer.nvidia.com/nvidia-video-codec-sdk). + - You're using FFmpeg 4.4 or newer, configured with at least ` --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc` configuration flags during compilation. For compilation follow [these instructions ➶](https://docs.nvidia.com/video-technologies/video-codec-sdk/ffmpeg-with-nvidia-gpu/#prerequisites) - These assumptions **MAY/MAY NOT** suit your current setup. Kindly use suitable parameters based your system platform and hardware settings only. + ??? danger "Verifying NVDEC/CUDA support in FFmpeg" + To use CUDA Video-decoder(`cuda`), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows: -??? 
danger "Limitation: Bottleneck in Hardware-Accelerated Video Transcoding Performance" + ```sh + $ ffmpeg -hide_banner -pix_fmts | grep cuda + ..H.. cuda 0 0 0 + + $ ffmpeg -hide_banner -filters | egrep "cuda|npp" + ... bilateral_cuda V->V GPU accelerated bilateral filter + ... chromakey_cuda V->V GPU accelerated chromakey filter + ... colorspace_cuda V->V CUDA accelerated video color converter + ... hwupload_cuda V->V Upload a system memory frame to a CUDA device. + ... overlay_cuda VV->V Overlay one video on top of another using CUDA + ... scale_cuda V->V GPU accelerated video resizer + ... scale_npp V->V NVIDIA Performance Primitives video scaling and format conversion + ... scale2ref_npp VV->VV NVIDIA Performance Primitives video scaling and format conversion to the given reference. + ... sharpen_npp V->V NVIDIA Performance Primitives video sharpening filter. + ... thumbnail_cuda V->V Select the most representative frame in a given sequence of consecutive frames. + ... transpose_npp V->V NVIDIA Performance Primitives video transpose + T.. yadif_cuda V->V Deinterlace CUDA frames + ``` - Generally, adding the `–hwaccel cuvid`/`–hwaccel cuda -hwaccel_output_format cuda` options means the raw decoded frames will not be copied between system and GPU memory _(via the PCIe bus)_, and the transcoding will be faster and use less system resources, and may even result in up to 2x the throughput compared to the unoptimized calls: + ??? danger "Verifying H.264 NVENC encoder support in FFmpeg" -
- ![HW Acceleration](../../../assets/images/hw_accel.png){ width="350" } -
General Memory Flow with Hardware Acceleration
-
- - But unfortunately, for processing real-time frames in our python script with FFdecoder and WriteGear APIs, we're bound to sacrifice this performance gained by explicitly copying raw decoded frames between System and GPU memory via the PCIe bus, thereby creating self-made latency in transfer time and increasing PCIe bandwidth occupancy due to overheads in communication over the bus. Also, given PCIe bandwidth limits, copying uncompressed image data would quickly saturate the PCIe bus. - -
- ![HW Acceleration Limitation](../../../assets/images/hw_accel_limitation.png){ width="350" } -
Memory Flow with Hardware Acceleration
and Real-time Processing
-
+ To use NVENC Video-encoder(`cuda`), remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows: - On the bright side however, GPU supported hardware based encoding/decoding is inherently faster and more efficient _(do not use much CPU resources)_ thus freeing up the CPU for other tasks, as compared to software based encoding/decoding that is generally known to be quite CPU intensive. Plus scaling, deinterlacing, filtering, and other post-processing tasks will be faster than usual using these hardware based decoders/encoders with same equivalent output to software ones, and will use less power and CPU to do so. + ```sh + $ ffmpeg -hide_banner -encoders | grep nvenc - !!! summary "On the whole, You don't have to worry about it as you're getting to manipulate the real-time video frames with immense speed and flexibility which is impossible to do otherwise." + V....D av1_nvenc NVIDIA NVENC av1 encoder (codec av1) + V....D h264_nvenc NVIDIA NVENC H.264 encoder (codec h264) + V....D hevc_nvenc NVIDIA NVENC hevc encoder (codec hevc) + ``` -In this example, we will be using Nvidia's Hardware Accerlated **CUDA Video-decoder(`cuda`)** in FFdecoder API to decode and keep decoded **YUV420p** frames from a given Video file _(say `foo.mp4`)_ within GPU, all while rescaling _(with nvcuvid's `resize`)_ as well as encoding them in real-time with WriteGear API using Nvidia's hardware accelerated **H.264 NVENC Video-encoder(`h264_nvenc`)** into lossless video file within GPU. + !!! note "You can also use other NVENC encoder in the similar way, if supported." + -??? note "Remember to check H.264 NVENC encoder support in FFmpeg" + - You already have appropriate Nvidia video drivers and related softwares installed on your machine. 
+ - If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable. - - [x] **Using `h264_nvenc` encoder**: Remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows: + These assumptions **MAY/MAY NOT** suit your current setup. Kindly use suitable parameters based your system platform and hardware settings only. - ```sh - $ ffmpeg -hide_banner -encoders | grep nvenc - V....D h264_amf AMD AMF H.264 Encoder (codec h264) - V....D h264_mf H264 via MediaFoundation (codec h264) - V....D h264_nvenc NVIDIA NVENC H.264 encoder (codec h264) - ``` +In this example, we will be: - !!! note "You can also use optimized HEVC NVENC encoder(`hevc_nvenc`) in the similar way, if supported." - -!!! info "Additional Parameters in WriteGear API" - - WriteGear API only requires a valid Output filename _(e.g. `output_foo.mp4`)_ as input, but you can easily control any output specifications _(such as bitrate, codec, framerate, resolution, subtitles, etc.)_ supported by FFmpeg _(in use)_. +1. Using Nvidia's **CUDA Internal hwaccel Video decoder(`cuda`)** in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory _(for applying hardware filters)_ for achieving GPU-accelerated decoding of a given video file _(say `foo.mp4`)_. +2. Scaling and Cropping decoded frames in GPU memory. +3. Downloading decoded frames into system memory as patched **NV12** frames. +4. Converting **NV12** frames into **BGR** pixel-format using OpenCV's `cvtcolor` method. +5. Encoding **BGR** frames with OpenCV's VideoWriter API. !!! 
tip "You can use FFdecoder's [`metadata`](../../reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.metadata) property object that dumps source Video's metadata information _(as JSON string)_ to retrieve source framerate." +!!! note "With FFdecoder API, frames extracted with YUV pixel formats _(`yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)_ are generally incompatible with OpenCV APIs such as `imshow()`. But you can make them easily compatible by using exclusive [`-enforce_cv_patch`](../../reference/ffdecoder/params/#b-exclusive-parameters) boolean attribute of its `ffparam` dictionary parameter." -!!! warning "YUV video-frames decoded with DeFFcode APIs are not yet supported by OpenCV methods." - Currently, there's no way to use DeFFcode APIs decoded YUV video-frames in OpenCV methods, and also you cannot change pixel format to any other due to NV-accelerated video codec supporting only few pixel-formats. - +!!! info "More information on Nvidia's NVENC Encoder can be found [here ➶](https://developer.nvidia.com/blog/nvidia-ffmpeg-transcoding-guide/)" ```python # import the necessary packages from deffcode import FFdecoder -from vidgear.gears import WriteGear import json +import cv2 # define suitable FFmpeg parameter ffparams = { - "-vcodec": None, # skip any decoder and let FFmpeg chose + "-vcodec": None, # skip source decoder and let FFmpeg chose + "-enforce_cv_patch": True # enable OpenCV patch for YUV(NV12) frames "-ffprefixes": [ "-vsync", - "0", - "-hwaccel", # chooses appropriate HW accelerator - "cuda", - "-hwaccel_output_format", # keeps the decoded frames in GPU memory - "cuda", + "0", # prevent duplicate frames + "-hwaccel", + "cuda", # accelerator + "-hwaccel_output_format", + "cuda", # output accelerator ], - "-custom_resolution": "null", # discard `-custom_resolution` - "-framerate": "null", # discard `-framerate` - "-vf": "scale_npp=format=yuv420p,hwdownload,format=yuv420p,fps=30.0", # define your filters + "-custom_resolution": "null", # discard source 
`-custom_resolution` + "-framerate": "null", # discard source `-framerate` + "-vf": "scale_cuda=640:360," # scale to 640x360 in GPU memory + + "crop=80:60:200:100," # crop a 80×60 section from position (200, 100) in GPU memory + + "hwdownload," # download hardware frames to system memory + + "format=nv12", # convert downloaded frames to NV12 pixel format } -# initialize and formulate the decoder with params and custom filters +# initialize and formulate the decoder with `foo.mp4` source decoder = FFdecoder( - "foo.mp4", frame_format="null", verbose=True, **ffparams # discard frame_format + "foo.mp4", + frame_format="null", # discard source frame pixel format + verbose = False, # to avoid too much clutter + **ffparams # apply various params and custom filters ).formulate() -# retrieve framerate from JSON Metadata and pass it as -# `-input_framerate` parameter for controlled framerate -# and add input pixfmt as yuv420p also -output_params = { - "-input_framerate": json.loads(decoder.metadata)["output_framerate"], - "-vcodec": "h264_nvenc", - "-input_pixfmt": "yuv420p" -} +# retrieve JSON Metadata and convert it to dict +metadata_dict = json.loads(decoder.metadata) + +# prepare OpenCV parameters +FOURCC = cv2.VideoWriter_fourcc("M", "J", "P", "G") +FRAMERATE = metadata_dict["output_framerate"] +FRAMESIZE = tuple(metadata_dict["output_frames_resolution"]) -# Define writer with default parameters and suitable -# output filename for e.g. `output_foo_yuv.mp4` -writer = WriteGear(output_filename="output_foo_yuv.mp4", **output_params) +# Define writer with parameters and suitable output filename for e.g. 
`output_foo_gray.avi` +writer = cv2.VideoWriter("output_foo.avi", FOURCC, FRAMERATE, FRAMESIZE) -# grab the YUV420 frame from the decoder +# grab the NV12 frames from the decoder for frame in decoder.generateFrame(): # check if frame is None if frame is None: break - # {do something with the frame here} + # convert it to `BGR` pixel format, + # since write() method only accepts `BGR` frames + frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12) - # writing YUV420 frame to writer + # {do something with the BGR frame here} + + # writing BGR frame to writer writer.write(frame) +# close output window +cv2.destroyAllWindows() + # terminate the decoder decoder.terminate() @@ -177,4 +231,244 @@ decoder.terminate() writer.close() ``` -  \ No newline at end of file +  + + +## CUDA-NVENC-accelerated Video Transcoding with WriteGear API + +!!! warning "==WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!==" + +??? quote "Lossless transcoding with FFdecoder and WriteGear API" + + VidGear's [**WriteGear API**](https://abhitronix.github.io/vidgear/latest/gears/writegear/introduction/) implements a complete, flexible, and robust wrapper around FFmpeg in [compression mode](https://abhitronix.github.io/vidgear/latest/gears/writegear/compression/overview/) for encoding real-time video frames to a lossless compressed multimedia output file(s)/stream(s). + + DeFFcode's FFdecoder API in conjunction with WriteGear API creates a high-level **High-performance Lossless FFmpeg Transcoding _(Decoding + Encoding)_ Pipeline** :fire: that is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allow us to manipulate the real-time video frames with immense flexibility. 
+ +???+ alert "Example Assumptions" + + **Please note that following recipe explicitly assumes:** + + - You're running :fontawesome-brands-linux: Linux operating system with a [**supported NVIDIA GPU**](https://developer.nvidia.com/nvidia-video-codec-sdk). + - You're using FFmpeg 4.4 or newer, configured with at least ` --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc` configuration flags during compilation. For compilation follow [these instructions ➶](https://docs.nvidia.com/video-technologies/video-codec-sdk/ffmpeg-with-nvidia-gpu/#prerequisites) + + ??? danger "Verifying NVDEC/CUDA support in FFmpeg" + + To use CUDA Video-decoder(`cuda`), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows: + + ```sh + $ ffmpeg -hide_banner -pix_fmts | grep cuda + ..H.. cuda 0 0 0 + + $ ffmpeg -hide_banner -filters | egrep "cuda|npp" + ... bilateral_cuda V->V GPU accelerated bilateral filter + ... chromakey_cuda V->V GPU accelerated chromakey filter + ... colorspace_cuda V->V CUDA accelerated video color converter + ... hwupload_cuda V->V Upload a system memory frame to a CUDA device. + ... overlay_cuda VV->V Overlay one video on top of another using CUDA + ... scale_cuda V->V GPU accelerated video resizer + ... scale_npp V->V NVIDIA Performance Primitives video scaling and format conversion + ... scale2ref_npp VV->VV NVIDIA Performance Primitives video scaling and format conversion to the given reference. + ... sharpen_npp V->V NVIDIA Performance Primitives video sharpening filter. + ... thumbnail_cuda V->V Select the most representative frame in a given sequence of consecutive frames. + ... transpose_npp V->V NVIDIA Performance Primitives video transpose + T.. yadif_cuda V->V Deinterlace CUDA frames + ``` + + ??? 
danger "Verifying H.264 NVENC encoder support in FFmpeg" + + To use NVENC Video-encoder(`cuda`), remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows: + + ```sh + $ ffmpeg -hide_banner -encoders | grep nvenc + + V....D av1_nvenc NVIDIA NVENC av1 encoder (codec av1) + V....D h264_nvenc NVIDIA NVENC H.264 encoder (codec h264) + V....D hevc_nvenc NVIDIA NVENC hevc encoder (codec hevc) + ``` + + !!! note "You can also use other NVENC encoder in the similar way, if supported." + + + - You already have appropriate Nvidia video drivers and related softwares installed on your machine. + - If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable. + + These assumptions **MAY/MAY NOT** suit your current setup. Kindly use suitable parameters based your system platform and hardware settings only. + +??? info "Additional Parameters in WriteGear API" + WriteGear API only requires a valid Output filename _(e.g. `output_foo.mp4`)_ as input, but you can easily control any output specifications _(such as bitrate, codec, framerate, resolution, subtitles, etc.)_ supported by FFmpeg _(in use)_. + +!!! tip "You can use FFdecoder's [`metadata`](../../reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.metadata) property object that dumps source Video's metadata information _(as JSON string)_ to retrieve source framerate." + +=== "Consuming `BGR` frames" + + In this example, we will be: + + 1. Using Nvidia's **CUDA Internal hwaccel Video decoder(`cuda`)** in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory _(for applying hardware filters)_ for achieving GPU-accelerated decoding of a given video file _(say `foo.mp4`)_. + 2. 
Scaling and Cropping decoded frames in GPU memory. + 3. Downloading decoded frames into system memory as patched **NV12** frames. + 4. Converting patched **NV12** frames into **BGR** pixel-format using OpenCV's `cvtColor` method. + 5. Encoding **BGR** frames with WriteGear API using Nvidia's Hardware accelerated **H.264 NVENC Video-encoder(`h264_nvenc`)** into lossless video file in the GPU memory. + + + ```python + # import the necessary packages + from deffcode import FFdecoder + from vidgear.gears import WriteGear + import json + import cv2 + + # define suitable FFmpeg parameter + ffparams = { + "-vcodec": None, # skip source decoder and let FFmpeg choose + "-enforce_cv_patch": True, # enable OpenCV patch for YUV(NV12) frames + "-ffprefixes": [ + "-vsync", + "0", # prevent duplicate frames + "-hwaccel", + "cuda", # accelerator + "-hwaccel_output_format", + "cuda", # output accelerator + ], + "-custom_resolution": "null", # discard source `-custom_resolution` + "-framerate": "null", # discard source `-framerate` + "-vf": "scale_cuda=640:360," # scale to 640x360 in GPU memory + + "crop=80:60:200:100," # crop a 80×60 section from position (200, 100) in GPU memory + + "hwdownload," # download hardware frames to system memory + + "format=nv12", # convert downloaded frames to NV12 pixel format + } + + # initialize and formulate the decoder with `foo.mp4` source + decoder = FFdecoder( + "foo.mp4", + frame_format="null", # discard source frame pixel format + verbose = False, # to avoid too much clutter + **ffparams # apply various params and custom filters + ).formulate() + + # retrieve framerate from JSON Metadata and pass it as + # `-input_framerate` parameter for controlled framerate + output_params = { + "-input_framerate": json.loads(decoder.metadata)["output_framerate"], + "-vcodec": "h264_nvenc", # H.264 NVENC Video-encoder + + } + + # Define writer with default parameters and suitable + # output filename for e.g. 
`output_foo.mp4` + writer = WriteGear(output="output_foo.mp4", logging=True, **output_params) + + # grab the NV12 frames from the decoder + for frame in decoder.generateFrame(): + + # check if frame is None + if frame is None: + break + + # convert it to `BGR` pixel format + frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12) + + # {do something with the BGR frame here} + + # writing BGR frame to writer + writer.write(frame) + + # close output window + cv2.destroyAllWindows() + + # terminate the decoder + decoder.terminate() + + # safely close writer + writer.close() + ``` + +=== "Consuming `NV12` frames" + + In this example, we will be: + + 1. Using Nvidia's **CUDA Internal hwaccel Video decoder(`cuda`)** in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory _(for applying hardware filters)_ for achieving GPU-accelerated decoding of a given video file _(say `foo.mp4`)_. + 2. Scaling and Cropping decoded frames in GPU memory. + 3. Downloading decoded frames into system memory as **NV12** frames. + 4. Encoding **NV12** frames directly with WriteGear API using Nvidia's Hardware accelerated **H.264 NVENC Video-encoder(`h264_nvenc`)** into lossless video file in the GPU memory. 
+ + ```python + # import the necessary packages + from deffcode import FFdecoder + from vidgear.gears import WriteGear + import json + import cv2 + + # define suitable FFmpeg parameter + ffparams = { + "-vcodec": None, # skip source decoder and let FFmpeg chose + "-ffprefixes": [ + "-vsync", + "0", # prevent duplicate frames + "-hwaccel", + "cuda", # accelerator + "-hwaccel_output_format", + "cuda", # output accelerator + ], + "-custom_resolution": "null", # discard source `-custom_resolution` + "-framerate": "null", # discard source `-framerate` + "-vf": "scale_cuda=640:360," # scale to 640x360 in GPU memory + + "crop=80:60:200:100," # crop a 80×60 section from position (200, 100) in GPU memory + + "hwdownload," # download hardware frames to system memory + + "format=nv12", # convert downloaded frames to NV12 pixel format + } + + # initialize and formulate the decoder with `foo.mp4` source + decoder = FFdecoder( + "foo.mp4", + frame_format="null", # discard source frame pixel format + verbose = False, # to avoid too much clutter + **ffparams # apply various params and custom filters + ).formulate() + + # retrieve framerate from JSON Metadata and pass it as + # `-input_framerate` parameter for controlled framerate + output_params = { + "-input_framerate": json.loads(decoder.metadata)["output_framerate"], + "-vcodec": "h264_nvenc", # H.264 NVENC Video-encoder + "-input_pixfmt": "nv12", # input frames pixel format as `NV12` + } + + # Define writer with default parameters and suitable + # output filename for e.g. 
`output_foo.mp4` + writer = WriteGear(output="output_foo.mp4", logging=True, **output_params) + + # grab the NV12 frames from the decoder + for frame in decoder.generateFrame(): + + # check if frame is None + if frame is None: + break + + # {do something with the NV12 frame here} + + # writing NV12 frame to writer + writer.write(frame) + + # close output window + cv2.destroyAllWindows() + + # terminate the decoder + decoder.terminate() + + # safely close writer + writer.close() + ``` + +  + +## CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API + +> DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API creates a **High-performance Lossless FFmpeg Transcoding Pipeline :fire:** + +
+help-shouting +
Courtesy - tenor
+
+ + + diff --git a/docs/recipes/advanced/transcode-live-frames-complexgraphs.md b/docs/recipes/advanced/transcode-live-frames-complexgraphs.md index 53abc4c..c09b79a 100644 --- a/docs/recipes/advanced/transcode-live-frames-complexgraphs.md +++ b/docs/recipes/advanced/transcode-live-frames-complexgraphs.md @@ -43,7 +43,7 @@ We'll discuss the transcoding of live complex filtergraphs in the following reci ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -57,7 +57,7 @@ We'll discuss the transcoding of live complex filtergraphs in the following reci ??? info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). 
+ OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh diff --git a/docs/recipes/basic/decode-camera-devices.md b/docs/recipes/basic/decode-camera-devices.md index c43accc..c786b0b 100644 --- a/docs/recipes/basic/decode-camera-devices.md +++ b/docs/recipes/basic/decode-camera-devices.md @@ -30,7 +30,7 @@ We'll discuss the Decoding Camera Devices using Indexes briefly in the following ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -44,7 +44,7 @@ We'll discuss the Decoding Camera Devices using Indexes briefly in the following ??? info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). 
You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh diff --git a/docs/recipes/basic/decode-image-sequences.md b/docs/recipes/basic/decode-image-sequences.md index 16ff2f9..d7411e5 100644 --- a/docs/recipes/basic/decode-image-sequences.md +++ b/docs/recipes/basic/decode-image-sequences.md @@ -30,7 +30,7 @@ We'll discuss both briefly in the following recipes: ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -44,7 +44,7 @@ We'll discuss both briefly in the following recipes: ??? 
info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh diff --git a/docs/recipes/basic/decode-network-streams.md b/docs/recipes/basic/decode-network-streams.md index 8f53d9e..05c6664 100644 --- a/docs/recipes/basic/decode-network-streams.md +++ b/docs/recipes/basic/decode-network-streams.md @@ -30,7 +30,7 @@ We'll discuss Network Streams support briefly in the following recipes: ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? 
info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -44,7 +44,7 @@ We'll discuss Network Streams support briefly in the following recipes: ??? info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh diff --git a/docs/recipes/basic/decode-video-files.md b/docs/recipes/basic/decode-video-files.md index dacd0e6..5159a0e 100644 --- a/docs/recipes/basic/decode-video-files.md +++ b/docs/recipes/basic/decode-video-files.md @@ -30,7 +30,7 @@ We'll discuss its video files support and pixel format capabilities briefly in t ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. 
Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -44,7 +44,7 @@ We'll discuss its video files support and pixel format capabilities briefly in t ??? info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh @@ -273,29 +273,149 @@ In this example we will decode live **Grayscale** and **YUV** video frames from decoder.terminate() ``` -=== "Decode YUV" +=== "Decode YUV frames" - !!! info "You can also use `yuv422p`(4:2:2 subsampling) or `yuv444p`(4:4:4 subsampling) instead for more higher dynamic range." + !!! 
quote "With FFdecoder API, frames extracted with YUV pixel formats _(`yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)_ are generally incompatible with OpenCV APIs. But you can make them easily compatible by using exclusive [`-enforce_cv_patch`](../../reference/ffdecoder/params/#b-exclusive-parameters) boolean attribute of its `ffparam` dictionary parameter." + + Let's try decoding YUV420p pixel-format frames in following python code: + + !!! info "You can also use other YUV pixel formats such `yuv422p`(4:2:2 subsampling) or `yuv444p`(4:4:4 subsampling) etc. instead for more higher dynamic range in the similar manner." ```python # import the necessary packages from deffcode import FFdecoder import cv2 - # initialize and formulate the decoder for YUV420 output - decoder = FFdecoder("input_foo.mp4", frame_format="yuv420p", verbose=True).formulate() + # enable OpenCV patch for YUV frames + ffparams = {"-enforce_cv_patch": True} + + # initialize and formulate the decoder for YUV420p output + decoder = FFdecoder( + "input_foo.mp4", frame_format="yuv420p", verbose=True, **ffparams + ).formulate() - # grab the YUV420 frames from the decoder + # grab the YUV420p frames from the decoder for yuv in decoder.generateFrame(): # check if frame is None if yuv is None: break - # {do something with the yuv frame here} + # convert it to `BGR` pixel format, + # since imshow() method only accepts `BGR` frames + bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420) + + # {do something with the bgr frame here} + + # Show output window + cv2.imshow("Output", bgr) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # terminate the decoder + decoder.terminate() + ``` + +  + +## Capturing and Previewing frames from a Looping Video + +In this example we will decode live **BGR24** video frames from looping video using different means in FFdecoder API, and preview them using OpenCV Library's `cv2.imshow()` 
method. + +=== "Using `-stream_loop` option" + + The recommend way to loop video is to use `-stream_loop` option via. `-ffprefixes` list attribute of `ffparam` dictionary parameter in FFdecoder API. Possible values are integer values: `>0` value of loop, `0` means no loop, `-1` means infinite loop. + + !!! note "Using `-stream_loop 3` will loop video `4` times." + + ```python + # import the necessary packages + from deffcode import FFdecoder + import cv2 + + # define `-stream_loop 3` for looping 4 times + ffparams = {"-ffprefixes":["-stream_loop", "3"]} + + # initialize and formulate the decoder with suitable source + decoder = FFdecoder("input.mp4", frame_format="bgr24", verbose=True, **ffparams).formulate() + + # print metadata as `json.dump` + print(decoder.metadata) + + # grab the BGR24 frame from the decoder + for frame in decoder.generateFrame(): + + # check if frame is None + if frame is None: + break + + # {do something with the frame here} + + # Show output window + cv2.imshow("Output", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # terminate the decoder + decoder.terminate() + ``` + +=== "Using `loop` filter" + + Another way to loop video is to use `loop` complex filter via. `-filter_complex` FFmpeg flag as attribute of `ffparam` dictionary parameter in FFdecoder API. + + !!! warning "This filter places all frames into memory(RAM), so applying [`trim`](https://ffmpeg.org/ffmpeg-filters.html#toc-trim) filter first is strongly recommended. Otherwise you might probably run Out of Memory." + + !!! tip "Using `loop` filter for looping video" + The filter accepts the following options: + - `loop`: Sets the number of loops for integer values `>0`. Setting this value to `-1` will result in infinite loops. Default is `0`(no loops). + - `size`: Sets maximal size in number of frames. Default is `0`. + - `start`: Sets first frame of loop. Default is `0`. 
+ + !!! note "Using `loop=3` will loop video `4` times." + + ```python + # import the necessary packages + from deffcode import FFdecoder + import cv2 + + # define loop 4 times, each loop is 15 frames, each loop skips the first 25 frames + ffparams = { + "-filter_complex": "loop=loop=3:size=15:start=25" # Or use: `loop=3:15:25` + } + + # initialize and formulate the decoder with suitable source + decoder = FFdecoder( + "input.mp4", frame_format="bgr24", verbose=True, **ffparams + ).formulate() + + # print metadata as `json.dump` + print(decoder.metadata) + + # grab the BGR24 frame from the decoder + for frame in decoder.generateFrame(): + + # check if frame is None + if frame is None: + break + + # {do something with the frame here} + # Show output window - cv2.imshow("YUV Output", yuv) + cv2.imshow("Output", frame) # check for 'q' key if pressed key = cv2.waitKey(1) & 0xFF @@ -304,9 +424,9 @@ In this example we will decode live **Grayscale** and **YUV** video frames from # close output window cv2.destroyAllWindows() - + # terminate the decoder decoder.terminate() ``` -  \ No newline at end of file +  diff --git a/docs/recipes/basic/index.md b/docs/recipes/basic/index.md index b83f8a2..ce48808 100644 --- a/docs/recipes/basic/index.md +++ b/docs/recipes/basic/index.md @@ -54,7 +54,8 @@ The following recipes should be reasonably accessible to beginners of any skill - [x] **[:material-file-eye: Decoding Video files](../basic/decode-video-files/#decoding-video-files)** - [Accessing RGB frames from a video file](../basic/decode-video-files/#accessing-rgb-frames-from-a-video-file) - [Capturing and Previewing BGR frames from a video file](../basic/decode-video-files/#capturing-and-previewing-bgr-frames-from-a-video-file) _(OpenCV Support)_ - - [Playing with any other FFmpeg pixel formats](../basic/decode-video-files/#capturing-and-previewing-bgr-frames-from-a-video-file) + - [Playing with any other FFmpeg pixel 
formats](../basic/decode-video-files/#playing-with-any-other-ffmpeg-pixel-formats) + - [Capturing and Previewing frames from a Looping Video](../basic/decode-video-files/#capturing-and-previewing-frames-from-a-looping-video) - [x] **[:material-webcam: Decoding Camera Devices using Indexes](../basic/decode-camera-devices)** - [Enumerating all Camera Devices with Indexes](../basic/decode-camera-devices/#enumerating-all-camera-devices-with-indexes) - [Capturing and Previewing frames from a Camera using Indexes](../basic/decode-camera-devices/#capturing-and-previewing-frames-from-a-camera-using-indexes) diff --git a/docs/recipes/basic/save-keyframe-image.md b/docs/recipes/basic/save-keyframe-image.md index ceb444f..db0f438 100644 --- a/docs/recipes/basic/save-keyframe-image.md +++ b/docs/recipes/basic/save-keyframe-image.md @@ -31,7 +31,7 @@ We'll discuss aboout it briefly in the following recipes: ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -45,7 +45,7 @@ We'll discuss aboout it briefly in the following recipes: ??? info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. 
More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh diff --git a/docs/recipes/basic/transcode-live-frames-simplegraphs.md b/docs/recipes/basic/transcode-live-frames-simplegraphs.md index a0a2c19..1f3885b 100644 --- a/docs/recipes/basic/transcode-live-frames-simplegraphs.md +++ b/docs/recipes/basic/transcode-live-frames-simplegraphs.md @@ -43,7 +43,7 @@ We'll discuss the transcoding of live simple filtergraphs in the following recip ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -57,7 +57,7 @@ We'll discuss the transcoding of live simple filtergraphs in the following recip ??? 
info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh @@ -87,7 +87,7 @@ In this example we will take the first 5 seconds of a video clip _(using [`trim` !!! tip "You can use FFdecoder's [`metadata`](../../reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.metadata) property object that dumps source Video's metadata information _(as JSON string)_ to retrieve output framerate and resolution." -!!! alert "By default, OpenCV expects `BGR` format frames in its `cv2.write()` method." +!!! alert "By default, OpenCV expects `BGR` format frames in its `write()` method." 
```python # import the necessary packages diff --git a/docs/recipes/basic/transcode-live-frames.md b/docs/recipes/basic/transcode-live-frames.md index 7e873e2..14d61f2 100644 --- a/docs/recipes/basic/transcode-live-frames.md +++ b/docs/recipes/basic/transcode-live-frames.md @@ -42,7 +42,7 @@ We'll discuss transcoding using both these libraries briefly in the following re ==DeFFcode APIs **MUST** requires valid FFmpeg executable for all of its core functionality==, and any failure in detection will raise `RuntimeError` immediately. Follow dedicated [FFmpeg Installation doc ➶](../../../installation/ffmpeg_install/) for its installation. -???+ info "Additional Python Dependencies for following recipes" +??? info "Additional Python Dependencies for following recipes" Following recipes requires additional python dependencies which can be installed easily as below: @@ -56,7 +56,7 @@ We'll discuss transcoding using both these libraries briefly in the following re ??? info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). 
You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh @@ -89,12 +89,10 @@ In this example we will decode different pixel formats video frames from a given !!! tip "You can use FFdecoder's [`metadata`](../../reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.metadata) property object that dumps source Video's metadata information _(as JSON string)_ to retrieve output framerate and resolution." -!!! alert "By default, OpenCV expects `BGR` format frames in its `cv2.write()` method." - -!!! note "The `YUV` pixel-format frames are NOT yet supported by OpenCV VideoWriter, try using WriteGear API instead." - === "BGR frames" + By default, OpenCV expects `BGR` format frames in its `cv2.write()` method. + ```python # import the necessary packages from deffcode import FFdecoder @@ -146,7 +144,7 @@ In this example we will decode different pixel formats video frames from a given === "RGB frames" - !!! info "Since OpenCV expects `BGR` format frames in its `cv2.write()` method, therefore we need to convert `RGB` frames into `BGR` before encoding." + Since OpenCV expects `BGR` format frames in its `cv2.write()` method, therefore we need to convert `RGB` frames into `BGR` before encoding as follows: ```python # import the necessary packages @@ -191,7 +189,7 @@ In this example we will decode different pixel formats video frames from a given === "GRAYSCALE frames" - !!! info "OpenCV directly consumes `GRAYSCALE` format frames in its `cv2.write()` method." + OpenCV also directly consumes `GRAYSCALE` frames in its `cv2.write()` method. ```python # import the necessary packages @@ -231,6 +229,61 @@ In this example we will decode different pixel formats video frames from a given writer.release() ``` +=== "YUV frames" + + !!! 
abstract "With FFdecoder API, frames extracted with YUV pixel formats _(`yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)_ are generally incompatible with OpenCV APIs. But you can make them easily compatible by using exclusive [`-enforce_cv_patch`](../../reference/ffdecoder/params/#b-exclusive-parameters) boolean attribute of its `ffparam` dictionary parameter." + + Let's try encoding YUV420p pixel-format frames with OpenCV's `write()` method in following python code: + + !!! info "You can also use other YUV pixel-formats such as `yuv422p`(4:2:2 subsampling) or `yuv444p`(4:4:4 subsampling) etc. instead for higher dynamic range in a similar manner." + + ```python + # import the necessary packages + from deffcode import FFdecoder + import json, cv2 + + # enable OpenCV patch for YUV frames + ffparams = {"-enforce_cv_patch": True} + + # initialize and formulate the decoder for YUV420p output + decoder = FFdecoder( + "input_foo.mp4", frame_format="yuv420p", verbose=True, **ffparams + ).formulate() + + # retrieve JSON Metadata and convert it to dict + metadata_dict = json.loads(decoder.metadata) + + # prepare OpenCV parameters + FOURCC = cv2.VideoWriter_fourcc("M", "J", "P", "G") + FRAMERATE = metadata_dict["output_framerate"] + FRAMESIZE = tuple(metadata_dict["output_frames_resolution"]) + + # Define writer with parameters and suitable output filename for e.g. 
`output_foo_yuv.avi` + writer = cv2.VideoWriter("output_foo_yuv.avi", FOURCC, FRAMERATE, FRAMESIZE) + + # grab the yuv420p frame from the decoder + for frame in decoder.generateFrame(): + + # check if frame is None + if frame is None: + break + + # convert it to `BGR` pixel format, + # since OpenCV's VideoWriter `write()` method expects `BGR` frames + bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) + + # {do something with the BGR frame here} + + # writing BGR frame to writer + writer.write(bgr) + + # terminate the decoder + decoder.terminate() + + # safely close writer + writer.release() + ``` +   ## Transcoding lossless video using WriteGear API @@ -251,10 +304,10 @@ In this example we will decode different pixel formats video frames from a given !!! tip "You can use FFdecoder's [`metadata`](../../reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.metadata) property object that dumps source Video's metadata information _(as JSON string)_ to retrieve source framerate." -!!! alert "WriteGear API by default expects `BGR` format frames in its `write()` class method." - === "BGR frames" + WriteGear API by default expects `BGR` format frames in its `write()` class method. + ```python # import the necessary packages from deffcode import FFdecoder @@ -295,7 +348,7 @@ In this example we will decode different pixel formats video frames from a given === "RGB frames" - !!! info "You can use [`rgb_mode`](https://abhitronix.github.io/vidgear/latest/bonus/reference/writegear/#vidgear.gears.writegear.WriteGear.write) parameter in `write()` class method to write `RGB` format frames instead of default `BGR`." 
+ In WriteGear API, you can use [`rgb_mode`](https://abhitronix.github.io/vidgear/latest/bonus/reference/writegear/#vidgear.gears.writegear.WriteGear.write) parameter in `write()` class method to write `RGB` format frames instead of default `BGR` as follows: ```python # import the necessary packages @@ -337,7 +390,7 @@ In this example we will decode different pixel formats video frames from a given === "GRAYSCALE frames" - !!! info "WriteGear API directly consumes `GRAYSCALE` format frames in its `write()` class method. + WriteGear API also directly consumes `GRAYSCALE` format frames in its `write()` class method. ```python # import the necessary packages @@ -379,7 +432,7 @@ In this example we will decode different pixel formats video frames from a given === "YUV frames" - !!! info "WriteGear API can easily consumes `YUV` format frames in its `write()` class method only in compression mode." + WriteGear API also directly consume `YUV` _(or basically any other supported pixel format)_ frames in its `write()` class method with its `-input_pixfmt` attribute in compression mode. For its non-compression mode, see [above example](#transcoding-video-using-opencv-videowriter-api). !!! note "You can also use `yuv422p`(4:2:2 subsampling) or `yuv444p`(4:4:4 subsampling) instead for more higher dynamic ranges." diff --git a/docs/reference/ffdecoder/params.md b/docs/reference/ffdecoder/params.md index c4bd4c8..12d09b6 100644 --- a/docs/reference/ffdecoder/params.md +++ b/docs/reference/ffdecoder/params.md @@ -370,9 +370,9 @@ Its valid input can be one of the following: === ":material-apple: MacOS" - MacOS users can use the [AVFoundation](https://ffmpeg.org/ffmpeg-devices.html#avfoundation) to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. 
You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines: + MacOS users can use the [AVFoundation](https://ffmpeg.org/ffmpeg-devices.html#avfoundation) to list input devices and is the currently recommended framework by Apple for stream capturing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines: - !!! note "QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases." + !!! note "QTKit is also available for stream capturing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases." - [x] **Identify Video Devices:** You can enumerate all the available input devices including screens ready to be captured using `avfoundation` as follows: @@ -494,12 +494,9 @@ This parameter select the pixel format for output video frames _(such as `gray` Any improper `frame_format` parameter value _(i.e. either `null`(special-case), undefined, or invalid type)_ , then `-pix_fmt` FFmpeg parameter value in Decoding pipeline uses `output_frames_pixfmt` metadata property extracted from Output Stream. Thereby, in case if no valid `output_frames_resolution` metadata property is found, then API finally defaults to **Default pixel-format**[^1] _(calculated variably)_. - !!! alert "The `output_frame_pixfmt` metadata property is only available when FFmpeg filters via. `-vf` or `-filter_complex` are manually defined." - - ??? info "Use `#!py3 frame_format="null"` to manually discard `-pix_fmt` FFmpeg parameter entirely from Decoding pipeline." 
- This feature allows users to manually skip `-pix_fmt` FFmpeg parameter in Decoding pipeline, essentially for using only `format` filter values, or even better, let FFmpeg itself choose the best available output frame pixel-format for the given source. + This feature allows users to manually skip `-pix_fmt` FFmpeg parameter in Decoding pipeline, essentially for using only `format` ffmpeg filter values instead, or even better let FFmpeg itself choose the best available output frame pixel-format for the given source. **Data-Type:** String @@ -719,6 +716,19 @@ These parameters are discussed below: &thinsp; +* **`-enforce_cv_patch`** _(bool)_ : This attribute can be enabled(`True`) for patching YUV pixel-formats _(such as `YUV420p`, `yuv444p`, `NV12`, `NV21` etc.)_ frames to be seamlessly compatible with OpenCV APIs such as `imshow()`, `write()` etc. It can be used as follows: + + !!! warning "As of now, only YUV pixel-formats starting with `YUV` and `NV` are supported." + + ```python + # define suitable parameter + ffparams = {"-enforce_cv_patch": True} # enables OpenCV patch for YUV frames + ``` + + !!! 
example "YUV pixel-formats usage recipe :material-pot-steam: can found [here ➶](../../../recipes/basic/decode-video-files/#playing-with-any-other-ffmpeg-pixel-formats)" + +  + * **`-passthrough_audio`** _(bool/list)_ : _(Yet to be supported)_   diff --git a/mkdocs.yml b/mkdocs.yml index f585a01..c5adad7 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -43,6 +43,8 @@ theme: - search.highlight - search.share - content.code.annotate + - content.code.copy + - content.tabs.link palette: # Light mode - media: "(prefers-color-scheme: light)" @@ -106,9 +108,9 @@ extra: provider: google property: UA-131929464-1 extra_css: - - assets/stylesheets/custom.css + - assets/stylesheets/custom.css extra_javascript: - - assets/javascripts/extra.js + - assets/javascripts/extra.js # Extensions markdown_extensions: @@ -157,49 +159,47 @@ markdown_extensions: # Page tree nav: - - Home: - - Introduction: - - Introduction: index.md - - Installation Notes: - - Overview: installation/index.md - - FFmpeg Installation: installation/ffmpeg_install.md - - Contribution Guidelines: - - Overview: contribution/index.md - - Issue Guidelines: contribution/issue.md - - Pull Request(PR) Guidelines: contribution/PR.md - - Changelog: changelog.md - - License: license.md + - Home: + - Introduction: + - Introduction: index.md + - Installation Notes: + - Overview: installation/index.md + - FFmpeg Installation: installation/ffmpeg_install.md + - Contribution Guidelines: + - Overview: contribution/index.md + - Issue Guidelines: contribution/issue.md + - Pull Request(PR) Guidelines: contribution/PR.md + - Changelog: changelog.md + - License: license.md - Recipies: - - Basic Recipes: - - Overview: recipes/basic/index.md - - Decoding Video Files: recipes/basic/decode-video-files.md - - Decoding Camera Devices: recipes/basic/decode-camera-devices.md - - Decoding Network Streams: recipes/basic/decode-network-streams.md - - Decoding Image sequences: recipes/basic/decode-image-sequences.md - - Transcoding Live frames: 
recipes/basic/transcode-live-frames.md - - Transcoding Live Simple Filtergraphs: recipes/basic/transcode-live-frames-simplegraphs.md - - Saving Key-frames as Image: recipes/basic/save-keyframe-image.md - - Extracting video metadata: recipes/basic/extract-video-metadata.md - - Advanced Recipies: - - Overview: recipes/advanced/index.md - - Decoding Live Virtual Sources: recipes/advanced/decode-live-virtual-sources.md - - Decoding Live Feed Devices: recipes/advanced/decode-live-feed-devices.md - - Hardware-Accelerated Video Decoding: recipes/advanced/decode-hw-acceleration.md - - Transcoding Live Complex Filtergraphs: recipes/advanced/transcode-live-frames-complexgraphs.md - - Transcoding Video Art with Filtergraphs: recipes/advanced/transcode-art-filtergraphs.md - - Hardware-Accelerated Video Transcoding: recipes/advanced/transcode-hw-acceleration.md - - Updating Video Metadata: recipes/advanced/update-metadata.md + - Basic Recipes: + - Overview: recipes/basic/index.md + - Decoding Video Files: recipes/basic/decode-video-files.md + - Decoding Camera Devices: recipes/basic/decode-camera-devices.md + - Decoding Network Streams: recipes/basic/decode-network-streams.md + - Decoding Image sequences: recipes/basic/decode-image-sequences.md + - Transcoding Live frames: recipes/basic/transcode-live-frames.md + - Transcoding Live Simple Filtergraphs: recipes/basic/transcode-live-frames-simplegraphs.md + - Saving Key-frames as Image: recipes/basic/save-keyframe-image.md + - Extracting video metadata: recipes/basic/extract-video-metadata.md + - Advanced Recipies: + - Overview: recipes/advanced/index.md + - Decoding Live Virtual Sources: recipes/advanced/decode-live-virtual-sources.md + - Decoding Live Feed Devices: recipes/advanced/decode-live-feed-devices.md + - Hardware-Accelerated Video Decoding: recipes/advanced/decode-hw-acceleration.md + - Transcoding Live Complex Filtergraphs: recipes/advanced/transcode-live-frames-complexgraphs.md + - Transcoding Video Art with 
Filtergraphs: recipes/advanced/transcode-art-filtergraphs.md + - Hardware-Accelerated Video Transcoding: recipes/advanced/transcode-hw-acceleration.md + - Updating Video Metadata: recipes/advanced/update-metadata.md - API References: - - deffcode.FFdecoder: - - API: reference/ffdecoder/index.md - - API Parameters: reference/ffdecoder/params.md - - deffcode.Sourcer: - - API: reference/sourcer/index.md - - API Parameters: reference/sourcer/params.md - - deffcode.ffhelper: reference/ffhelper.md - - deffcode.utils: reference/utils.md + - deffcode.FFdecoder: + - API: reference/ffdecoder/index.md + - API Parameters: reference/ffdecoder/params.md + - deffcode.Sourcer: + - API: reference/sourcer/index.md + - API Parameters: reference/sourcer/params.md + - deffcode.ffhelper: reference/ffhelper.md + - deffcode.utils: reference/utils.md - Help Section: - - Help Us: help.md - - Get Help: help/get_help.md - - + - Help Us: help.md + - Get Help: help/get_help.md diff --git a/scripts/bash/prepare_dataset.sh b/scripts/bash/prepare_dataset.sh index d702cad..0d83a17 100644 --- a/scripts/bash/prepare_dataset.sh +++ b/scripts/bash/prepare_dataset.sh @@ -27,9 +27,6 @@ mkdir -p "$TMPFOLDER"/Downloads mkdir -p "$TMPFOLDER"/Downloads/FFmpeg_static mkdir -p "$TMPFOLDER"/Downloads/Test_videos -# Acknowledging machine architecture -MACHINE_BIT=$(uname -m) - #Defining alternate ffmpeg static binaries date/version ALTBINARIES_DATE="12-07-2022" @@ -50,7 +47,7 @@ msys*) esac #Download and Configure FFmpeg Static -cd "$TMPFOLDER"/Downloads/FFmpeg_static +cd "$TMPFOLDER"/Downloads/FFmpeg_static || exit if [ $OS_NAME = "linux" ]; then @@ -82,9 +79,9 @@ fi cd "$TMPFOLDER"/Downloads/Test_videos || exit echo "Downloading Test-Data..." 
-curl https://raw.githubusercontent.com/abhiTronix/Imbakup/master/Images/big_buck_bunny_720p_1mb.mp4 -o BigBuckBunny_4sec.mp4 -curl https://raw.githubusercontent.com/abhiTronix/Imbakup/master/Images/big_buck_bunny_720p_1mb_vo.mp4 -o BigBuckBunny_4sec_VO.mp4 -curl https://raw.githubusercontent.com/abhiTronix/Imbakup/master/Images/big_buck_bunny_720p_1mb_ao.aac -o BigBuckBunny_4sec_AO.aac +curl https://gitlab.com/abhiTronix/Imbakup/-/raw/master/Images/big_buck_bunny_720p_1mb.mp4 -o BigBuckBunny_4sec.mp4 +curl https://gitlab.com/abhiTronix/Imbakup/-/raw/master/Images/big_buck_bunny_720p_1mb_vo.mp4 -o BigBuckBunny_4sec_VO.mp4 +curl https://gitlab.com/abhiTronix/Imbakup/-/raw/master/Images/big_buck_bunny_720p_1mb_ao.aac -o BigBuckBunny_4sec_AO.aac curl -L https://github.com/abhiTronix/Imbakup/releases/download/vid-001/BigBuckBunny.mp4 -o BigBuckBunny.mp4 curl -L https://github.com/abhiTronix/Imbakup/releases/download/vid-001/jellyfish-50-mbps-hd-h264.mkv -o 50_mbps_hd_h264.mkv curl -L https://github.com/abhiTronix/Imbakup/releases/download/vid-001/jellyfish-90-mbps-hd-hevc-10bit.mkv -o 90_mbps_hd_hevc_10bit.mkv diff --git a/setup.py b/setup.py index fdc4207..060f0a5 100644 --- a/setup.py +++ b/setup.py @@ -19,7 +19,6 @@ """ # import the necessary packages -import setuptools from setuptools import setup from distutils.util import convert_path @@ -43,15 +42,12 @@ .replace("", "") .replace("", "") ) - # patch for unicodes + # patch for unicodes long_description = long_description.replace("➶", ">>").replace("©", "(c)") # patch internal hyperlinks long_description = long_description.replace( "(#", "(https://github.com/abhiTronix/deffcode#" ) - long_description = long_description.replace( - "docs/overrides/", "https://abhitronix.github.io/deffcode/latest/" - ) setup( diff --git a/tests/test_ffdecoder.py b/tests/test_ffdecoder.py index 887a245..11af3e2 100644 --- a/tests/test_ffdecoder.py +++ b/tests/test_ffdecoder.py @@ -50,7 +50,7 @@ [ (return_testvideo_path(fmt="av"), 
return_static_ffmpeg(), True), ( - "https://raw.githubusercontent.com/abhiTronix/Imbakup/master/Images/starship.mkv", + "https://gitlab.com/abhiTronix/Imbakup/-/raw/master/Images/starship.mkv", "", True, ), @@ -116,7 +116,7 @@ def test_source_playback(source, custom_ffmpeg, output): @pytest.mark.parametrize( - "pixfmts", ["bgr24", "gray", "rgba", "invalid", "invalid2", "yuv444p", "bgr48be"] + "pixfmts", ["bgr24", "gray", "rgba", "invalid", "invalid2", "yuv420p", "bgr48be"] ) def test_frame_format(pixfmts): """ @@ -129,7 +129,16 @@ def test_frame_format(pixfmts): ffparams = {"-pix_fmt": "bgr24"} try: # formulate the decoder with suitable source(for e.g. foo.mp4) - if pixfmts != "invalid2": + if pixfmts == "yuv420p": + ffparams = {"-enforce_cv_patch": True} + decoder = FFdecoder( + source, + frame_format=pixfmts, + custom_ffmpeg=return_static_ffmpeg(), + verbose=True, + **ffparams, + ).formulate() + elif pixfmts != "invalid2": decoder = FFdecoder( source, frame_format=pixfmts, @@ -149,10 +158,12 @@ def test_frame_format(pixfmts): # grab RGB24(default) 3D frames from decoder for frame in decoder.generateFrame(): + if pixfmts == "yuv420p": + # try converting to BGR frame + frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) # lets print its shape - print(frame.shape) + logger.debug(frame.shape) break - except Exception as e: pytest.fail(str(e)) finally: @@ -454,6 +465,7 @@ def test_camera_capture(source, source_demuxer, result): { "-custom_resolution": "null", # discard `-custom_resolution` "-framerate": "null", # discard `-framerate` + "-enforce_cv_patch": "invalid", # invalid value for testing "-vf": "format=bgr24,scale=320:240,fps=60", # format=bgr24, scale=320x240, framerate=60fps }, True, diff --git a/tests/test_sourcer.py b/tests/test_sourcer.py index 0fc7b01..b3e7670 100644 --- a/tests/test_sourcer.py +++ b/tests/test_sourcer.py @@ -42,12 +42,18 @@ [ ( return_generated_frames_path(return_static_ffmpeg()), - {"-ffprefixes": "invalid"}, # invalid ffprefixes + { + 
"-ffprefixes": "invalid", # invalid ffprefixes + "-filter_complex": "loop=loop=3:size=15:start=25", + }, return_static_ffmpeg(), ), ( "rtmp://live.twitch.tv/", - {"-ffmpeg_download_path": ["invalid"]}, # invalid FFmpeg download path + { + "-ffmpeg_download_path": ["invalid"], # invalid FFmpeg download path + "-ffprefixes": ["-stream_loop", "3"], + }, return_static_ffmpeg(), ), (