diff --git a/README.adoc b/README.adoc index e970a999..d5cd387e 100644 --- a/README.adoc +++ b/README.adoc @@ -113,41 +113,6 @@ We also have a discussion forum on https://reddit.com/r/BirdNET_Analyzer[Reddit] == Contents toc::[] -== Model version update - -[discrete] -==== V2.4, June 2023 - -* more than 6,000 species worldwide -* covers frequencies from 0 Hz to 15 kHz with two-channel spectrogram (one for low and one for high frequencies) -* 0.826 GFLOPs, 50.5 MB as FP32 -* enhanced and optimized metadata model -* global selection of species (birds and non-birds) with 6,522 classes (incl. 10 non-event classes) - -You can find a list of previous versions here: https://github.com/kahst/BirdNET-Analyzer/tree/main/checkpoints[BirdNET-Analyzer Model Version History] - -[discrete] -==== Species range model V2.4 - V2, Jan 2024 - -* updated species range model based on eBird data -* more accurate (spatial) species range prediction -* slightly increased long-tail distribution in the temporal resolution -* see https://github.com/kahst/BirdNET-Analyzer/discussions/234[this discussion post] for more details - -== Technical Details - -Model V2.4 uses the following settings: - -* 48 kHz sampling rate (we up- and downsample automatically and can deal with artifacts from lower sampling rates) -* we compute 2 mel spectrograms as input for the convolutional neural network: -** first one has fmin = 0 Hz and fmax = 3000; nfft = 2048; hop size = 278; 96 mel bins -** second one has fmin = 500 Hz and fmax = 15 kHz; nfft = 1024; hop size = 280; 96 mel bins -* both spectrograms have a final resolution of 96x511 pixels -* raw audio will be normalized between -1 and 1 before spectrogram conversion -* we use non-linear magnitude scaling as mentioned in http://ceur-ws.org/Vol-2125/paper_181.pdf[Schlüter 2018] -* V2.4 uses an EfficienNetB0-like backbone with a final embedding size of 1024 -* See https://github.com/kahst/BirdNET-Analyzer/issues/177#issuecomment-1772538736[this comment] for more 
details - == Usage guide This document provides instructions for downloading and installing the GUI, and conducting some of the most common types of analyses. Within the document, a link is provided to download example sound files that can be used for practice. @@ -179,6 +144,8 @@ Learn more at: https://haikubox.com[HaikuBox.com] Built on the TFLite version of BirdNET, this project uses pre-built TFLite binaries for Raspberry Pi to run on-device sound analyses. It is able to recognize bird sounds from a USB sound card in realtime and share its data with the rest of the world. +Note: You can find the most up-to-date version of BirdNET-Pi at https://github.com/Nachtzuster/BirdNET-Pi[github.com/Nachtzuster/BirdNET-Pi] + Learn more at: https://birdnetpi.com[BirdNETPi.com] | image:https://tuc.cloud/index.php/s/jDtyG9W36WwKpbR/download/logo_box_birdweather.png[BirdWeather,300,link=https://app.birdweather.com] @@ -188,6 +155,12 @@ Using the BirdNET artificial neural network, BirdWeather is continuously listeni Learn more at: https://app.birdweather.com[BirdWeather.com] +| image:https://tuc.cloud/index.php/s/kqT7GXXzfDs3NyA/download/birdnetlib-logo.png[birdnetlib,300,link=https://joeweiss.github.io/birdnetlib/] +| *birdnetlib* + +A Python API for BirdNET-Analyzer and BirdNET-Lite. `birdnetlib` provides a common interface for BirdNET-Analyzer and BirdNET-Lite. + +Learn more at: https://joeweiss.github.io/birdnetlib/[github.io/birdnetlib] + | image:https://tuc.cloud/index.php/s/zpNkXJq7je3BKNE/download/logo_box_ecopi_bird.png[ecoPI:Bird,300,link=https://oekofor.netlify.app/en/portfolio/ecopi-bird_en/] | *ecoPi:Bird* + The ecoPi:Bird is a device for automated acoustic recordings of bird songs and calls, with a self-sufficient power supply. @@ -241,6 +214,41 @@ We have created an interactive map of projects that use BirdNET. 
If you are work You can access the map here: https://kahst.github.io/BirdNET-Analyzer/projects.html[Open projects map] +== Model version update + +[discrete] +==== V2.4, June 2023 + +* more than 6,000 species worldwide +* covers frequencies from 0 Hz to 15 kHz with two-channel spectrogram (one for low and one for high frequencies) +* 0.826 GFLOPs, 50.5 MB as FP32 +* enhanced and optimized metadata model +* global selection of species (birds and non-birds) with 6,522 classes (incl. 10 non-event classes) + +You can find a list of previous versions here: https://github.com/kahst/BirdNET-Analyzer/tree/main/checkpoints[BirdNET-Analyzer Model Version History] + +[discrete] +==== Species range model V2.4 - V2, Jan 2024 + +* updated species range model based on eBird data +* more accurate (spatial) species range prediction +* slightly increased long-tail distribution in the temporal resolution +* see https://github.com/kahst/BirdNET-Analyzer/discussions/234[this discussion post] for more details + +== Technical Details + +Model V2.4 uses the following settings: + +* 48 kHz sampling rate (we up- and downsample automatically and can deal with artifacts from lower sampling rates) +* we compute 2 mel spectrograms as input for the convolutional neural network: +** first one has fmin = 0 Hz and fmax = 3000; nfft = 2048; hop size = 278; 96 mel bins +** second one has fmin = 500 Hz and fmax = 15 kHz; nfft = 1024; hop size = 280; 96 mel bins +* both spectrograms have a final resolution of 96x511 pixels +* raw audio will be normalized between -1 and 1 before spectrogram conversion +* we use non-linear magnitude scaling as mentioned in http://ceur-ws.org/Vol-2125/paper_181.pdf[Schlüter 2018] +* V2.4 uses an EfficientNetB0-like backbone with a final embedding size of 1024 +* See https://github.com/kahst/BirdNET-Analyzer/issues/177#issuecomment-1772538736[this comment] for more details + == Setup === Setup (Raven Pro) @@ -252,14 +260,21 @@ 
https://tuc.cloud/index.php/s/2TX59Qda2X92Ppr/download/BirdNET_GLOBAL_6K_V2.4_Mo === Setup (Python package) -The easiest way to setup BirdNET on your machine is to install https://pypi.org/project/birdnet/[birdnet] through pip with: +The easiest way to set up BirdNET on your machine is to install https://joeweiss.github.io/birdnetlib/[birdnetlib] or https://pypi.org/project/birdnet/[birdnet] through pip with: [source,sh] ---- + +pip3 install birdnetlib + +# or + pip3 install birdnet ---- -You can run BirdNET with: +Please take a look at the https://joeweiss.github.io/birdnetlib/#using-birdnet-analyzer[birdnetlib user guide] on how to analyze audio with `birdnetlib`. + +When using the `birdnet`-package, you can run BirdNET with: [source,python] ---- @@ -283,7 +298,7 @@ print(f"predicted '{prediction}' with a confidence of {confidence:.6f}") ---- For more examples and documentation, make sure to visit https://pypi.org/project/birdnet/[pypi.org/project/birdnet/]. -For any feature request or questions regarding *birdnet*, please add an issue or PR at https://github.com/birdnet-team/birdnet[github.com/birdnet-team/birdnet]. +For any feature request or questions regarding `birdnet`, please add an issue or PR at https://github.com/birdnet-team/birdnet[github.com/birdnet-team/birdnet]. 
=== Setup (Ubuntu) diff --git a/docs/assets/js/projects_data.js b/docs/assets/js/projects_data.js index ee7bebee..39db3e4e 100644 --- a/docs/assets/js/projects_data.js +++ b/docs/assets/js/projects_data.js @@ -1040,6 +1040,36 @@ var projects_data = [ "Species Image": "https://upload.wikimedia.org/wikipedia/commons/4/46/Barred_Owl_%28Strix_varia%29_RWD.jpg", "Species Image Credit": "Dick Daniels, via Wikimedia Commons", "Species Icon": "feather" +}, +{ + "Project name": "Coffee production bird monitoring", + "Organization/Project lead": "Marius Somveille, University College London", + "Target species": "Birds", + "Country": "Mexico", + "Region/Location": "Soconusco Region, Chiapas", + "Latitude": 15.31, + "Longitude": -92.72, + "Contact": "marius@somveille.com", + "Website": null, + "Paper": "https://www.biorxiv.org/content/10.1101/2024.07.12.603271v1", + "Species Image": "https://upload.wikimedia.org/wikipedia/commons/5/5c/Collared_Aracari_%2844517373101%29.jpg", + "Species Image Credit": "Andy Morffew from Itchen Abbas, via Wikimedia Commons", + "Species Icon": "feather" +}, +{ + "Project name": "Common cuckoo vocalisations as an indicator", + "Organization/Project lead": "Adam Mickiewicz University in Poznan, Poznan, Poland", + "Target species": "Common cuckoo", + "Country": "Poland", + "Region/Location": "Odolanow, Poland", + "Latitude": 51.575, + "Longitude": 17.676, + "Contact": "dmwiniarska1@gmail.com (D. Winiarska)", + "Website": null, + "Paper": "https://www.sciencedirect.com/science/article/pii/S1470160X24008574", + "Species Image": "https://upload.wikimedia.org/wikipedia/commons/4/44/Common_Cuckoo_%28Cuculus_canorus%29_%288079424957%29_%282%29.jpg", + "Species Image Credit": "Ron Knight, via Wikimedia Commons", + "Species Icon": "feather" } ]; diff --git a/docs/projects.html b/docs/projects.html index 50e091b6..5a9ed402 100644 --- a/docs/projects.html +++ b/docs/projects.html @@ -45,7 +45,7 @@