<!DOCTYPE article
PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Archiving and Interchange DTD with MathML3 v1.3 20210610//EN" "JATS-archivearticle1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="1.3" xml:lang="en" article-type="research-article"><?properties manuscript?><processing-meta base-tagset="archiving" mathml-version="3.0" table-model="xhtml" tagset-family="jats"><restricted-by>pmc</restricted-by></processing-meta><front><journal-meta><journal-id journal-id-type="nlm-journal-id">8607378</journal-id><journal-id journal-id-type="pubmed-jr-id">5476</journal-id><journal-id journal-id-type="nlm-ta">Laryngoscope</journal-id><journal-id journal-id-type="iso-abbrev">Laryngoscope</journal-id><journal-title-group><journal-title>The Laryngoscope</journal-title></journal-title-group><issn pub-type="ppub">0023-852X</issn><issn pub-type="epub">1531-4995</issn></journal-meta><article-meta><article-id pub-id-type="pmid">37672667</article-id><article-id pub-id-type="pmc">10915103</article-id><article-id pub-id-type="doi">10.1002/lary.31042</article-id><article-id pub-id-type="manuscript">NIHMS1928573</article-id><article-categories><subj-group subj-group-type="heading"><subject>Article</subject></subj-group></article-categories><title-group><article-title>Cough Sounds in Screening and Diagnostics: a Scoping Review</article-title></title-group><contrib-group><contrib contrib-type="author"><name><surname>Hegde</surname><given-names>Siddhi</given-names></name><degrees>MBBS</degrees><xref rid="A1" ref-type="aff">1</xref><xref rid="FN1" ref-type="author-notes">*</xref></contrib><contrib contrib-type="author"><name><surname>Sreeram</surname><given-names>Shreya</given-names></name><degrees>MBBS</degrees><xref rid="A1" ref-type="aff">1</xref><xref rid="FN1" ref-type="author-notes">*</xref></contrib><contrib contrib-type="author"><name><surname>Alter</surname><given-names>Isaac L.</given-names></name><degrees>AB</degrees><xref rid="A2" ref-type="aff">2</xref><xref rid="FN1" ref-type="author-notes">*</xref></contrib><contrib 
contrib-type="author"><name><surname>Shor</surname><given-names>Chaya</given-names></name><degrees>BA</degrees><xref rid="A3" ref-type="aff">3</xref></contrib><contrib contrib-type="author"><name><surname>Valdez</surname><given-names>Tulio A.</given-names></name><degrees>MD, MSc</degrees><xref rid="A4" ref-type="aff">4</xref></contrib><contrib contrib-type="author"><name><surname>Meister</surname><given-names>Kara D.</given-names></name><degrees>MD, FAAP, FACS</degrees><xref rid="A4" ref-type="aff">4</xref><xref rid="FN2" ref-type="author-notes">&#x02020;</xref></contrib><contrib contrib-type="author"><name><surname>Rameau</surname><given-names>Ana&#x000ef;s</given-names></name><degrees>MD, MSc MPhil MS</degrees><xref rid="A3" ref-type="aff">3</xref><xref rid="FN2" ref-type="author-notes">&#x02020;</xref></contrib></contrib-group><aff id="A1"><label>1.</label>KVG Medical College and Hospital, Sullia, Karnataka, India</aff><aff id="A2"><label>2.</label>Columbia University Vagelos College of Physicians and Surgeons, New York, NY, U.S.A.</aff><aff id="A3"><label>3.</label>Weill Cornell Medicine, Sean Parker Institute for the Voice, New York, NY, U.S.A</aff><aff id="A4"><label>4.</label>Division of Pediatric Otolaryngology, Otolaryngology--Head &#x00026; Neck Surgery, Stanford University, Stanford, California, U.S.A</aff><author-notes><fn fn-type="equal" id="FN1"><label>*</label><p id="P1">Authors contributed equally and are considered co-first authors.</p></fn><fn fn-type="equal" id="FN2"><label>&#x02020;</label><p id="P2">Authors contributed equally and are considered co-last authors</p></fn><corresp id="CR1"><underline>Corresponding Author</underline>: Ana&#x000ef;s Rameau, MD MPhil MS FACS, Sean Parker Institute for the Voice, Department of Otolaryngology &#x02013; Head and Neck Surgery, Weill Cornell Medical College, 240 E 59th St 2nd Floor, New York, NY 10022, <email>anr2783@med.cornell.edu</email></corresp></author-notes><pub-date 
pub-type="nihms-submitted"><day>11</day><month>9</month><year>2023</year></pub-date><pub-date pub-type="ppub"><month>3</month><year>2024</year></pub-date><pub-date pub-type="epub"><day>06</day><month>9</month><year>2023</year></pub-date><pub-date pub-type="pmc-release"><day>01</day><month>3</month><year>2025</year></pub-date><volume>134</volume><issue>3</issue><fpage>1023</fpage><lpage>1031</lpage><abstract id="ABS1"><sec id="S1"><title>Objective:</title><p id="P3">To examine applications of cough sounds towards screening tools and diagnostics in the biomedical and engineering literature, with particular focus on disease types, acoustic data collection protocols, data processing and analytics, accuracy, and limitations.</p></sec><sec id="S2"><title>Data Sources:</title><p id="P4">PubMed, EMBASE, Web of Science, Scopus, Cochrane Library, IEEE Xplore, Engineering Village, and ACM Digital Library were searched from inception to August 2021.</p></sec><sec id="S3"><title>Review Methods:</title><p id="P5">A scoping review was conducted on screening and diagnostic uses of cough sounds in adults, children, and animals, in English peer-reviewed and gray literature of any design.</p></sec><sec id="S4"><title>Results:</title><p id="P6">Of 438 abstracts screened, 108 articles met inclusion criteria. Human studies were most common (77.8%); the majority focused on adults (57.3%). Single-modality acoustic data collection was most common (71.2%), with few multimodal studies, including plethysmography (15.7%) and clinico-demographic data (7.4%). Data analytics methods were highly variable, with 61.1% using machine learning, the majority of which (78.8%) were published after 2010. 
Studies commonly focused on cough detection (41.7%) and screening of COVID-19 (11.1%); among pediatric studies, the most common focus was diagnosis of asthma (52.6%).</p></sec><sec id="S5"><title>Conclusion:</title><p id="P7">Though the use of cough sounds in diagnostics is not new, academic interest has accelerated in the past decade. Cough sound offers the possibility of an accessible, non-invasive, and low-cost disease biomarker, particularly in the era of rapid development of machine learning capabilities in combination with the ubiquity of cellular technology with high-quality recording capability. However, most cough sound literature hinges on non-standardized data collection protocols and small, non-diverse, single-modality datasets, with limited external validity.</p></sec></abstract><abstract id="ABS2" abstract-type="summary"><title>Lay Summary:</title><p id="P8">Cough sound analysis holds the promise of an accessible disease biomarker. We survey efforts in using cough sounds in adult, pediatric and animal populations, and examine evolution in hardware and analytics. 
Rapid acceleration is noted in the past decade, though with limitations.</p></abstract><kwd-group><kwd>Machine learning</kwd><kwd>acoustics</kwd><kwd>cough</kwd></kwd-group></article-meta></front><body><sec id="S6"><title>Introduction</title><p id="P9">Cough is an essential defensive reflex of the respiratory tract to clear material from the airway, characterized by the precise coordination of laryngeal, abdominal and chest wall muscles, under the control of medullary and higher cortical regions of the brain.<sup><xref rid="R1" ref-type="bibr">1</xref>,<xref rid="R2" ref-type="bibr">2</xref></sup> Cough is associated with a characteristic sound, reflecting a specific pattern of airway vibration carrying critical information on the condition of the respiratory system.<sup><xref rid="R3" ref-type="bibr">3</xref>,<xref rid="R4" ref-type="bibr">4</xref></sup> Cough sounds are unique and recognizable acoustic events. They have a timbre associated with particular disease states, which have been used by clinicians for diagnostic purposes for centuries. However these characteristic acoustic biomarkers have not yet been translated into applied screening or diagnostic technology and are currently limited to diagnostic clues observed by clinicians.<sup><xref rid="R5" ref-type="bibr">5</xref></sup> Objective measures and automated analyses of cough sounds could transform the screening, diagnosis, and management of many diseases treated by otolaryngologists, pulmonologists, and other health professionals.</p><p id="P10">The current ubiquitous availability of audio recording hardware via cellular technology and exponential advances in data analytics via deep learning have created unprecedented opportunities for collecting and analyzing cough sounds. 
Several promising results have been published in the distinction of normal and abnormal cough sounds,<sup><xref rid="R6" ref-type="bibr">6</xref></sup> screening of disease such as COVID-19,<sup><xref rid="R7" ref-type="bibr">7</xref>&#x02013;<xref rid="R9" ref-type="bibr">9</xref></sup> and prediction of disease severity in common respiratory conditions like asthma,<sup><xref rid="R10" ref-type="bibr">10</xref></sup> chronic bronchitis,<sup><xref rid="R11" ref-type="bibr">11</xref></sup> and croup.<sup><xref rid="R12" ref-type="bibr">12</xref>,<xref rid="R13" ref-type="bibr">13</xref></sup> Diagnosis of respiratory conditions in low-resource regions is fraught with difficulties due to the lack of field-deployable imaging and laboratory facilities, as well as the scarcity of trained community healthcare workers, and cough sound biomarkers could help improve screening and diagnostics in such settings.</p><p id="P11">However, there is currently no consensus on protocols for cough sounds recording and analysis to guide research teams and aid clinical translation. This scoping review is presented to overview the existing literature and propose priorities for future research, including data collection standards. The primary objective is to survey the bioacoustics literature on cough sounds to highlight data collection protocols, acoustic analysis approaches, analytical accuracy, and limitations. The secondary objectives are to determine which disciplines and regions are active in cough sound analytics, which diagnoses and populations are a focus, and whether existing cough sound databases are open-access and represent a diverse population. 
Of note, we included animal studies, as use of cough sounds for livestock respiratory illness monitoring is a rich field of study in veterinary sciences with little reference in the medical literature, despite significant scientific overlap.</p></sec><sec id="S7"><title>Methods</title><p id="P12">The protocol was executed in accordance with the Preferred Reporting Items for Systematic reviews and Meta-Analyses extension for Scoping Reviews (PRISMA-ScR) checklist.<sup><xref rid="R14" ref-type="bibr">14</xref></sup> The review protocol, including the objectives, inclusion criteria, and methods were specified in advance and made available on September 3rd, 2021 under the Open Science Framework platform (<ext-link xlink:href="https://osf.io/gh9t2/" ext-link-type="uri">https://osf.io/gh9t2/</ext-link>).</p><sec id="S8"><title>Search strategy</title><p id="P13">A comprehensive search strategy was developed in consultation with a scholarly medical librarian combining relevant subject headings and keywords related to &#x02018;cough sounds.&#x02019; The search was conducted in PubMed, EMBASE, Web of Science, Scopus, Cochrane Library, IEEE Xplore, Engineering Village, and ACM Digital Library from inception to August 4th, 2021. The search strategy is included in <xref rid="SD1" ref-type="supplementary-material">Supporting Information, Appendix 1</xref>. The following gray literature sources were also searched: arXiv, medRxiv, engrXiv, <ext-link xlink:href="https://www.preprints.org" ext-link-type="uri">preprints.org</ext-link>, Open Science Framework preprints, TechRxiv, and Google Scholar.</p></sec><sec id="S9"><title>Study Selection</title><p id="P14">Records were included if they analyzed cough sounds for screening or diagnosis of disease in human or animal subjects. There were no restrictions based on the year of publication, subject age, study techniques, or study location. Only studies with the full text in the English language were included. 
Narrative reviews, systematic and scoping reviews, research in progress, books, book chapters, conference or meeting abstracts, dissertations, theses, newspaper/magazine articles, editorials, commentaries, and letters to the editor were excluded.</p><p id="P15">Results were uploaded into Covidence systematic review screening software (Veritas Health Innovation, Melbourne, Australia) and de-duplicated. All abstracts (n=433) were reviewed independently by two researchers (S.H. and S.S.) for inclusion eligibility. First, titles and abstracts and then, full texts were independently screened and charted. Disagreements between reviewers were resolved by consensus, and the senior investigators (K.M. and A.R.) were consulted when needed. Inter-rater reliability of the reviewers&#x02019; assessment was reported in the form of Cohen&#x02019;s kappa.<sup><xref rid="R15" ref-type="bibr">15</xref></sup></p></sec><sec id="S10"><title>Data Extraction</title><p id="P16">Following agreement on the data extraction fields and their definition, the research team developed a data extraction sheet, and tested it iteratively for consistent data capture.</p><p id="P17">Data extraction was performed in Covidence and then exported for analysis in Microsoft Excel. Data extraction definitions are included in <xref rid="SD1" ref-type="supplementary-material">Supporting Information, Appendix 2</xref>. Data were independently extracted from selected articles by two team members (S.H, S.S), and verified by a senior author (K.M., A.R.). Descriptive statistics were used to highlight the frequency of modalities of cough sound collection, storage and analysis, and the pathologic conditions represented in the included results.</p></sec></sec><sec id="S11"><title>Results</title><p id="P18">The literature search yielded 433 unique results after deduplication. Following title and abstract screening, 183 studies were excluded. Ultimately, 108 studies underwent a full-text review. 
The reviewer agreement was moderate for title and abstract screening (Cohen&#x02019;s kappa=0.53) and high for full-text review (Cohen&#x02019;s kappa=0.95). The PRISMA flow diagram outlining the study selection process is delineated in <xref rid="F1" ref-type="fig">Figure 1</xref>. Citations for all included studies is available in <xref rid="SD1" ref-type="supplementary-material">Supporting Information, Appendix 3</xref>. A summary of all included studies can be found in <xref rid="SD2" ref-type="supplementary-material">Table S1</xref>.</p><sec id="S12"><title>Author Affiliations and Journals</title><p id="P19">The body of literature was primarily authored by science, technology, engineering, and mathematics (STEM) specialists (68/108, 62.9%), followed by medical (35/108, 32.4%) and veterinary science specialists (5/108, 4.6%) (<xref rid="F2" ref-type="fig">Figure 2A</xref>). A study was included in an expertise category if at least one author&#x02019;s appointment was in that category. Co-authorship of STEM specialists and physicians and veterinarians was present in 34.3% (37/108) and 9.3% (10/108) of articles respectively. The articles were most commonly published in engineering journals (35/108, 32.4%) (<xref rid="F2" ref-type="fig">Figure 2B</xref>). A wide range of geographic regions were represented in the published data; Europe (47/108, 43.5%) and Asia (28/108, 25.9%) were the most common continents (<xref rid="F2" ref-type="fig">Figure 2C</xref>).</p></sec><sec id="S13"><title>Demographics and Clinical Profiles</title><p id="P20">Human studies were most common (84/108, 77.8%); of the studies which disclosed the age of their study population, the majority focused on adult subjects (35/61, 57.3%), while 31.1% (19/61) focused on pediatric patients (<xref rid="F3" ref-type="fig">Figure 3B</xref>). 
The most common livestock species studied were pigs (16/23, 66.7%) and bovine cattle (3/23, 13.0%); guinea pigs, mice, and chickens were also subjects of investigation in very few studies (<xref rid="F3" ref-type="fig">Figure 3C</xref>). Only one study involved both human and animal participants. Among human studies, combined adult and pediatric subjects were included in 7 studies. In human studies, demographic information was reported in 76.5% of studies (65/85). Age, sex, and body weight were the most reported clinico-demographic data, whereas race/ethnicity, gender and height were the least reported.</p><p id="P21">Clinical information, such as past medical history and co-morbidities, were overall inconsistently described across the reviewed articles. In human studies, 36 (61/85, 71.7%) studies reported at least some patient co-morbidities of which, asthma (34/61, 55.7%), chronic obstructive pulmonary disease (COPD) (14/61, 22.9%) and bronchiectasis (7/61, 11.5%) were most common.</p></sec><sec id="S14"><title>Screening/Diagnostic Focus</title><p id="P22"><italic toggle="yes">Cough detection</italic> tools were used to monitor subjects&#x02019; cough numbers throughout a given time frame. This is in contrast with <italic toggle="yes">cough quality classification</italic> tools, which aimed to detect the coughs first and then categorize the cough quality based on type (wet, dry), pitch (often for sex differentiation), and/or severity. <italic toggle="yes">Cough diagnosis classification</italic> studies aimed to determine the pathologic condition underlying cough, such as pneumonia, asthma, or COVID-19. 
<xref rid="F3" ref-type="fig">Figure 3D</xref> describes the proportion of reviewed studies in each category, while <xref rid="T1" ref-type="table">Table 1</xref> divides the studies by both category and authorship.</p><p id="P23">A large number of studies focused on cough detection (45/108, 41.7%) in adults (19/35, 54.2%), children (2/19, 10.5%), and animals (14/23, 60.9%), as well as 43.5% (10/23) of human studies with unspecified age group. A wide number of pathologies were represented in human cough detection studies (<xref rid="F4" ref-type="fig">Figure 4B</xref>). For the 38 studies focused on diagnostics in human subjects, the pathologies examined included most commonly COVID-19 (12/38, 31.6%), asthma (11/38, 28.9%), pneumonia (5/38, 13.2%), pertussis (4/38, 10.5%), croup (3/38, 7.9%), COPD (3/38, 7.9%), and tuberculosis (3/38, 7.9%); of note, many studies focused on diagnosing multiple diseases (<xref rid="F4" ref-type="fig">Figure 4A</xref>). In pediatric studies, the most common diagnostic focus was asthma (10/19, 52.6%). Comparison to a diagnostic gold standard such as chest radiograph or microbiology result was explicitly reported in 21.1% (8/38) of diagnosis classification studies. Only 17 studies (17/108, 15.7%) attempted classification of cough severity. Veterinary diagnostic classification studies were mostly focused on infectious etiologies of cough (like porcine wasting disease, bovine respiratory disease, actinobacillus/pasteurella infections, bronchopneumonia, and other bacterial/viral infections (7/8, 87.5%) rather than chronic non-infectious etiologies, though one study focused on aerial pollutants.</p></sec><sec id="S15"><title>Data acquisition protocols</title><sec id="S16"><title>Microphone and sensor types:</title><p id="P24">The use of an external microphone for data recording was most common (82/108, 75.9%). Twenty-nine studies mentioned the use of a condenser microphone (6 unidirectional, 5 omnidirectional, 11 electret). 
Three studies also relied on piezoelectric sensors. Sixteen studies used contact microphones, 6 studies mouthpieces and 9 studies lapel microphones. Twenty studies used smartphones, while 14 used publicly available or crowdsourced data, often of unknown or inconsistent recording methodology (<xref rid="F5" ref-type="fig">Figure 5A</xref>). Twelve studies used multiple microphones, with some veterinary studies using as many as eight microphones. One study employed the use of earphones attached to the smartphone to record samples. All reviewed veterinary studies used non-contact microphones in order to avoid affecting the behavior of animals. Cough sounds were recorded from a group of animals rather than individual animals in 39% (9/23) veterinary studies, while the others recorded animals one by one; of these veterinary studies, 55.6% (5/9) mentioned the location of the microphone relative to the animal.</p></sec><sec id="S17"><title>Recording protocol:</title><p id="P25">Voluntary cough sounds are defined as those produced on instruction from non-symptomatic human subjects,<sup><xref rid="R16" ref-type="bibr">16</xref>,<xref rid="R17" ref-type="bibr">17</xref></sup> whereas reflexive cough sounds arise as a spontaneous primitive response to protect the airways.<sup><xref rid="R11" ref-type="bibr">11</xref></sup> Reflexive coughs were captured in most of the studies (55/108, 50.9%), followed by voluntary coughs (28/108, 16.7%) and both types (11/108, 10.2%); 14 studies (13.0%) did not specify whether coughs were voluntary or reflexive (<xref rid="F3" ref-type="fig">Figure 3E</xref>). Distance between the microphone and subject&#x02019;s mouth was mentioned in 16 human studies. 
The average distance from the subject&#x02019;s mouth and the microphone was 62.0 cm (median 50cm, range: 15&#x02013;400 cm).</p></sec><sec id="S18"><title>Recording environment:</title><p id="P26">Among human studies focused on cough detection, the most common setting was continuous ambulatory recording, in which patients went about their daily routines with a microphone or other cough monitor (11/31, 35.5%). Other environments included isolated or quiet rooms, in purely experimental settings (7/31, 22.6%); inpatient (3/31, 9.7%) or outpatient (2/31, 6.5%) clinical settings; fixed microphones in a bedroom or other space in patients&#x02019; homes (3/31, 9.7%); or public spaces, such as a hospital waiting room or library (2/31, 6.5%). Three or these studies (9.7%) did not specify the recording setting.</p><p id="P27">For cough quality classification studies in humans, the most common environment was a quiet room or other isolated experimental setup (10/14, 71.4%). Two studies (14.3%) involved recordings in an outpatient clinical setting, and one was recorded in an inpatient hospital setting. Finally, one study captured continuous recordings in patients&#x02019; bedrooms.</p><p id="P28">The most commonly represented environment among human studies focused on cough diagnosis classification was isolated rooms (11/39, 28.2%). Nine studies (23.1%) were recorded in inpatient clinical settings, and three (9.7%) were recorded in outpatient settings. Four studies (12.9%) used recordings from multiple settings, such as both inpatient and outpatient environments. One study (3.2%) obtained recordings from patients at home. Eleven additional studies (28.2%) did not specify a recording setting.</p><p id="P29">Finally, among animal studies, the most common recording environment was on farms (10/23, 43.5%), closely followed by laboratory settings (9/23, 39.1%). 
Four studies (17.4%) used recordings from both a laboratory and a farm.</p></sec></sec><sec id="S19"><title>Dataset characteristics</title><p id="P30">A minority of studies (9/108, 8.3%) did not report on dataset sizes, and more than a quarter of studies (31/108, 28.7%) did not report on the number of cough samples recorded. In those studies specifying dataset size and number of cough sounds collected, an average of 2357 cough samples (2357 &#x000b1; 5086 coughs, median: 804.5, interquartile range: 1955, range: 3 to 30304) were collected in each study, with an average of 617 subjects (617 &#x000b1; 2478 subjects, median: 32, interquartile range: 97, range: 2 to 20,000) and an average of 3.82 coughs were recorded per subject.</p><p id="P31">Single modality in data collection prevailed, with 71.2% (77/108) of studies only collecting acoustic recording for the analysis. Few studies described a multi-modal data collection approach including plethysmography (17/108, 15.7%) or clinical data such as temperature, respiratory rate, or body mass index (8/108, 7.4%). Patient reported outcome measure questionnaires such as the cough visual analogue scale and the Leicester Cough Questionnaire were utilized in four studies. Other multi-modal approaches relied on chest radiographs (1/108, 1%), thermal imaging (2/114, 1.8%), demographic features (3/108, 2.8%), video recording (1/108, 1%), and pressure/flow measurements (3/108, 2.8%).</p></sec><sec id="S20"><title>Data analytics approach</title><sec id="S21"><title>Preprocessing</title><p id="P32">Preprocessing of the acoustic signal was done by de-clipping, de-noising, re-sampling, or windowing in (82/108) 75.9% of all the studies. Various methods of <italic toggle="yes">noise reduction</italic>, i.e., improving signal to noise ratio, were specifically studied in 9 (9/108, 8.3%) studies.</p></sec><sec id="S22"><title>Data analytics</title><p id="P33">Data analytics were highly variable across studies and across time. 
The majority of studies relied on machine learning (66/108, 61.1%) for either cough detection or diagnosis classification, with the bulk of such studies (52/66, 78.8%) published after 2010 (<xref rid="F5" ref-type="fig">Figure 5B</xref>).</p><p id="P34">The accuracy and sensitivity of the cough recognition models had wide ranges, 40%&#x02212;100% and 32%&#x02212;99.1% respectively. Human studies were more likely to report accuracy data (47/85, 55.3%) as compared to animal studies (10/23, 43.4%). Human studies had a mean accuracy of 88.2% whereas the average animal study accuracy was higher at 93.1%. Accuracy largely improved over time as nonlinear data analytics offered by machine learning models became more sophisticated. However, among the studies that reported accuracy, only two studies (2/57, 3.5%) used entirely separate datasets for training and testing of their algorithms. Much more common (27/57, 47.4%) was the subsetting of one central dataset for training and testing a machine learning algorithm, while four studies (8.5%) used a version of leave-one-out cross-validation. Studies that did not specify what data was used for training and testing of their machine learning model, or studies that used the exact same data for testing and training, were deemed to have unreliable accuracy measurement; such accuracy statistics were not included in <xref rid="SD2" ref-type="supplementary-material">Table S1</xref>.</p></sec></sec><sec id="S23"><title>Publication Years</title><p id="P35">Publication dates ranged from 1980 to 2021, with the majority of the studies being conducted in the recent decade of 2012&#x02013;2021 (61/108, 56.5%). <xref rid="F5" ref-type="fig">Figure 5</xref> depicts the frequency of each modality for cough sound collection and data analysis segmented by decade.</p></sec><sec id="S24"><title>Dataset origin</title><p id="P36">Single institution studies with closed-access datasets were more common (87.0%). 
A minority, but growing number of studies, (13.0%) relied on open-access datasets. Some of the popular datasets are described in <xref rid="SD1" ref-type="supplementary-material">Table S2</xref>.</p></sec><sec id="S25"><title>Limitations</title><p id="P37">Fifty-seven (57/108, 52.8%) of all studies reported limitations in their approach or models. Several studies reported on a limited sample size (12/108, 11.1%). In the animal studies, several authors cited interference with ambient noise and limited number of microphones, leading to poor area coverage on big farms.</p></sec></sec><sec id="S26"><title>Discussion</title><p id="P38">This scoping review aimed to examine the utilization of cough sounds in screening and diagnostics. The number of publications analyzing cough acoustics over the past decade has been rapidly increasing, which possibly reflects technological advances in recording capabilities &#x02013; especially with the ubiquitous availability of smartphone recording technology and improved aptitude for analysis of complex data with the advent of machine learning. Our study is the first scoping review of the cough sound literature that is agnostic to the analytic methodology. Other unique features of our scoping review is the incorporation of adult, pediatric, and animal studies, surveying the field broadly across disciplines. This is in contrast with Serrurier et al.,<sup><xref rid="R18" ref-type="bibr">18</xref></sup> who performed a comparative literature review of the cough sound literature without standardized methodology. In addition, our publication includes studies from veterinary science, in which many cough analysis advances have been made in recent years that could be relevant to human health. 
We also specifically categorized the human literature into adult and pediatric studies.</p><p id="P39">The authors found the primary objective of the plurality of studies was diagnostic classification of respiratory illnesses such as COVID-19, COPD, asthma and tuberculosis in the adult study population, and asthma, pneumonia, pertussis and croup in the pediatric population. The number of studies focused on cough detection followed closely. Less commonly, some studies focused on classifying cough sounds based on character and severity.</p><p id="P40">Recording protocols were highly variable and had several limitations. For instance, though most studies relied on reflexive cough sounds, among those that collected voluntary cough sounds, only a fraction of studies described the specific instructions for subjects to produce cough. Though the presence of background noise may affect the quality of cough sound collection, only a minority of studies utilized a completely noise-free room and nearly a quarter of the studies (23%) did not specify the recording environment.</p><p id="P41">With respect to hardware for cough sound acquisition, we found the majority of studies used an external microphone. The type of external microphones varied with the aim of the study. For example, cough counting studies often used lapel microphones to ensure comfort of human subjects during continuous data collection. In contrast, studies focused on the quality of the cough sounds were more likely to use contact microphones to reduce noise by deriving audio signals directly from mechanical vibrations transmitted from the point of physical contact with the subject. 
Contact microphones may pick up otherwise inaudible acoustic data, potentially improving differentiation of cough sound types.</p><p id="P42">In recent years, cellular phones, with their wide availability and rapidly evolving hardware and software sophistication, have quickly replaced individual condenser microphones and associated recorder via non-contact recording. The use of smartphones was first noted in 2011, becoming more predominant from 2016 onwards. Smartphones offer the convenience of built-in microphones, data storage and processing software under one device. Smartphones also stand out as convenient and ubiquitous for mobile data collection at a population level, and hold the potential of continuous monitoring at the individual level. However, continuous audio sensing by smartphone poses complex privacy concerns, which have limited implementation and adoption.<sup><xref rid="R19" ref-type="bibr">19</xref></sup></p><p id="P43">Nearly all studies used preprocessing techniques of the cough sound prior to running an acoustic analysis. Most acoustic analysis relied on spectrograms, which represent the spectro-temporal correlations present in the audio signal in the form of an image. Of particular concern for cough sounds, which are explosive acoustic events, is the phenomenon of clipping, i.e. the loss of high amplitude acoustic data when an audio signal is too loud. To avoid clipping in the recording of cough sounds, the distance between the subject&#x02019;s mouth and the microphone becomes pertinent. Additionally, if the subject is too close to the microphone, gusts of air cause distortion of audio data, compromising its quality.<sup><xref rid="R20" ref-type="bibr">20</xref></sup> Relatively few studies mentioned the exact distance between the subject and the microphone, with the average distance being 50 cm.</p><p id="P44">Irrespective of their objective, the majority of studies focused on single modality of cough acoustic data in their predictive models. 
The few studies that used a multi-modality approach combined cough sounds with demographic data or clinical data such as body temperature or respiratory rate. A handful of studies also supplemented their acoustic data with patient reported outcome measures, such as the cough visual analogue scale and the Leicester Cough Questionnaire. A multi-modality approach has been associated with an increase in the precision of disease predictive models based on cough sounds.<sup><xref rid="R21" ref-type="bibr">21</xref></sup></p><p id="P45">Developments in the cough sound literature correlate with technological advancements in audio recording and data analytics. For instance, ambulatory audio recording devices appeared in the 1990s, and enabled continuous monitoring and real-time data capture of cough sounds. In the following decades, the introduction of the Fast Fourier Transform (FFT) underlying spectrograms and Mel-frequency cepstral coefficients (MFCCs) allowed researchers to capture more acoustic data dimensions.<sup><xref rid="R22" ref-type="bibr">22</xref></sup> Additionally, in the past decade, the development of machine learning based on artificial neural networks expanded the ability to analyze vast amounts of complex digitized data, allowing researchers to exploit the potential of cough sounds at a much greater scale and with higher level of predictive precision. Researchers now have programs that simplify and increase the efficiency of data pre-processing, with such capabilities as distinguishing and tagging the contaminant background noises and allowing for drastic increase in the magnitude of data incorporated in predictive models. Data storage has also recently shifted online, mostly to the cloud, making accessibility of data easy and convenient. 
Availability of open-access large datasets of cough sounds has further contributed to the acceleration of publications in the past decade.</p><p id="P46">Livestock studies demonstrated that a one-to-one ratio of microphone to subject yields the best results in terms of audio acquisition quality. Factors which influenced hardware choice for animal cough sound recording included body weight and the species of animal involved. Most animal studies focused on field recording, with limitations related to ambient noise and poor discrimination between individual animals. This approach was particularly effective in respiratory infection spread monitoring and localization of sick animals. Interestingly, such field recordings have not been implemented for humans, likely due to challenges related to privacy concerns.</p><p id="P47">Our scoping review revealed trends of drawbacks amongst the studies reviewed. Included studies were authored by scientists in resource-affluent countries, despite the potential for higher impact in low-resource settings. In addition, study populations were consistently small, and there was a lack of prospective studies. Inclusion of demographic features was inconsistent across studies, and as highlighted in other scoping reviews in bioacoustics, we found a lack of reporting on race/ethnicity, gender, and socio-economic status of participants in human studies.<sup><xref rid="R23" ref-type="bibr">23</xref></sup></p><p id="P48">This scoping review attempted to examine all articles relevant to cough sounds in screening and diagnostics in humans and animals published in English, though our selected search terms may not have captured all the relevant literature. 
The lack of critical appraisal of the included literature is inherent to scoping reviews and limits our ability to report on the quality of the medical evidence in this field of bioacoustics.</p><p id="P49">This study is also limited by the date range of the literature search, which does not include advances in the literature in the past two years as the field has continued to expand. Notably, diagnosis of COVID-19 from cough sounds using machine learning has remained a frequent focus.<sup><xref rid="R24" ref-type="bibr">24</xref>&#x02013;<xref rid="R26" ref-type="bibr">26</xref></sup> For instance, a recent study by Davidson et al. sought to classify COVID-19-related pneumonia severity based on cough sounds.<sup><xref rid="R27" ref-type="bibr">27</xref></sup> Kuluozturk et al. used a machine learning model to diagnose COVID-19 as well as heart failure and acute asthma based on cough sounds,<sup><xref rid="R28" ref-type="bibr">28</xref></sup> while Yellapu et al. focused on the diagnosis of pulmonary tuberculosis.<sup><xref rid="R29" ref-type="bibr">29</xref></sup> Novel cough detection algorithms have also continued to be developed,<sup><xref rid="R30" ref-type="bibr">30</xref>,<xref rid="R31" ref-type="bibr">31</xref></sup> and Xu et al. 
proposed a smartphone-based cough sound analysis as a potential home-based pulmonary function test.<sup><xref rid="R32" ref-type="bibr">32</xref></sup> Importantly, the effort to expand publicly available cough sound datasets has also continued, improving the landscape for better training and testing of future machine learning models.<sup><xref rid="R33" ref-type="bibr">33</xref></sup></p><p id="P50">While no cough detection or diagnostic technology has yet achieved FDA approval, multiple cough monitoring devices, including VitaloJak and the Leicester Cough Monitor, have been used in research.<sup><xref rid="R34" ref-type="bibr">34</xref></sup> More recently, several app-based ventures, notably ResApp (for cough sound diagnosis)<sup><xref rid="R35" ref-type="bibr">35</xref></sup> and Hyfe (for cough counting),<sup><xref rid="R36" ref-type="bibr">36</xref></sup> have continued to develop rapidly and are likely to apply for regulatory approval for clinical use in the near future. Should this technology become clinically available, it has the potential to significantly impact the practice of otolaryngologists. For example, cough diagnosis algorithms could play a role in the diagnosis of conditions such as croup, for which cough sounds have diagnostic value. Cough detection and cough counting technology could transform the management of neurogenic cough managed by otolaryngologists, allowing for reliable and objective assessment of response to treatment as measured by cough number and frequency.</p></sec><sec id="S27"><title>Conclusion</title><p id="P51">Cough sound analysis holds the promise of accessible, non-invasive, and inexpensive diagnostic tools but is limited in practice currently. The majority of current studies hinge on non-standardized data collection protocols and small, non-diverse, private datasets from single institutions. 
To build a robust model with wide utility and applicability, researchers need a diverse population, in order to capture all possible cough sound variants. The advent of machine learning and the ubiquity of smartphone technology with high quality audio recording capabilities in the past decade have accelerated interest in cough acoustics for screening and diagnosis.</p></sec><sec sec-type="supplementary-material" id="SM1"><title>Supplementary Material</title><supplementary-material id="SD1" position="float" content-type="local-data"><label>Supinfo</label><caption><p id="P56">Table S2. Open access datasets with access links and their availability.</p><p id="P57">Supplementary Appendix 1. Search strategy.</p><p id="P58">Supplementary Appendix 2. Data extraction definitions.</p><p id="P59">Supplementary Appendix 3. Citations for all included articles.</p></caption><media xlink:href="NIHMS1928573-supplement-Supinfo.docx" id="d66e532" position="anchor"/></supplementary-material><supplementary-material id="SD2" position="float" content-type="local-data"><label>Table S1</label><caption><p id="P55">Table S1. Summary of included studies organized by subject area.</p></caption><media xlink:href="NIHMS1928573-supplement-Table_S1.docx" id="d66e538" position="anchor"/></supplementary-material></sec></body><back><ack id="S28"><title>Funding:</title><p id="P52">Ana&#x000ef;s Rameau was supported by a Paul B. 
Beeson Emerging Leaders Career Development Award in Aging (K76 AG079040) from the National Institute on Aging and by the Bridge2AI award (OT2 OD032720) from the NIH Common Fund.</p></ack><fn-group><fn fn-type="COI-statement" id="FN3"><p id="P53">Conflicts of Interest:</p><p id="P54">Ana&#x000ef;s Rameau owns equity of Perceptron Health, Inc.</p></fn><fn id="FN4"><p id="P90">This project was presented as a poster at the 2022 American Broncho-Esophagological Association Meeting in April 2022 in Dallas, TX.</p></fn></fn-group><ref-list><title>References</title><ref id="R1"><label>1.</label><mixed-citation publication-type="journal"><name><surname>Widdicombe</surname><given-names>J</given-names></name>, <name><surname>Fontana</surname><given-names>G</given-names></name>. <article-title>Cough: what&#x02019;s in a name?</article-title>
<source>Eur Respir J</source>. <year>2006</year>;<volume>28</volume>(<issue>1</issue>):<fpage>10</fpage>&#x02013;<lpage>15</lpage>. doi:<pub-id pub-id-type="doi">10.1183/09031936.06.00096905</pub-id><pub-id pub-id-type="pmid">16816346</pub-id>
</mixed-citation></ref><ref id="R2"><label>2.</label><mixed-citation publication-type="journal"><name><surname>Brooks</surname><given-names>SM</given-names></name>. <article-title>Perspective on the human cough reflex</article-title>. <source>Cough Lond Engl</source>. <year>2011</year>;<volume>7</volume>:<fpage>10</fpage>. doi:<pub-id pub-id-type="doi">10.1186/1745-9974-7-10</pub-id></mixed-citation></ref><ref id="R3"><label>3.</label><mixed-citation publication-type="journal"><name><surname>von Leden</surname><given-names>H</given-names></name>, <name><surname>Isshiki</surname><given-names>N</given-names></name>
<article-title>AN ANALYSIS OF COUGH AT THE LEVEL OF THE LARYNX</article-title>. <source>Arch Otolaryngol Chic Ill 1960</source>. <year>1965</year>;<volume>81</volume>:<fpage>616</fpage>&#x02013;<lpage>625</lpage>. doi:<pub-id pub-id-type="doi">10.1001/archotol.1965.00750050631016</pub-id></mixed-citation></ref><ref id="R4"><label>4.</label><mixed-citation publication-type="confproc"><name><surname>Kosasih</surname><given-names>K</given-names></name>, <name><surname>Abeyratne</surname><given-names>UR</given-names></name>, <name><surname>Swarnkar</surname><given-names>V</given-names></name>. <part-title>High frequency analysis of cough sounds in pediatric patients with respiratory diseases</part-title>. <conf-name>Conf Proc Annu Int Conf IEEE Eng Med Biol Soc IEEE Eng Med Biol Soc Conf</conf-name>. <year>2012</year>;<volume>2012</volume>((<name><surname>Kosasih</surname><given-names>K</given-names></name>) <publisher-name>School of ITEE, The University of Queensland</publisher-name>, <publisher-loc>Brisbane, Australia</publisher-loc>.):<fpage>5654</fpage>&#x02013;<lpage>5657</lpage>.</mixed-citation></ref><ref id="R5"><label>5.</label><mixed-citation publication-type="journal"><name><surname>Korpas</surname><given-names>J</given-names></name>, <name><surname>Vrabec</surname><given-names>M</given-names></name>, <name><surname>Sadlonova</surname><given-names>J</given-names></name>, <name><surname>Salat</surname><given-names>D</given-names></name>, <name><surname>Debreczeni</surname><given-names>LA</given-names></name>. <article-title>Analysis of the cough sound frequency in adults and children with bronchial asthma</article-title>. <source>Acta Physiol Hung</source>. <year>2003</year>;<volume>90</volume>(<issue>1</issue>):<fpage>27</fpage>&#x02013;<lpage>34</lpage>. doi:<pub-id pub-id-type="doi">10.1556/APhysiol.90.2003.1.4</pub-id><pub-id pub-id-type="pmid">12666872</pub-id>
</mixed-citation></ref><ref id="R6"><label>6.</label><mixed-citation publication-type="journal"><name><surname>Abaza</surname><given-names>AA</given-names></name>, <name><surname>Day</surname><given-names>JB</given-names></name>, <name><surname>Reynolds</surname><given-names>JS</given-names></name>, <etal/>
<article-title>Classification of voluntary cough sound and airflow patterns for detecting abnormal pulmonary function</article-title>. <source>Cough</source>. <year>2009</year>;<volume>5</volume>(<issue>1</issue>):<fpage>8</fpage>. doi:<pub-id pub-id-type="doi">10.1186/1745-9974-5-8</pub-id><pub-id pub-id-type="pmid">19930559</pub-id>
</mixed-citation></ref><ref id="R7"><label>7.</label><mixed-citation publication-type="journal"><name><surname>Melek</surname><given-names>M</given-names></name>
<article-title>Diagnosis of COVID-19 and non-COVID-19 patients by classifying only a single cough sound</article-title>. <source>Neural Comput Appl</source>. <comment>Published online</comment>
<year>2021</year>:<fpage>1</fpage>&#x02013;<lpage>12</lpage>. doi:<pub-id pub-id-type="doi">10.1007/s00521-021-06346-3</pub-id></mixed-citation></ref><ref id="R8"><label>8.</label><mixed-citation publication-type="journal"><name><surname>Khanzada</surname><given-names>A</given-names></name>, <name><surname>Hegde</surname><given-names>S</given-names></name>, <name><surname>Sreeram</surname><given-names>S</given-names></name>, <etal/>
<article-title>Challenges and Opportunities in Deploying COVID-19 Cough AI Systems</article-title>. <source>J Voice Off J Voice Found</source>. <year>2021</year>;<volume>35</volume>(<issue>6</issue>):<fpage>811</fpage>&#x02013;<lpage>812</lpage>. doi:<pub-id pub-id-type="doi">10.1016/j.jvoice.2021.08.009</pub-id></mixed-citation></ref><ref id="R9"><label>9.</label><mixed-citation publication-type="journal"><name><surname>Melek Manshouri</surname><given-names>N</given-names></name>
<article-title>Identifying COVID-19 by using spectral analysis of cough recordings: a distinctive classification study</article-title>. <source>Cogn Neurodyn</source>. <comment>Published online</comment>
<month>July</month>
<day>29</day>, <year>2021</year>:<fpage>1</fpage>&#x02013;<lpage>15</lpage>. doi:<pub-id pub-id-type="doi">10.1007/s11571-021-09695-w</pub-id></mixed-citation></ref><ref id="R10"><label>10.</label><mixed-citation publication-type="journal"><name><surname>Toop</surname><given-names>LJ</given-names></name>, <name><surname>Thorpe</surname><given-names>CW</given-names></name>, <name><surname>Fright</surname><given-names>R</given-names></name>. <article-title>Cough sound analysis: A new tool for the diagnosis of asthma?</article-title>
<source>Fam Pract</source>. <year>1989</year>;<volume>6</volume>(<issue>2</issue>):<fpage>83</fpage>&#x02013;<lpage>85</lpage>.<pub-id pub-id-type="pmid">2663580</pub-id>
</mixed-citation></ref><ref id="R11"><label>11.</label><mixed-citation publication-type="journal"><name><surname>Piirila</surname><given-names>P</given-names></name>, <name><surname>Sovijarvi</surname><given-names>AR</given-names></name>. <article-title>Differences in acoustic and dynamic characteristics of spontaneous cough in pulmonary diseases</article-title>. <source>Chest</source>. <year>1989</year>;<volume>96</volume>(<issue>1</issue>):<fpage>46</fpage>&#x02013;<lpage>53</lpage>. doi:<pub-id pub-id-type="doi">10.1378/chest.96.1.46</pub-id><pub-id pub-id-type="pmid">2736992</pub-id>
</mixed-citation></ref><ref id="R12"><label>12.</label><mixed-citation publication-type="confproc"><name><surname>Sharan</surname><given-names>RV</given-names></name>, <name><surname>Abeyratne</surname><given-names>UR</given-names></name>, <name><surname>Swarnkar</surname><given-names>VR</given-names></name>, <name><surname>Porter</surname><given-names>P</given-names></name>. <article-title>Cough sound analysis for diagnosing croup in pediatric patients using biologically inspired features</article-title>. <conf-name>Conf Proc Annu Int Conf IEEE Eng Med Biol Soc IEEE Eng Med Biol Soc Annu Conf</conf-name>. <year>2017</year>;<volume>2017</volume>((<name><surname>Sharan</surname><given-names>RV</given-names></name>; <name><surname>Abeyratne</surname><given-names>UR</given-names></name>; <name><surname>Swarnkar</surname><given-names>VR</given-names></name>; <name><surname>Porter</surname><given-names>P</given-names></name>)):<fpage>4578</fpage>&#x02013;<lpage>4581</lpage>. doi:<pub-id pub-id-type="doi">10.1109/EMBC.2017.8037875</pub-id></mixed-citation></ref><ref id="R13"><label>13.</label><mixed-citation publication-type="journal"><name><surname>Sharan</surname><given-names>RV</given-names></name>, <name><surname>Abeyratne</surname><given-names>UR</given-names></name>, <name><surname>Swarnkar</surname><given-names>VR</given-names></name>, <name><surname>Porter</surname><given-names>P</given-names></name>. <article-title>Automatic croup diagnosis using cough sound recognition</article-title>. <source>IEEE Trans Biomed Eng</source>. <year>2019</year>;<volume>66</volume>(<issue>2</issue>):<fpage>485</fpage>&#x02013;<lpage>495</lpage>. doi:<pub-id pub-id-type="doi">10.1109/TBME.2018.2849502</pub-id><pub-id pub-id-type="pmid">29993458</pub-id>
</mixed-citation></ref><ref id="R14"><label>14.</label><mixed-citation publication-type="journal"><name><surname>Kapadia</surname><given-names>MZ</given-names></name>, <name><surname>Askie</surname><given-names>L</given-names></name>, <name><surname>Hartling</surname><given-names>L</given-names></name>, <etal/>
<article-title>PRISMA-Children (C) and PRISMA-Protocol for Children (P-C) Extensions: a study protocol for the development of guidelines for the conduct and reporting of systematic reviews and meta-analyses of newborn and child health research</article-title>. <source>BMJ Open</source>. <year>2016</year>;<volume>6</volume>(<issue>4</issue>):<fpage>e010270</fpage>. doi:<pub-id pub-id-type="doi">10.1136/bmjopen-2015-010270</pub-id></mixed-citation></ref><ref id="R15"><label>15.</label><mixed-citation publication-type="journal"><name><surname>McHugh</surname><given-names>ML</given-names></name>. <article-title>Interrater reliability: the kappa statistic</article-title>. <source>Biochem Medica</source>. <year>2012</year>;<volume>22</volume>(<issue>3</issue>):<fpage>276</fpage>&#x02013;<lpage>282</lpage>.</mixed-citation></ref><ref id="R16"><label>16.</label><mixed-citation publication-type="journal"><name><surname>Van Hirtum</surname><given-names>A</given-names></name>, <name><surname>Berckmans</surname><given-names>D</given-names></name>. <article-title>Automated recognition of spontaneous versus voluntary cough</article-title>. <source>Med Eng Phys</source>. <year>2002</year>;<volume>24</volume>(<issue>7&#x02013;8</issue>):<fpage>541</fpage>&#x02013;<lpage>545</lpage>. doi:<pub-id pub-id-type="doi">10.1016/S1350-4533(02)00056-5</pub-id><pub-id pub-id-type="pmid">12237051</pub-id>
</mixed-citation></ref><ref id="R17"><label>17.</label><mixed-citation publication-type="journal"><name><surname>Van Hirtum</surname><given-names>A</given-names></name>, <name><surname>Berckmans</surname><given-names>D</given-names></name>. <article-title>Assessing the sound of cough towards vocality</article-title>. <source>Med Eng Phys</source>. <year>2002</year>;<volume>24</volume>(<issue>7&#x02013;8</issue>):<fpage>535</fpage>&#x02013;<lpage>540</lpage>. doi:<pub-id pub-id-type="doi">10.1016/S1350-4533(02)00055-3</pub-id><pub-id pub-id-type="pmid">12237050</pub-id>
</mixed-citation></ref><ref id="R18"><label>18.</label><mixed-citation publication-type="journal"><name><surname>Serrurier</surname><given-names>A</given-names></name>, <name><surname>Neuschaefer-Rube</surname><given-names>C</given-names></name>, <name><surname>R&#x000f6;hrig</surname><given-names>R</given-names></name>. <article-title>Past and Trends in Cough Sound Acquisition, Automatic Detection and Automatic Classification: A Comparative Review</article-title>. <source>Sensors (Basel)</source>. <year>2022</year>
<month>Apr</month>
<day>10</day>;<volume>22</volume>(<issue>8</issue>):<fpage>2896</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s22082896</pub-id>.<pub-id pub-id-type="pmid">35458885</pub-id>
</mixed-citation></ref><ref id="R19"><label>19.</label><mixed-citation publication-type="journal"><name><surname>Agu</surname><given-names>E</given-names></name>, <name><surname>Pedersen</surname><given-names>P</given-names></name>, <name><surname>Strong</surname><given-names>D</given-names></name>, <etal/>
<article-title>The smartphone as a medical device: Assessing enablers, benefits and challenges</article-title>. In: <source>2013 IEEE International Conference on Sensing, Communications and Networking (SECON)</source>. ; <volume>2013</volume>:<fpage>76</fpage>&#x02013;<lpage>80</lpage>. doi:<pub-id pub-id-type="doi">10.1109/SAHCN.2013.6644964</pub-id></mixed-citation></ref><ref id="R20"><label>20.</label><mixed-citation publication-type="journal"><name><surname>Stinson</surname><given-names>MR</given-names></name>, <name><surname>Daigle</surname><given-names>GA</given-names></name>, <name><surname>Quaroni</surname><given-names>JF</given-names></name>. <article-title>Airflow noise in telephone handsets and methods for its reduction</article-title>. <source>J Acoust Soc Am</source>. <year>2005</year>;<volume>118</volume>(<issue>1</issue>):<fpage>205</fpage>&#x02013;<lpage>212</lpage>. doi:<pub-id pub-id-type="doi">10.1121/1.1931087</pub-id><pub-id pub-id-type="pmid">16119343</pub-id>
</mixed-citation></ref><ref id="R21"><label>21.</label><mixed-citation publication-type="journal"><name><surname>Kumar</surname><given-names>S</given-names></name>, <name><surname>Nagar</surname><given-names>R</given-names></name>, <name><surname>Bhatnagar</surname><given-names>S</given-names></name>, <name><surname>Vaddi</surname><given-names>R</given-names></name>, <name><surname>Gupta</surname><given-names>SK</given-names></name>, <name><surname>Rashid</surname><given-names>M</given-names></name>, <name><surname>Bashir</surname><given-names>AK</given-names></name>, <name><surname>Alkhalifah</surname><given-names>T</given-names></name>. <article-title>Chest X ray and cough sample based deep learning framework for accurate diagnosis of COVID-19</article-title>. <source>Comput Electr Eng</source>. <year>2022</year>
<month>Oct</month>;<volume>103</volume>:<fpage>108391</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compeleceng.2022.108391</pub-id><pub-id pub-id-type="pmid">36119394</pub-id>
</mixed-citation></ref><ref id="R22"><label>22.</label><mixed-citation publication-type="journal"><name><surname>K&#x000fc;&#x000e7;&#x000fc;kbay</surname><given-names>SE</given-names></name>, <name><surname>Sert</surname><given-names>M</given-names></name>. <article-title>Audio-based event detection in office live environments using optimized MFCC-SVM approach</article-title>. In: <source>Proceedings of the 2015 IEEE 9th International Conference on Semantic Computing (IEEE ICSC 2015)</source>. ; <volume>2015</volume>:<fpage>475</fpage>&#x02013;<lpage>480</lpage>. doi:<pub-id pub-id-type="doi">10.1109/ICOSC.2015.7050855</pub-id></mixed-citation></ref><ref id="R23"><label>23.</label><mixed-citation publication-type="journal"><name><surname>Bensoussan</surname><given-names>Y</given-names></name>, <name><surname>Vanstrum</surname><given-names>EB</given-names></name>, <name><surname>Johns</surname><given-names>MM</given-names><suffix>3rd</suffix></name>, <name><surname>Rameau</surname><given-names>A</given-names></name>. <article-title>Artificial Intelligence and Laryngeal Cancer: From Screening to Prognosis: A State of the Art Review</article-title>. <source>Otolaryngol Head Neck Surg</source>. <year>2023</year>
<month>Mar</month>;<volume>168</volume>(<issue>3</issue>):<fpage>319</fpage>&#x02013;<lpage>329</lpage>. doi: <pub-id pub-id-type="doi">10.1177/01945998221110839</pub-id>.<pub-id pub-id-type="pmid">35787073</pub-id>
</mixed-citation></ref><ref id="R24"><label>24.</label><mixed-citation publication-type="journal"><name><surname>Celik</surname><given-names>G</given-names></name>
<article-title>CovidCoughNet: A new method based on convolutional neural networks and deep feature extraction using pitch-shifting data augmentation for covid-19 detection from cough, breath, and voice signals</article-title>. <source>Comput Biol Med</source>. <year>2023</year>
<month>Jun</month>
<day>8</day>;<volume>163</volume>:<fpage>107153</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2023.107153</pub-id>. <comment>Epub ahead of print.</comment><pub-id pub-id-type="pmid">37321101</pub-id>
</mixed-citation></ref><ref id="R25"><label>25.</label><mixed-citation publication-type="journal"><name><surname>Pavel</surname><given-names>I</given-names></name>, <name><surname>Ciocoiu</surname><given-names>IB</given-names></name>. <article-title>COVID-19 Detection from Cough Recordings Using Bag-of-Words Classifiers</article-title>. <source>Sensors (Basel)</source>. <year>2023</year>
<month>May</month>
<day>23</day>;<volume>23</volume>(<issue>11</issue>):<fpage>4996</fpage>.<pub-id pub-id-type="pmid">37299721</pub-id>
</mixed-citation></ref><ref id="R26"><label>26.</label><mixed-citation publication-type="journal"><name><surname>Chowdhury</surname><given-names>NK</given-names></name>, <name><surname>Kabir</surname><given-names>MA</given-names></name>, <name><surname>Rahman</surname><given-names>MM</given-names></name>, <name><surname>Islam</surname><given-names>SMS</given-names></name>. <article-title>Machine learning for detecting COVID-19 from cough sounds: An ensemble-based MCDM method</article-title>. <source>Comput Biol Med</source>. <year>2022</year>
<month>Jun</month>;<volume>145</volume>:<fpage>105405</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.105405</pub-id>. <comment>Epub 2022 Mar 17.</comment><pub-id pub-id-type="pmid">35318171</pub-id>
</mixed-citation></ref><ref id="R27"><label>27.</label><mixed-citation publication-type="journal"><name><surname>Davidson</surname><given-names>C</given-names></name>, <name><surname>Caguana</surname><given-names>OA</given-names></name>, <name><surname>Lozano-Garc&#x000ed;a</surname><given-names>M</given-names></name>, <name><surname>Arita Guevara</surname><given-names>M</given-names></name>, <name><surname>Estrada-Petrocelli</surname><given-names>L</given-names></name>, <name><surname>Ferrer-Lluis</surname><given-names>I</given-names></name>, <name><surname>Castillo-Escario</surname><given-names>Y</given-names></name>, <name><surname>Aus&#x000ed;n</surname><given-names>P</given-names></name>, <name><surname>Gea</surname><given-names>J</given-names></name>, <name><surname>Jan&#x000e9;</surname><given-names>R</given-names></name>. <article-title>Differences in acoustic features of cough by pneumonia severity in patients with COVID-19: a cross-sectional study</article-title>. <source>ERJ Open Res</source>. <year>2023</year>
<month>May</month>
<day>2</day>;<volume>9</volume>(<issue>3</issue>):<fpage>00247</fpage>&#x02013;<lpage>2022</lpage>.<pub-id pub-id-type="pmid">37131524</pub-id>
</mixed-citation></ref><ref id="R28"><label>28.</label><mixed-citation publication-type="journal"><name><surname>Kuluozturk</surname><given-names>M</given-names></name>, <name><surname>Kobat</surname><given-names>MA</given-names></name>, <name><surname>Barua</surname><given-names>PD</given-names></name>, <name><surname>Dogan</surname><given-names>S</given-names></name>, <name><surname>Tuncer</surname><given-names>T</given-names></name>, <name><surname>Tan</surname><given-names>RS</given-names></name>, <name><surname>Ciaccio</surname><given-names>EJ</given-names></name>, <name><surname>Acharya</surname><given-names>UR</given-names></name>. <article-title>DKPNet41: Directed knight pattern network-based cough sound classification model for automatic disease diagnosis</article-title>. <source>Med Eng Phys</source>. <year>2022</year>
<month>Dec</month>;<volume>110</volume>:<fpage>103870</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.medengphy.2022.103870</pub-id>. <comment>Epub 2022 Aug 6.</comment><pub-id pub-id-type="pmid">35989223</pub-id>
</mixed-citation></ref><ref id="R29"><label>29.</label><mixed-citation publication-type="journal"><name><surname>Yellapu</surname><given-names>GD</given-names></name>, <name><surname>Rudraraju</surname><given-names>G</given-names></name>, <name><surname>Sripada</surname><given-names>NR</given-names></name>, <name><surname>Mamidgi</surname><given-names>B</given-names></name>, <name><surname>Jalukuru</surname><given-names>C</given-names></name>, <name><surname>Firmal</surname><given-names>P</given-names></name>, <name><surname>Yechuri</surname><given-names>V</given-names></name>, <name><surname>Varanasi</surname><given-names>S</given-names></name>, <name><surname>Peddireddi</surname><given-names>VS</given-names></name>, <name><surname>Bhimarasetty</surname><given-names>DM</given-names></name>, <name><surname>Kanisetti</surname><given-names>S</given-names></name>, <name><surname>Joshi</surname><given-names>N</given-names></name>, <name><surname>Mohapatra</surname><given-names>P</given-names></name>, <name><surname>Pamarthi</surname><given-names>K</given-names></name>. <article-title>Development and clinical validation of Swaasa AI platform for screening and prioritization of pulmonary TB</article-title>. <source>Sci Rep</source>. <year>2023</year>
<month>Mar</month>
<day>23</day>;<volume>13</volume>(<issue>1</issue>):<fpage>4740</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-023-31772-9</pub-id>. <comment>Erratum in: Sci Rep. 2023 Jun 26;13(1):10353.</comment><pub-id pub-id-type="pmid">36959347</pub-id>
</mixed-citation></ref><ref id="R30"><label>30.</label><mixed-citation publication-type="journal"><name><surname>You</surname><given-names>M</given-names></name>, <name><surname>Wang</surname><given-names>W</given-names></name>, <name><surname>Li</surname><given-names>Y</given-names></name>, <name><surname>Liu</surname><given-names>J</given-names></name>, <name><surname>Xu</surname><given-names>X</given-names></name>, <name><surname>Qiu</surname><given-names>Z</given-names></name>. <article-title>Automatic cough detection from realistic audio recordings using C-BiLSTM with boundary regression</article-title>. <source>Biomed Signal Process Control</source>. <year>2022</year>
<month>Feb</month>;<volume>72</volume>:<fpage>103304</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2021.103304</pub-id>. <comment>Epub 2021 Nov 11.</comment><pub-id pub-id-type="pmid">36569172</pub-id>
</mixed-citation></ref><ref id="R31"><label>31.</label><mixed-citation publication-type="journal"><name><surname>Barata</surname><given-names>F</given-names></name>, <name><surname>Cleres</surname><given-names>D</given-names></name>, <name><surname>Tinschert</surname><given-names>P</given-names></name>, <name><surname>Iris Shih</surname><given-names>CH</given-names></name>, <name><surname>Rassouli</surname><given-names>F</given-names></name>, <name><surname>Boesch</surname><given-names>M</given-names></name>, <name><surname>Brutsche</surname><given-names>M</given-names></name>, <name><surname>Fleisch</surname><given-names>E</given-names></name>. <article-title>Nighttime Continuous Contactless Smartphone-Based Cough Monitoring for the Ward: Validation Study</article-title>. <source>JMIR Form Res</source>. <year>2023</year>
<month>Feb</month>
<day>20</day>;<volume>7</volume>:<fpage>e38439</fpage>.<pub-id pub-id-type="pmid">36655551</pub-id>
</mixed-citation></ref><ref id="R32"><label>32.</label><mixed-citation publication-type="journal"><name><surname>Xu</surname><given-names>W</given-names></name>, <name><surname>He</surname><given-names>G</given-names></name>, <name><surname>Pan</surname><given-names>C</given-names></name>, <name><surname>Shen</surname><given-names>D</given-names></name>, <name><surname>Zhang</surname><given-names>N</given-names></name>, <name><surname>Jiang</surname><given-names>P</given-names></name>, <name><surname>Liu</surname><given-names>F</given-names></name>, <name><surname>Chen</surname><given-names>J</given-names></name>. <article-title>A forced cough sound based pulmonary function assessment method by using machine learning</article-title>. <source>Front Public Health</source>. <year>2022</year>
<month>Oct</month>
<day>25</day>;<volume>10</volume>:<fpage>1015876</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpubh.2022.1015876</pub-id>.<pub-id pub-id-type="pmid">36388361</pub-id>
</mixed-citation></ref><ref id="R33"><label>33.</label><mixed-citation publication-type="journal"><name><surname>Bhattacharya</surname><given-names>D</given-names></name>, <name><surname>Sharma</surname><given-names>NK</given-names></name>, <name><surname>Dutta</surname><given-names>D</given-names></name>, <name><surname>Chetupalli</surname><given-names>SR</given-names></name>, <name><surname>Mote</surname><given-names>P</given-names></name>, <name><surname>Ganapathy</surname><given-names>S</given-names></name>, <name><surname>Chandrakiran</surname><given-names>C</given-names></name>, <name><surname>Nori</surname><given-names>S</given-names></name>, <name><surname>Suhail</surname><given-names>KK</given-names></name>, <name><surname>Gonuguntla</surname><given-names>S</given-names></name>, <name><surname>Alagesan</surname><given-names>M</given-names></name>. <article-title>Coswara: A respiratory sounds and symptoms dataset for remote screening of SARS-CoV-2 infection</article-title>. <source>Sci Data</source>. <year>2023</year>
<month>Jun</month>
<day>22</day>;<volume>10</volume>(<issue>1</issue>):<fpage>397</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41597-023-02266-0</pub-id>.<pub-id pub-id-type="pmid">37349364</pub-id>
</mixed-citation></ref><ref id="R34"><label>34.</label><mixed-citation publication-type="journal"><name><surname>Zhang</surname><given-names>M</given-names></name>, <name><surname>Sykes</surname><given-names>DL</given-names></name>, <name><surname>Brindle</surname><given-names>K</given-names></name>, <name><surname>Sadofsky</surname><given-names>LR</given-names></name>, <name><surname>Morice</surname><given-names>AH</given-names></name>. <article-title>Chronic cough-the limitation and advances in assessment techniques</article-title>. <source>J Thorac Dis</source>. <year>2022</year>
<month>Dec</month>;<volume>14</volume>(<issue>12</issue>):<fpage>5097</fpage>&#x02013;<lpage>5119</lpage>. doi: <pub-id pub-id-type="doi">10.21037/jtd-22-874</pub-id>.<pub-id pub-id-type="pmid">36647459</pub-id>
</mixed-citation></ref><ref id="R35"><label>35.</label><mixed-citation publication-type="journal"><article-title>Pfizer offers AU$100 million for Australian company that developed COVID-19 cough diagnostic app</article-title>. <source>MobiHealthNews</source>. <comment>Published April</comment>
<day>21</day>, <year>2022</year>. <comment>Accessed</comment>
<date-in-citation>July 26, 2023</date-in-citation>. <comment><ext-link xlink:href="https://www.mobihealthnews.com/news/anz/pfizer-offers-au100-million-australian-company-developed-covid-19-cough-diagnostic-app#:~:text=Biopharmaceutical%20giant%20Pfizer%20has%20offered" ext-link-type="uri">https://www.mobihealthnews.com/news/anz/pfizer-offers-au100-million-australian-company-developed-covid-19-cough-diagnostic-app#:~:text=Biopharmaceutical%20giant%20Pfizer%20has%20offered</ext-link></comment></mixed-citation></ref><ref id="R36"><label>36.</label><mixed-citation publication-type="journal"><name><surname>Hammerand</surname><given-names>J</given-names></name>
<article-title>Cough-counting is harder than it sounds, but AI and smartphones can help</article-title>. <source>Medical Design and Outsourcing</source>. <comment>Published April</comment>
<day>20</day>, <year>2023</year>. <comment>Accessed</comment>
<date-in-citation>July 26, 2023</date-in-citation>. <comment><ext-link xlink:href="https://www.medicaldesignandoutsourcing.com/hyfe-chronic-cough-detection-software-counting-ai-smartphones/" ext-link-type="uri">https://www.medicaldesignandoutsourcing.com/hyfe-chronic-cough-detection-software-counting-ai-smartphones/</ext-link></comment></mixed-citation></ref></ref-list></back><floats-group><fig position="float" id="F1"><label>Figure 1.</label><caption><p id="P60">Preferred reporting items for Systematic reviews and Meta-Analyses extension for Scoping Reviews (PRISMA-ScR) flow diagram.</p></caption><graphic xlink:href="nihms-1928573-f0001" position="float"/></fig><fig position="float" id="F2"><label>Figure 2.</label><caption><p id="P61">Pie charts showing the specialties of first authors (A), the fields represented by journals in which reviewed studies were published (B), and the region in which data were collected for each study (C). STEM = science, technology, engineering, and math.</p></caption><graphic xlink:href="nihms-1928573-f0002" position="float"/></fig><fig position="float" id="F3"><label>Figure 3.</label><caption><p id="P62">Pie charts illustrating type of study (A), population studied (B), animal species represented in animal studies (C), analytic focus of the studies (D), and the type of cough analyzed in each study (E).</p></caption><graphic xlink:href="nihms-1928573-f0003" position="float"/></fig><fig position="float" id="F4"><label>Figure 4.</label><caption><p id="P63">Bar graphs showing pathologies diagnosed using cough classification models (A) and the pathologies included in cough detection studies (B).</p></caption><graphic xlink:href="nihms-1928573-f0004" position="float"/></fig><fig position="float" id="F5"><label>Figure 5.</label><caption><p id="P64">Graphs showing prevalence of data collection methods (A) and data analysis methods (B) over time. 
Crowdsourced and publicly available recordings usually had unspecified or variable recording methodology.</p></caption><graphic xlink:href="nihms-1928573-f0005" position="float"/></fig><table-wrap position="float" id="T1"><label>Table 1:</label><caption><p id="P65">Distribution of author specialties organized by study type</p></caption><table frame="box" rules="all"><colgroup span="1"><col align="left" valign="middle" span="1"/><col align="left" valign="middle" span="1"/><col align="left" valign="middle" span="1"/><col align="left" valign="middle" span="1"/><col align="left" valign="middle" span="1"/></colgroup><thead><tr><th align="left" valign="top" rowspan="1" colspan="1">Type of study</th><th align="left" valign="top" rowspan="1" colspan="1">Papers With At Least One Author in STEM</th><th align="left" valign="top" rowspan="1" colspan="1">Papers With At Least One Author in Medicine</th><th align="left" valign="top" rowspan="1" colspan="1">Papers With Physician and STEM Authors</th><th align="left" valign="top" rowspan="1" colspan="1">Papers With At Least One Author in Veterinary Science</th></tr></thead><tbody><tr><td align="left" valign="top" rowspan="1" colspan="1">Cough Detection (45)</td><td align="left" valign="top" rowspan="1" colspan="1">37 (82.2%)</td><td align="left" valign="top" rowspan="1" colspan="1">22 (48.9%)</td><td align="left" valign="top" rowspan="1" colspan="1">12 (26.7%)</td><td align="left" valign="top" rowspan="1" colspan="1">7 (15.6%)</td></tr><tr><td align="left" valign="top" rowspan="1" colspan="1">Cough Quality Classification (16)</td><td align="left" valign="top" rowspan="1" colspan="1">9 (56.3%)</td><td align="left" valign="top" rowspan="1" colspan="1">13 (81.3%)</td><td align="left" valign="top" rowspan="1" colspan="1">5 (31.3%)</td><td align="left" valign="top" rowspan="1" colspan="1">0 (0%)</td></tr><tr><td align="left" valign="top" rowspan="1" colspan="1">Cough Diagnosis Classification (47)</td><td align="left" valign="top" 
rowspan="1" colspan="1">39 (83.0%)</td><td align="left" valign="top" rowspan="1" colspan="1">26 (55.3%)</td><td align="left" valign="top" rowspan="1" colspan="1">20 (42.6%)</td><td align="left" valign="top" rowspan="1" colspan="1">5 (10.6%)</td></tr></tbody></table><table-wrap-foot><fn id="TFN1"><p id="P66">STEM = science, technology, engineering, and math.</p></fn></table-wrap-foot></table-wrap></floats-group></article>