@inproceedings{537ff45000d8442bab273276a1db839c,
  title     = {{I-SED}: An Interactive Sound Event Detector},
  abstract  = {Tagging of sound events is essential in many research areas. However, finding sound events and labeling them within a long audio file is tedious and time-consuming. Building an automatic recognition system using machine learning techniques is often not feasible because it requires a large number of human-labeled training examples and fine tuning the model for a specific application. Fully automated labeling is also not reliable enough for all uses. We present I-SED, an interactive sound detection interface using a human-in-the-loop approach that lets a user reduce the time required to label audio that is tediously long (e.g. 20 hours) to do manually and has too few prior labeled examples (e.g. one) to train a state-of-the-art machine audio labeling system. We performed a human-subject study to validate its effectiveness and the results showed that our tool helped participants label all target sound events within a recording twice as fast as labeling them manually.},
  keywords  = {Human-in-the-loop system, Interactive machine learning, Sound event detection},
  author    = {Kim, Bongjun and Pardo, Bryan A.},
  year      = {2017},
  month     = mar,
  day       = {7},
  doi       = {10.1145/3025171.3025231},
  language  = {English (US)},
  series    = {International Conference on Intelligent User Interfaces, Proceedings IUI},
  publisher = {Association for Computing Machinery},
  pages     = {553--557},
  booktitle = {{IUI} 2017 - Proceedings of the 22nd International Conference on Intelligent User Interfaces},
  note      = {22nd International Conference on Intelligent User Interfaces, IUI 2017 ; Conference date: 13-03-2017 Through 16-03-2017},
}