niekrasz.bib

@INPROCEEDINGS{ChaudhriCheyerEtAl06_case,
  AUTHOR = {Vinay K. Chaudhri and Adam Cheyer and Richard Giuli and Bill Jarrold
	and Karen L. Myers and John Niekrasz},
  TITLE = {A case study in engineering a knowledge base for an intelligent personal
	assistant},
  BOOKTITLE = {{Proceedings of the Semantic Desktop and Social Semantic Collaboration
	Workshop (SemDesk)}},
  LOCATION = {Athens, Georgia},
  YEAR = {2006},
  ABSTRACT = {We present a case study in engineering a knowledge base to meet the
	requirements of an intelligent personal assistant. The assistant
	is designed to function as part of a semantic desktop application,
	with the goal of helping a user manage and organize his information
	as well as supporting the user in performing tasks. We describe the
	knowledge base development process, the knowledge engineering challenges
	we faced in the process and our solutions to them, and important
	lessons learned during the process.},
  MONTH = NOV,
  PDF = {ChaudhriCheyerEtAl06_case.pdf}
}

@INPROCEEDINGS{ChengBrattEtAl04_Wizard,
  AUTHOR = {Hua Cheng and Harry Bratt and Rohit Mishra and Elizabeth Shriberg
	and Sandra Upson and Joyce Chen and Fuliang Weng and Stanley Peters
	and Lawrence Cavedon and John Niekrasz},
  TITLE = {A {W}izard of {O}z framework for collecting spoken human-computer
	dialogs},
  BOOKTITLE = {{Proceedings of the 8th International Conference on Spoken Language
	Processing (INTERSPEECH - ICSLP)}},
  LOCATION = {Jeju Island, Korea},
  YEAR = {2004},
  PAGES = {2269--2272},
  ABSTRACT = {This paper describes a data collection process aimed at gathering
	human-computer dialogs in high-stress or ``busy'' domains where the
	user is concentrating on tasks other than the conversation, for example,
	when driving a car. Designing spoken dialog interfaces for such domains
	is extremely challenging and the data collected will help us improve
	the dialog system interface and performance, understand how humans
	perform these tasks with respect to stressful situations, and obtain
	speech utterances for extracting prosodic features. This paper describes
	the experimental design for collecting speech data in a simulated
	driving environment.},
  MONTH = OCT,
  PDF = {ChengBrattEtAl04_Wizard.pdf}
}

@INPROCEEDINGS{EhlenLaidebeureEtAl06_Browsing,
  AUTHOR = {Patrick Ehlen and St{\'e}phane Laidebeure and John Niekrasz and Matthew
	Purver and John Dowding and Stanley Peters},
  TITLE = {Browsing meetings: {A}utomatic understanding, presentation and feedback
	for multi-party conversations},
  BOOKTITLE = {{brandial'06: Proceedings of the 10th Workshop on the Semantics and
	Pragmatics of Dialogue (SemDial-10)}},
  LOCATION = {Potsdam, Germany},
  YEAR = {2006},
  PAGES = {173--174},
  ABSTRACT = {We present a system for extracting useful information from multi-party
	meetings and presenting the results to users via a browser. Users
	can view automatically extracted discussion topics and action items,
	initially seeing high-level descriptions, but with the ability to
	click through to meeting audio and video. Users can also add value:
	new topics can be defined and searched for, and action items can
	be edited or corrected, deleted or confirmed. These feedback actions
	are used as implicit supervision by the understanding agents, retraining
	classifier models for improved or user-tailored performance.},
  MONTH = SEP,
  PDF = {EhlenLaidebeureEtAl06_Browsing.pdf}
}

@INPROCEEDINGS{EhlenPurverEtAl07_meeting,
  AUTHOR = {Patrick Ehlen and Matthew Purver and John Niekrasz},
  TITLE = {A meeting browser that learns},
  BOOKTITLE = {{Interaction Challenges for Intelligent Assistants: Papers from the
	2007 AAAI Spring Symposium: Technical Report SS-07-04}},
  LOCATION = {Stanford, California},
  YEAR = {2007},
  PAGES = {33--40},
  PUBLISHER = {AAAI Press},
  ABSTRACT = {We present a system for extracting useful information from multi-party
	meetings and presenting the results to users via a browser. Users
	can view automatically extracted discussion topics and action items,
	initially seeing high-level descriptions, but with the ability to
	click through to meeting audio and video. Users can also add value
	by defining and searching for new topics and editing, correcting,
	deleting, or confirming action items. These feedback actions are
	used as implicit supervision by the understanding agents, retraining
	classifier models for improved or user-tailored performance.},
  ISBN = {978-1-57735-313-3},
  MONTH = MAR,
  PDF = {EhlenPurverEtAl07_meeting.pdf}
}

@INPROCEEDINGS{EhlenThe07_Multimodal,
  AUTHOR = {Patrick Ehlen and {The CALO Team}},
  TITLE = {Multimodal Meeting Capture and Understanding with {The CALO Meeting
	Assistant}},
  BOOKTITLE = {{Proceedings of the 4th Workshop on Machine Learning for Multimodal
	Interaction (MLMI)}},
  LOCATION = {Brno, Czech Republic},
  YEAR = {2007},
  NOTE = {Demo.},
  ABSTRACT = {The CALO Meeting Assistant is a multimodal meeting assistant technology
	that integrates speech, gestures, and multimodal data collected from
	multiparty interactions during meetings. Using machine learning and
	robust discourse processing, it provides a rich, browsable record
	of a meeting.},
  MONTH = JUN,
  PDF = {EhlenThe07_Multimodal.pdf}
}

@INPROCEEDINGS{GruensteinCavedonEtAl04_Managing,
  AUTHOR = {Alexander Gruenstein and Lawrence Cavedon and John Niekrasz and Dominic
	Widdows and Stanley Peters},
  TITLE = {Managing uncertainty in dialogue information state for real time
	understanding of multi-human meeting dialogues},
  BOOKTITLE = {{Proceedings of the 8th Workshop on the Semantics and Pragmatics
	of Dialogue (SEMDIAL)}},
  LOCATION = {Barcelona, Spain},
  YEAR = {2004},
  PAGES = {152--153},
  ABSTRACT = {We are concerned with tracking and understanding dialogue between
	multiple human participants, specifically in meetings, in such a way
	that the dialogue system does not intervene. In this scenario, the
	system is not able to provide feedback on whether or not it has understood,
	and is unable to ask for clarification or ambiguity resolution. Our
	ultimate aim is to model human-human dialogue (to the extent that
	it is feasible) in real-time, providing useful services (e.g. relevant
	document retrieval) and answering queries about the dialogue state
	and history (e.g. ``what action items do we have so far?''). Our approach
	has been to extend our existing dialogue system, based on the information-state
	update approach, which supports a rich semantic interpretation of
	multi-utterance constructions, to cope with the added uncertainty
	inherent in two-person meetings in which the participants speak,
	point, and draw on a whiteboard.},
  ISBN = {84-609-2205-7},
  MONTH = JUL,
  PDF = {GruensteinCavedonEtAl04_Managing.pdf}
}

@INCOLLECTION{GruensteinNiekraszEtAl07_Meeting,
  AUTHOR = {Alexander Gruenstein and John Niekrasz and Matthew Purver},
  TITLE = {Meeting Structure Annotation: Annotations Collected with a General
	Purpose Toolkit},
  BOOKTITLE = {{Recent Trends in Discourse and Dialogue}},
  PUBLISHER = {Springer-Verlag},
  YEAR = {2007},
  EDITOR = {Laila Dybkj{\ae}r and Wolfgang Minker},
  SERIES = {Text, Speech and Language Technology},
  ABSTRACT = {We describe a generic set of tools for representing, annotating, and
	analyzing multi-party discourse, including: an ontology of multimodal
	discourse, a programming interface for that ontology, and NOMOS -
	a flexible and extensible toolkit for browsing and annotating discourse.
	We describe applications built using the NOMOS framework to facilitate
	a real annotation task, as well as for visualizing and adjusting
	features for machine learning tasks. We then present a set of
	hierarchical topic segmentations and action item subdialogues collected
	over 56 meetings from the ICSI and ISL meeting corpora using our
	tools. These annotations are designed to support research towards
	automatic meeting understanding.},
  PDF = {GruensteinNiekraszEtAl07_Meeting.pdf}
}

@INPROCEEDINGS{GruensteinNiekraszEtAl05_Meeting,
  AUTHOR = {Alexander Gruenstein and John Niekrasz and Matthew Purver},
  TITLE = {Meeting structure annotation: {D}ata and tools},
  BOOKTITLE = {{Proceedings of the 6th SIGdial Workshop on Discourse and Dialogue}},
  LOCATION = {Lisbon, Portugal},
  YEAR = {2005},
  PAGES = {117--127},
  ABSTRACT = {We present a set of annotations of hierarchical topic segmentations
	and action item subdialogues collected over 65 meetings from the
	ICSI and ISL meeting corpora, designed to support automatic meeting
	understanding and analysis. We describe an architecture for representing,
	annotating, and analyzing multi-party discourse, including: an ontology
	of multimodal discourse, a programming interface for that ontology,
	and an audiovisual toolkit which facilitates browsing and annotating
	discourse, as well as visualizing and adjusting features for machine
	learning tasks.},
  INSTITUTION = {Association for Computational Linguistics},
  MONTH = SEP,
  PDF = {GruensteinNiekraszEtAl05_Meeting.pdf}
}

@INPROCEEDINGS{GuptaNiekraszEtAl07_Resolving,
  AUTHOR = {Surabhi Gupta and John Niekrasz and Matthew Purver and Dan Jurafsky},
  TITLE = {Resolving ``You'' in Multi-Party Dialog},
  BOOKTITLE = {{Proceedings of the 8th SIGdial Workshop on Discourse and Dialogue}},
  LOCATION = {Antwerp, Belgium},
  YEAR = {2007},
  ABSTRACT = {This paper presents experiments into the resolution of ``you'' in
	multi-party dialog, dividing this process into three tasks: distinguishing
	between generic and referential uses; distinguishing between singular
	and plural reference; and identifying the referred-to addressee(s).
	First we perform a multi-corpus experiment into referentiality detection,
	achieving an accuracy of 73.8\% on multi-party data. Our next experiment
	deals with singular vs. plural reference, achieving an accuracy of
	71.4\%. Our last experiment is on the task of addressee identification
	for referential ``you'' utterances, achieving an accuracy of 67\%
	without the use of visual information; the output of the first two
	experiments is shown to help.},
  INSTITUTION = {Association for Computational Linguistics},
  MONTH = SEP,
  PDF = {GuptaNiekraszEtAl07_Resolving.pdf}
}

@INPROCEEDINGS{KaiserDemirdjianEtAl04_multimodal,
  AUTHOR = {Ed Kaiser and David Demirdjian and Alexander Gruenstein and Xiaoguang
	Li and John Niekrasz and Matt Wesson and Sanjeev Kumar},
  TITLE = {A multimodal learning interface for sketch, speak and point creation
	of a schedule chart},
  BOOKTITLE = {{Proceedings of the 6th International Conference on Multimodal Interfaces
	(ICMI)}},
  LOCATION = {State College, Pennsylvania},
  YEAR = {2004},
  PAGES = {329--330},
  PUBLISHER = {{ACM Press}},
  ABSTRACT = {We present a video demonstration of an agent-based test bed application
	for ongoing research into multi-user, multimodal, computer-assisted
	meetings. The system tracks a two person scheduling meeting: one
	person standing at a touch sensitive whiteboard creating a Gantt
	chart, while another person looks on in view of a calibrated stereo
	camera. The stereo camera performs real-time, untethered, vision-based
	tracking of the onlooker's head, torso and limb movements, which
	in turn are routed to a 3D-gesture recognition agent. Using speech,
	3D deictic gesture, and 2D object de-referencing, the system is able
	to track the onlooker's suggestion to move a specific milestone.
	The system also has a speech recognition agent capable of recognizing
	out-of-vocabulary (OOV) words as phonetic sequences. Thus when a
	user at the whiteboard speaks an OOV label name for a chart constituent
	while also writing it, the OOV speech is combined with letter sequences
	hypothesized by the handwriting recognizer to yield an orthography,
	pronunciation and semantics for the new label. These are then learned
	dynamically by the system and become immediately available for future
	recognition.},
  ISBN = {1-58113-995-0},
  MONTH = OCT,
  PDF = {KaiserDemirdjianEtAl04_multimodal.pdf}
}

@INPROCEEDINGS{NiekraszGruenstein06_NOMOS,
  AUTHOR = {John Niekrasz and Alexander Gruenstein},
  TITLE = {{NOMOS}: {A} {S}emantic {W}eb software framework for annotation of
	multimodal corpora},
  BOOKTITLE = {{Proceedings of the 5th International Conference on Language Resources
	and Evaluation (LREC)}},
  LOCATION = {Genoa, Italy},
  YEAR = {2006},
  ABSTRACT = {We present NOMOS, an open-source software framework for annotation,
	processing, and analysis of multimodal corpora. NOMOS is designed
	for use by annotators, corpus developers, and corpus consumers, emphasizing
	configurability for a variety of specific annotation tasks. Its features
	include synchronized multi-channel audio and video playback, compatibility
	with several corpora, platform independence, and mixed display of
	temporal, non-temporal, and relational information. We describe NOMOS
	from two perspectives. First, we present its software architecture,
	highlighting its principal difference from comparable systems: its
	use of an OWL-based semantic annotation back-end which provides automatic
	inference capabilities and a well-defined method for layering datasets.
	Second, we describe how the system is used. For corpus development
	and annotation we present a typical use scenario involving the creation
	of a schema and specialization of the user interface. For processing
	and analysis we describe the GUI- and Java-based methods available,
	including a GUI for query construction and execution, and an automatically
	generated schema-conforming Java API for processing of annotations.
	Additionally, we present some specific annotation and research tasks
	for which NOMOS has been specialized and used, including topic segmentation
	and decision-point annotation of meetings.},
  MONTH = MAY,
  PDF = {NiekraszGruenstein06_NOMOS.pdf}
}

@INPROCEEDINGS{NiekraszGruensteinEtAl04_Multi-human,
  AUTHOR = {John Niekrasz and Alexander Gruenstein and Lawrence Cavedon},
  TITLE = {Multi-human dialogue understanding for assisting artifact-producing
	meetings},
  BOOKTITLE = {{Proceedings of the 20th International Conference on Computational
	Linguistics (COLING)}},
  LOCATION = {Geneva, Switzerland},
  YEAR = {2004},
  PAGES = {432--438},
  ABSTRACT = {In this paper we present the dialogue understanding components of
	an architecture for assisting multi-human conversations in artifact-producing
	meetings: meetings in which tangible products such as project planning
	charts are created. Novel aspects of our system include multimodal
	ambiguity resolution, modular ontology-driven artifact manipulation,
	and a meeting browser for use during and after meetings. We describe
	the software architecture and demonstrate the system using an example
	multimodal dialogue.},
  MONTH = AUG,
  PDF = {NiekraszGruensteinEtAl04_Multi-human.pdf}
}

@INCOLLECTION{NiekraszPurver06_multimodal,
  AUTHOR = {John Niekrasz and Matthew Purver},
  TITLE = {A multimodal discourse ontology for meeting understanding},
  BOOKTITLE = {{Machine Learning for Multimodal Interaction: Second International
	Workshop, MLMI 2005, Edinburgh, UK, July 11--13, 2005, Revised Selected
	Papers}},
  PUBLISHER = {Springer},
  YEAR = {2006},
  EDITOR = {Steve Renals and Samy Bengio},
  VOLUME = {3869},
  SERIES = {Lecture Notes in Computer Science},
  PAGES = {162--173},
  ABSTRACT = {In this paper, we present a multimodal discourse ontology that serves
	as a knowledge representation and annotation framework for the discourse
	understanding component of an artificial personal office assistant.
	The ontology models components of natural language, multimodal communication,
	multi-party dialogue structure, meeting structure, and the physical
	and temporal aspects of human communication. We compare our models
	to those from the research literature and from similar applications.
	We also highlight some annotations which have been made in conformance
	with the ontology as well as some algorithms which have been trained
	on these data and suggest elements of the ontology that may be of
	immediate interest for further annotation by human or automated means.},
  PDF = {NiekraszPurver06_multimodal.pdf}
}

@INPROCEEDINGS{NiekraszPurverEtAl05_Ontology-based,
  AUTHOR = {John Niekrasz and Matthew Purver and John Dowding and Stanley Peters},
  TITLE = {Ontology-based discourse understanding for a persistent meeting assistant},
  BOOKTITLE = {{Persistent Assistants: Living and Working with AI: Papers from the
	2005 AAAI Spring Symposium: Technical Report SS-05-05}},
  LOCATION = {Stanford, California},
  YEAR = {2005},
  PAGES = {26--33},
  PUBLISHER = {AAAI Press},
  ABSTRACT = {In this paper, we present research toward ontology-based understanding
	of discourse in meetings and describe an ontology of multimodal discourse
	designed for this purpose. We investigate its application in an integrated
	but modular architecture which uses semantically annotated knowledge
	of communicative meeting activity as well as discourse subject matter.
	We highlight how this approach assists in improving system performance
	over time and supports understanding in a changing and persistent
	environment. We also describe current and future plans for ontology-driven
	robust natural language understanding in the presence of the highly
	ambiguous and errorful input typical of the meeting domain.},
  ISBN = {1-57735-231-9},
  MONTH = MAR,
  PDF = {NiekraszPurverEtAl05_Ontology-based.pdf}
}

@INPROCEEDINGS{PallottaNiekraszEtAl05_Collaborative,
  AUTHOR = {Vincenzo Pallotta and John Niekrasz and Matthew Purver},
  TITLE = {Collaborative and argumentative models of natural discussions},
  BOOKTITLE = {{Proceedings of the 5th Workshop on Computational Models of Natural
	Argument (CMNA)}},
  LOCATION = {Edinburgh, Scotland},
  YEAR = {2005},
  ABSTRACT = {We report in this paper experiences and insights resulting from the
	first two years of work in two similar projects on meeting tracking
	and understanding. The projects are the DARPA-funded CALO project
	and the Swiss National research project IM2. The findings from these
	two projects have been shared and compared in order to come up with
	a joint ontology as a model for argumentative discussions in meetings.
	We highlight the complexity of the problem in modeling interaction
	and discourse in argumentative discussions and we propose a solution
	based on the construction of a specific knowledge base.},
  KEYWORDS = {Argumentation},
  MONTH = JUL,
  PDF = {PallottaNiekraszEtAl05_Collaborative.pdf}
}

@INPROCEEDINGS{PurverDowdingEtAl07_Detecting,
  AUTHOR = {Matthew Purver and John Dowding and John Niekrasz and Patrick Ehlen
	and Sharareh Noorbaloochi},
  TITLE = {Detecting and Summarizing Action Items in Multi-Party Dialogue},
  BOOKTITLE = {{Proceedings of the 8th SIGdial Workshop on Discourse and Dialogue}},
  LOCATION = {Antwerp, Belgium},
  YEAR = {2007},
  ABSTRACT = {This paper addresses the problem of identifying action items discussed
	in open-domain conversational speech, and does so in two stages:
	firstly, detecting the subdialogues in which action items are proposed,
	discussed and committed to; and secondly, extracting the phrases
	that accurately capture or summarize the tasks they involve. While
	the detection problem is hard, we show that by taking account of
	dialogue structure we can achieve reasonable accuracy. We then describe
	a semantic parser that identifies potential summarizing phrases,
	and show that for some task properties these can be more informative
	than plain utterance transcriptions.},
  INSTITUTION = {Association for Computational Linguistics},
  MONTH = SEP,
  PDF = {PurverDowdingEtAl07_Detecting.pdf}
}

@INCOLLECTION{PurverEhlenEtAl06_Detecting,
  AUTHOR = {Matthew Purver and Patrick Ehlen and John Niekrasz},
  TITLE = {Detecting action items in multi-party meetings: {A}nnotation and
	initial experiments},
  BOOKTITLE = {{Machine Learning for Multimodal Interaction: Third International
	Workshop, MLMI 2006, Bethesda, MD, USA, May 1--4, 2006, Revised Selected
	Papers}},
  PUBLISHER = {Springer},
  YEAR = {2006},
  EDITOR = {Steve Renals and Samy Bengio and Jonathan Fiscus},
  VOLUME = {4299},
  SERIES = {Lecture Notes in Computer Science},
  PAGES = {200--211},
  ABSTRACT = {This paper presents the results of initial investigation and experiments
	into automatic action item detection from transcripts of multi-party
	human-human meetings. We start from our previous flat action item
	annotations, and show that automatic classification performance is
	limited. We then describe a new hierarchical annotation schema based
	on the roles utterances play in the action item assignment process,
	and propose a corresponding approach to automatic detection that
	promises improved classification accuracy while also enabling the
	extraction of useful information for summarization and reporting.},
  KEYWORDS = {Action Items},
  PDF = {PurverEhlenEtAl06_Detecting.pdf}
}

@INPROCEEDINGS{PurverEhlenEtAl06_Shallow,
  AUTHOR = {Matthew Purver and Patrick Ehlen and John Niekrasz},
  TITLE = {Shallow discourse structure for action item detection},
  BOOKTITLE = {{Proceedings of the 2006 HLT-NAACL Workshop on Analyzing Conversations
	in Text and Speech}},
  LOCATION = {New York City, New York},
  YEAR = {2006},
  PAGES = {31--34},
  ABSTRACT = {We investigated automatic action item detection from transcripts of
	multi-party meetings. Unlike previous work (Gruenstein et al., 2005),
	we use a new hierarchical annotation scheme based on the roles utterances
	play in the action item assignment process, and propose an approach
	to automatic detection that promises improved classification accuracy
	while enabling the extraction of useful information for summarization
	and reporting.},
  KEYWORDS = {Action Items},
  MONTH = JUN,
  PDF = {PurverEhlenEtAl06_Shallow.pdf}
}

@INCOLLECTION{PurverNiekraszEtAl07_Automatic,
  AUTHOR = {Matthew Purver and John Niekrasz and Patrick Ehlen},
  TITLE = {Automatic annotation of dialogue structure from simple user interaction},
  BOOKTITLE = {{Machine Learning for Multimodal Interaction: Fourth International
	Workshop, MLMI 2007, Brno, Czech Republic, Revised Selected Papers}},
  PUBLISHER = {Springer-Verlag},
  YEAR = {2007},
  EDITOR = {Andrei Popescu-Belis and Steve Renals and Herv{\'e} Bourlard},
  VOLUME = {4892},
  SERIES = {Lecture Notes in Computer Science},
  PAGES = {44--59},
  ABSTRACT = {Previously, we presented a method for automatic detection of action
	items from natural conversation. This method relies on supervised
	classification techniques that are trained on data annotated according
	to a hierarchical notion of dialogue structure; data which are expensive
	and time-consuming to produce. Subsequently, we presented a meeting
	browser which allows users to view a set of automatically-produced
	action item summaries and give feedback on their accuracy. In this
	paper, we investigate methods of using this kind of feedback as implicit
	supervision, in order to bypass the costly annotation process and
	enable machine learning through use. We investigate, through the
	transformation of human annotations into hypothetical idealized user
	interactions, the relative utility of various modes of user interaction
	as well as various techniques for automatically producing training
	instances from interaction. We show that performance improvements
	are possible from interaction alone, even with interfaces that present
	very low cognitive load to users.},
  LOCATION = {Brno, Czech Republic},
  PDF = {PurverNiekraszEtAl07_Automatic.pdf}
}

@INPROCEEDINGS{PurverNiekraszEtAl05_Ontology-based,
  AUTHOR = {Matthew Purver and John Niekrasz and Stanley Peters},
  TITLE = {Ontology-based multi-party meeting understanding},
  BOOKTITLE = {{Proceedings of the 2005 CHI Workshop on The Virtuality Continuum
	Revisited}},
  LOCATION = {Portland, Oregon},
  YEAR = {2005},
  ABSTRACT = {This paper describes current and planned research efforts towards
	developing multimodal discourse understanding for an automated personal
	office assistant. The research is undertaken as part of a project
	called The Cognitive Agent that Learns and Organizes (CALO) (see
	http://www.ai.sri.com/project/CALO). The CALO assistant is intended
	to aid users both personally and as a group in performing office-related
	tasks such as coordinating schedules, providing relevant information
	for completing tasks, making a record of meetings, and assisting
	in fulfilling decisions.},
  MONTH = APR,
  PDF = {PurverNiekraszEtAl05_Ontology-based.pdf}
}

@INPROCEEDINGS{VossEhlenEtAl07_CALO,
  AUTHOR = {Lynn Voss and Patrick Ehlen and {The DARPA CALO Meeting Assistant
	Project Team}},
  TITLE = {{The CALO Meeting Assistant}},
  BOOKTITLE = {{Proceedings of the 2007 Annual Conference of the North American
	Chapter of the Association for Computational Linguistics (NAACL-HLT)}},
  LOCATION = {Rochester, New York},
  YEAR = {2007},
  NOTE = {Demo.},
  ABSTRACT = {The CALO Meeting Assistant is an integrated, multimodal meeting assistant
	technology that captures speech, gestures, and multimodal data from
	multiparty interactions during meetings, and uses machine learning
	and robust discourse processing to provide a rich, browsable record
	of a meeting.},
  MONTH = APR,
  PDF = {VossEhlenEtAl07_CALO.pdf}
}
