@online{2024_microsoft,
title = {Seeing {{AI}} - {{An}} App for Visually Impaired People That Narrates the World around You},
author = {{Microsoft}},
date = {2024},
url = {https://www.microsoft.com/en-us/garage/wall-of-fame/seeing-ai/},
urldate = {2024-04-22},
langid = {american},
organization = {Microsoft Garage},
file = {C:\Users\jseo1005\Zotero\storage\IXGSV3QI\seeing-ai.html}
}
@online{accessibilityatpennstateChartsAccessibility2014,
title = {Charts \& {{Accessibility}}},
author = {{Accessibility at Penn State}},
date = {2014},
url = {https://accessibility.psu.edu/images/charts/},
urldate = {2024-04-21},
abstract = {Page Content Synopsis Text Description Repeat Data in Tables Color in Charts Charts, graphs and maps use visuals to convey complex images to users. But since they are images, these media provide se…},
langid = {american},
file = {C:\Users\jseo1005\Zotero\storage\T2Z3QREU\charts.html}
}
@online{AccessibleCOVID19Data,
title = {Accessible {{COVID-19}} Data},
url = {https://covid.ski.org/?fbclid=IwAR0kqAZIeQkyelOjMpRA_NrKVM8gKYGEVSZeFgT0vSe61f8aLE0z4oB8DzI},
urldate = {2022-08-21},
file = {C:\Users\jseo1005\Zotero\storage\D2ILW7QE\covid.ski.org.html}
}
@online{AccessibleGraphs,
title = {Accessible {{Graphs}}},
url = {https://accessiblegraphs.org/},
urldate = {2023-09-11},
abstract = {Helping blind people see graphs using sound and touch},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\DST4J3U2\accessiblegraphs.org.html}
}
@software{AccessibleGraphsProject2023,
title = {The {{Accessible Graphs}} Project},
date = {2023-01-17T09:12:00Z},
origdate = {2019-10-10T21:52:42Z},
url = {https://github.com/hasadna/accessible-graphs},
urldate = {2023-09-11},
abstract = {The Accessible Graphs project},
organization = {The Public Knowledge Workshop}
}
@article{ACMDSeminarFully2017,
title = {{{ACMD Seminar}}: {{Towards Fully Accessible Data Visualisation}}},
shorttitle = {{{ACMD Seminar}}},
date = {2017-06-06T09:43-04:00},
journaltitle = {NIST},
url = {https://www.nist.gov/itl/math/acmd-seminar-towards-fully-accessible-data-visualisation},
urldate = {2022-08-21},
abstract = {Volker Sorge, School of Computer Science, University of Birmingham, UK},
langid = {english},
annotation = {Last Modified: 2019-11-15T19:42-05:00},
file = {C:\Users\jseo1005\Zotero\storage\8ZZW8M5Y\acmd-seminar-towards-fully-accessible-data-visualisation.html}
}
@online{airaWeReAira2024,
title = {We’re {{Aira}}, a {{Visual Interpreting Service}}.},
author = {{Aira}},
date = {2024},
url = {https://airaio.kinsta.cloud/},
urldate = {2024-04-22},
abstract = {Aira is live, on-demand visual interpreting, a productivity tool that connects you to actual humans who describe your visual surroundings using your smartphone.},
langid = {american},
file = {C:\Users\jseo1005\Zotero\storage\W63UZR8X\aira.io.html}
}
@article{alamEnablingAccessibleCharts2022,
title = {Enabling {{Accessible Charts Through Interactive Natural Language Interface}} for {{People}} with {{Visual Impairments}}},
author = {Alam, Md Zubair Ibne},
date = {2022-11-18},
url = {http://hdl.handle.net/10315/40987},
urldate = {2024-03-25},
abstract = {Web-based data visualizations have become very popular for exploring data and communicating insights. Newspapers, journals, and reports regularly publish visualizations to tell compelling stories with data. Unfortunately, most visualizations are inaccessible to readers with visual impairments. For many charts on the web, there are no accompanying alternative (alt) texts, and even if such texts exist they do not adequately describe important insights from charts. To address the problem, we first interviewed 15 blind users to understand their challenges and requirements for reading data visualizations. Based on the insights from these interviews, we developed SeeChart, an interactive tool that automatically deconstructs charts from web pages and then converts them to accessible visualizations for blind people by enabling them to hear the chart summary as well as to interact through data points using the keyboard. Our evaluation with 14 blind participants suggests the efficacy of SeeChart in understanding key insights from charts and fulfilling their information needs while reducing their required time and cognitive burden.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\BJ87T5GI\Alam - 2022 - Enabling Accessible Charts Through Interactive Nat.pdf}
}
@online{alamSeeChartEnablingAccessible2023,
title = {{{SeeChart}}: {{Enabling Accessible Visualizations Through Interactive Natural Language Interface For People}} with {{Visual Impairments}}},
shorttitle = {{{SeeChart}}},
author = {Alam, Md Zubair Ibne and Islam, Shehnaz and Hoque, Enamul},
date = {2023-02-15},
eprint = {2302.07742},
eprinttype = {arxiv},
eprintclass = {cs},
doi = {10.1145/3581641.3584099},
url = {http://arxiv.org/abs/2302.07742},
urldate = {2023-02-16},
abstract = {Web-based data visualizations have become very popular for exploring data and communicating insights. Newspapers, journals, and reports regularly publish visualizations to tell compelling stories with data. Unfortunately, most visualizations are inaccessible to readers with visual impairments. For many charts on the web, there are no accompanying alternative (alt) texts, and even if such texts exist they do not adequately describe important insights from charts. To address the problem, we first interviewed 15 blind users to understand their challenges and requirements for reading data visualizations. Based on the insights from these interviews, we developed SeeChart, an interactive tool that automatically deconstructs charts from web pages and then converts them to accessible visualizations for blind people by enabling them to hear the chart summary as well as to interact through data points using the keyboard. Our evaluation with 14 blind participants suggests the efficacy of SeeChart in understanding key insights from charts and fulfilling their information needs while reducing their required time and cognitive burden.},
pubstate = {preprint},
keywords = {Computer Science - Human-Computer Interaction},
file = {C:\Users\jseo1005\Zotero\storage\6M9NTXPQ\Alam et al. - 2023 - SeeChart Enabling Accessible Visualizations Throu.pdf}
}
@online{americanprintinghousefortheblindAnnualReports2021,
title = {Annual {{Reports}}},
author = {{American Printing House for the Blind}},
date = {2021},
url = {https://www.aph.org/app/uploads/2022/04/annual-report-fy2021.pdf},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\UU2U8PQE\Limitless Possibility.pdf}
}
@online{anthropicIntroducingNextGeneration2024,
title = {Introducing the next Generation of {{Claude}}},
author = {{Anthropic}},
date = {2024},
url = {https://www.anthropic.com/news/claude-3-family},
urldate = {2024-04-20},
abstract = {Today, we're announcing the Claude 3 model family, which sets new industry benchmarks across a wide range of cognitive tasks. The family includes three state-of-the-art models in ascending order of capability: Claude 3 Haiku, Claude 3 Sonnet, and Claude 3 Opus.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\XGZNJTIL\claude-3-family.html}
}
@online{AudioGraphsApple,
title = {Audio {{Graphs}} | {{Apple Developer Documentation}}},
url = {https://developer.apple.com/documentation/accessibility/audio_graphs},
urldate = {2022-08-21},
file = {C:\Users\jseo1005\Zotero\storage\3Y4533ZN\audio_graphs.html}
}
@inproceedings{aultEvaluationLongDescriptions2002,
title = {Evaluation of {{Long Descriptions}} of {{Statistical Graphics}} for {{Blind}} and {{Low Vision Web Users}}},
booktitle = {Computers {{Helping People}} with {{Special Needs}}},
author = {Ault, H. K. and Deloge, J. W. and Lapp, R. W. and Morgan, M. J. and Barnett, J. R.},
editor = {Miesenberger, Klaus and Klaus, Joachim and Zagler, Wolfgang},
date = {2002},
series = {Lecture {{Notes}} in {{Computer Science}}},
pages = {517--526},
publisher = {Springer},
location = {Berlin, Heidelberg},
doi = {10.1007/3-540-45491-8_99},
abstract = {The objective of this research was to maximize not only accessibility but also user comprehension of web pages, particularly those containing tabular and graphical information. Based on literature and interviews with blind and low vision students and their teachers, the research team developed guidelines for web developers to describe charts and graphs commonly used in statistical applications. A usability study was then performed to evaluate the effectiveness of these new guidelines. Accessibility and comprehension for both blind and low vision users were increased when web pages were developed following the new guidelines.},
isbn = {978-3-540-45491-5},
langid = {english},
keywords = {Accessibility Guideline,Blind User,Lesson Plan,Screen Reader,Worcester Polytechnic Institute},
file = {C:\Users\jseo1005\Zotero\storage\ZPH9N8YJ\3-540-45491-8_99.pdf}
}
@article{bachChallengesOpportunitiesData2023,
title = {Challenges and {{Opportunities}} in {{Data Visualization Education}}: {{A Call}} to {{Action}}},
shorttitle = {Challenges and {{Opportunities}} in {{Data Visualization Education}}},
author = {Bach, Benjamin and Keck, Mandy and Rajabiyazdi, Fateme and Losev, Tatiana and Meirelles, Isabel and Dykes, Jason and Laramee, Robert S. and AlKadi, Mashael and Stoiber, Christina and Huron, Samuel and Perin, Charles and Morais, Luiz and Aigner, Wolfgang and Kosminsky, Doris and Boucher, Magdalena and Knudsen, Søren and Manataki, Areti and Aerts, Jan and Hinrichs, Uta and Roberts, Jonathan C. and Carpendale, Sheelagh},
date = {2023},
journaltitle = {IEEE Transactions on Visualization and Computer Graphics},
shortjournal = {IEEE Trans. Visual. Comput. Graphics},
pages = {1--12},
issn = {1077-2626, 1941-0506, 2160-9306},
doi = {10.1109/TVCG.2023.3327378},
url = {https://ieeexplore.ieee.org/document/10310184/},
urldate = {2024-01-15},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\VE6QQJDA\Bach et al. - 2023 - Challenges and Opportunities in Data Visualization Education A Call to Action.pdf}
}
@article{bacieroTouchScopePassiveHapticDevice,
title = {{{TouchScope}}: {{A Passive-Haptic Device}} to {{Investigate Tactile Perception Using}} a {{Refreshable Braille Display}}},
shorttitle = {{{TouchScope}}},
author = {Baciero, Ana and Perea, Manuel and Duñabeitia, Jon Andoni and Gómez, Pablo},
journaltitle = {Journal of Cognition},
shortjournal = {J Cogn},
volume = {6},
number = {1},
eprint = {37152833},
eprinttype = {pmid},
pages = {21},
issn = {2514-4820},
doi = {10.5334/joc.271},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10162198/},
urldate = {2023-11-14},
abstract = {The sense of touch is underrepresented in cognitive psychology research. One of the reasons is that controlling the timing of stimulus presentation, which is a hallmark of cognitive research, is significantly more difficult for tactile stimuli than visual or auditory stimuli. In the present work, we present a system to display tactile stimuli (braille cells) and collect response time with the capability for static and dynamic (passive haptic) stimuli presentation that will contribute to the development of tactile research. While the system requires some construction, it can be put together with commercially available materials. Here, we present the step-by-step instructions for constructing the tool, the code used to control it, and some basic experiments to validate it. The data from the experiments show that the device can be used for a variety of tactile perception experiments.},
pmcid = {PMC10162198},
file = {C:\Users\jseo1005\Zotero\storage\SQ7V4KRS\Baciero et al. - TouchScope A Passive-Haptic Device to Investigate Tactile Perception Using a Refreshable Braille Di.pdf}
}
@inproceedings{bakerEducationalExperiencesBlind2019,
title = {Educational {{Experiences}} of {{Blind Programmers}}},
booktitle = {Proceedings of the 50th {{ACM Technical Symposium}} on {{Computer Science Education}}},
author = {Baker, Catherine M. and Bennett, Cynthia L. and Ladner, Richard E.},
date = {2019-02-22},
pages = {759--765},
publisher = {ACM},
location = {Minneapolis MN USA},
doi = {10.1145/3287324.3287410},
url = {https://dl.acm.org/doi/10.1145/3287324.3287410},
urldate = {2024-04-04},
eventtitle = {{{SIGCSE}} '19: {{The}} 50th {{ACM Technical Symposium}} on {{Computer Science Education}}},
isbn = {978-1-4503-5890-3},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\L6DWQQVW\Baker et al. - 2019 - Educational Experiences of Blind Programmers.pdf}
}
@thesis{bakerUnderstandingImprovingBlind2017,
type = {Thesis},
title = {Understanding and {{Improving Blind Students}}’ {{Access}} to {{Visual Information}} in {{Computer Science Education}}},
author = {Baker, Catherine Marie},
date = {2017-08},
url = {https://digital.lib.washington.edu:443/researchworks/handle/1773/40540},
urldate = {2024-04-14},
abstract = {Teaching people with disabilities tech skills empowers them to create solutions to problems they encounter and prepares them for careers. However, computer science is typically taught in a highly visual manner which can present barriers for people who are blind. The goal of this dissertation is to understand and decrease those barriers. The first projects I present looked at the barriers that blind students face. I first present the results of my survey and interviews with blind students with degrees in computer science or related fields. This work highlighted the many barriers that these blind students faced. I then followed-up on one of the barriers mentioned, access to technology, by doing a preliminary accessibility evaluation of six popular integrated development environments (IDEs) and code editors. I found that half were unusable and all had some inaccessible portions. As access to visual information is a barrier in computer science education, I present three projects I have done to decrease this barrier. The first project is Tactile Graphics with a Voice (TGV). This project investigated an alternative to Braille labels for those who do not know Braille and showed that TGV was a potential alternative. The next project was StructJumper, which created a modified abstract syntax tree that blind programmers could use to navigate through code with their screen reader. The evaluation showed that users could navigate more quickly and easily determine the relationships of lines of code when they were using StructJumper compared to when they were not. Finally, I present a tool for dynamic graphs (the type with nodes and edges) which had two different modes for handling focus changes when moving between graphs. I found that the modes support different approaches for exploring the graphs and therefore preferences are mixed based on the user’s preferred approach. However, both modes had similar accuracy in completing the tasks. These projects are a first step towards the goal of making computer science education more accessible to blind students. By identifying the barriers that exist and creating solutions to overcome them, we can support increasing the number of blind students in computer science.},
langid = {american},
annotation = {Accepted: 2017-10-26T20:48:55Z},
file = {C:\Users\jseo1005\Zotero\storage\A4MQ5YMJ\Baker - 2017 - Understanding and Improving Blind Students’ Access to Visual Information in Computer Science Educati.pdf}
}
@article{baldwinTangibleDesktopMultimodal2017,
title = {The {{Tangible Desktop}}: {{A Multimodal Approach}} to {{Nonvisual Computing}}},
shorttitle = {The {{Tangible Desktop}}},
author = {Baldwin, Mark S. and Hayes, Gillian R. and Haimson, Oliver L. and Mankoff, Jennifer and Hudson, Scott E.},
date = {2017-08-11},
journaltitle = {ACM Transactions on Accessible Computing},
shortjournal = {ACM Trans. Access. Comput.},
volume = {10},
number = {3},
pages = {9:1--9:28},
issn = {1936-7228},
doi = {10.1145/3075222},
url = {https://dl.acm.org/doi/10.1145/3075222},
urldate = {2023-11-28},
abstract = {Audio-only interfaces, facilitated through text-to-speech screen reading software, have been the primary mode of computer interaction for blind and low-vision computer users for more than four decades. During this time, the advances that have made visual interfaces faster and easier to use, from direct manipulation to skeuomorphic design, have not been paralleled in nonvisual computing environments. The screen reader–dependent community is left with no alternatives to engage with our rapidly advancing technological infrastructure. In this article, we describe our efforts to understand the problems that exist with audio-only interfaces. Based on observing screen reader use for 4 months at a computer training school for blind and low-vision adults, we identify three problem areas within audio-only interfaces: ephemerality, linear interaction, and unidirectional communication. We then evaluated a multimodal approach to computer interaction called the Tangible Desktop that addresses these problems by moving semantic information from the auditory to the tactile channel. Our evaluation demonstrated that among novice screen reader users, Tangible Desktop improved task completion times by an average of 6 minutes when compared to traditional audio-only computer systems.},
keywords = {Accessibility,assistive technology,blindness,haptic,hardware,tangible,vibrotactile feedback,visual impairment},
file = {C:\Users\jseo1005\Zotero\storage\DEU8ER2J\Baldwin et al. - 2017 - The Tangible Desktop A Multimodal Approach to Nonvisual Computing.pdf}
}
@online{bangMultitaskMultilingualMultimodal2023,
title = {A {{Multitask}}, {{Multilingual}}, {{Multimodal Evaluation}} of {{ChatGPT}} on {{Reasoning}}, {{Hallucination}}, and {{Interactivity}}},
author = {Bang, Yejin and Cahyawijaya, Samuel and Lee, Nayeon and Dai, Wenliang and Su, Dan and Wilie, Bryan and Lovenia, Holy and Ji, Ziwei and Yu, Tiezheng and Chung, Willy and Do, Quyet V. and Xu, Yan and Fung, Pascale},
date = {2023-11-28},
eprint = {2302.04023},
eprinttype = {arxiv},
eprintclass = {cs},
doi = {10.48550/arXiv.2302.04023},
url = {http://arxiv.org/abs/2302.04023},
urldate = {2024-04-20},
abstract = {This paper proposes a framework for quantitatively evaluating interactive LLMs such as ChatGPT using publicly available data sets. We carry out an extensive technical evaluation of ChatGPT using 23 data sets covering 8 different common NLP application tasks. We evaluate the multitask, multilingual and multi-modal aspects of ChatGPT based on these data sets and a newly designed multimodal dataset. We find that ChatGPT outperforms LLMs with zero-shot learning on most tasks and even outperforms fine-tuned models on some tasks. We find that it is better at understanding non-Latin script languages than generating them. It is able to generate multimodal content from textual prompts, via an intermediate code generation step. Moreover, we find that ChatGPT is 63.41\% accurate on average in 10 different reasoning categories under logical reasoning, non-textual reasoning, and commonsense reasoning, hence making it an unreliable reasoner. It is, for example, better at deductive than inductive reasoning. ChatGPT suffers from hallucination problems like other LLMs and it generates more extrinsic hallucinations from its parametric memory as it does not have access to an external knowledge base. Finally, the interactive feature of ChatGPT enables human collaboration with the underlying LLM to improve its performance, i.e, 8\% ROUGE-1 on summarization and 2\% ChrF++ on machine translation, in a multi-turn "prompt engineering" fashion. We also release codebase for evaluation set extraction.},
pubstate = {preprint},
keywords = {Computer Science - Artificial Intelligence,Computer Science - Computation and Language},
file = {C:\Users\jseo1005\Zotero\storage\NAWSTLMS\2302.html}
}
@inproceedings{banovicUncoveringInformationNeeds2013,
title = {Uncovering Information Needs for Independent Spatial Learning for Users Who Are Visually Impaired},
booktitle = {Proceedings of the 15th {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
author = {Banovic, Nikola and Franz, Rachel L. and Truong, Khai N. and Mankoff, Jennifer and Dey, Anind K.},
date = {2013-10-21},
pages = {1--8},
publisher = {ACM},
location = {Bellevue Washington},
doi = {10.1145/2513383.2513445},
url = {https://dl.acm.org/doi/10.1145/2513383.2513445},
urldate = {2022-08-21},
eventtitle = {{{ASSETS}} '13: {{The}} 15th {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
isbn = {978-1-4503-2405-2},
langid = {english}
}
@inproceedings{belleAltTexifyPipelineGenerate2022b,
title = {Alt-{{Texify}}: {{A Pipeline}} to {{Generate Alt-text}} from {{SVG Visualizations}}},
shorttitle = {Alt-{{Texify}}},
booktitle = {Proceedings of the 17th {{International Conference}} on {{Evaluation}} of {{Novel Approaches}} to {{Software Engineering}}},
author = {Belle, Aspen and Goh, Vanessa and Kumar, Akshay and Pranjatno, Richard and Yip, Pui and Wickramaratne, Umayangani and Obie, Humphrey},
date = {2022},
pages = {275--281},
publisher = {{SCITEPRESS - Science and Technology Publications}},
location = {Online Streaming},
doi = {10.5220/0010994600003176},
url = {https://www.scitepress.org/DigitalLibrary/Link.aspx?doi=10.5220/0010994600003176},
urldate = {2024-02-08},
eventtitle = {17th {{International Conference}} on {{Evaluation}} of {{Novel Approaches}} to {{Software Engineering}}},
isbn = {978-989-758-568-5},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\G6ZTBCIH\Belle et al. - 2022 - Alt-Texify A Pipeline to Generate Alt-text from S.pdf}
}
@online{bemyeyesAnnouncingBeMy2023,
title = {Announcing ‘{{Be My AI}},’ {{Soon Available}} for {{Hundreds}} of {{Thousands}} of {{Be My Eyes Users}}},
author = {{Be My Eyes}},
date = {2023},
url = {https://www.bemyeyes.com/blog/announcing-be-my-ai},
urldate = {2024-04-20},
abstract = {Be My Eyes’ AI assistant, powered by GPT-4, is rolling out to hundreds of thousands of iOS and Android users over the next several weeks.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\ZUAL4S9B\announcing-be-my-ai.html}
}
@online{bemyeyesBeMyEyes2024,
title = {Be {{My Eyes}} - {{See}} the World Together},
author = {{Be My Eyes}},
date = {2024},
url = {https://www.bemyeyes.com/},
urldate = {2024-04-22},
abstract = {Whether you need a pair of sharp eyes or have some sight to lend, Be My Eyes is a simple, free tool to support people see the world better, together.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\4QEJR7EE\www.bemyeyes.com.html}
}
@online{bendelHowCanGenerative2024,
title = {How {{Can Generative AI Enhance}} the {{Well-being}} of {{Blind}}?},
author = {Bendel, Oliver},
date = {2024-02-02},
eprint = {2402.07919},
eprinttype = {arxiv},
eprintclass = {cs},
doi = {10.48550/arXiv.2402.07919},
url = {http://arxiv.org/abs/2402.07919},
urldate = {2024-04-20},
abstract = {This paper examines the question of how generative AI can improve the well-being of blind or visually impaired people. It refers to a current example, the Be My Eyes app, in which the Be My AI feature was integrated in 2023, which is based on GPT-4 from OpenAI. The author's tests are described and evaluated. There is also an ethical and social discussion. The power of the tool, which can analyze still images in an amazing way, is demonstrated. Those affected gain a new independence and a new perception of their environment. At the same time, they are dependent on the world view and morality of the provider or developer, who prescribe or deny them certain descriptions. An outlook makes it clear that the analysis of moving images will mean a further leap forward. It is fair to say that generative AI can fundamentally improve the well-being of blind and visually impaired people and will change it in various ways.},
pubstate = {preprint},
keywords = {Computer Science - Artificial Intelligence,Computer Science - Human-Computer Interaction,I.2,K.3},
file = {C:\Users\jseo1005\Zotero\storage\IA8DSCLS\2402.html}
}
@inproceedings{bighamVizWizNearlyRealtime2010,
title = {{{VizWiz}}: Nearly Real-Time Answers to Visual Questions},
shorttitle = {{{VizWiz}}},
booktitle = {Proceedings of the 23rd Annual {{ACM}} Symposium on {{User}} Interface Software and Technology},
author = {Bigham, Jeffrey P. and Jayant, Chandrika and Ji, Hanjie and Little, Greg and Miller, Andrew and Miller, Robert C. and Miller, Robin and Tatarowicz, Aubrey and White, Brandyn and White, Samual and Yeh, Tom},
date = {2010-10-03},
series = {{{UIST}} '10},
pages = {333--342},
publisher = {Association for Computing Machinery},
location = {New York, NY, USA},
doi = {10.1145/1866029.1866080},
url = {https://doi.org/10.1145/1866029.1866080},
urldate = {2024-04-20},
abstract = {The lack of access to visual information like text labels, icons, and colors can cause frustration and decrease independence for blind people. Current access technology uses automatic approaches to address some problems in this space, but the technology is error-prone, limited in scope, and quite expensive. In this paper, we introduce VizWiz, a talking application for mobile phones that offers a new alternative to answering visual questions in nearly real-time - asking multiple people on the web. To support answering questions quickly, we introduce a general approach for intelligently recruiting human workers in advance called quikTurkit so that workers are available when new questions arrive. A field deployment with 11 blind participants illustrates that blind people can effectively use VizWiz to cheaply answer questions in their everyday lives, highlighting issues that automatic approaches will need to address to be useful. Finally, we illustrate the potential of using VizWiz as part of the participatory design of advanced tools by using it to build and evaluate VizWiz::LocateIt, an interactive mobile tool that helps blind people solve general visual search problems.},
isbn = {978-1-4503-0271-5},
keywords = {blind users,non-visual interfaces,real-time human computation}
}
@incollection{blancoOlliExtensibleVisualization2022,
title = {Olli: {{An Extensible Visualization Library}} for {{Screen Reader Accessibility}}},
shorttitle = {Olli},
booktitle = {{{IEEE VIS Posters}}},
author = {Blanco, Matt and Zong, Jonathan and Satyanarayan, Arvind},
date = {2022-10-19T00:00:00+00:00},
url = {http://vis.csail.mit.edu/pubs/olli/},
urldate = {2023-11-06},
abstract = {Though recent research has explored the design of rich screen reader visualization experiences, accessible visualizations for blind and low vision users remain rare on the web. While some visualization toolkits offer accessible solutions, toolkit-specific implementations can present idiosyncratic user experiences that limit learnability. We present Olli, an open source library that converts visualizations into a keyboard-navigable structure accessible to screen readers. Using an extensible adapter design pattern, Olli is agnostic to the specific toolkit used to author the visualization. Olli renders a chart as an accessible tree view following the HTML Accessible Rich Internet Applications (ARIA) standard. Olli helps visualization developers easily create accessible visualizations across visualization toolkits.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\D5X9Y53T\Blanco et al. - 2022 - Olli An Extensible Visualization Library for Scre.pdf}
}
@article{blattmannStableVideoDiffusion,
title = {Stable {{Video Diffusion}}: {{Scaling Latent Video Diffusion Models}} to {{Large Datasets}}},
author = {Blattmann, Andreas and Dockhorn, Tim and Kulal, Sumith and Mendelevitch, Daniel and Kilian, Maciej and Lorenz, Dominik and Levi, Yam and English, Zion and Voleti, Vikram and Letts, Adam and Jampani, Varun and Rombach, Robin and Ai, Stability},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\GH3D9JFN\Blattmann et al. - Stable Video Diffusion Scaling Latent Video Diffu.pdf}
}
@inproceedings{boucherEducationalDataComics2023,
title = {Educational {{Data Comics}}: {{What}} Can {{Comics}} Do for {{Education}} in {{Visualization}}?},
shorttitle = {Educational {{Data Comics}}},
booktitle = {2023 {{IEEE VIS Workshop}} on {{Visualization Education}}, {{Literacy}}, and {{Activities}} ({{EduVis}})},
author = {Boucher, Magdalena and Bach, Benjamin and Stoiber, Christina and Wang, Zezhong and Aigner, Wolfgang},
date = {2023-10-22},
pages = {34--40},
publisher = {IEEE},
location = {Melbourne, Australia},
doi = {10.1109/EduVis60792.2023.00012},
url = {https://ieeexplore.ieee.org/document/10344064/},
urldate = {2024-01-15},
abstract = {This paper discusses the potential of comics for explaining concepts with and around data visualization. With the increasing spread of visualizations and the democratization of access to visualization tools, we see a growing need for easily approachable resources for learning visualization techniques, applications, design processes, etc. Comics are a promising medium for such explanation as they concisely combine graphical and textual content in a sequential manner and they provide fast visual access to specific parts of the explanations. Based on a first literature review and our extensive experience with the subject, we survey works at the respective intersections of comics, visualization and education: data comics, educational comics, and visualization education. We report on five potentials of comics to create and share educational material, to engage wide and potentially diverse audiences, and to support educational activities. For each potential we list, we describe open questions for future research. Our discussion aims to inform both the application of comics by educators and their extension and study by researchers.},
eventtitle = {2023 {{IEEE VIS Workshop}} on {{Visualization Education}}, {{Literacy}}, and {{Activities}} ({{EduVis}})},
isbn = {9798350330304},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\NQPFU5ZU\Boucher et al. - 2023 - Educational Data Comics What can Comics do for Education in Visualization.pdf}
}
@article{bovairAcquisitionPerformanceTextEditing1990,
title = {The {{Acquisition}} and {{Performance}} of {{Text-Editing Skill}}: {{A Cognitive Complexity Analysis}}},
shorttitle = {The {{Acquisition}} and {{Performance}} of {{Text-Editing Skill}}},
author = {Bovair, Susan and Kieras, David E. and Polson, Peter G.},
date = {1990-03-01},
journaltitle = {Human–Computer Interaction},
volume = {5},
number = {1},
pages = {1--48},
publisher = {Taylor \& Francis},
issn = {0737-0024},
doi = {10.1207/s15327051hci0501_1},
url = {https://www.tandfonline.com/doi/abs/10.1207/s15327051hci0501_1},
urldate = {2023-09-07},
abstract = {Kieras and Polson (1985) proposed an approach for making quantitative predictions on ease of learning and ease of use of a system, based on a production system version of the goals, operators, methods, and selection rules (GOMS) model of Card, Moran, and Newel1 (1983). This article describes the principles for constructing such models and obtaining predictions of learning and execution time. A production rule model for a simulated text editor is described in detail and is compared to experimental data on learning and performance. The model accounted well for both learning and execution time and for the details of the increase in speed with practice. The relationship between the performance model and the Keystroke-Level Model of Card et al. (1983) is discussed. The results provide strong support for the original proposal that production rule models can make quantitative predictions for both ease of learning and ease of use.},
file = {C:\Users\jseo1005\Zotero\storage\FUDR4HX3\Bovair et al. - 1990 - The Acquisition and Performance of Text-Editing Skill A Cognitive Complexity Analysis.pdf}
}
@online{BraillePatterns,
title = {Braille {{Patterns}}},
url = {https://unicode.org/charts/nameslist/c_2800.html},
urldate = {2023-01-12},
file = {C:\Users\jseo1005\Zotero\storage\EAIRJ2XZ\c_2800.html}
}
@article{brewsterVisualizationToolsBlind2002,
title = {Visualization Tools for Blind People Using Multiple Modalities},
author = {Brewster, S.},
date = {2002-01-01},
journaltitle = {Disability and Rehabilitation},
volume = {24},
number = {11-12},
eprint = {12182801},
eprinttype = {pmid},
pages = {613--621},
publisher = {Taylor \& Francis},
issn = {0963-8288},
doi = {10.1080/09638280110111388},
url = {https://doi.org/10.1080/09638280110111388},
urldate = {2023-11-28},
abstract = {Purpose : There are many problems when blind people need to access visualizations such as graphs and tables. Current speech or raised-paper technology does not provide a good solution. Our approach is to use non-speech sounds and haptics to allow a richer and more flexible form of access to graphs and tables. Method : Two experiments are reported that test out designs for both sound and haptic graph solutions. In the audio case a standard speech interface is compared to one with non-speech sounds added. The haptic experiment compares two different graph designs to see which was the most effective. Results : Our results for the sound graphs showed a significant decrease in subjective workload, reduced time taken to complete tasks and reduced errors as compared to a standard speech interface. For the haptic graphs reductions in workload and some of the problems that can occur when using such graphs are shown. Conclusions : Using non-speech sound and haptics can significantly improve interaction with visualizations such as graphs. This multimodal approach makes the most of the senses our users have to provide access to information in more flexible ways.},
file = {C:\Users\jseo1005\Zotero\storage\65EPHUUY\Brewster - 2002 - Visualization tools for blind people using multiple modalities.pdf}
}
@article{brookeSUSRetrospective2013,
title = {{{SUS}}: A Retrospective},
shorttitle = {{{SUS}}},
author = {Brooke, John},
date = {2013-01-01},
journaltitle = {Journal of Usability Studies},
shortjournal = {Journal of Usability Studies},
volume = {8},
pages = {29--40}
}
@article{brookeSUSRetrospective2013a,
title = {{{SUS}}: A Retrospective},
shorttitle = {{{SUS}}},
author = {Brooke, John},
date = {2013-02-01},
journaltitle = {Journal of Usability Studies},
shortjournal = {J. Usability Studies},
volume = {8},
number = {2},
pages = {29--40},
abstract = {Rather more than 25 years ago, as part of a usability engineering program, I developed a questionnaire---the System Usability Scale (SUS)---that could be used to take a quick measurement of how people perceived the usability of computer systems on which they were working. This proved to be an extremely simple and reliable tool for use when doing usability evaluations, and I decided, with the blessing of engineering management at Digital Equipment Co. Ltd (DEC; where I developed SUS), that it was probably something that could be used by other organizations (the benefit for us being that if they did use it, we potentially had something we could use to compare their systems against ours). So, in 1986, I made SUS freely available to a number of colleagues, with permission to pass it on to anybody else who might find it useful, and over the next few years occasionally heard of evaluations of systems where researchers and usability engineers had used it with some success.}
}
@inproceedings{brownVizTouchAutomaticallyGenerated2012a,
title = {{{VizTouch}}: Automatically Generated Tactile Visualizations of Coordinate Spaces},
shorttitle = {{{VizTouch}}},
booktitle = {Proceedings of the {{Sixth International Conference}} on {{Tangible}}, {{Embedded}} and {{Embodied Interaction}}},
author = {Brown, Craig and Hurst, Amy},
date = {2012-02-19},
pages = {131--138},
publisher = {ACM},
location = {Kingston Ontario Canada},
doi = {10.1145/2148131.2148160},
url = {https://dl.acm.org/doi/10.1145/2148131.2148160},
urldate = {2023-09-09},
eventtitle = {{{TEI}}'12: {{Sixth International Conference}} on {{Tangible}}, {{Embedded}}, and {{Embodied Interaction}}},
isbn = {978-1-4503-1174-8},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\WEMJHXZ2\Brown and Hurst - 2012 - VizTouch automatically generated tactile visualiz.pdf}
}
@online{caiLeveragingLargeLanguage2023,
title = {Leveraging {{Large Language Models}} for {{Scalable Vector Graphics-Driven Image Understanding}}},
author = {Cai, Mu and Huang, Zeyi and Li, Yuheng and Wang, Haohan and Lee, Yong Jae},
date = {2023-06-09},
eprint = {2306.06094},
eprinttype = {arxiv},
eprintclass = {cs},
url = {http://arxiv.org/abs/2306.06094},
urldate = {2024-02-06},
abstract = {Recently, large language models (LLMs) have made significant advancements in natural language understanding and generation. However, their potential in computer vision remains largely unexplored. In this paper, we introduce a new, exploratory approach that enables LLMs to process images using the Scalable Vector Graphics (SVG) format. By leveraging the XML-based textual descriptions of SVG representations instead of raster images, we aim to bridge the gap between the visual and textual modalities, allowing LLMs to directly understand and manipulate images without the need for parameterized visual components. Our method facilitates simple image classification, generation, and in-context learning using only LLM capabilities. We demonstrate the promise of our approach across discriminative and generative tasks, highlighting its (i) robustness against distribution shift, (ii) substantial improvements achieved by tapping into the in-context learning abilities of LLMs, and (iii) image understanding and generation capabilities with human guidance. Our code, data, and models can be found here https://github.com/mu-cai/svg-llm.},
pubstate = {preprint},
keywords = {Computer Science - Artificial Intelligence,Computer Science - Computation and Language,Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning},
file = {C\:\\Users\\jseo1005\\Zotero\\storage\\X83C2XSY\\Cai et al. - 2023 - Leveraging Large Language Models for Scalable Vect.pdf;C\:\\Users\\jseo1005\\Zotero\\storage\\IKKXHUP6\\2306.html}
}
@inproceedings{cervoneAdaptableAccessibilityFeatures2019,
title = {Adaptable {{Accessibility Features}} for {{Mathematics}} on the {{Web}}},
booktitle = {Proceedings of the 16th {{International Web}} for {{All Conference}}},
author = {Cervone, Davide and Sorge, Volker},
date = {2019-05-13},
series = {{{W4A}} '19},
pages = {1--4},
publisher = {Association for Computing Machinery},
location = {New York, NY, USA},
doi = {10.1145/3315002.3317567},
url = {https://dl.acm.org/doi/10.1145/3315002.3317567},
urldate = {2023-12-10},
abstract = {Accessibility of mathematics is still a challenging problem and providing the right level of support to a reader depends on many factors, such as their particular assistive technology needs, their level of expertise, and the subject area they are working in. We present work towards making math accessibility more adaptable to the reader's personal needs that is implemented in the MathJax library for rendering mathematics on the web. While MathJax provided accessibility support for several years, the new version 3 has both more new features and means of personalization. In particular, it provides adaptable combinations of highlighting, colorization, and magnification techniques. Both Braille and speech output can be generated, with different speech rule sets allowing readers to flexibly change presentation and adaptation for better interpretation of formulas in different subject areas, like Physics, Chemistry, and Logic.},
isbn = {978-1-4503-6716-5},
keywords = {Mathematics,MathJax,STEM Accessibility},
file = {C:\Users\jseo1005\Zotero\storage\HZSM4IP8\Cervone and Sorge - 2019 - Adaptable Accessibility Features for Mathematics o.pdf}
}
@book{charltonNothingUsUs1998,
title = {Nothing {{About Us Without Us}}: {{Disability Oppression}} and {{Empowerment}}},
shorttitle = {Nothing {{About Us Without Us}}},
author = {Charlton, James I.},
date = {1998},
eprint = {ohqff8DBt9gC},
eprinttype = {googlebooks},
publisher = {University of California Press},
abstract = {James Charlton has produced a ringing indictment of disability oppression, which, he says, is rooted in degradation, dependency, and powerlessness and is experienced in some form by five hundred million persons throughout the world who have physical, sensory, cognitive, or developmental disabilities. Nothing About Us Without Us is the first book in the literature on disability to provide a theoretical overview of disability oppression that shows its similarities to, and differences from, racism, sexism, and colonialism. Charlton's analysis is illuminated by interviews he conducted over a ten-year period with disability rights activists throughout the Third World, Europe, and the United States. Charlton finds an antidote for dependency and powerlessness in the resistance to disability oppression that is emerging worldwide. His interviews contain striking stories of self-reliance and empowerment evoking the new consciousness of disability rights activists. As a latecomer among the world's liberation movements, the disability rights movement will gain visibility and momentum from Charlton's elucidation of its history and its political philosophy of self-determination, which is captured in the title of his book. Nothing About Us Without Us expresses the conviction of people with disabilities that they know what is best for them. Charlton's combination of personal involvement and theoretical awareness assures greater understanding of the disability rights movement.},
isbn = {978-0-520-22481-0},
langid = {english},
pagetotal = {222},
keywords = {Medical / Health Care Delivery}
}
@online{ChartsContentComponents,
title = {Charts - {{Content}} - {{Components}} - {{Human Interface Guidelines}} - {{Design}} - {{Apple Developer}}},
url = {https://developer.apple.com/design/human-interface-guidelines/components/content/charts},
urldate = {2022-09-27},
file = {C:\Users\jseo1005\Zotero\storage\NQVXZWEW\charts.html}
}
@online{cherukuruVisualsExaminingExperiences2022,
title = {Beyond {{Visuals}}: {{Examining}} the {{Experiences}} of {{Geoscience Professionals With Vision Disabilities}} in {{Accessing Data Visualizations}}},
shorttitle = {Beyond {{Visuals}}},
author = {Cherukuru, Nihanth W. and Bailey, David A. and Fourment, Tiffany and Hatheway, Becca and Holland, Marika M. and Rehme, Matt},
date = {2022-07-26},
eprint = {2207.13220},
eprinttype = {arxiv},
eprintclass = {cs},
doi = {10.48550/arXiv.2207.13220},
url = {http://arxiv.org/abs/2207.13220},
urldate = {2022-10-20},
abstract = {Data visualizations are ubiquitous in all disciplines and have become the primary means of analysing data and communicating insights. However, the predominant reliance on visual encoding of data continues to create accessibility barriers for people who are blind/vision impaired resulting in their under representation in Science, Technology, Engineering and Mathematics (STEM) disciplines. This research study seeks to understand the experiences of professionals who are blind/vision impaired in one such STEM discipline (geosciences) in accessing data visualizations. In-depth, semi-structured interviews with seven professionals were conducted to examine the accessibility barriers and areas for improvement to inform accessibility research pertaining to data visualizations through a socio-technical lens. A reflexive thematic analysis revealed the negative impact of visualizations in influencing their career path, lack of data exploration tools for research, barriers in accessing works of peers and mismatched pace of visualization and accessibility research. The article also includes recommendations from the participants to address some of these accessibility barriers.},
pubstate = {preprint},
keywords = {Computer Science - Computers and Society,Computer Science - Graphics,Computer Science - Human-Computer Interaction},
file = {C:\Users\jseo1005\Zotero\storage\B2F2B2R2\Cherukuru et al. - 2022 - Beyond Visuals Examining the Experiences of Geos.pdf}
}
@inproceedings{choiTactileDisplayBraille2004,
title = {Tactile Display as a {{Braille}} Display for the Visually Disabled},
booktitle = {2004 {{IEEE}}/{{RSJ International Conference}} on {{Intelligent Robots}} and {{Systems}} ({{IROS}}) ({{IEEE Cat}}. {{No}}.{{04CH37566}})},
author = {Choi, H.R. and Lee, S.W. and Jung, K.M. and Koo, J.C. and Lee, S.I. and Choi, H.G. and Jeon, J.W. and Nam, J.D.},
date = {2004-09},
volume = {2},
pages = {1985--1990},
doi = {10.1109/IROS.2004.1389689},
abstract = {Tactile sensation is one of the most important sensory functions along with the auditory sensation for the visually impaired because it replaces the visual sensation of the persons with sight. In this paper, we present a tactile display device as a dynamic Braille display that is the unique tool for exchanging information among them. The proposed tactile cell of the Braille display is based on the dielectric elastomer and it has advantageous features over the existing ones with respect to intrinsic softness, ease of fabrication, cost effectiveness and miniaturization. We introduce a new idea for actuation and describe the actuating mechanism of the Braille pin in details capable of realizing the enhanced spatial density of the tactile cells. Finally, results of psychophysical experiments are given and its effectiveness is confirmed.},
eventtitle = {2004 {{IEEE}}/{{RSJ International Conference}} on {{Intelligent Robots}} and {{Systems}} ({{IROS}}) ({{IEEE Cat}}. {{No}}.{{04CH37566}})},
keywords = {Actuators,Auditory displays,Engineering management,Fabrication,Humans,Lungs,Mechanical engineering,Pins,Psychology,Skin},
file = {C\:\\Users\\jseo1005\\Zotero\\storage\\W28ZJ4ZI\\Choi et al. - 2004 - Tactile display as a Braille display for the visua.pdf;C\:\\Users\\jseo1005\\Zotero\\storage\\QT327JMP\\1389689.html}
}
@article{choiVisualizingNonVisual2019,
title = {Visualizing for the {{Non}}‐{{Visual}}: {{Enabling}} the {{Visually Impaired}} to {{Use Visualization}}},
shorttitle = {Visualizing for the {{Non}}‐{{Visual}}},
author = {Choi, Jinho and Jung, Sanghun and Park, Deok Gun and Choo, Jaegul and Elmqvist, Niklas},
date = {2019-06},
journaltitle = {Computer Graphics Forum},
volume = {38},
number = {3},
pages = {249--260},
publisher = {Wiley-Blackwell},
issn = {01677055},
doi = {10.1111/cgf.13686},
url = {https://proxy2.library.illinois.edu/login?url=https://search.ebscohost.com/login.aspx?direct=true&db=bsu&AN=137771620&site=eds-live&scope=site},
urldate = {2023-09-04},
abstract = {The majority of visualizations on the web are still stored as raster images, making them inaccessible to visually impaired users. We propose a deep‐neural‐network‐based approach that automatically recognizes key elements in a visualization, including a visualization type, graphical elements, labels, legends, and most importantly, the original data conveyed in the visualization. We leverage such extracted information to provide visually impaired people with the reading of the extracted information. Based on interviews with visually impaired users, we built a Google Chrome extension designed to work with screen reader software to automatically decode charts on a webpage using our pipeline. We compared the performance of the back‐end algorithm with existing methods and evaluated the utility using qualitative feedback from visually impaired users.},
keywords = {CCS Concepts,Data modeling,Data visualization,Google Chrome (Computer software),Human‐centered computing → Visual analytics,People with visual disabilities,Visual analytics,Visualization,Visualization toolkits,Work design},
file = {C:\Users\jseo1005\Zotero\storage\CR795AQV\Choi et al. - 2019 - Visualizing for the Non‐Visual Enabling the Visua.pdf}
}
@article{choiVisualizingNonVisualEnabling2019,
title = {Visualizing for the {{Non-Visual}}: {{Enabling}} the {{Visually Impaired}} to {{Use Visualization}}},
shorttitle = {Visualizing for the {{Non-Visual}}},
author = {Choi, Jinho and Jung, Sanghun and Park, Deok Gun and Choo, Jaegul and Elmqvist, Niklas},
date = {2019},
journaltitle = {Computer Graphics Forum},
volume = {38},
number = {3},
pages = {249--260},
issn = {1467-8659},
doi = {10.1111/cgf.13686},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.13686},
urldate = {2022-08-22},
abstract = {The majority of visualizations on the web are still stored as raster images, making them inaccessible to visually impaired users. We propose a deep-neural-network-based approach that automatically recognizes key elements in a visualization, including a visualization type, graphical elements, labels, legends, and most importantly, the original data conveyed in the visualization. We leverage such extracted information to provide visually impaired people with the reading of the extracted information. Based on interviews with visually impaired users, we built a Google Chrome extension designed to work with screen reader software to automatically decode charts on a webpage using our pipeline. We compared the performance of the back-end algorithm with existing methods and evaluated the utility using qualitative feedback from visually impaired users.},
langid = {english},
keywords = {• Human-centered computing → Visual analytics,CCS Concepts,Visualization toolkits},
file = {C:\Users\jseo1005\Zotero\storage\ZBUN9DYE\cgf.html}
}
@article{choiVisualizingNonVisualEnabling2019a,
title = {Visualizing for the {{Non-Visual}}: {{Enabling}} the {{Visually Impaired}} to {{Use Visualization}}},
shorttitle = {Visualizing for the {{Non-Visual}}},
author = {Choi, Jinho and Jung, Sanghun and Park, Deok Gun and Choo, Jaegul and Elmqvist, Niklas},
date = {2019},
journaltitle = {Computer Graphics Forum},
volume = {38},
number = {3},
pages = {249--260},
issn = {1467-8659},
doi = {10.1111/cgf.13686},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.13686},
urldate = {2023-11-14},
abstract = {The majority of visualizations on the web are still stored as raster images, making them inaccessible to visually impaired users. We propose a deep-neural-network-based approach that automatically recognizes key elements in a visualization, including a visualization type, graphical elements, labels, legends, and most importantly, the original data conveyed in the visualization. We leverage such extracted information to provide visually impaired people with the reading of the extracted information. Based on interviews with visually impaired users, we built a Google Chrome extension designed to work with screen reader software to automatically decode charts on a webpage using our pipeline. We compared the performance of the back-end algorithm with existing methods and evaluated the utility using qualitative feedback from visually impaired users.},
langid = {english},
keywords = {• Human-centered computing → Visual analytics,CCS Concepts,Visualization toolkits},
file = {C:\Users\jseo1005\Zotero\storage\Z8TQENML\Choi et al. - 2019 - Visualizing for the Non-Visual Enabling the Visually Impaired to Use Visualization.pdf}
}
@article{chunduryUnderstandingSensorySubstitution2022,
title = {Towards {{Understanding Sensory Substitution}} for {{Accessible Visualization}}: {{An Interview Study}}},
shorttitle = {Towards {{Understanding Sensory Substitution}} for {{Accessible Visualization}}},
author = {Chundury, Pramod and Patnaik, Biswaksen and Reyazuddin, Yasmin and Tang, Christine and Lazar, Jonathan and Elmqvist, Niklas},
date = {2022-01},
journaltitle = {IEEE Transactions on Visualization and Computer Graphics},
volume = {28},
number = {1},
pages = {1084--1094},
issn = {1941-0506},
doi = {10.1109/TVCG.2021.3114829},
url = {https://ieeexplore.ieee.org/abstract/document/9552177},
urldate = {2023-11-28},
abstract = {For all its potential in supporting data analysis, particularly in exploratory situations, visualization also creates barriers: accessibility for blind and visually impaired individuals. Regardless of how effective a visualization is, providing equal access for blind users requires a paradigm shift for the visualization research community. To enact such a shift, it is not sufficient to treat visualization accessibility as merely another technical problem to overcome. Instead, supporting the millions of blind and visually impaired users around the world who have equally valid needs for data analysis as sighted individuals requires a respectful, equitable, and holistic approach that includes all users from the onset. In this paper, we draw on accessibility research methodologies to make inroads towards such an approach. We first identify the people who have specific insight into how blind people perceive the world: orientation and mobility (O\&M) experts, who are instructors that teach blind individuals how to navigate the physical world using non-visual senses. We interview 10 O\&M experts—all of them blind—to understand how best to use sensory substitution other than the visual sense for conveying spatial layouts. Finally, we investigate our qualitative findings using thematic analysis. While blind people in general tend to use both sound and touch to understand their surroundings, we focused on auditory affordances and how they can be used to make data visualizations accessible—using sonification and auralization. However, our experts recommended supporting a combination of senses—sound and touch—to make charts accessible as blind individuals may be more familiar with exploring tactile charts. We report results on both sound and touch affordances, and conclude by discussing implications for accessible visualization for blind individuals.},
eventtitle = {{{IEEE Transactions}} on {{Visualization}} and {{Computer Graphics}}},
file = {C\:\\Users\\jseo1005\\Zotero\\storage\\D49SRMC6\\Chundury et al. - 2022 - Towards Understanding Sensory Substitution for Accessible Visualization An Interview Study.pdf;C\:\\Users\\jseo1005\\Zotero\\storage\\JFPHYWHC\\9552177.html}
}
@inproceedings{ciuhaVisualizationConcurrentTones2010,
title = {Visualization of Concurrent Tones in Music with Colours},
booktitle = {Proceedings of the International Conference on {{Multimedia}} - {{MM}} '10},
author = {Ciuha, Peter and Klemenc, Bojan and Solina, Franc},
date = {2010},
pages = {1677},
publisher = {ACM Press},
location = {Firenze, Italy},
doi = {10.1145/1873951.1874320},
url = {http://dl.acm.org/citation.cfm?doid=1873951.1874320},
urldate = {2022-08-21},
eventtitle = {The International Conference},
isbn = {978-1-60558-933-6},
langid = {english}
}
@article{clarkDualCodingTheory1991,
title = {Dual Coding Theory and Education},
author = {Clark, James M. and Paivio, Allan},
date = {1991-09-01},
journaltitle = {Educational Psychology Review},
shortjournal = {Educ Psychol Rev},
volume = {3},
number = {3},
pages = {149--210},
issn = {1573-336X},
doi = {10.1007/BF01320076},
url = {https://doi.org/10.1007/BF01320076},
urldate = {2023-01-17},
abstract = {Dual coding theory (DCT) explains human behavior and experience in terms of dynamic associative processes that operate on a rich network of modality-specific verbal and nonverbal (or imagery) representations. We first describe the underlying premises of the theory and then show how the basic DCT mechanisms can be used to model diverse educational phenomena. The research demonstrates that concreteness, imagery, and verbal associative processes play major roles in various educational domains: the representation and comprehension of knowledge, learning and memory of school material, effective instruction, individual differences, achievement motivation and test anxiety, and the learning of motor skills. DCT also has important implications for the science and practice of educational psychology — specifically, for educational research and teacher education. We show not only that DCT provides a unified explanation for diverse topics in education, but also that its mechanistic framework accommodates theories cast in terms of strategies and other high-level psychological processes. Although much additional research needs to be done, the concrete models that DCT offers for the behavior and experience of students, teachers, and educational psychologists further our understanding of educational phenomena and strengthen related pedagogical practices.},
langid = {english},
keywords = {imagery,unified educational theory,verbal processes},
file = {C:\Users\jseo1005\Zotero\storage\7WPYLRAX\Clark and Paivio - 1991 - Dual coding theory and education.pdf}
}
@online{cloudsightImageRecognitionAPI2024,
title = {Image {{Recognition API}} \& {{General Purpose Computer Vision}} and {{Captioning}} - {{CloudSight AI}}},
author = {{CloudSight}},
date = {2024},
url = {https://cloudsight.ai/},
urldate = {2024-04-20},
file = {C:\Users\jseo1005\Zotero\storage\2VGT662D\cloudsight.ai.html}
}
@incollection{dealmeidavasconcellosChapter18Interactive2005,
title = {Chapter 18 {{Interactive}} Mapping for People Who Are Blind or Visually Impaired},
booktitle = {Modern {{Cartography Series}}},
author = {family=Almeida (Vasconcellos), given=Regina Araujo, prefix=de, useprefix=true and Tsuji, Bruce},
editor = {Taylor, D. R. Fraser},
date = {2005-01-01},
series = {Cybercartography},
volume = {4},
pages = {411--431},
publisher = {Academic Press},
doi = {10.1016/S1363-0814(05)80021-8},
url = {https://www.sciencedirect.com/science/article/pii/S1363081405800218},
urldate = {2023-11-14},
abstract = {Static and interactive tactile maps are discussed in the context of providing survey and mobility information to people who are visually impaired or blind. The heterogeneous nature of visual impairment is examined, as is the nature of tactile perception. Technologies associated with tactile maps are reviewed and the application of interactive tactile maps for populations, in addition to those who are visually impaired, is also considered. Cybercartography has considerable potential in this respect.},
file = {C:\Users\jseo1005\Zotero\storage\57ZTZR38\S1363081405800218.html}
}
@inproceedings{degreefInterdependentVariablesRemotely2021,
title = {Interdependent {{Variables}}: {{Remotely Designing Tactile Graphics}} for an {{Accessible Workflow}}},
shorttitle = {Interdependent {{Variables}}},
booktitle = {The 23rd {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
author = {De Greef, Lilian and Moritz, Dominik and Bennett, Cynthia},
date = {2021-10-17},
pages = {1--6},
publisher = {ACM},
location = {Virtual Event USA},
doi = {10.1145/3441852.3476468},
url = {https://dl.acm.org/doi/10.1145/3441852.3476468},
urldate = {2023-09-06},
abstract = {In this experience report, we offer a case study of blind and sighted colleagues creating an accessible workflow to collaborate on a data visualization-focused project. We outline our process for making the project’s shared data representations accessible through incorporating both handmade and machine-embossed tactile graphics. We also share lessons and strategies for considering team needs and addressing contextual constraints like remote collaboration during the COVID-19 pandemic. More broadly, this report contributes to ongoing research into the ways accessibility is interdependent by arguing that access work must be a collective responsibility and properly supported with recognition, resources, and infrastructure.},
eventtitle = {{{ASSETS}} '21: {{The}} 23rd {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
isbn = {978-1-4503-8306-6},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\YGPSINZ3\De Greef et al. - 2021 - Interdependent Variables Remotely Designing Tacti.pdf}
}
@online{DesmosGraphingCalculator,
title = {Desmos | {{Graphing Calculator}}},
url = {https://www.desmos.com/calculator},
urldate = {2023-01-02},
abstract = {Explore math with our beautiful, free online graphing calculator. Graph functions, plot points, visualize algebraic equations, add sliders, animate graphs, and more.},
langid = {english},
organization = {Desmos},
file = {C:\Users\jseo1005\Zotero\storage\VTL3FFZK\calculator.html}
}
@online{Diagcess,
title = {Diagcess},
url = {https://www.npmjs.com/package/diagcess},
urldate = {2022-08-21},
abstract = {A diagram explorer for progressiveaccee.com style diagram annotations.},
langid = {english},
organization = {npm},
file = {C:\Users\jseo1005\Zotero\storage\NJM4U6JH\diagcess.html}
}
@article{dowWizardOzSupport2005,
title = {Wizard of {{Oz}} Support throughout an Iterative Design Process},
author = {Dow, S. and MacIntyre, B. and Lee, J. and Oezbek, C. and Bolter, J.D. and Gandy, M.},
date = {2005-10},
journaltitle = {IEEE Pervasive Computing},
volume = {4},
number = {4},
pages = {18--26},
issn = {1558-2590},
doi = {10.1109/MPRV.2005.93},
abstract = {The Wizard of Oz prototyping approach, widely used in human-computer interaction research, is particularly useful in exploring user interfaces for pervasive, ubiquitous, or mixed-reality systems that combine complex sensing and intelligent control logic. The vast design space for such nontraditional interfaces provides many possibilities for user interaction through one or more modalities and often requires challenging hardware and software implementations. The WOz method helps designers avoid getting locked into a particular design or working under an incorrect set of assumptions about user preferences, because it lets them explore and evaluate designs before investing the considerable development time needed to build a complete prototype.},
eventtitle = {{{IEEE Pervasive Computing}}},
keywords = {audio tours,Computational modeling,design process,HCI methods,Intelligent control,Intelligent sensors,Intelligent systems,Iterative methods,mixed reality,Process design,Prototypes,prototyping,Sensor systems,ubiquitous computing,User interfaces,Virtual reality,Wizard of Oz},
file = {C\:\\Users\\jseo1005\\Zotero\\storage\\FEII3JJW\\Dow et al. - 2005 - Wizard of Oz support throughout an iterative desig.pdf;C\:\\Users\\jseo1005\\Zotero\\storage\\AXPNQFXY\\1541964.html}
}
@inproceedings{ebelVisualizingEventSequence2021,
title = {Visualizing {{Event Sequence Data}} for {{User Behavior Evaluation}} of {{In-Vehicle Information Systems}}},
booktitle = {13th {{International Conference}} on {{Automotive User Interfaces}} and {{Interactive Vehicular Applications}}},
author = {Ebel, Patrick and Lingenfelder, Christoph and Vogelsang, Andreas},
date = {2021-09-20},
series = {{{AutomotiveUI}} '21},
pages = {219--229},
publisher = {Association for Computing Machinery},
location = {New York, NY, USA},
doi = {10.1145/3409118.3475140},
url = {https://dl.acm.org/doi/10.1145/3409118.3475140},
urldate = {2023-09-07},
abstract = {With modern In-Vehicle Information Systems (IVISs) becoming more capable and complex than ever, their evaluation becomes increasingly difficult. The analysis of large amounts of user behavior data can help to cope with this complexity and can support UX experts in designing IVISs that serve customer needs and are safe to operate while driving. We, therefore, propose a Multi-level User Behavior Visualization Framework providing effective visualizations of user behavior data that is collected via telematics from production vehicles. Our approach visualizes user behavior data on three different levels: (1) The Task Level View aggregates event sequence data generated through touchscreen interactions to visualize user flows. (2) The Flow Level View allows comparing the individual flows based on a chosen metric. (3) The Sequence Level View provides detailed insights into touch interactions, glance, and driving behavior. Our case study proves that UX experts consider our approach a useful addition to their design process.},
isbn = {978-1-4503-8063-8},
file = {C:\Users\jseo1005\Zotero\storage\27AG4E2E\Ebel et al. - 2021 - Visualizing Event Sequence Data for User Behavior Evaluation of In-Vehicle Information Systems.pdf}
}
@online{EducationalDataComics,
title = {Educational {{Data Comics}}: {{What}} Can {{Comics}} Do for {{Education}} in {{Visualization}}?},
url = {https://ieeexplore.ieee.org/abstract/document/10344064},
urldate = {2024-01-15},
file = {C:\Users\jseo1005\Zotero\storage\35E26565\10344064.html}
}
@article{edwardsHowAltText2023,
title = {How the {{Alt Text Gets Made}}: {{What Roles}} and {{Processes}} of {{Alt Text Creation Can Teach Us About Inclusive Imagery}}},
shorttitle = {How the {{Alt Text Gets Made}}},
author = {Edwards, Emory J. and Gilbert, Michael and Blank, Emily and Branham, Stacy M.},
date = {2023-06-30},
journaltitle = {ACM Transactions on Accessible Computing},
shortjournal = {ACM Trans. Access. Comput.},
volume = {16},
number = {2},
pages = {1--28},
issn = {1936-7228, 1936-7236},
doi = {10.1145/3587469},
url = {https://dl.acm.org/doi/10.1145/3587469},
urldate = {2024-01-19},
abstract = {Many studies within Accessible Computing have investigated image accessibility, from what should be included in alternative text (alt text), to possible automated, human-in-the-loop, or crowdsourced approaches to alt text generation. However, the processes through which practitioners make alt text in situ have rarely been discussed. Through interviews with three artists and three accessibility practitioners working with Google, as well as 25 end users, we identify four processes of alt text creation used by this company—The User-Evaluation Process, The Lone Writer Process, The Team Write-A-Thon Process, and The Artist-Writer Process—and unpack their potential strengths and weaknesses as they relate to access and inclusive imagery. We conclude with a discussion of what alt text researchers and industry professionals can learn from considering alt text in situ, including opportunities to support user feedback, cross-contributor consistency, and organizational or technical changes to production processes.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\MF7ID7TT\Edwards et al. - 2023 - How the Alt Text Gets Made What Roles and Process.pdf}
}
@article{elavskyDataNavigatorAccessibilityCentered2023,
title = {Data {{Navigator}}: {{An Accessibility-Centered Data Navigation Toolkit}}},
shorttitle = {Data {{Navigator}}},
author = {Elavsky, Frank and Nadolskis, Lucas and Moritz, Dominik},
date = {2023},
journaltitle = {IEEE Transactions on Visualization and Computer Graphics},
shortjournal = {IEEE Trans. Visual. Comput. Graphics},
pages = {1--11},
issn = {1077-2626, 1941-0506, 2160-9306},
doi = {10.1109/TVCG.2023.3327393},
url = {https://ieeexplore.ieee.org/document/10301522/},
urldate = {2023-11-06},
abstract = {Making data visualizations accessible for people with disabilities remains a significant challenge in current practitioner efforts. Existing visualizations often lack an underlying navigable structure, fail to engage necessary input modalities, and rely heavily on visual-only rendering practices. These limitations exclude people with disabilities, especially users of assistive technologies. To address these challenges, we present Data Navigator: a system built on a dynamic graph structure, enabling developers to construct navigable lists, trees, graphs, and flows as well as spatial, diagrammatic, and geographic relations. Data Navigator supports a wide range of input modalities: screen reader, keyboard, speech, gesture detection, and even fabricated assistive devices. We present 3 case examples with Data Navigator, demonstrating we can provide accessible navigation structures on top of raster images, integrate with existing toolkits at scale, and rapidly develop novel prototypes. Data Navigator is a step towards making accessible data visualizations easier to design and implement.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\BJTRTD75\Elavsky et al. - 2023 - Data Navigator An Accessibility-Centered Data Navigation Toolkit.pdf}
}
@article{elavskyDataNavigatorAccessibilityCentered2024,
title = {Data {{Navigator}}: {{An Accessibility-Centered Data Navigation Toolkit}}},
shorttitle = {Data {{Navigator}}},
author = {Elavsky, Frank and Nadolskis, Lucas and Moritz, Dominik},
date = {2024-01-01},
journaltitle = {IEEE Transactions on Visualization and Computer Graphics},
volume = {30},
number = {01},
pages = {803--813},
publisher = {IEEE Computer Society},
issn = {1077-2626},
doi = {10.1109/TVCG.2023.3327393},
url = {https://www.computer.org/csdl/journal/tg/2024/01/10301522/1RFC0Gz2dEY},
urldate = {2024-02-25},
abstract = {Making data visualizations accessible for people with disabilities remains a significant challenge in current practitioner efforts. Existing visualizations often lack an underlying navigable structure, fail to engage necessary input modalities, and rely heavily on visual-only rendering practices. These limitations exclude people with disabilities, especially users of assistive technologies. To address these challenges, we present Data Navigator: a system built on a dynamic graph structure, enabling developers to construct navigable lists, trees, graphs, and flows as well as spatial, diagrammatic, and geographic relations. Data Navigator supports a wide range of input modalities: screen reader, keyboard, speech, gesture detection, and even fabricated assistive devices. We present 3 case examples with Data Navigator, demonstrating we can provide accessible navigation structures on top of raster images, integrate with existing toolkits at scale, and rapidly develop novel prototypes. Data Navigator is a step towards making accessible data visualizations easier to design and implement.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\3J8CVA3Z\Elavsky et al. - 2024 - Data Navigator An Accessibility-Centered Data Navigation Toolkit.pdf}
}
@article{elavskyHowAccessibleMy2022,
title = {How Accessible Is My Visualization? {{Evaluating}} Visualization Accessibility with {{Chartability}}},
shorttitle = {How Accessible Is My Visualization?},
author = {Elavsky, Frank and Bennett, Cynthia and Moritz, Dominik},
date = {2022-06},
journaltitle = {Computer Graphics Forum},
shortjournal = {Computer Graphics Forum},
volume = {41},
number = {3},
pages = {57--70},
issn = {0167-7055, 1467-8659},
doi = {10.1111/cgf.14522},
url = {https://onlinelibrary.wiley.com/doi/10.1111/cgf.14522},
urldate = {2022-10-17},
abstract = {Novices and experts have struggled to evaluate the accessibility of data visualizations because there are no common shared guidelines across environments, platforms, and contexts in which data visualizations are authored. Between non-specific standards bodies like WCAG, emerging research, and guidelines from specific communities of practice, it is hard to organize knowledge on how to evaluate accessible data visualizations. We present Chartability, a set of heuristics synthesized from these various sources which enables designers, developers, researchers, and auditors to evaluate data-driven visualizations and interfaces for visual, motor, vestibular, neurological, and cognitive accessibility. In this paper, we outline our process of making a set of heuristics and accessibility principles for Chartability and highlight key features in the auditing process. Working with participants on real projects, we found that data practitioners with a novice level of accessibility skills were more confident and found auditing to be easier after using Chartability. Expert accessibility practitioners were eager to integrate Chartability into their own work. Reflecting on Chartability’s development and the preliminary user evaluation, we discuss tradeoffs of open projects, working with high-risk evaluations like auditing projects in the wild, and challenge future research projects at the intersection of visualization and accessibility to consider the broad intersections of disabilities.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\3GJLUDSM\Elavsky et al. - 2022 - How accessible is my visualization Evaluating vis.pdf}
}
@inproceedings{engelAnalysisTactileChart2017,
title = {Analysis of {{Tactile Chart Design}}},
booktitle = {Proceedings of the 10th {{International Conference}} on {{PErvasive Technologies Related}} to {{Assistive Environments}}},
author = {Engel, Christin and Weber, Gerhard},
date = {2017-06-21},
series = {{{PETRA}} '17},
pages = {197--200},
publisher = {Association for Computing Machinery},
location = {New York, NY, USA},
doi = {10.1145/3056540.3064955},
url = {https://dl.acm.org/doi/10.1145/3056540.3064955},
urldate = {2023-11-13},
abstract = {Tactile charts are widely used by blind people to get access to visual charts. They are often a transcription of visual charts. However, design guidelines for tactile charts are not sufficient for effective tactile chart design. An effective design supports the reader understanding the chart's underlying data. We explore how design can improve the readability of tactile charts and can support the user by getting insights from the data. We analyzed 69 tactile charts, including bar charts, line charts, pie charts, area charts, and scatter plots. The charts are taken from publications, guidelines and transcriber's institutes. In particular, we studied how axes and tick marks are designed and how labels and legends are used. Furthermore, we looked into the style of chart elements as well as design considerations for specific chart types. Based on the findings, we derived basic design guidelines for bar charts. The presented study is the first stage of our research which aims to develop design guidelines for tactile charts.},
isbn = {978-1-4503-5227-7},
keywords = {accessible charts,design guidelines,effective design,information visualization,tactile chart design,tactile graphics},
file = {C:\Users\jseo1005\Zotero\storage\IWD9X3XZ\Engel and Weber - 2017 - Analysis of Tactile Chart Design.pdf}
}
@inproceedings{engelSVGPlottAccessibleTool2019,
title = {{{SVGPlott}}: An Accessible Tool to Generate Highly Adaptable, Accessible Audio-Tactile Charts for and from Blind and Visually Impaired People},
shorttitle = {{{SVGPlott}}},
booktitle = {Proceedings of the 12th {{ACM International Conference}} on {{PErvasive Technologies Related}} to {{Assistive Environments}}},
author = {Engel, Christin and Müller, Emma Franziska and Weber, Gerhard},
date = {2019-06-05},
series = {{{PETRA}} '19},
pages = {186--195},
publisher = {Association for Computing Machinery},
location = {New York, NY, USA},
doi = {10.1145/3316782.3316793},
url = {https://dl.acm.org/doi/10.1145/3316782.3316793},
urldate = {2024-02-21},
abstract = {Charts, such as bar or line charts, are used in many different areas, newspapers, in education, and other areas of life. Accessing charts or understanding data is a main requirement in various professions. Audio-tactile charts can be explored by touch. Producing tactile charts takes a high effort and the design is challenging. Especially, blind and visually impaired people are excluded from chart creation. That is why we investigated several studies and developed a concept for a tool that aims to improve the creation process as well as the quality of audio-tactile charts. We present our concept and describe ideas and properties for user input, input data, graphical user interface, the rendering process as well as the output files in detail. Afterwards, we present our current implementation that includes an accessible graphical user interface which supports well-designed default parameters for chart generation. Most properties are customizable while the GUI supports a live preview of the current chart. In addition, an accessible legend and description will be generated. We support the generation of grouped and stacked bar charts, line charts, and scatterplots in an accessible SVG format. We evaluated the resulting charts, the usability and accessibility of the GUI within two pilot studies with blind and sighted people where we find implications for further investigations.},
isbn = {978-1-4503-6232-0},
keywords = {accessible graphics,audio-tactile charts,automation tool,blind and visually impaired people,data visualization,effective design,tactile charts},
file = {C:\Users\jseo1005\Zotero\storage\9PU9DQPY\Engel et al. - 2019 - SVGPlott an accessible tool to generate highly ad.pdf}
}
@online{ExperienceLearnEducational,
title = {Effective {{Practices}} for {{Description}} of {{Science Content}} within {{Digital Talking Books}}},
organization = {NCAM},
url = {http://ncamftp.wgbh.org/ncam-old-site/experience_learn/educational_media/stemdx.html},
urldate = {2023-08-28}
}
@article{fanAccessibilityDataVisualizations2022,
title = {The {{Accessibility}} of {{Data Visualizations}} on the {{Web}} for {{Screen Reader Users}}: {{Practices}} and {{Experiences During COVID-19}}},
shorttitle = {The {{Accessibility}} of {{Data Visualizations}} on the {{Web}} for {{Screen Reader Users}}},
author = {Fan, Danyang and Siu, Alexa F. and Rao, Hrishikesh V. and Kim, Gene S-H and Vazquez, Xavier and Greco, Lucy and O’Modhrain, Sile and Follmer, Sean},
date = {2022-08-18},
journaltitle = {ACM Transactions on Accessible Computing},
shortjournal = {ACM Trans. Access. Comput.},
pages = {3557899},
issn = {1936-7228, 1936-7236},
doi = {10.1145/3557899},
url = {https://dl.acm.org/doi/10.1145/3557899},
urldate = {2023-01-12},
abstract = {Data visualization has become an increasingly important means of effective data communication and has played a vital role in broadcasting the progression of COVID-19. Accessible data representations, on the other hand, have lagged behind, leaving areas of information out of reach for many blind and visually impaired (BVI) users. In this work, we sought to understand (1) the accessibility of current implementations of visualizations on the web; (2) BVI users’ preferences and current experiences when accessing data-driven media; (3) how accessible data representations on the web address these users’ access needs and help them navigate, interpret, and gain insights from the data; and (4) the practical challenges that limit BVI users’ access and use of data representations. To answer these questions, we conducted a mixed-methods study consisting of an accessibility audit of 87 data visualizations on the web to identify accessibility issues, an online survey of 127 screen reader users to understand lived experiences and preferences, and a remote contextual inquiry with 12 of the survey respondents to observe how they navigate, interpret and gain insights from accessible data representations. Our observations during this critical period of time provide an understanding of the widespread accessibility issues encountered across online data visualizations, the impact that data accessibility inequities have on the BVI community, the ways screen reader users sought access to data-driven information and made use of online visualizations to form insights, and the pressing need to make larger strides towards improving data literacy, building confidence, and enriching methods of access. Based on our findings, we provide recommendations for researchers and practitioners to broaden data accessibility on the web.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\Y6Q8L9IR\Fan et al. - 2022 - The Accessibility of Data Visualizations on the We.pdf}
}
@article{fanAccessibilityDataVisualizations2023,
title = {The {{Accessibility}} of {{Data Visualizations}} on the {{Web}} for {{Screen Reader Users}}: {{Practices}} and {{Experiences During COVID-19}}},
shorttitle = {The {{Accessibility}} of {{Data Visualizations}} on the {{Web}} for {{Screen Reader Users}}},
author = {Fan, Danyang and Fay Siu, Alexa and Rao, Hrishikesh and Kim, Gene Sung-Ho and Vazquez, Xavier and Greco, Lucy and O'Modhrain, Sile and Follmer, Sean},
date = {2023-03-29},
journaltitle = {ACM Transactions on Accessible Computing},
shortjournal = {ACM Trans. Access. Comput.},
volume = {16},
number = {1},
pages = {4:1--4:29},
issn = {1936-7228},
doi = {10.1145/3557899},
url = {https://dl.acm.org/doi/10.1145/3557899},
urldate = {2023-08-27},
abstract = {Data visualization has become an increasingly important means of effective data communication and has played a vital role in broadcasting the progression of COVID-19. Accessible data representations, however, have lagged behind, leaving areas of information out of reach for many blind and visually impaired (BVI) users. In this work, we sought to understand (1) the accessibility of current implementations of visualizations on the web; (2) BVI users’ preferences and current experiences when accessing data-driven media; (3) how accessible data representations on the web address these users’ access needs and help them navigate, interpret, and gain insights from the data; and (4) the practical challenges that limit BVI users’ access and use of data representations. To answer these questions, we conducted a mixed-methods study consisting of an accessibility audit of 87 data visualizations on the web to identify accessibility issues, an online survey of 127 screen reader users to understand lived experiences and preferences, and a remote contextual inquiry with 12 of the survey respondents to observe how they navigate, interpret, and gain insights from accessible data representations. Our observations during this critical period of time provide an understanding of the widespread accessibility issues encountered across online data visualizations, the impact that data accessibility inequities have on the BVI community, the ways screen reader users sought access to data-driven information and made use of online visualizations to form insights, and the pressing need to make larger strides towards improving data literacy, building confidence, and enriching methods of access. Based on our findings, we provide recommendations for researchers and practitioners to broaden data accessibility on the web.},
keywords = {Accessibility,accessible data visualization,audit,blind,data visualization,user experience,visually impaired,web accessibility},
file = {C:\Users\jseo1005\Zotero\storage\L48LCCW3\Fan et al. - 2023 - The Accessibility of Data Visualizations on the We.pdf}
}
@inproceedings{fanSlideToneTiltTone1DOF2022,
title = {Slide-{{Tone}} and {{Tilt-Tone}}: 1-{{DOF Haptic Techniques}} for {{Conveying Shape Characteristics}} of {{Graphs}} to {{Blind Users}}},
shorttitle = {Slide-{{Tone}} and {{Tilt-Tone}}},
booktitle = {{{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
author = {Fan, Danyang and Siu, Alexa Fay and Law, Wing-Sum Adrienne and Zhen, Raymond Ruihong and O'Modhrain, Sile and Follmer, Sean},
date = {2022-04-29},
pages = {1--19},
publisher = {ACM},
location = {New Orleans LA USA},
doi = {10.1145/3491102.3517790},
url = {https://dl.acm.org/doi/10.1145/3491102.3517790},
urldate = {2023-01-12},
eventtitle = {{{CHI}} '22: {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
isbn = {978-1-4503-9157-3},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\KTLG8G4D\Fan et al. - 2022 - Slide-Tone and Tilt-Tone 1-DOF Haptic Techniques .pdf}
}
@inproceedings{farihaMiningFrequentPatterns2013,
title = {Mining {{Frequent Patterns}} from {{Human Interactions}} in {{Meetings Using Directed Acyclic Graphs}}},
booktitle = {Advances in {{Knowledge Discovery}} and {{Data Mining}}},
author = {Fariha, Anna and Ahmed, Chowdhury Farhan and Leung, Carson Kai-Sang and Abdullah, S. M. and Cao, Longbing},
editor = {Pei, Jian and Tseng, Vincent S. and Cao, Longbing and Motoda, Hiroshi and Xu, Guandong},
date = {2013},
series = {Lecture {{Notes}} in {{Computer Science}}},
pages = {38--49},
publisher = {Springer},
location = {Berlin, Heidelberg},
doi = {10.1007/978-3-642-37453-1_4},
abstract = {In modern life, interactions between human beings frequently occur in meetings, where topics are discussed. Semantic knowledge of meetings can be revealed by discovering interaction patterns from these meetings. An existing method mines interaction patterns from meetings using tree structures. However, such a tree-based method may not capture all kinds of triggering relations between interactions, and it may not distinguish a participant of a certain rank from another participant of a different rank in a meeting. Hence, the tree-based method may not be able to find all interaction patterns such as those about correlated interaction. In this paper, we propose to mine interaction patterns from meetings using an alternative data structure—namely, a directed acyclic graph (DAG). Specifically, a DAG captures both temporal and triggering relations between interactions in meetings. Moreover, to distinguish one participant of a certain rank from another, we assign weights to nodes in the DAG. As such, a meeting can be modeled as a weighted DAG, from which weighted frequent interaction patterns can be discovered. Experimental results showed the effectiveness of our proposed DAG-based method for mining interaction patterns from meetings.},
isbn = {978-3-642-37453-1},
langid = {english},
keywords = {Data mining,Directed Acyclic Graphs,Frequent patterns,Human interaction,Modeling meetings},
file = {C:\Users\jseo1005\Zotero\storage\M2HRTAMH\Fariha et al. - 2013 - Mining Frequent Patterns from Human Interactions i.pdf}
}
@article{fengChartUnderstandingLarge2023,
title = {Chart {{Understanding}} with {{Large Language Model}}},
author = {Feng, John},
date = {2023-12-12},
publisher = {Engineering Archive},
doi = {10.31224/3401},
url = {https://engrxiv.org/preprint/view/3401},
urldate = {2024-03-25},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\WBH6E445\Feng - 2023 - Chart Understanding with Large Language Model.pdf}
}
@inproceedings{fitzpatrickProducingAccessibleStatistics2017,
title = {Producing {{Accessible Statistics Diagrams}} in {{R}}},
booktitle = {Proceedings of the 14th {{International Web}} for {{All Conference}}},
author = {Fitzpatrick, Donal and Godfrey, A. Jonathan R. and Sorge, Volker},
date = {2017-04-02},
pages = {1--4},
publisher = {ACM},
location = {Perth Western Australia Australia},
doi = {10.1145/3058555.3058564},
url = {https://dl.acm.org/doi/10.1145/3058555.3058564},
urldate = {2022-08-21},
abstract = {Blind people are at risk of being left behind in the information age if efforts are not made to improve the access to information that is not traditionally conveyed in text, whether that text be accessed in braille, audio, or a computer’s screen reading software. Most graphics summarise a scene or some aspect of data that the author hopes will inform their audience; good statistical graphics are commonly used to great effect for the sighted world, but are practically useless to a blind audience. Our work aims to provide an accessible way for blind users to easily, efficiently, and most importantly accurately, explore and query the data contained in diagrams such as bar charts, box plots, time series, and many more. We employ the statistical software environment R to compute rich semantics for these diagrams and make them web accessible by supporting screen reading and interactive exploration.},
eventtitle = {{{W4A}} '17: {{Web For All}} 2017 - {{The Future}} of {{Accessible Work}}},
isbn = {978-1-4503-4900-0},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\ASU5R8I9\Fitzpatrick et al. - 2017 - Producing Accessible Statistics Diagrams in R.pdf}
}
@inproceedings{fitzpatrickProducingAccessibleStatistics2017a,