<script src="http://www.google.com/jsapi" type="text/javascript"></script>
<script type="text/javascript">google.load("jquery", "1.3.2");</script>
<style type="text/css">
body {
font-family: "HelveticaNeue-Light", "Helvetica Neue Light", "Helvetica Neue", Helvetica, Arial, "Lucida Grande", sans-serif;
font-weight:300;
font-size:18px;
margin-left: auto;
margin-right: auto;
width: 1100px;
}
h1 {
font-size:32px;
font-weight:300;
}
#modelTable {
width: 100%;
border-collapse: separate;
border-spacing: 0;
margin: 10px auto;
border: 1px solid black;
border-radius: 10px;
overflow: hidden;
}
#modelTable th, #modelTable td {
border: none;
padding: 5px 10px;
text-align: center;
}
#modelTable th {
background-color: #f2f2f2;
}
#modelTable tr:nth-child(even) {background-color: #f9f9f9;}
#modelTable tr:hover {background-color: #e8e8e8;}
/* Image preview styles */
.img-preview {
display: none;
position: absolute;
z-index: 1000;
border: 1px solid #ddd;
border-radius: 5px;
padding: 5px;
background-color: white;
}
.img-link:hover + .img-preview {
display: block;
}
.img-link {
cursor: pointer;
}
.disclaimerbox {
background-color: #eee;
border: 1px solid #eeeeee;
border-radius: 10px ;
-moz-border-radius: 10px ;
-webkit-border-radius: 10px ;
padding: 20px;
}
video.header-vid {
height: 140px;
border: 1px solid black;
border-radius: 10px ;
-moz-border-radius: 10px ;
-webkit-border-radius: 10px ;
}
img.header-img {
height: 140px;
border: 1px solid black;
border-radius: 10px ;
-moz-border-radius: 10px ;
-webkit-border-radius: 10px ;
}
img.rounded {
border: 1px solid #eeeeee;
border-radius: 10px ;
-moz-border-radius: 10px ;
-webkit-border-radius: 10px ;
}
a:link,a:visited
{
color: #1367a7;
text-decoration: none;
}
a:hover {
color: #208799;
}
td.dl-link {
height: 160px;
text-align: center;
font-size: 22px;
}
.caption {
margin-top: 8px; /* Space between image and caption */
font-style: italic; /* Italicize the caption text */
}
.layered-paper-big { /* modified from: http://css-tricks.com/snippets/css/layered-paper/ */
box-shadow:
0px 0px 1px 1px rgba(0,0,0,0.35), /* The top layer shadow */
5px 5px 0 0px #fff, /* The second layer */
5px 5px 1px 1px rgba(0,0,0,0.35), /* The second layer shadow */
10px 10px 0 0px #fff, /* The third layer */
10px 10px 1px 1px rgba(0,0,0,0.35), /* The third layer shadow */
15px 15px 0 0px #fff, /* The fourth layer */
15px 15px 1px 1px rgba(0,0,0,0.35), /* The fourth layer shadow */
20px 20px 0 0px #fff, /* The fifth layer */
20px 20px 1px 1px rgba(0,0,0,0.35), /* The fifth layer shadow */
25px 25px 0 0px #fff, /* The fifth layer */
25px 25px 1px 1px rgba(0,0,0,0.35); /* The fifth layer shadow */
margin-left: 10px;
margin-right: 45px;
}
.paper-big { /* modified from: http://css-tricks.com/snippets/css/layered-paper/ */
box-shadow:
0px 0px 1px 1px rgba(0,0,0,0.35); /* The top layer shadow */
margin-left: 10px;
margin-right: 45px;
}
.layered-paper { /* modified from: http://css-tricks.com/snippets/css/layered-paper/ */
box-shadow:
0px 0px 1px 1px rgba(0,0,0,0.35), /* The top layer shadow */
5px 5px 0 0px #fff, /* The second layer */
5px 5px 1px 1px rgba(0,0,0,0.35), /* The second layer shadow */
10px 10px 0 0px #fff, /* The third layer */
10px 10px 1px 1px rgba(0,0,0,0.35); /* The third layer shadow */
margin-top: 5px;
margin-left: 10px;
margin-right: 30px;
margin-bottom: 5px;
}
.vert-cent {
position: relative;
top: 50%;
transform: translateY(-50%);
}
hr
{
border: 0;
height: 1px;
background-image: linear-gradient(to right, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.75), rgba(0, 0, 0, 0));
}
</style>
<html>
<head>
<title>MLLM Robustness</title>
<meta property="og:image" content="./assets/dilution_insertion.png"/>
<meta property="og:title" content="Robustness of Multimodal Large Language Models Against Cross-Modal Insertions and Dilutions" />
<meta property="og:description" content="by Gaurav Verma (gverma@gatech.edu) and Srijan Kumar (srijan@gatech.edu); Affiliation: Georgia Institute of Technology" />
</head>
<body>
<br>
<center>
<span style="font-size:36px"><b>Are Multimodal LLMs Robust to Cross-Modal Variations?</b></span><br/><br/>
<span><a href="https://gaurav22verma.github.io/">Gaurav Verma</a> <em><span style="font-size: 10pt;">and</span></em> <a href="https://faculty.cc.gatech.edu/~srijan/">Srijan Kumar</a></span><br/>
<span style="font-size: 11pt;">Georgia Institute of Technology</span><br/>
<a href="https://www.cc.gatech.edu/"><img src="./assets/gt-logo.png" width=170px></a><br/><br/>
</center>
<!-- table of contents -->
<div style="padding-left: 120px;">
<h1>Here's what we are going to cover:</h1>
<ol>
<li><a href="#summary">Summary</a> <span style="font-size: 15px;"><i>– tell us what you did and what you found!</i></span></li>
<li><a href="#tech-details">Technical details</a> <span style="font-size: 15px;"><i>– show us the prompts!</i></span></li>
<li><a href="#confusing-examples">What kind of cross-modal variations confuse GPT-4(V)?</a> <span style="font-size: 15px;"><i>– can we see some qualitative examples?</i></span></li>
<li><a href="#resources">Resources</a> <span style="font-size: 15px;"><i>– can you share the code?</i></span></li>
<li><a href="#related-work">Related papers</a> <span style="font-size: 15px;"><i>– can we read some related papers?</i></span></li>
</ol>
</div>
<hr>
<center>
<table align=center width=850px>
<tr>
<td width=260px>
<b>Multimodal Large Language Models (MLLMs)</b> allow language-based reasoning over other modalities, predominantly images right now, but soon videos and even <a href="https://arxiv.org/abs/2309.16058" target="_blank">sensor data</a>. Recently, there has been a lot of interest in understanding the robustness of LLMs (the backbone of multimodal LLMs) to variations in the input data, ranging from unintended changes (<a href="https://arxiv.org/abs/2310.11324" target="_blank">Sclar et al., 2023</a>) to adversarial ones (<a href="https://arxiv.org/abs/2307.15043" target="_blank">Zou et al., 2023</a>). In the same spirit, we evaluate how robust the zero-shot capabilities of multimodal LLMs are to plausible changes in the input text.<br/><br/>
<hr/>
<!-- <center> -->
<center><h1 id="summary">Summary</h1></center>
<b>How robust are multimodal LLMs to realistic changes in multimodal data?</b> A simple way to answer this is to evaluate how sensitive the predictions of a multimodal LLM are to <em>relevant</em> and <em>plausible</em> insertions or dilutions in the text. In multimodal settings (i.e., vision + language), one way to emulate relevant and plausible variations in the text is via cross-modal dilutions (<a href="https://arxiv.org/abs/2211.02646" target="_blank">Verma et al., EMNLP 2022</a>) and insertions (<a href="https://arxiv.org/abs/2306.11065" target="_blank">Ramshetty et al., ACL 2023</a>) – see Figure 1 below for examples. Ideally, a model's predictions should not change when these variations are introduced. Here, we conduct this evaluation for 5 multimodal LLMs (GPT-4(V), Claude-3, Gemini Pro, LLaVA-1.5, and moondream1) on a simple 5-class classification task in a zero-shot setting. For reference, we also consider the robustness of a fully fine-tuned fusion-based multimodal model.<br/><br/>
<center>
<img src="./assets/dilution_insertion.png" width="800">
<div class="caption" style="font-size: 13px; color: #0e515d;"><b>Figure 1</b>: Examples of cross-modal dilution and cross-modal insertion. Introducing variations in the text by relying on the information in the corresponding image modality (i.e., cross-modal grounding) ensures that the variations are realistic. The outputs for original and modified multimodal inputs should be the same. </div><br/>
</center>
<b>The classification task is</b> a simple one, with a practical Social Good use case in mind, and involves inference on user-generated data: given a social media post made during a crisis that comprises an image and text, categorize it into one of the following classes to help determine what kind of humanitarian aid (if any) is needed: <i>rescue_volunteering_or_donation_effort</i>, <i>infrastructure_and_utility_damage</i>, <i>affected_individuals</i>, <i>other_relevant_information</i>, and <i>not_humanitarian</i>. We supply the MLLMs with the definitions of these categories. Given the nature of user-generated data on social platforms, making an inference based on both modalities is important. For this multimodal task, the model's job is to predict the correct classification label given an image and a textual description.<br/><br/>
<b>We find that</b> proprietary MLLMs like GPT-4(V), Claude 3 Opus, and Gemini Pro demonstrate pretty good robustness, with the latter two being slightly more robust than GPT-4(V). The robustness of proprietary MLLMs is considerably better than that of a fully fine-tuned fusion-based multimodal model, which is impressive. With regard to the dependence on the type of variation, we notice that: <i>(a)</i> dilutions almost always hurt performance, <i>(b)</i> insertions improve the results, which could hint at a good data augmentation strategy for enabling more accurate fine-tuning, and <i>(c)</i> a combination of dilution and insertion hurts MLLM performance, which we believe is largely driven by the effect of dilution. However, the zero-shot classification abilities of these models are subpar compared to the fusion-based multimodal classifier. This points to a trade-off in the current landscape between accuracy and robustness: MLLMs achieve subpar zero-shot classification performance, while their robustness is notably better. Finally, it is worth noting that these robustness benefits are not reflected in LLaVA-1.5, a leading open-source MLLM: LLaVA-1.5 achieves notably lower classification performance while also doing worse on robustness in comparison to the fine-tuned fusion-based classifier.<br/><br/>
<center>
<table id="modelTable">
<tr>
<th>Model</th>
<th>Original (F1)</th>
<th>Failure rate</th>
<th>Dilution</th>
<th>Insertion</th>
<th>Dilution + Insertion</th>
<th>All results</th>
</tr>
<tr>
<td>Random (uniform)</td>
<td>0.17</td>
<td>--</td>
<td>--</td>
<td>--</td>
<td>--</td>
<td>
--
</td>
</tr>
<tr>
<td>MM-Fusion</td>
<td>0.75</td>
<td>0.00 %</td>
<td>-21.53 %</td>
<td>-2.82 %</td>
<td>-19.92 %</td>
<td>
<a href="./assets/humanitarian_mm_fusion.png" target="_blank" class="img-link">View</a>
<div class="img-preview"><img src="./assets/humanitarian_mm_fusion.png" style="width: 100px;"></div>
</td>
</tr>
<tr>
<td>GPT-4(V)</td>
<td>0.67</td>
<td>0.00 %</td>
<td>-4.47 %</td>
<td>+1.49 %</td>
<td>-2.98 %</td>
<td>
<a href="./assets/humanitarian_gpt4v.png" target="_blank" class="img-link">View</a>
<div class="img-preview"><img src="./assets/humanitarian_gpt4v.png" style="width: 100px;"></div>
</td>
</tr>
<tr>
<td>Claude 3 Opus</td>
<td>0.56</td>
<td>0.01 - 0.04 %</td>
<td>+1.59 %</td>
<td>+5.71 %</td>
<td>+5.80 %</td>
<td>
<a href="./assets/humanitarian_claude3opus.png" target="_blank" class="img-link">View</a>
<div class="img-preview"><img src="./assets/humanitarian_claude3opus.png" style="width: 100px;"></div>
</td>
</tr>
<tr>
<td>Gemini Pro</td>
<td>0.36</td>
<td>0.10 - 0.32 %</td>
<td>-2.78 %</td>
<td>+5.56 %</td>
<td>+2.78 %</td>
<td>
<a href="./assets/humanitarian_gemini.png" target="_blank" class="img-link">View</a>
<div class="img-preview"><img src="./assets/humanitarian_gemini.png" style="width: 100px;"></div>
</td>
</tr>
<tr>
<td>LLaVA-1.5-13B</td>
<td>0.39</td>
<td>0.00 %</td>
<td>-28.20 %</td>
<td>+5.13 %</td>
<td>-25.64 %</td>
<td>
<a href="./assets/humanitarian_llaval1point5.png" target="_blank" class="img-link">View</a>
<div class="img-preview"><img src="./assets/humanitarian_llaval1point5.png" alt="Model B Image" style="width: 100px;"></div>
</td>
</tr>
<tr>
<td>moondream1</td>
<td>0.13</td>
<td>5.46 - 13.05 %</td>
<td>-7.69 %</td>
<td>0.00 %</td>
<td>+7.69 %</td>
<td>
<a href="./assets/humanitarian_moondream1.png" target="_blank" class="img-link">View</a>
<div class="img-preview"><img src="./assets/humanitarian_moondream1.png" alt="Model B Image" style="width: 100px;"></div>
</td>
</tr>
<!-- Add more rows here -->
</table>
<div id="caption" style="font-size: 13px; color: #0e515d;"><em><b>Results Table</b>: The table shows the classification performance (macro F1 score) of the models on the original data, and the percentage change in performance due to dilution, insertion, and dilution + insertion. The failure rate is the percentage of instances where the model refuses to answer or answers in an unparsable format. The last column contains the results for all the classes. Click on view image to see the expanded set of results in a different tab. </em></div> <br/>
</center><br/>
<b>Lastly</b>, it is worth emphasizing that while the robustness of proprietary MLLM-based zero-shot classifiers is quite good compared to the fusion-based multimodal classifier, more work is needed before multimodal systems reach a "<a href="https://en.wikipedia.org/wiki/High_availability">three 9s</a>" level of robustness.<br/><br/>
<em>Continue reading for more technical details and qualitative examples of instances for which GPT-4(V) falters...<br/> and ... stay tuned for more multimodal tasks and model variants (like fine-tuned MLLMs)!</em>
<br/><br/>
<hr/>
<center><h1 id="tech-details">Technical Details</h1></center>
<b>The classification task</b> is to categorize the Twitter images + posts in the <a href="https://crisisnlp.qcri.org/crisismmd">CrisisMMD</a> dataset. We use the standard train/dev/test splits – note that the train and dev splits are only needed for training the fusion-based multimodal classifier. Since all the LLM-based models are used in a zero-shot setting, we only need the test set for them. The multimodal fusion model concatenates the modality-specific representations of fine-tuned unimodal classifiers (DistilBERT for text and VGG-16 for images) and fuses them by learning a multilayer perceptron (MLP) with 3 hidden layers (a rough sketch of this architecture is included after the prompt below). For the multimodal LLMs, we use the following prompt. A couple of things are worth noticing in the prompt below: we randomly shuffle the labels in the labels_string to avoid any position bias, and we supply the <a href="./assets/label_definition.txt" target="_blank">definitions of the labels</a>. We do not introduce model-specific modifications to the prompt.<br/><br/>
<div style="padding-left: 20px;">
<code style="color: purple;">
Given the image and the accompanying textual description, assign one of the five labels based on the information conveyed jointly by the image and the text: {labels_string}. The definitions of the labels are provided below.<br/><br/>
{labels_random[0]}: {labels_definitions[labels_random[0]]}<br/>
{labels_random[1]}: {labels_definitions[labels_random[1]]}<br/>
{labels_random[2]}: {labels_definitions[labels_random[2]]}<br/>
{labels_random[3]}: {labels_definitions[labels_random[3]]}<br/>
{labels_random[4]}: {labels_definitions[labels_random[4]]}<br/><br/>
You must respond only with one of the following strings: {labels_string}, and nothing else.<br/><br/>
Textual description: {text}<br/>
Label:
</code><br/>
</div>
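To make the prompt-construction step concrete, here is a minimal Python sketch, written for this page, of how such a prompt can be assembled with randomly shuffled labels; the helper name <code>build_prompt</code> is illustrative and not taken verbatim from our codebase.<br/>
<div style="padding-left: 20px;">
<pre style="font-size: 14px;"><code>
import random

LABELS = [
    "rescue_volunteering_or_donation_effort",
    "infrastructure_and_utility_damage",
    "affected_individuals",
    "other_relevant_information",
    "not_humanitarian",
]

def build_prompt(text, labels_definitions):
    # Shuffle the label order independently for every example to avoid any position bias.
    labels_random = random.sample(LABELS, k=len(LABELS))
    labels_string = ", ".join(labels_random)
    definition_lines = "\n".join(
        f"{label}: {labels_definitions[label]}" for label in labels_random
    )
    return (
        "Given the image and the accompanying textual description, assign one of the five labels "
        f"based on the information conveyed jointly by the image and the text: {labels_string}. "
        "The definitions of the labels are provided below.\n\n"
        f"{definition_lines}\n\n"
        f"You must respond only with one of the following strings: {labels_string}, and nothing else.\n\n"
        f"Textual description: {text}\n"
        "Label:"
    )
</code></pre>
</div>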
<br/>
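For reference, here is a rough PyTorch sketch of the fusion-based baseline described above; the feature dimensions and hidden-layer width are placeholder assumptions for illustration, not the exact configuration we trained (the training code is in the repository).<br/>
<div style="padding-left: 20px;">
<pre style="font-size: 14px;"><code>
import torch
import torch.nn as nn

class FusionClassifier(nn.Module):
    """Concatenate text and image features, then classify with a 3-hidden-layer MLP."""

    def __init__(self, text_dim=768, image_dim=4096, hidden_dim=512, num_classes=5):
        super().__init__()
        # text_dim: DistilBERT sentence embedding; image_dim: penultimate VGG-16 features.
        # These sizes are illustrative assumptions, not the trained configuration.
        self.mlp = nn.Sequential(
            nn.Linear(text_dim + image_dim, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, num_classes),
        )

    def forward(self, text_feats, image_feats):
        fused = torch.cat([text_feats, image_feats], dim=-1)
        return self.mlp(fused)  # unnormalized class logits
</code></pre>
</div>
<br/>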
<b>Experimental setup for testing robustness</b>: So, sure, the above describes how the models classify. But since the primary aim is to test their robustness to dilutions and insertions, we need variants of the original <code style="color: purple;">{text}</code> above that include these dilutions and insertions. We rely on prompting GPT-4(V) to generate these variants. Here's how we prompt the GPT-4(V) model to introduce these variations: <br/><br/>
<div style="padding-left: 20px;">
<i>Introducing dilutions:</i><br/>
<code style="color: purple;">
Given the image and the accompanying textual description, generate new text that follows the following constraints: the new text (i) should be relevant to the given image, (ii) should be relevant to the provided textual description, and (iii), when appended to the textual description, the combined text should be fluent and natural. The goal of generating the new text is to dilute the information conveyed by the image and text data, under realistic settings of relevancy and fluency. Generate about 2 sentences as new text.<br/><br/>
Textual description: {text}<br/>
New Text:
</code><br/><br/>
<i>Introducing insertions:</i><br/>
<code style="color: purple;">
You are an agent that inserts new words into the provided textual description while ensuring that the inserted words are strictly related to the attributes of objects in the provided image. The attributes could include properties like color, shape, size, etc. The inserted attributes should be included without modifying the content of the original textual description. Only insert the attributes that you are confident about based on the image and textual description. Output the new text that includes the inserted attributes. It is important that you do not add new sentences and only insert words in the original textual description.<br/><br/>
Textual description: {text}<br/>
New Text:
</code><br/><br/>
<i>Introducing dilutions + insertions:</i> For this, we combine the dilution generated by GPT-4(V) with the original text and then prompt the model to introduce insertions using the same prompt as the one above to introduce insertions in the original + dilution text.<br/><br/>
</div>
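To give a sense of how these prompts are used, here is a hedged sketch of a single dilution-generation call, assuming the OpenAI Python SDK's vision-enabled chat interface; the model identifier and helper names are illustrative, and the exact scripts live in the linked GitHub repository.<br/><br/>
<div style="padding-left: 20px;">
<pre style="font-size: 14px;"><code>
import base64
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def generate_dilution(image_path, text, dilution_prompt_template):
    # Encode the image so it can be passed alongside the text prompt.
    with open(image_path, "rb") as f:
        image_b64 = base64.b64encode(f.read()).decode("utf-8")
    prompt = dilution_prompt_template.format(text=text)
    response = client.chat.completions.create(
        model="gpt-4-vision-preview",  # illustrative; use whichever GPT-4(V) endpoint is available
        messages=[{
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image_url",
                 "image_url": {"url": f"data:image/png;base64,{image_b64}"}},
            ],
        }],
        max_tokens=120,
    )
    return response.choices[0].message.content.strip()
</code></pre>
</div>
<br/>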
Once we have these dilutions, insertions, and dilutions + insertions, we plug them in place of <code style="color: purple;">{text}</code> in the classification prompt and evaluate the models on the new data. Evaluation involves quantifying the performance of the model on the original data, for which we use macro-averaged F1 scores. We also quantify the failure rate, which is the percentage of instances where the model refuses to answer or answers in an unparsable format. And most importantly, we quantify the percentage change in macro F1 due to dilution, insertion, and dilution + insertion. <br/><br/>
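For completeness, here is a minimal sketch of how these three quantities can be computed, assuming the model outputs are collected as label strings and refused or unparsable responses are stored as None; the helper names are illustrative.<br/><br/>
<div style="padding-left: 20px;">
<pre style="font-size: 14px;"><code>
from sklearn.metrics import f1_score

def evaluate(y_true, y_pred):
    """Macro-averaged F1 over parsable predictions, plus the failure rate."""
    failures = [i for i, p in enumerate(y_pred) if p is None]
    failure_rate = 100.0 * len(failures) / len(y_pred)
    keep = [i for i in range(len(y_pred)) if y_pred[i] is not None]
    macro_f1 = f1_score([y_true[i] for i in keep],
                        [y_pred[i] for i in keep], average="macro")
    return macro_f1, failure_rate

def percent_change(f1_original, f1_perturbed):
    # Relative change in macro F1 after dilution, insertion, or dilution + insertion.
    return 100.0 * (f1_perturbed - f1_original) / f1_original
</code></pre>
</div>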
Now, certainly, it is important to <b>check whether GPT-4(V) generates good dilutions and insertions</b> – can we rely on these generations for our robustness assessments? We did some qualitative checks and found the generations to be of high quality. Here's the <a href="https://github.com/claws-lab/MLLM-robustness/blob/main/data/generated_dilutions_insertions.tsv" target="_blank">complete TSV</a> that shows the original text and the generated dilution, insertion, and dilution + insertion; take a look! As a side note, we have previously proposed our own approaches to generate these dilutions and insertions while ensuring cross-modal grounding, and human evaluations suggest that such automated variations are indeed realistic. <br/><br/>
<br/>
<hr/>
<center><h1 id="confusing-examples">What kind of cross-modal variations confuse GPT-4(V)?</h1></center>
GPT-4(V) seems to be the best MLLM -- its zero-shot performance is 0.67, which is pretty close to that of a fine-tuned multimodal fusion model (0.75). Additionally, the failure rate and the changes in performance due to dilution, insertion, and dilution + insertion are also quite low for GPT-4(V). Let us look at the instances where GPT-4(V) makes an incorrect prediction after the variations in text are introduced.<br/><br/>
<span style="text-align: left;"><b>On introducing dilutions:</b> Looking back at the definitions of these categories, which were made available in the prompt, the classifications made by GPT-4(V) on diluting the original text do not make a lot of sense.</span>
<center>
<table>
<tr>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<center>
<img src="./assets/920216066416173056_0.png" width="320px"><br/>
</center>
</td>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<center>
<img src="./assets/910258440077025280_0.png" width="260px">
</center>
</td>
</tr>
<tr>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<b>original</b>: Sarasota group escapes CA wildfire #usa #vacation #travel <span style="color: blue;">not_humanitarian <span style="color: green;">✓</span></span> <br>
<b>diluted</b>: Sarasota group escapes CA wildfire #usa #vacation #travel. In the comforting serenity of a vineyard, the group finds solace, celebrating their safety with a well-deserved feast and local wine. Their smiles serve as a reminder of the ability to find joy.<span style="color: red;"> affected_individuals <span style="color: red;">✗</span></span>
</td>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<b>original</b>: @username Hi FirstName LastName here had a scary time last plane from Orlando b4 #Irma hope all in #Florida r ok <span style="color: blue;">not_humanitarian <span style="color: green;">✓</span></span> <br>
<b>diluted</b>: @username Hi FirstName LastName here had a scary time last plane from Orlando b4 #Irma hope all in #Florida r ok. It's comforting to be back home with the family pet after such an ordeal. Our fluffy cat sure seems oblivious to the hurricanes and is just happy to have us home<span style="color: red;"> other_relevant_information <span style="color: red;">✗</span></span>
</td>
</tr>
</table>
</center><br/><br/>
<span style="text-align: left;"><b>On introducing insertions:</b> Overall, GPT-4(V) demonstrates slightly better performances when relevant insertions are made from the image to the text. However, there are still some examples where insertions cause incorrect predcitions.</span>
<center>
<table>
<tr>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<center>
<img src="./assets/912990487501557761_0.png" width="270px">
</center>
</td>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<center>
<img src="./assets/910111384045854720_0.png" width="310px">
</center>
</td>
</tr>
<tr>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<b>original</b>: @username: Tell us how you really feel! #OBX #hurricanemaria <span style="color: blue;"> not_humanitarian <span style="color: green;">✓</span></span> <br>
<b>insertions</b>: @username: Tell us how you really feel about the bold, yellow Outer Banks Motor Lodge sign! #OBX #hurricanemaria <span style="color: red;"> other_relevant_information <span style="color: red;">✗</span></span>
</td>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<b>original</b>: .@username crews to return today after helping with Irma recovery in Georgia #vt <span style="color: blue;"> rescue_volunteering_or_donation_effort <span style="color: green;">✓</span></span> <br>
<b>insertions</b>: .@username crews with their yellow and white trucks are set to return today after helping with Irma recovery in Georgia #vt <span style="color: red;"> infrastructure_and_utility_damage <span style="color: red;">✗</span></span>
</td>
</tr>
</table>
</center><br/>
<br/>
<span style="text-align: left;"><b>On introducing both dilutions and insertions:</b> Similar to the examples above, the GPT-4(V) predictions change incorrectly after the changes have been introduced in the text. At an aggregate level, given that dilutions lead to a drop in performance while insertions boost the performance, it seems that the drop in introducing dilutions + insertions could be largely attributed to the dilutions. The example on the right below illustrates this.</span>
<center>
<table>
<tr>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<center>
<img src="./assets/930415579294584834_0.png" width="270px">
</center>
</td>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<center>
<img src="./assets/904830090386735105_0.png" width="220px">
</center>
</td>
</tr>
<tr>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<b>original</b>: Iran: Survivors struggle in aftermath of year's most deadly earthquake <span style="color: blue;"> infrastructure_and_utility_damage <span style="color: green;">✓</span></span> <br>
<b>dilution + insertions</b>: Iran: Many survivors struggle in aftermath of year's most deadly earthquake. Amidst the grey rubble, communities across the region are coming together to show strength and solidarity. <span style="color: red;"> rescue_volunteering_or_donation_effort <span style="color: red;">✗</span></span>
</td>
<td style="border: 2px solid black; border-radius: 10px; padding: 10px;">
<b>original</b>: The effects of Harvey... <span style="color: blue;"> infrastructure_and_utility_damage <span style="color: green;">✓</span></span> <br>
<b>dilution + insertions</b>: The effects of Harvey... remain visible in the debris scattered across neighborhoods in these images. Piles of personal belongings, now converted to trash, line the streets as a testament to the disaster's reach. <span style="color: red;"> other_relevant_information <span style="color: red;">✗</span></span>
</td>
</tr>
</table>
</center><br/>
<br/>
<!-- </center> -->
</td>
</tr>
</table>
<hr>
</center>
<table align=center width=850px>
<center><h1 id="resources">Resources</h1></center>
<tr>
<td>
<b>Data</b>: You can download the CrisisMMD dataset <a href="https://crisisnlp.qcri.org/crisismmd" target="_blank">here</a>. We use the standard train/dev/test split.<br/>
<b>Code</b>: All the code resides in this <a href="https://github.com/claws-lab/MLLM-robustness">GitHub repository</a>. This includes the code for generating dilutions and insertions using GPT-4(V) and for evaluating the multimodal LLMs. The code for training the fusion-based multimodal classifier is also available. Finally, the generated dilutions and insertions are available in <a href="https://github.com/claws-lab/MLLM-robustness/blob/main/data/generated_dilutions_insertions.tsv" target="_blank">this TSV</a>. <br/>Please feel free to reach out to us if you have any questions or comments.<br/><br/>
We ran the generation and inference experiments with GPT-4(V) in the second half of January 2024. The inference experiments with Gemini Pro were done in February 2024. The inference experiments with Claude 3 Opus were done in March 2024.
</td>
</tr>
</table>
<br>
<hr>
<table align=center width=850px>
<center><h1 id="related-work">Related Papers on Multimodal Robustness</h1></center>
<center>
<span style="font-size:11pt">The experiments and insights presented here are developed on top of our prior work.<br/> Please refer to the following papers and the associated resources for more details:</span><br/><br/>
</center>
<tr>
<td><img class="layered-paper-small" style="height:155px" src="./assets/acl2023.png"/></td>
<td><span style="font-size:11pt">
<b>Cross-Modal Attribute Insertions for Assessing the Robustness of Vision-and-Language Learning</b><br>
Shivaen Ramshetty*, Gaurav Verma*, Srijan Kumar<br>
In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL 2023).<br><br/>
code: <a href="https://github.com/claws-lab/multimodal-robustness-xmai">https://github.com/claws-lab/multimodal-robustness-xmai</a><br/>
arXiv: <a href="https://arxiv.org/abs/2306.11065">https://arxiv.org/abs/2306.11065</a><br><br/><br/>
</td>
</tr>
<tr>
<td><img class="layered-paper-small" style="height:155px" src="./assets/emnlp2022.png"/></td>
<td><span style="font-size:11pt">
<b>Robustness of Fusion-based Multimodal Classifiers to Cross-Modal Content Dilutions</b><br>
Gaurav Verma, Vishwa Vinay, Ryan A. Rossi, Srijan Kumar<br>
In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing (EMNLP 2022).<br><br/>
webpage: <a href="https://claws-lab.github.io/multimodal-robustness/">https://claws-lab.github.io/multimodal-robustness/</a><br/>
code: <a href="https://github.com/claws-lab/multimodal-robustness">https://github.com/claws-lab/multimodal-robustness</a><br/>
arXiv: <a href="https://arxiv.org/abs/2211.02646">https://arxiv.org/abs/2211.02646</a><br><br/><br/>
</td>
</tr>
</table>
<br>
<hr>
<br>
<br>
<script>
function adjustColors() {
var table = document.getElementById("modelTable");
for (var i = 1, row; row = table.rows[i]; i++) {
// Iterate through rows, skipping the header row
for (var j = 3, col; col = row.cells[j]; j++) {
// Iterate through the percentage-change columns, skipping the first three
var value = parseFloat(col.innerHTML); // parseFloat keeps the decimals (e.g., "-21.53 %")
if (isNaN(value)) continue; // skip non-numeric cells such as the "All results" column
col.style.backgroundColor = getColorForPercentage(value);
}
}
}
function getColorForPercentage(percentage) {
var intensity = Math.abs(percentage) > 100 ? 0 : 255 - Math.round(2.55 * Math.abs(percentage));
if (percentage > 0) {
return `rgb(${intensity}, 255, ${intensity})`; // Shades of green for positive values
} else {
return `rgb(255, ${intensity}, ${intensity})`; // Shades of red for negative values
}
}
// Run adjustColors function after the page is fully loaded
window.onload = adjustColors;
</script>
</body>
</html>