diff --git a/demo/tutorials/llm_notebooks/Visual_QA.ipynb b/demo/tutorials/llm_notebooks/Visual_QA.ipynb
new file mode 100644
index 000000000..7045e71c3
--- /dev/null
+++ b/demo/tutorials/llm_notebooks/Visual_QA.ipynb
@@ -0,0 +1 @@
+{"cells":[{"cell_type":"markdown","metadata":{"id":"D285OP467TeS"},"source":["![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAUgAAABcCAYAAAAMJCwKAAAgAElEQVR4nOy9f5gcZ3Xn+znnra5pjcfKZCyNfqDIQgghZMdxZMfGxpbbwhjM2g4h2Ak/Nol3Aw5xEsLu5eHh8vCofNl9uFluLhiwhUi4zib3ZomcZBMgARsjt4RxbGIritcSsiyE0GpleSQLMYxHPd1V59w/qnq6Z6ZnNJJG/Ej6+zw9PW911fueeqvq1Pn9CucASZJokkzZaudirC666KKLcwWZ+y4TveyWJeW4/lKZYYD5mI2m8+YdH61Wk3Tux+uiiy66ODeYYwaZaKUysNSI7xSVtfj4MCPi9t8WLhzY+sADt9fndswuuuiii3ODaO66ShQSM7lvvYj8B6A8/pMIiM4/evToTuDI3I3ZRRdddHHuMIcMMocgC9ysFwx3DBzVyFzCQBpF8VyP10UXXXRxrjDnDBJygdFyl4wiTS3egJPnYrguuuiii3MCPRedem57NHBk3A6pwLxzMVwXXXTRxTnBnEmQSZJ/xP2gaDjhrv00vTSigB12tVqSJNrcf/p+uiFBXXTRxY8ec+7Fvuqq+f1RT/ktgl40PogwbKn/XQgv7KhUsJwBJjNIr10G2UUXXfzocU7iICsV9AfnL4k5nG85//zYKpXv1pMksStv+uT8eKy0RtyWqU9U8U1cU5e9Mb17qtU7anNPWxdddNHF7HEOGOTUTJpKBa1UsC271kYLjh79zyL6bnefP3F4b5JzxLEPvrhw4Z/v7sZMdtFFFz9CnBMGORW5On1V5YLVsUT/CNJrlnXcUzXg+JfU7c5K5ehQ1x7ZRRdd/KhwTsJ8JqMpTW7dzlJc+swykBZ3HpcdAfcMkVAGLVerKHl8UBdddNHFDx3nJMxn2sHMFYrEmrbtPyQxtosuuujitPBDlSDXbwgqDo4grUTtCRJkF1100cWPC+aIQc4uZMdMLAhtzDH/lo7KdhdddNHFjxZzwCATXbuWCNZO8/sWBgdfUvhuCh75hN8mM8P2djfKp4suuvjR4iwYZKLXvq7/YrGeD7jbIBxF3NskyZZ/JTc9LkyBBdP5XNxBwETV8OwwcKJSwarVM6ewiy666OJscEb6bJIkWq0uXOkS/ptqaZ1ZSqsoxQxwU/f28J7Jxzil6LwnG/aDD2zf+rtbz4S2Lrrooou5whlLkCa+LmjP8ix9KXUkEloWxBm+TaTwnDsmok+L6iHcIxcxaBzP0h98bnvlxe1szetLnu0JdtFFF12cKc6YQbprjLgiolKECzXlwVN9Fz2kmdumyPyhNLhGmRhEI9XqnceongFzLIpg0A0s76KLLuYILQaZJAobIZFZMphsgnQ4W7g7ICaAqp2oXHfs4K5dREePthsnZ2BySdPOWS2+K5bTvLG5rcsgu+iiizlBziCTRyIWDpY5ursO5PnPic8QunM3ofgvZ46T2eSp2tB04iRJYkmSpDOmFCau44x77e6II3GZ0s+U0bEyvq+PTc/2Ic8tw5fGJL5l9ky+iy666GJ65AxyydJVuN7OYh/lM88OIQwjz42QygjKMJ6OYlajhzqhd5Q7qFPJO/Ai7Lv5fx7VOHO7CfdZZPJsPtwLe9fxmb2D4H286IuJWYTqAvS8BbgsRmwAGCTL9gFb5mhuuuiii3/lyBlkqsuZN+8OsvogIaqhOgqhRikbJUtHca2TpaM0pE5afzBJNn5m/bb7VGkP8p74/3TtcSapBhODIjvDvj9I+fy7kbCGtF7GrBfPYtwUc8vXd3AIEdC5AEYXXXTRxZkgZ5Alt9yg6BH1sX5gfsHbNOdnriBQ7jVOvpRWqH72rHVYY3bGSytFNBqLkXSQrFFInN70hBffbmiYZYdddNFFF7NDIUECJcgZjytNxtiEA7iRpYqQTu2mubPMsi2AIGKz5LMCmOKmHeMtu3yxiy66OAeI2v6eIthbirVlRGGyq3imlMHJ7bbM60ICzMuatSrsTlmXRrFZqeNddNFFF3OIXEXtIBNOz5CauvfZQ0TqANXqRH47qyK5XYbZRRddnGNMlCDbMUWY7MyR2r3Ys4XjiKC4r61UPnMQsrJpi0lm+olDpfTE4Wo16cS6p6Gviy666GJuMZE1+mTD4/RcyFWsGcRzOpCWAKogHzGyjwATdPbg8QF06d2Vyv2fn75WRbc0WhdddHFuMclJAy3GM7lG4xSHSwp5QLa7W3uwT4t1easHkem1cqHVrWMi0XIXeY9Qa/LHtmOno+cnH801wydt6wa9d9HFjwgdVOxTOVya8N2W1YdE4wXi2YxH5BFERidm5u75/sVPDmAZIEsta/QC9YnHdex9GhrPHJ2YVbH9HDCsRG+6aaCvWg29k3+pVDanlcrzx//lMMr2eW2d08SVMP+lnOuPEdoz485Vptnk7LvTHSdxhbvJ04anw91nXm+hSV87XaeYl4kqdrsXe4oGOy7iWZWKVbJtu2HwfZlnG8VZPC1RCuLgbgMg/ePVfMaHLAZpfakI5gBxTOvHSUzwHGrY0zHHczXWU08tKZ8YyX4f918uwt5VwAwipfF0tbrkvUmS/EQzyZwBJkYClSo6NFRELly0FtjNll1Q1P+05vz/JJ9vF2eARGxqrYV2VIqaC8nE9ONT9lvUmWj2u2VXG9/bDbuHLO+bKf1Ob4OcUqpxIiOrVLAk+e2HIdl62WVLykuXTkfd8wCcGB78UAjRfzCrRyAzVBGapTR4jpjjbbdtiavVY+sybIUIRhaADIJHiB4DHprrMYeGxqK4HF6uIbrYLVMpXgiRBixr1EulenzKTn5skWilglarS/qvrty7LFTlNSby6gWLfJkg/Rw7rrB4FOG4kR1av97/6aGq7CXWw5VKcnxGR10Xs8Omb61A9l0OGXhQPv2tnfzOq/fOWf/JIxFLll2CPbsq3yCK6yj3f2c7d7z8xCmP37Ir5lhpGZEuxp5dCroAedl8JJQR78ElxTmJ7x0G389nnjuI7B0i8eP5+DMwysSVnzown/i5FaitI7rwSk74UpA+xFPcj7P0woPw3C42P/c0YfcBEj/R7HN6RuU+KS6yybgKKRVyzpwk9tRTjD711LQUKsC111nqba6Yyd7vZnvWPvEp9J09KpUkOjR8qC/WeXeKh7fnGToOLghR5GZPcg4Y5Lx5wTL31C2z3BSRM0jLR09H53rAHwKaUmC1urA3w25Q4ZYS4Ro3WyUiKqJ4YcMW0DyyIeBqtZLqARq+AwY/BTz+Iz2Rn2Q0JSd/7mpCuAejTKlkYB8C5oZBJolywZJBotIHSeVW8BSIEB2hkd4BfKHJJzof78rRby9nXvmjZI31CPNxi0GLpBAthCEDF0PCMCE6hNsOFu39Mg39exIfmZZJLn52HRq/DS29kbSxGhFFFEQUHBzDHUxSotJBTP+SZbs/1mSSE+MgRVpSZJP5TG5PqEp2ahWoZVcqu
ivY38QCFq32KVleJ/rm0ATZM3aeQkCQCCd2J3aIEVVkJsn37CCtOyEPgZrgiPrJxBe/uKScuX44aM/HwX8NfBU47hlmDSyr5x+r45ZinoEQ46zGeKuJLYcfrsnjXxaaaqUoqhEiMVEMOoPD9ExQ0lVIuJjcfFYGIkLUj+hNwKn5hKS9qCwDGaD5rIWIfBGWDDzL81OiHiWEftzW4PZOeno/TmQbedm+pR2rj21+9hqi8iZEfhv31WgUIZr32RiDtFgJQRVEIpxVGOsIvdOo2DBVahxvnzkXShL42rai+0nGw9MNE+pM31w7aQzM8WbON27F2+aHgJ9873zTrnre+endIfT8dpaNxTiKoHnWapvtuWi3NRRxQ+WAethd9Ne1RZ4NJrAOn7uKqYkra3dHHLN1pPXlxeJTxRgZmN/A//vcfN75yuHpO7kb5J2FFJfm6cRwgKzxNwj/E6eGiaLWh6SvxFmPllbgBo2xBcQ9v0Wj3s/CAx8i8aFxO+aSfZcS9XycrL4OMyOUFLLDGF/CfRduI0BMlr4c90twW8d5fQsYPvY1vvuq4dxZNNmL3ZTOxnmYTGqfBQwIs+lqMmMYyw+cvEs7fXMNV/WiMlBLqJbTZ+b/SrFlF9HCkfR3Qii/O01PxiIStU+d5Kq1tiWdGoKKY/nLCEXYWS8xVKkkUdcOORdwxl/ycyk/vhAW0Ft+HZmVUVXS9CuUoktxHyREqxitryfxvwdmthU26z3kmtROTD7KC684NuWY+7/TT73+a2j0XsxXkDViSvHtZNn/4MIDnyHxlEXfHsDlA5hdipmhoY5nW8jC3bzn5QemjJ24sujAcn7w4luw7AtTnTQT4iCZJtJnbpjDqXtpqdo5q+yZ0OrYyU+usNUBk+M8f7JQLOi2lhDdlqVjfcJEdU5EUxE9CLbHPT3miKlIHxIGUF2M23KgTJb+c2znDXdXtpwrTHSyzgkSMe57bjlZdmmxxRC/n6h0F5ktQAOkfhNUv0Jy/Wm85DwizSKuQ0naH+674bsrhlny/B+TvZQSlT5CI+1HrZcQ3sBIbQtUh5CfWUccX06jDhqBsJVG9hGGXnFw2kLgL6w4SCL/9+TNp1Gs4sxQVAxXhe+rBMuQIrB8qoMGwAUTFBEZcer5pJ6qNNo5oHvSALPeczycZdK24vuslZvJ/Z+q79kEn7diECfHJZ4+vdUqmrpfEcxX57p06zeRAOJfERu7B0r76uXGcM+YGMRlPOuzLBuUwKVo6UqX8Pj1679bb94/pzqHs6F5ch/5N0yOx5yu/5lspDPRM/m4TmOeaozZn2+bdjgXKnYzHCYK1yC6ODdLZUOkPEpmr8eya8hSRaPXMPiy5SR+4LTjIrdhU45JNirPL6mx8MBfo+k7CKXX5GdkawjxAi5ccZyxxsWk9aW4QVwe4eTI3zH0qoP58dPQMA3j7BzmM9lDfJYe4yRJ7NprP/Gwp/V3hKh86cyKtqu51zJPv9DosSPAYO5JnkRnRw/73KEps+aUztx/O5NKinbTNzXl+5QPcbOo8ERUq2iSJIz3P8n5Nf3DO3176kOXKLPstxOSJNEvPzHQW66Fi9ysb9zmSG6gcLNhj/QDgeN7Ad5wVf6oVquMAMe2b0/23XbbliePHv3eFqE80hw3/y5oSzoO3U7EeJhFqyrU7BaBa55ra15a85Mk01/D6embpRNz/LgZmanl3uDmhsljnQpzrJWMMxq/CRUgMpxvsqh+jO/V/wcS1fAsJu5dRnbychLZf0rypqDDGlOJ5PNwdOMQS57bQ6nnNaR1cPqwrJ8fSMw8/Rncy+ApwgjoPujAbDuez0RMVLHbvdhNJjQeG3l2TOjrX//9pyuVe/+NWe0t7lZkjDTvvxZt4sFcbU9w2f7El39vhJvfNJinNLbR1ZG+uUXrwW6Xb6dWLE+SRLfsWhsNHj0yuH7Dp1bLtvCaRwivuA4WQBY/4jricOhasn/m2vt2fPnL6QFg+HSlnaEh9KuP9i+9Juu5YSty5XUbfCnmPLJN9nuWfSPL0scrleRwXhkp77dS2bQiwy/11FJVVVOxrdsye+3rP7Xz9a998UheZm7higy9/LrruQp0BdssAj3yCPbPlcq926vV3j1JktRnS2vISmURHURzb7XguIuJBpzs4Ne/dmRPMXPtqvN43xddtDtNkuRYs33ZZZt7zz+/foUZ860qputVATz69KEXLxh8ZvDobhsbmz9fe3rWbt2u16x3+XnB5rNBRrZW/cA1lU8+GNGzE5ITM9kyK5UkeuihRQPr19+76pFtevl118urcJaSe2VrW6scuZb0Wat86tFqNT5QqeT9VSr3l2H0cjMbaNJnKqbmCvcc2779vY91GqvOwou3bpPl11TMqIKuV0313oOPVe/aOXX/+8uZ1i6Rbb6Y9cWEVc2iikZZ+OTer3/t93af+so0X/fMnQ3yvj2X4H4NaUMRMdz/jtsvqrP52R2E6ABuq0nTAcRfxyef+wrHV00fjnMmj7Fbffx/kTpRGOWkKm5Riy+IgkzJUJstpqYaTpYUJ4f7nAWq1buOAPedar9WDF2HHzvSdy6NkNImQU50FiVJol/9av+yhfHRm116flHcLgcGkOZNEEAEcVdcUonCgbLKX1+74dN/Ua0e250kSZ0OaB9RALFQvmBwwVvUone523rRkN/iWkjiwm9GpWg7LL4HfusrkEuYW7dlG5Tojzx4DUHVzUTiUW003l+tLvxLM26UEL1PsHUQehGseY754pPRPhi9p1rt2wIc60DqjBhfkUhcPU9HXXbttYMXv+51Q8/kNHZUVydsmzcvW+we/YEIl6q4oYCLikd/0//9F38XLlhe6gn/HuRmcVla1CzNRxZXNfl3HvE3kl2wqVJJdnZikle94Y8HsrGxDaUe/SWMG9xYIKoTGEkeiqcaiR5w2Oos+KvLLttchXqvubwHid6q5PSpuEnQ2C3aWakkV7WPmSSJfvUbFwyW0ujDbtnNiqSIqASNStjDwE3ttFUqj0Rp2LU8ePRRd7+6SZO6mmsoq/EeYBYMsg1z5cVWuYFSOSIdM5BDYE8CUPf9SGMvImuwFOLyJdjoCrj7mbkZeCMs291PI1pNVoTqiB7ETx6j96U6dv4xJKQgkGXzwS7jwgMPkST1001TnL4e5GScczvfRJyWLekcO2m8k/yfJFqtXrA6RPGnIPrP4De4eb+54Vkzxq+BZ3XcU8AjsJUov68S3Zux4M1ffGpJOZfiOp9MMeWxpPZOJXwUZL27q2f1vN+sgWcNwMuOvxENH69U7nvNuBqdaU01KEgZJ0aIVUOs7ksz+A2Nev4Q/Grce90LWpv9muFuKyF8xCj/1k03fXL+bOIR43qtbm7H3a3wSkPLbCD9ov7Rr1YHr9iya+2kJYc7I4rE0JCiGmHEOLEEjZQwX+q22qV0r4j+O5ylbpm25iWPrQTvF5O3u0QfzbKB1ZP7r1TuXRzX7UMq0cfBf9VhgWOYNcav43if7ubmy8F/TSW+5/zz7feGFv70sKg+JSKG5/RhRSygyKpG44LBibdNYpr5MlFdKSqt
awORO5dWKpsXTKRvm6mzGMIyEYnHx4AyeE1cpkioM6KIvT4rJIly/3f6gdcXy6AoIjtI64dJXHnx+SHcniCKR4EU95WIrJ05x7oN0wljSaLjtsK0VKHUs5YsNZAU9ypmx3j+sjruu4ii44hAWu8lKr2Z2tjVrL0tym2ns4+rzXecHObzI8aPX9zb1HmpVC9YnRE2icrNbul890wR0yYrLbJFtJ25upu6W+yZXy4e/vC8kcbNUyWacS++uhuOrBb0P7r7cstSLVxammcESB5bKK7uZu7Zmgzf+NBDixbkc+i1PI7eQUxx1KwRu8htKuH95o1lZinuZjjmbX2Cq3umjs8XLb3rByd1PcwmaPv7I0L2zyI6MjHeFXAzRG6MNHzugqGhjZXKp9aQd2rkJocpfTcaYybjBUscxNUtU7N0tbr/IcgVbhYVvNha8yKKgONq1oiRaL2WSu+f2HuirtHHReTd7tni/HwzBVcBXFAR1bbzUMSa46+QEH9w4dDQ73iWPSOqRxAMseJ6ZIjo/FJJV7aGK87RwnJ3W+qeX5e2/QfNGmsLm2lrPlJdhtsCt2J/DNEA5nvghT0zX49JmCsnTb1+MaXyGiw1oEaWfoOFHM+LSVyfYjwOHMctIksHiEpXMbCvb+blpAtMJ4s1+cLi564h6vkAWTqAqqL6NHbyAY4+MAoYFu3A/BmcCDMQ1hJKH+NY/MbChpnHSs6Clok7zCgl/ngwz444x8JtK+snI0kSrVQ2rXDCx1R0vecXILeL5a/nVELphIjsNfc9IcRDImEiE/RMRWWxEG2+9nX3XXLyZKaTw2HGz0noBe/L/1VUo1SQnKG17SqCmmdpFHpeE+L0LUmSqKnXJ3QoqHtWBrnULFuGmZL3aaKKeMs+JCKIiLplkWe2LEjpjmp14eBkp087kiSxSgUT9+2CPi46yd6UF0lWz7I1IcT/u0v0j9dtuO/Prq3c9+bXfnXJsi1b1kaTmWSppOZNHWe80ImD+EoRvcIsNQRVVUSDFT/bhIQrcfWsHrn7r61ff+/VkOhll23uXV8Z/AOV8KtZNtYLFo2fN2IaolGVsB9nt4TosGioC0W/goJFWVbrDaXeD6Csc2cvIupe3C3uphppBs0QGBLy1Etcf8GzbAGeL4ZXVLMy1aAeqOQ25MSqVbRaXdiL+s+6Zf15VpxAca+4yN9Xq0n6Q800ShKF65RM14MMgqRE8X5UHmf32nSciVn9ScZGnyaKQQKIVuixaSs2FCgW4ZMyJZayaPEyNn1rBfftXcnmZ9fw2b03sOQ7mwjRf8fSy9EIgj6O1d/LnWt35IxPjLtW7SPLPkb5vL2okku5cimBv+Wz+/8rn917Awt3D0JVT8UoO8dBdsT0XChx1yLwfE6QnKtyTKeBiT5yz62CrrlDRl+8WQjXFA/nuKoooiaqO71R36QavknGaCb1derhXaJhvVsWk8cwqVlmqqV+Se0DIZTeZ3gqjk728I8nZmrY75buMOe4qi4vJKeBPPOkuZdHZo35SrjuoccW/XUkmRVse1IuRe52EpW6oI+aNQ4gUtYQXeKWXTJZzc+7tyvAlkFy5NRe4Rf3Zb7gc0HjNe4sds90vB6ooI5hWcMQ6ROJ3i6kb45i/+bCRcf/qlod+AJwqOmpbzTESrGk3kZ38yxwN5HIVGSve7bTzU5I0NWIrMOy/lawQ26nVonVqN8CyWPnnffpimjp7WluP8sZjjuCGnAo8+xz5tnfSxSOq9sKcf6tiLzV3fpaHmGP0sbYAkF/CU+HNET1jCxu7w+4qDlfCfDahs0v9ZTWuhvuaZt06nlMs8vP33LL5t4vfvH5WrWKXX2j9pbSsAo3xX2cRvdsGPWvz3wXT4OzYqcb4WX7FuPhKtJ6nKuxjd00xiZ6qe+6aIRNzz6I6M1kYyC6CgmXksie6SvxCGCgcjla2gyhmTgQgffhtpigfWQpwGG88RUyPs6RVROl6MSVIzzEon0fpjzvD2iMrSgkXSPSd5Lpmyj1PsqSpV9G9lQ5fGR/EfIwTbmzM1GxN26EJOETu04ul2dH3+S/IhHuhoQzn37PDAKf+NWxR39/Tc/TZ9zPHKAV4tPGpAQbPHpk0CX+JfD5tN9qriYiJ9wb/3HDhmOPNjfv2rX20JEXXzyo5veAXOHuxUPratYwDfE1sTQuMbfc09tWetidIutEdpqnH80auj2ObbQRxgaiLHqnavR+t6y/RbXg5mgUrQhZulhdzCfFIgKIYwh1N/usRX5P5DIE9ahhsiYS+SOQi/OiGQV7dVPQxYJeDDyZJFPDh5oowmSoVuVLnjUGRMNHRaI+LyQ9mhlJuRqf21CFPjeviMrlaPn69Rs+/alq9dhjlQo0GuDixaJtE9ITTTQC829CfaNQ3yk6r4bbYkPuFA3vxrK+1jUS3DMQW1epbF7gkv0i7oMTcyDERMOwe/qpejn77BNfPj5S/HCgUhnYax56VUu3uzVyVb4ZDKa6yiwbVbeaIHFz3twzcF9dqfzU/GolGSZJrFTZNGDua5quxXH2KCi5mr36e99rLAP2QWKa3dcHvpKiDB5Cs97CHjLfe0axn2cjfiRibPrWKuKe1aR1I4pr1Eef4OjQMZKLWiXDAHTvw2SNEZBeNJSx7A3A508dD6n9aLSu+D9/EIpsXxr1lHweTiD+jwhD42M2+22mG76w6i9Z8u06qncRxVcDZRpjIKEfsVuReAORfpNFS/8W+/W/hOTI5MIas3fStIjPaSharqzE5f0CH0T0g4h/UNo+p9NG9QOi9gF3W3c6FJ17FGxSvJYSLnbzy3MnRpukpaqI/7Xasceq1evG4yIvumh3uviCC3YiPCAhGqG4PXMV1k1hIHO7HogmhDMB4KYhOu6SbQr0fimOXzherRwd/cbDJw6JN+7DssdEI9zb46QwdwZClg20r/Mz3qNDblPXrZbJPVE2dLBaPToK3x95fWXom5h/yt1TL9TUNptqZMgrZjNbuap9dHRkJPoTJ/tdYK+GWIubfeI5NhklmbpZn3t2q0rPPSkL3ghAb/uuzZNonoupB7sbjldh5ESlcnQUjh5Q5L+CPENbFXvH86ElLDUdW6caX+JmOm4eaaq41tiRxvqnN13ZZI5JEat5/DCBexxLc2bbJMrVzfpBBtzTWq5mA1DYFcNSiBZX8pU71Sxbi2XL3QxcwN3cyRMn3Ey1NKAlXdOkO8p8qbstd2tZs91NPfUdUDsx1ck3C5ypCJO4cv93yki4nLS+vAinOU4WHodKEaeZaDOPmedX78PZQVTKGZzZhsK5MzM8HSUdO0ha309aP0BaP0jWOIGIUe6NCAFCWM28+R/B5HMsfnbdxFqStOIan/+fX6KR3oll7ydLdxL1KFFJMQNPe0nTDcTzPkKJTWzad3F+bMtkMdFJMytPdfHMFXMgSorIqED+cUZo+0xoU7RpfSb9PuowKh3X3v7hYrKKXbzv64peJyrz80IWkjNJF3PLhh17II+N22btQc4PPLA7bbhvxX1IhOYDhLtoljV6Bb8cvJ/2cnCOiahmWX3Ig26tVr9br1aTwsa
TWLX6vhMmfFk1dApk70uRPjWxKdIjmCg1cftiFA0drFQo+kvSJEksy6wqovtVWyFN7m6ImogOMkskSWK33PJ8bfsjd/1pGuQNZul/EtHdGnpG8WAgaev9InnxCnE1y2K37OJI40/Bomva+2wG0DuF9CiyY/vWux6qVpO0SX+lgp1/vu53T3eIaJ2mKNw80r2XNLrW8pTGCVCNMOVvH3voPUNF8HdxbP7/9q13PYbzpIQSTAjeFVWVsjsHRQPgzegzk1CanyKrxvcN4ToJIXYc1Qjwb6roweZS9OY+X+DSSmWccV+C+4LcOQOCpqLhmEn29Wrl+8OTVwSdHs2XPGcnQY6MDRDF16MaUeqBsZM7iE7sbDk/ig9AIinIA2SZkaVQ6lnOWHrD9J27FXRuh3Ataf3nSMd+lpPRzxHkZ2nUr4lUAr8AACAASURBVOXkS/8HIjuAlNEf9FMq3Uyp9//js/tvnVJkNxEjuT5l6JUHOLzyM8ThtaT1X6Y+9nlK8UE0GGZG/eR8gt5KpA+y6G2Xw8ZxJjnNu8QnqduT2y2IuYGnhtfBUnJ5tPPH2769rQ0pWNGWVPxUl3ASPefAf9SxSyNCfDWiJmBN+5yoIqqHTfwAdPbC+1jPQbf0cBFnaOMrO4orooOO9I+rn+MQBEZcs1pnlVYONetHTiyI45GgEaRtFq6m1wIDHcnwY3n17ok9RlGoC+SFSGWCGwiE0yrc25yHbzx858Ht1aGN4v4rno19VFQeEo0Oi2hK4RgaL3snglmmDstd+DCjcVSYGZjw2hJBjCPFSBPu48sue76myAtISPPzLc5B8nMQZRVu88enq/g2S8F9GtNOPoaITPrdEcFAyiqyF3dEirAmwRR6BVlRrWJr1xLltlyMgkE6uh2V/VLEznrWKLv5RbCkH8Al/KxoZDhWOHNURA+QsTe/dKeTauhn96wkYvREK/BsXe5gQlGG8f71fGbPGyd8Fu99I5959k14I8ZtBFFDxBC/iS27TnEfSUqqdY6uHeWui0Z438tP8K5XHuLoXzzO0OGP4GPvIEv/BNE6acOwdDUiG1my7JKOITxNafKOl9c48ud/g/a9i3r9DtLGnxLFJ9AI6jXQsJhS+WMs3bOqGZI0UcX2JuMZt8xPbY+jzSvj1BCpC1ITpCZyZh+EGlBDfHoJshN959SLPSFPPHZncOJdVgwucjzKQsfAb0isp+fQMHBMVWkvC+wO4tILEkNhMyzGbf2djjKvNfdoUz+104RMYbyGTX64kiTRRqTmkp9H03c/V2+gavWF3SLH/ou4v8fTsd8F+WNURmj6porxRFDPUhC9JoR0DWitKfw0YwUACFNfpM30wsyzurTJSs1XiLur4QvcPPY2ppFL9lkaEXUMiG97kRwZZw5FzwV6Ef8ndxsZZ+aOmmW94K+47JYl5YGBwWU4a1pFkQ1RnkD0ADC+sJ1GpeVZyJYmSaK4r83PurjOKlia7g2hdPA0pr5F55nGQTbVV/cKyCCWKY0xQ/RWouiPCD2fm/iJ/yj/lN6PWx9uSqMGGl/B96KVM4fYOJTHtPOyC9uMw2v2kcUfAdtCFEd5LCSXIvqOZsjYVPrb7J53Lh3lhVXbKcfvx+obCeEQGnImKXI5pu/gwgMxietEFRumMsJTqN2ipDmDo+ZCzdXqLlZ3L75ltm3qAjXwus2kBHSi7xxGII0/jrnEGkkeqNuyXTVvXJd6o6EdCysAVKuYIB0YqBgaVCZyiVlh5uq92Sn3mA06BsmfEZqmgSStVF44uGHDi19qjI1+yN3vEuFA4T0eH89xVKLY1K91UqWI5/TCwTPZMz89/cW3FDpsXso8br2AJrhL0jRk07zkmpCxcRW6SamBO+UU9uCyVzQycTcH3LNYkRXn/yCdLxGXiJb6MENENEsbdXWextLv5jZJDMHcWCoNX/zEE6v6EFbiha3U3VTDCGL/dGYLuZ3FszLOYPQNSGFL1qBEpQFgGSJLO390MSGKgNzuV4oW4375zI4agU5l9NvV96MrhsjsHiwbHY+Qc7uVe3f1zZgt01L/jRUHRvDz/gRr3IOEEUQhrZcpla9mNFsGc/AEpSmIWj2gGJh625uh+aKcZdudVHBcT9MGOUfPcLWKVSpphER9orlHeFzykkLddclVhZz28ZqGDr2lkk3jUUy0Urkwdk72NVlqy/nh6m41F6nLhBqJZ4hxlTLMvN8s0KJzbkX05hxVKsnw0MJlWwaODcVBo4+5Wb9IW9FVHHHWgMduTRUcaIsBPRXG59llvOakC3VEwFrsMZckJY4yZszbdbfzRbStXsr4CGnJ5TBBtnor9lFxjBAPYukCsNeqKJm4iUQK2d5K5ej+rdsu2Ccan3DL+t1dRWxQRFaMjIwckuCL3VtXwtyPoZxe9kzz/Jrc8UxtkPfuvRT8NWSN3K5kthfP9mAetdJrOw3tA2i4FKxMo94P0ev4+D99ie+fGMkXy/r26dHRYq5P80f7dhNK64qCFSuQsJIkyVMaT/UCuf76lOQRWPgzX6As/waXDQgpqsvRxjIS2TdRxT6ddMKNG4tDPBWRmkNNoO5IzZGaS/E5jTbqNReti4fTu4RzJEHmapSWaa7SKC0lU3Nj4xFROdQ+Ty0Hji2uYx09dEkCjdLIgIsvNjOgXfoUHDuheYXjlq3wNJhS59PPOM3whNPs/9Q4VQBztZqkg0d3W+S6WzU6RFtgeZ6P7gAxPiGb5bTombCvkJfTcx8SpD6+zEfBdTVEajbVeVOcSxF9wEpErKm+53lNggjHwWrm2T+4pXVENF9SRUxF+qGxGPe1ZllhRwSQJ5MkMXU9KKJDCCaCOl520VeGYKtVS3mWkGOiQS2r71Orn17udfPkzxYRNxKXI/KMpRouG3n+lb+Enn8bPaXpP0HuIpSeyV9KppTii+ntWwnbjLMNoHbJFwVzz71sQeaf4ohJqBiMHaFeP4Bqmj/O3otob37Krb9nhsjNTWuKmEEuR07Rfjrxu6nPjpF7XSU79xLkxLp/UKmgSZKk69dvWolk42EW446/nA8edOGo5OEhxc+Cu6mIDqpwCbBzciB1ksD6DaxRiRabp4wvN5BXuUnF0n2GRHqGrOicmmDPoP9OZdSa8zxRwk40l9qzMnh5siMwd1n5CYR+0dzHebr0tDQANHegaOruB1TCCcda0qKTB4wrVyVJ8qVOmkClcm+fua+T9vvZx42jB8BHXMMeNfYDa8wzlTy4e74RLhVhZV60Q3C31Mi+AZAGORwsPYSzGjBRAdFV7vYDFaWotI5IhEj69Wr1fSfOrIiwnNnNkiTKsn/fT+Pk68kaoAFE9yAndwDw/JJa5wML5jfwjv301J9Gw7p8jRlbidvFcN0cxDrnWWb5v2ago62c71nWg4t+2vAf1HKeZNY+SR1Y48RMjqntAm2MXyH1fGU6y4qU2BwtBaa1TSe1WxARyzNWbAYJshN9p4/JD0ClklCpJLr1Eb9LVPvNsjw+zwsmaKkiPEua7XMNI7j0uuQ5u7ntSGNxfxvwp8UImveLwoVRaiOvV2WBu1vTGC+CqZaGU8
+eELefZ8JbY/bnNc0V4mwtKGf2LCVarS5a7mK3O/5MpXL/1mr1jmm88HDllQN9mcstkqYrEJ9EsIDotwS5zJuhQPlmbb+zZsbE2VEJqWm6C5FDIEvHexHUrAGU3vjwwwvur1SS/fnSxq2eTLhRJVpheXC7FhRansrOznovwyHzuro+jdvaptfZ3frEea2jA4ghqoAcDsiTAFHmQ+bZXtFSxTyFzFXUVpl5LJKNu/TMGmTIGdZXPxsv9kZo7LuEnvJqxk6ChgjsSYLlDq0Z6ywmyvFVIyx69h+Ie9/C2EvzcesnlK/ip1Z8gUsPjHB62eQth9GSvQO4ryJLc6btNkw9O3L65/eDXlwGsbQo2yajICMwOdVwfIXA5k0jrfY0T4umpRTSmqOWhzugrcfcaQmUxcbJAmZ72y0X1CSawYvdib7ZY+3aJB4cXHS1iS/1NN3nrieiKMRbt/pKUb9DVG81y3TcvuS5ucXhYObp0yX1Iy6lRxG/Ec8lcgTFUtMQ3bi+cu//1hjr+X96eg4VMWoLyyYnbw3S83bL0phchcpVJtHIspMHAjxs8PNeLHrkM7C8TpjgZsgdSLTbICevHHk6aB07OyRJYus33Ls60vPuzGxsmVntmfWVz2zH7B9V2Z8GhqJMLAvSGzJfaeLvwv1N7lY4UYq5QcnS2qiKPezwC+30nO55tJ+/4+oi+ywd+6ZoWGd56FbO7NxNlLUhkg/Coru3bHnhcJKQVqsXxnnNR/+ISRp5U5b1XMbVEO03sr+76crjI7t2ra0NHRv6Bwi34pTzQPJ0PrABsd7WlZKdwJE8E+aukfXXf/op1WjY0rQ/L4jhqwVZbtbIox60hFu2uyRHnzytk++E5vM203KsTSSee5Nl6XqcBagaGp2g0djG80PD8MDMYyWJkWxULNpO/eRhRPoRNczWMy9dyrZte1j0zkkHzeKhXvJ8GdffptSzgEbNiGIwHuPFVUdy73el5c2eaclZqkr2skvp6bmYRj1Pa/TsAMYhEtepSy6cUT1IrUsza2Py8ZM16RnahhgK0YTg3kk4i3qQuXTzU72m4VfE7TcJ0Ql1GTUhQhlAQtkss0lDGGAisr3k8QGIR8xH/0IlrMN1QdOp4DmTBJcPx3Hj1akt3HbttYxmLlep6O2epUvBtWlbaxaeyCz9XP1kOtRT1gjBcLS9HuRsMZVlZMW8hDNijNB8lGdPS5IkumULkWSsymx00N0jCdGlAusMUhOGg8mwo6mYlc19UDXEmRW1KNqcHqKKW/b5RoPDUezllg9b8NNw0sCkF4N7/gIJ/ldCuFHUV7lleYiNoG5ZJITbHR+8YHDwi1+r+rGgtVWWydtEdY2bjWsADiaqdcuyh+aVSzvzEKPd6QvbFz0j6BHwFYVwoUBuG3Mxx8zddo6OlIab8/a17faMWXZCkCKHXGKYGHcqKtXqI8k06uypZ2EqNkIyUzTARqCqLBlcisZXktbLedSF7CewO2dC15/aX5CIkTxygMVLHyOetzZP99OVqFxBkuxm0+3ka08V8OKZvo4iYHsjucpaqM6Lvr0Az94KelcRagRuJzC7H6rK4LLL0W/3k922k7suOjI1pKjoKxHj3r2XEOR3SRurwYxo3ijpS9tYYIcY6iRBTodpHDgaxtLM4xqSV0M5mzx4AcMhUzk9G+RpPC31uBzHKQs89zAOoDIghSrtZHnwdrPb3GZlInoos/pfBV48AZDFi/5eG/yChNJveFYvN1W+/CR8vov8RkDfCpK6WX9epqrlnRUXE1V1S78QGPt8Z4/zGbpG5Ix9lB26On0MDv5Ur6Gvxr0XUMtSy/3FROLaj0o/4uNOmMzSybdWKqqK2ZMe/F5ixnn9mUnAHc6jAcdeHHx84cKhTaLh4+QRNCYi6oJC1gv6JhWtAKPu3gfEZqZ5EXsHxDSUEOdxs9q9Dz74nuMA1eojkbL7oIscQFg5ZXwRUwnHzPyfb7nl+RrkNuqr3pDuK9X0gGi0sjBUNZlwbj7FasC2fP8zWXvHARRLI5yL2LT3ZngO/Fe1df81K+Y3289C9DLDWIPIxUVoD2SN3YTy1NUBZ0Jyfcpn9j6IZe/GHUKIsfQm4E8mO+EQYsT72D04zIW/njK6OyJ6Wxn2LiCTdZTC67HoTbgtAIworuPp54nqW7lwRR+mb0PCrdT9m2za8yD+rd2kpUMMMMxL56WE28qk+xZz395LifRdIFdjmVEqK86TpKUt7H5FSlIwtdmZqjo/sHWLLcJriMbkthhMMHVTkyh32bppvq1gPqKFimJKsX+zPwXIZggU74RZPjdJkthrX7u5TMziwnsMnqdw5fbrdkkjV/5D6BnNvPG5gD7ctpzB0A03fOIPGo3yAo3i2y2tNyWaXDV3U3fpQ9wQz+v3FZKPoIiqmttXAvLhavX7w5XKwl6bUUL/yUA+v5+YX4rDxS5mZm0vnPwFpLl0MEntzf/Ns0tCrJ6lzxD8w4svGHzm8IkXFnQebXbocGtYCKndfvvu9IknBv7kpZPyStHwW+T1N1NBiqfBcJMyeWFammuku+dZPSGU1PG9Da+//xtfP76nybSq1W122WVLDp/Xlz4jGq5xyyLaXroI6iIHVdnfnDOAN1yVnPhadeGOoGFDXui3FWCV2yzZL954uv2Y00I+x0paLxNKt1OK3zTrl3CWlUkb/eBQikcYe+kJDi87cdqLcIlvJ02PoNFg7qxhPZv2DY4vP49ofhvI5YSwGWSYWqNOiCKM+USlBZRKg2SNATzLmWpcTmmMfYGGf5yja0+waM9yovJrEF+KyFuJz9uAZ8fRxnFG/BiM1ElLfYQwSFxaSv1kwWR7FPchxkY/xNE1+5vnNlHgG1dX2yeu2e7MhcolTOCkZz7q4qPuPiomNXcZFfOamNda2/Lf3bzmxfb8t3w/cR91l9FsxjjITvTNHqVSvdexQciZFS4mxSdPe5O0CKlINcRDDat/eNEFA/8lL4TQujGvuebEIZEjv25p/ZOi4VirTmOzVqNT2NVM0BTHVCOTEB9yz/6vQPquavU9z7Q7AYq0RcPF2p+pjkGzraMoDMtN+ovtgbT15kvHf5dgrRTCTjjJeICqF7RIUQl4Fo9DVupRkFS1NKIarIitMRFJBTWcPG3O1fJ2HjKjoZRq6DnmWf2PLbLbtq8/+vBFF+1uuw/yfvL9i3Oc1eOpNK9JM60xyyIFuPLK4yPnzcs+hGXvFaI9QeNiPClSIL2Nkef0qqppKJ2wrLElqzdu+Ub1xR2txcEAEnvqqedruD2hWjohzb5a18c8G9sD9XEJrOn1D/A1MwMN7fsX9gd/cmysMTQ5rXLWEPL7BAHL+qifXEy9NrtPkzlqgLQxhPmjpx2ek7hy56uOoeEhQpQ7Yks9g3h6I9Rb9ImmqPQTQoWo52ZKpbcQ4lsJ0QbMLqZRGwSUuHcUZD+1l95Pze7k6CtypqZaJkQpUZybIhq1ftJ0JSJXEKI3EUpvRsONWHYJjbEBRCGeN4LZwzTGfpGjax5vJ7tDPcjJjHBm8axu5BWfFdP8T4H266gdtnVoN3OwZ7JBd
qLvtKSvKBL0sKiWTaQPtzJ54QkDqSMyjPsQlu0Usb94tPrbDwM8MMkWXTwQtUrl/g+kfvKL6nabhJ5LgWW49UlegFVB6yI6jNgRS9OnTep/dnxo0WO33747bYZqnH9+ZN//QXZYNX7aMFQL35UEGo2TB0qlUsfsjgaMlDXeIRN0VDFERyRNR4AR1Z4draI2CrghOuI6Ntxxek6GNJSj/aj0mQYTXB1MpaSucqjt3Dvi8eoLB6+5ZvBOVasgvFajaK0QBtyZD152L7SWfC2WuiDH3bMhz+o7UR5UOfbQhmuxR5PEEhK9+sYoVQ0HBN1pmk2gJ5NakW43MaQqSUA0OhZC/DRCLG03mkjpsPjJ0eYSq0mSjFSrfLbuCx8LJreFKGxwD0vzXG0rjpVUJIwAx9zGnvEs+++qjYe2P/q+E52X+YVqlR0i4fEQlZY1tzuYalxv1EYeqX69FarTCpy/d6e7PR6intjVinPNXyBpdvJrPT3DwzOVmpsWlg0T9T4DVj4jI5ijBUNTRr/3GPN69p7u2i7jCPwVIaxFepSe82Cs9mpMHqdU3oPQh3kZiPHm85NnF0GooTJKo3GcNN2PNZ5ArMp7Xr13Qmrh86v3snTPHWR6IyLXEc9bBT6AWR9mEZiimiLRKBKOU39pH7XRv0PCF3jPq4YmO67yJ+uze2+g1LuZdGw5WTadwp3r6I3aX/Kq//W2ZFvFkkTs4986uQLxN6vPQV5b4eixzKvvW3teHmN1775V9ER/i9uaYvW0Dge6EfVAlj3N83922UwXr1K5v5yFk6s9s+UqMmDIAnWPwVLxMOyeHVHVg8C+SuXo6GzVmZtu+uT8kZFohUS+SmCxYX3iquJ+3NWPqLf6hElMJkn0tV/tX1YqlQbaOWFQVxdGouzY/k6LTV150yfnxyO6KgstVScGsiAWsrGDJ08Gi+Ppf69W33dicp+33bYlfv740Apx+jJrHRfU1cZKx77xjTtPmQPcZBqVyr19WQjLQ9YYNNEBy7yfQF4d3RkVYVjdh0APQe+havWOGsWSuW3ZNhEsXJGpz59MTzAZrlbv2teJhqtv3DQY123p1DeLpmPn6/6nvnjnuFzelOB27VobHTl+fJVYusKdpYL3g0YOI2I+BHJo3ryePQ8++JvHTzUHt922JT569IWVmUpvO90A3jN28B8e/A8d+kj06spPrw1ZiJvX7FTXa1b4410D1MMymqnFTWGoUXzP1G7/PxJljCF+75WHzogOgHt39SHzVhIKPpPKML3hEA1bTqO+gCjqwzxGPcI9ArW8iogWoTc+hDeGOLo2v36d1PymY2fZoX7Sl1biuhjxAdA+3CPUR3E5TqZH0Jf28Z6fG5qO3JzbbNqzgZ6+zaS1FTmX7Yj8DdKo/w090duS766oJ4nYJ58bXeaZ3+yEGMfOyktjBqpIJtX3ru3J04U2P7sGjf8WfNW0DNLdKPWAZzt41yt+YeoOE9G+/nG+ZOtLOjT0Xbv9dtL2dZFP19bTYgxJBBcW8/jdZimufK3safucSXWa/phKBW0vedUsk9XcNt3veYzf6fU78zEdeimqgrevTz15/NYa3zP1e/r05BELE49p+3WasI8Wc06SRHftIjp69EJtv4ZF37Ocg6nX9NTzOPGY2V2vU5Exi3VgZoWqwjY7Y+lxCj3NcJxpajlOe9wM+0zYv2CUrf4Vqkwc8+4ZUxJzbrP52Wso9W6mMbYan4FBaqRY+ijiv8Tzq4+TiG1+1hec9Nobxa0X1bP0oBpmmhJk+/f//P88kCSJsenZKwjRF4EFZOn0EmRpHmTpdt698vrZj9fK8ICm6jIXC4ZN7vfHbRGyHxXaM2pgbub63GFittWPN61dzAKniovsACFxZelzl1Cat5n62OXj3qGOfhkB1b1kY7/MC6/eTSJ27y7vS8NL17iEQU5Zx/HUUPfR1OZVhx/gRJKIsXnv2xG9H/N4gkNmAn1uxL2QNv6ad6+8bVYBsF100UUXp0CzWMUwaTact8fTuXJMKExrRqmnHymtgbtJ3PXoEDVTjoh7TfC647Uz/Yh4aipDw0O0ORDCL6AhHndZji9X10afA5aBUtjHZrn+bhdddNHFDMgZZNw4QTZ2pChZNFHymqzSZul84Cou/PU4AZLrJY0bHBHXE47XBK1LpnWh7XPKttcFr5tRH3Pbz7a7cxru/04ZYUPhYe6cqSPFtiyFzJ6d+ynqoosu/rUiZ5CH1p7A2UUUj+YS2jRhMyJKlsbEPeupp2uboVBHh847JioH1b2mntZUqam3fU7ZDjXB63h04OSreo/AxrwOx8n6G9FwMWld8WncP05RXUSOIeSOnblcg7aLLrr4V4vWUonC0+CdY+Pa4Q5ZuhbRm1m4u5ck0eR6SV+M4wOWlo5khLq518y9ZqH4tP/f3m7bniHHYi/tTUQsgTzfslS6sxhzyuJTEyGgYTcuh7r2xy666GKu0JLKgj5NOnaIEGkH70wbXHEvA/8WDVfkbnTX5OVSmzcW71NPjyleV3wio/S2Txtz1NTrkqbH5WR939G1jJK4suSpMpK9EwmvIa3TvnznFIgYuGHZDsbsBFw3RyENXXTRxb92FG5vMf7XoSNktpWoB5gpk4XcIQIr///27ifEruoO4Pj3d869972ZvsQYnTCRYEIYUpmFRBoGXdVAd13ZVpe1QWiKWVYLUkrvUIrYLooUq6YuFARtCy5aKaWbDLRKrS66KLY0dkwlZpKZMB3j+ObNfef+jov73sub/2/GSSPl94FhOMx973Bn8eOce3/n98P5H7L/vapgZR7d6RPS/O++xrRGuaROm1LGIJIUErQQ6fsJWlR/06IUuVxvNqY/Or7vWt7dGWvjXlz2CGW7AVvkcImAS66i5RvMjy2Sn7zpLWONMf8fVi4Vf/HPu3H+LYQM7ZSFiquu7tWHFCWtKaF4lVA8ztzs1W4CZh6jOzhDPSx/spdm0mg5XHSFYxnqaaaFoknQlk+GFubGaeYiSn4ugfuVQ++fILpniXo3ZTtZVeVj1ePRCN4r4v9AaJ3hyl0fbPsAvTHGbGDtXvr5f7+C9w91muC4zXfbUcnqBWX7t8TiKW6Nf+fd8dAfpPJzMeEIyUhzLoER5marPtj5SQnXM+MnYeTBYZyfIKs/g8a7KNsbTLpq/trwAq3mE8wee2GrrHhjjNmO6+Gv+3Lj7L++giQvEXWUUjcPkFW2tuLTgJbvoPpL2vIa82OLOZOdjhAb5CT2H/85cP5OvDyE84+AHKVsb/0cMaIkCSBTEB7mw7FLtno0xuymleEvzx2HH95LO/wY5Nuods4vbkkRgbQ2S2vpjzh+Ra35JqfuWVj3HGg3kD3z/ii++Bo++zqRE8Sy0TvJM8iczjtUH+Ty2GsrvtcYY3bB2kiUR8fBfxwn3fNzQjGBbljdp09nJQmQZAqySFieBvkLTt6mHS+RyiKxdJRxP94fBb5EZILa0CHay/XqxU/cOjjG7vPPuqLlr/mweQpWbuuNMWY3rB8gc1GeO/8NstrPCMVoFSQHLNsdY7Wa9KnDewgBNFR9dKvVaB2f
gnMQ2lAG3TSNZ+0EikuA+FdieYqZV3Zem84YYzax/vY3jw75wu9pffIsiEOcDlyUVsQRoyMUyvKSom065wHrIBkxQnsZlpd08ODYPd0TOw165AKqP2UmTG/jXo0xZls2Xhbm0XHLhb0Mhadx8k1Uldh5ntjrM9qp5r3huG+K6+lBdBqUDPD5vjFU5eLTbJ6y/AHt1svMjTdta22MuVE2Xr3lonx05Bqe76O8iEsCzmkv6PWauMsm41U5jL1CE4N+vvsVUq0c01qL0H6C1L3I3G8sOBpjbqitHyzm0THy7gF88jhJ7Vto2IeuetPcW+XJjRgr3iuRi8T4JKfHzu74bo0xZhu2fv6XizI3PovwJGUxSZJdxGdVWbQYtfNWmV7zrN0aRxSRquct7k20/C4Mv3xD/xvGGNNnsLfHuSgzx+bJ0rOE9hkiUyRZwCeuU0OyIn1b452Pq+CbZHRSh14gLJ1hf/t1Zg62dnSXxhizA37gK6cmI/fcqnz8wHka8+dQvQJ6lNrQHlQFYlldGGVNy4beKrFroz7bUqXwJGmLMryDxu8RWs8xO36JuRG1Z47GmP+lwQMkwNRU5H4RFh+4xmO3vcFXH/0dZXsJn9ZIa/Wqx7QH5yIinf1ylPWDo4A4xbkqenrfojZ0haL1JzT8BIk/4jvH3mbiQCA/qUxNbqf5tTHGfGYDZn+vo9eshxRnXwAAALtJREFU+8uOO0aPojIBch/p8HGkPEQobyfGYbzXNdNEdagqIk18chHVC4Tib0TewvNnTn/xam8OSwI3xtwkOw+QcD2Adc9b73+vQcYhXLyDUu9E/GHSZBTxDaJmAGhs4uICoZyB+AGlTEOcxV+7zMzrrV4fW2OMuck+W4Bcrb8Rd34u4fCRhI9Dxp7EsdC5xgfFF8rwcOA/RwK5hF4tSAuMxpjPkd0NkP16W3BYWfJssjPu/LagaIz5nPoUBSp4D1AF9yMAAAAASUVORK5CYII=)"]},{"cell_type":"markdown","metadata":{"id":"_8dMBi8UNtg1"},"source":["[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/langtest/blob/main/demo/tutorials/llm_notebooks/Visual_QA.ipynb)"]},{"cell_type":"markdown","metadata":{"id":"_EzC6SKhjdk7"},"source":["**LangTest** is an open-source python library designed to help developers deliver safe and effective Natural Language Processing (NLP) models. Whether you are using **John Snow Labs, Hugging Face, Spacy** models or **OpenAI, Cohere, AI21, Hugging Face Inference API and Azure-OpenAI** based LLMs, it has got you covered. You can test any Named Entity Recognition (NER), Text Classification, fill-mask, Translation model using the library. We also support testing LLMS for Question-Answering, Visual question-answering, Summarization and text-generation tasks on benchmark datasets. The library supports 60+ out of the box tests. For a complete list of supported test categories, please refer to the [documentation](http://langtest.org/docs/pages/docs/test_categories).\n","\n","Metrics are calculated by comparing the model's extractions in the original list of sentences against the extractions carried out in the noisy list of sentences. The original annotated labels are not used at any point, we are simply comparing the model against itself in a 2 settings."]},{"cell_type":"markdown","metadata":{"id":"v9Yd7KhpZOTF"},"source":["# Getting started with LangTest"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"kJ-dxTWu7bcA"},"outputs":[],"source":["!pip install langtest==2.4.0"]},{"cell_type":"markdown","metadata":{"id":"cXOI5kBFlO6w"},"source":["# Harness and its Parameters\n","\n","The Harness class is a testing class for Natural Language Processing (NLP) models. 
It evaluates the performance of an NLP model on a given task using test data and generates a report with test results. Harness can be imported from the LangTest library in the following way."]},{"cell_type":"code","execution_count":1,"metadata":{"executionInfo":{"elapsed":4291,"status":"ok","timestamp":1692340616139,"user":{"displayName":"Prikshit sharma","userId":"07819241395213139913"},"user_tz":-330},"id":"w1g27-uxl1AA"},"outputs":[],"source":["#Import Harness from the LangTest library\n","from langtest import Harness"]},{"cell_type":"markdown","metadata":{"id":"PXBMpFHIl7n9"},"source":["This imports the Harness class, which provides a blueprint or framework for conducting NLP tests; instances of the Harness class can be customized and configured for different testing scenarios or environments.\n","\n","Here is a list of the different parameters that can be passed to the Harness function:\n","\n","
\n","\n","\n","\n","| Parameter | Description |\n","| - | - |\n","| **task** | Task for which the model is to be evaluated (Visual Question Answering) |\n","| **model** | Specifies the model(s) to be evaluated. This parameter can be provided as either a dictionary or a list of dictionaries. Each dictionary should contain the following keys:
- model (mandatory): \tPipelineModel or path to a saved model or pretrained LLM pipeline/model from hub.
- hub (mandatory): Hub (library) to use in back-end for loading model from public models hub or from path
|\n","| **data** | The data to be used for evaluation. A dictionary providing flexibility and options for data sources. It should include the following keys: - data_source (mandatory): The source of the data.
- subset (optional): The subset of the data.
- feature_column (optional): The column containing the features.
- target_column (optional): The column containing the target labels.
- split (optional): The data split to be used.
- source (optional): Set to 'huggingface' when loading Hugging Face dataset.
|\n","| **config** | Configuration for the tests to be performed, specified in the form of a YAML file. |\n","\n","\n","
\n","
"]},{"cell_type":"markdown","metadata":{"id":"KLC_lBv09ZuN"},"source":["# Robustness Testing\n","\n","Model robustness can be described as the ability of a model to maintain similar levels of accuracy, precision, and recall when perturbations are made to the data it is predicting on. For example, In the case of images, the goal is to understand how modifications such as resizing, rotation, noise addition, or color adjustments affect the model's performance compared to the original images it was trained on.\n","\n","\n","**`Supported Robustness tests :`**
\n","\n","### Text\n","\n","| **Test Name** | **Short Description** |\n","|-------------------------------|----------------------------------------------------------------------------------------|\n","| **`uppercase`** | Capitalization of the text set is turned into uppercase |\n","| **`lowercase`** | Capitalization of the text set is turned into lowercase |\n","| **`titlecase`** | Capitalization of the text set is turned into title case |\n","| **`add_punctuation`** | Adds punctuation to the text set |\n","| **`strip_punctuation`** | Removes punctuation from the text set |\n","| **`add_typo`** | Introduces typographical errors into the text |\n","| **`swap_entities`** | Swaps named entities in the text |\n","| **`american_to_british`** | Converts American English spellings to British English |\n","| **`british_to_american`** | Converts British English spellings to American English |\n","| **`add_context`** | Adds additional context to the text set |\n","| **`add_contraction`** | Introduces contractions (e.g., do not → don't) |\n","| **`dyslexia_word_swap`** | Swaps words in a way that mimics dyslexic reading errors |\n","| **`number_to_word`** | Converts numbers to words in the text set (e.g., 1 → one) |\n","| **`add_ocr_typo`** | Adds optical character recognition (OCR) specific typos to the text |\n","| **`add_abbreviation`** | Replaces certain words with their abbreviations |\n","| **`add_speech_to_text_typo`** | Adds speech-to-text transcription errors |\n","| **`add_slangs`** | Introduces slang terms into the text |\n","| **`multiple_perturbations`** | Applies multiple perturbations to the text at once |\n","| **`adjective_synonym_swap`** | Swaps adjectives in the text with their synonyms |\n","| **`adjective_antonym_swap`** | Swaps adjectives in the text with their antonyms |\n","| **`strip_all_punctuation`** | Removes all punctuation from the text |\n","| **`randomize_age`** | Randomizes the age mentioned in the text |\n","| **`add_new_lines`** | Inserts new lines into the text set |\n","| **`add_tabs`** | Inserts tab characters into the text set |\n","\n","### Images\n","\n","| **Test Name** | **Short Description** |\n","|----------------------|--------------------------------------------------------|\n","| **`image_resize`** | Resizes the image to a different dimension |\n","| **`image_rotate`** | Rotates the image by a specified angle |\n","| **`image_blur`** | Applies a blur filter to the image |\n","| **`image_noise`** | Adds random noise to the image |\n","| **`image_contrast`** | Adjusts the contrast of the image |\n","| **`image_brightness`**| Adjusts the brightness of the image |\n","| **`image_sharpness`** | Adjusts the sharpness of the image |\n","| **`image_color`** | Adjusts the color balance of the image |\n","| **`image_flip`** | Flips the image either horizontally or vertically |\n","| **`image_crop`** | Crops a portion of the image |\n","\n","
"]},{"cell_type":"markdown","metadata":{"id":"cVIzXdGMjX47"},"source":["## Testing robustness of a pretrained LLM models\n","\n","Testing a LLM model's robustness gives us an idea on how our data may need to be modified to make the model more robust. We can use a pretrained model/pipeline or define our own custom pipeline or load a saved pre trained model to test.\n","\n","Here we are directly passing a pretrained model/pipeline from hub as the model parameter in harness and running the tests."]},{"cell_type":"markdown","metadata":{"id":"78THAZm3cRu7"},"source":["### Test Configuration\n","\n","Test configuration can be passed in the form of a YAML file as shown below or using .configure() method\n","\n","\n","**Config YAML format** :\n","```\n","tests: \n"," {\n"," \"defaults\": {\n"," \"min_pass_rate\": 0.5,\n"," },\n"," \"robustness\": {\n"," \"image_noise\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"noise_level\": 0.5\n"," }\n","\n"," },\n"," \"image_rotate\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"angle\": 45\n"," }\n"," },\n"," \"image_blur\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"radius\": 5\n"," }\n"," },\n"," \"image_resize\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"resize\": 0.5 # 0.01 to 1.0 means 1% to 100% of the original size\n"," }\n"," },\n"," }\n"," }\n"," \n","```\n","\n","If config file is not present, we can also use the **.configure()** method to manually configure the harness to perform the needed tests.\n"]},{"cell_type":"code","execution_count":2,"metadata":{},"outputs":[],"source":["import os \n","os.environ['OPENAI_API_KEY'] = \"sk-XXXXXXXX\""]},{"cell_type":"markdown","metadata":{},"source":["## Visual Question Answering (VQA)\n","\n","This notebook demonstrates how to perform a Visual Question Answering (VQA) using the `PIL` library to load images and a harness for running the task. 
The model being used is `gpt-4o-mini` from the OpenAI hub, and the data comes from the MMMU dataset, specifically the `Clinical_Medicine` subset."]},{"cell_type":"code","execution_count":3,"metadata":{},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"c274bf01644a432fb0e254fd1e8ebb75","version_major":2,"version_minor":0},"text/plain":["Resolving data files: 0%| | 0/60 [00:00, ?it/s]"]},"metadata":{},"output_type":"display_data"},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"3cc1882c9281421f8b7f42f54a3999ce","version_major":2,"version_minor":0},"text/plain":["Resolving data files: 0%| | 0/32 [00:00, ?it/s]"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Test Configuration : \n"," {}\n"]}],"source":["harness = Harness(\n"," task=\"visualqa\",\n"," model={\n"," \"model\": \"gpt-4o-mini\",\n"," \"hub\": \"openai\"\n"," },\n"," data={\"data_source\": 'MMMU/MMMU',\n"," \"subset\": \"Clinical_Medicine\",\n"," # \"feature_column\": \"question\",\n"," # \"target_column\": 'answer',\n"," \"split\": \"dev\",\n"," \"source\": \"huggingface\"\n"," },\n"," config={}\n",")"]},{"cell_type":"markdown","metadata":{"id":"jGEN7Q0Ric8H"},"source":["We can use the .configure() method to manually define our test configuration for the robustness tests."]},{"cell_type":"code","execution_count":4,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":91,"status":"ok","timestamp":1692340473373,"user":{"displayName":"Prikshit sharma","userId":"07819241395213139913"},"user_tz":-330},"id":"C08dW5tue_6d","outputId":"c12433af-296e-4e9b-d2e2-cdd68f5426ea"},"outputs":[{"data":{"text/plain":["{'tests': {'defaults': {'min_pass_rate': 0.5},\n"," 'robustness': {'image_noise': {'min_pass_rate': 0.5,\n"," 'parameters': {'noise_level': 0.5}},\n"," 'image_rotate': {'min_pass_rate': 0.5, 'parameters': {'angle': 55}},\n"," 'image_blur': {'min_pass_rate': 0.5, 'parameters': {'radius': 5}},\n"," 'image_resize': {'min_pass_rate': 0.5, 'parameters': {'resize': 0.5}}}}}"]},"execution_count":4,"metadata":{},"output_type":"execute_result"}],"source":["harness.configure({\n"," \"tests\": {\n"," \"defaults\": {\n"," \"min_pass_rate\": 0.5,\n"," },\n"," \"robustness\": {\n"," \"image_noise\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"noise_level\": 0.5\n"," }\n","\n"," },\n"," \"image_rotate\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"angle\": 55\n"," }\n"," },\n"," \"image_blur\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"radius\": 5\n"," }\n"," },\n"," \"image_resize\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"resize\": 0.5 # 0.01 to 1.0 means 1% to 100% of the original size\n"," }\n"," },\n"," }\n"," }\n","})"]},{"cell_type":"markdown","metadata":{"id":"FLLzeE_Pix2W"},"source":["Here we have configured the harness to perform image robustness tests (image_blur, image_resize, image_rotate, and image_noise) and defined the minimum pass rate for each test."]},{"cell_type":"markdown","metadata":{},"source":["To ensure we work with a smaller subset of data, we'll limit the dataset to the first 50 entries. 
This is useful for faster prototyping and testing without needing to process the entire dataset.\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["harness.data = harness.data[:50]"]},{"cell_type":"markdown","metadata":{},"source":["In this section, we will reset the test cases in the `Harness` object by setting `harness._testcases` to `None`. This can be useful if you want to clear any previously loaded test cases or start fresh without any predefined cases.\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["harness._testcases = None"]},{"cell_type":"markdown","metadata":{"id":"MomLlmTwjpzU"},"source":["\n","### Generating the test cases.\n","\n","\n"]},{"cell_type":"code","execution_count":5,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":23034,"status":"ok","timestamp":1692340496325,"user":{"displayName":"Prikshit sharma","userId":"07819241395213139913"},"user_tz":-330},"id":"njyA7h_tfMVo","outputId":"481382ae-630d-4c62-d6d8-c8108982df89"},"outputs":[{"name":"stderr","output_type":"stream","text":["Generating testcases...: 100%|██████████| 1/1 [00:00, ?it/s]\n"]},{"data":{"text/plain":[]},"execution_count":5,"metadata":{},"output_type":"execute_result"}],"source":["harness.generate()"]},{"cell_type":"markdown","metadata":{"id":"C_qyYdl8FYoD"},"source":["harness.generate() method automatically generates the test cases (based on the provided configuration)"]},{"cell_type":"markdown","metadata":{},"source":["This code snippet will display an HTML table based on the DataFrame returned by `harness.testcases()`. The `escape=False` parameter allows HTML content within the DataFrame to be rendered without escaping special characters."]},{"cell_type":"code","execution_count":11,"metadata":{},"outputs":[{"data":{"text/html":["\n"," \n"," \n"," | \n"," category | \n"," test_type | \n"," original_image | \n"," perturbed_image | \n"," question | \n"," options | \n","
\n"," \n"," \n"," \n"," 3 | \n"," robustness | \n"," image_noise | \n"," | \n"," | \n"," What person's name is associated with the fracture shown below? | \n"," A. Monteggia\\nB. Bennett\\nC. Jones\\nD. Smith | \n","
\n"," \n"," 15 | \n"," robustness | \n"," image_resize | \n"," | \n"," | \n"," Identify the following rhythm: | \n"," A. Sinus Rhythm with PAC's\\nB. Junctional Rhythm\\nC. 2nd Degree AV Block, Type I\\nD. 3rd Degree AV Block\\nE. Normal Sinus Rhythm with PVC's\\nF. Idioventricular Rhythm | \n","
\n"," \n"," 6 | \n"," robustness | \n"," image_rotate | \n"," | \n"," | \n"," A 56-year-old woman is undergoing chemotherapy for treatment of breast carcinoma. The gross appearance of her skin shown here is most typical for which of the following conditions? | \n"," A. Thrombocytopenia\\nB. Gangrene\\nC. Congestive heart failure\\nD. Metastatic breast carcinoma | \n","
\n"," \n"," 18 | \n"," robustness | \n"," image_resize | \n"," | \n"," | \n"," What person's name is associated with the fracture shown below? | \n"," A. Monteggia\\nB. Bennett\\nC. Jones\\nD. Smith | \n","
\n"," \n"," 17 | \n"," robustness | \n"," image_resize | \n"," | \n"," | \n"," Based on , what's the most likely diagnosis? | \n"," A. first degree atrioventricular block\\nB. third degree atrioventricular block\\nC. Second degree type II atrioventricular block\\nD. atrial flutter | \n","
\n"," \n","
"],"text/plain":[""]},"metadata":{},"output_type":"display_data"}],"source":["from IPython.display import display, HTML\n","\n","\n","df = harness.testcases()\n","html=df.sample(5).to_html(escape=False)\n","\n","display(HTML(html))"]},{"cell_type":"markdown","metadata":{"id":"fRyNPRBokXNZ"},"source":["### Running the tests."]},{"cell_type":"code","execution_count":12,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":68268,"status":"ok","timestamp":1692340564519,"user":{"displayName":"Prikshit sharma","userId":"07819241395213139913"},"user_tz":-330},"id":"3kUPTsNvjkgr","outputId":"4c4815e4-4cab-4dbf-99ba-1a231656f1e3"},"outputs":[{"name":"stderr","output_type":"stream","text":["Running testcases... : 100%|██████████| 20/20 [00:44<00:00, 2.21s/it]\n"]},{"data":{"text/plain":[]},"execution_count":12,"metadata":{},"output_type":"execute_result"}],"source":["harness.run()"]},{"cell_type":"code","execution_count":13,"metadata":{},"outputs":[{"data":{"text/html":["\n"," \n"," \n"," | \n"," category | \n"," test_type | \n"," original_image | \n"," perturbed_image | \n"," question | \n"," options | \n"," expected_result | \n"," actual_result | \n"," pass | \n","
\n"," \n"," \n"," \n"," 5 | \n"," robustness | \n"," image_rotate | \n"," | \n"," | \n"," Identify the following rhythm: | \n"," A. Sinus Rhythm with PAC's\\nB. Junctional Rhythm\\nC. 2nd Degree AV Block, Type I\\nD. 3rd Degree AV Block\\nE. Normal Sinus Rhythm with PVC's\\nF. Idioventricular Rhythm | \n"," Answer: UnRecognizable. | \n"," Answer: UnRecognizable. | \n"," True | \n","
\n"," \n"," 4 | \n"," robustness | \n"," image_noise | \n"," | \n"," | \n"," The best diagnosis for the appendix is: | \n"," A. simple appendicitis\\nB. appendix abscess\\nC. normal appendix\\nD. cellulite appendicitis | \n"," Answer: UnRecognizable. | \n"," I'm unable to recognize the content of the image. Thus, I cannot determine the correct diagnosis for the appendix. \\n\\nAnswer: UnRecognizable. | \n"," False | \n","
\n"," \n"," 7 | \n"," robustness | \n"," image_rotate | \n"," | \n"," | \n"," Based on , what's the most likely diagnosis? | \n"," A. first degree atrioventricular block\\nB. third degree atrioventricular block\\nC. Second degree type II atrioventricular block\\nD. atrial flutter | \n"," Answer: UnRecognizable. | \n"," Answer: UnRecognizable. | \n"," True | \n","
\n"," \n"," 9 | \n"," robustness | \n"," image_rotate | \n"," | \n"," | \n"," The best diagnosis for the appendix is: | \n"," A. simple appendicitis\\nB. appendix abscess\\nC. normal appendix\\nD. cellulite appendicitis | \n"," Answer: UnRecognizable. | \n"," Answer: A. simple appendicitis. | \n"," False | \n","
\n"," \n"," 0 | \n"," robustness | \n"," image_noise | \n"," | \n"," | \n"," Identify the following rhythm: | \n"," A. Sinus Rhythm with PAC's\\nB. Junctional Rhythm\\nC. 2nd Degree AV Block, Type I\\nD. 3rd Degree AV Block\\nE. Normal Sinus Rhythm with PVC's\\nF. Idioventricular Rhythm | \n"," Answer: UnRecognizable. | \n"," Answer: UnRecognizable. | \n"," True | \n","
\n"," \n","
"],"text/plain":[""]},"metadata":{},"output_type":"display_data"}],"source":["from IPython.display import display, HTML\n","\n","\n","df = harness.generated_results()\n","html=df.sample(5).to_html(escape=False)\n","\n","display(HTML(html))"]},{"cell_type":"markdown","metadata":{},"source":["Called after harness.generate() and is to used to run all the tests. Returns a pass/fail flag for each test."]},{"cell_type":"markdown","metadata":{"id":"106TE41ffw43"},"source":["This method returns the generated results in the form of a pandas dataframe, which provides a convenient and easy-to-use format for working with the test results. You can use this method to quickly identify the test cases that failed and to determine where fixes are needed."]},{"cell_type":"markdown","metadata":{"id":"_0gnozMlkoF0"},"source":["### Report of the tests"]},{"cell_type":"code","execution_count":15,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":112},"executionInfo":{"elapsed":22,"status":"ok","timestamp":1692340564522,"user":{"displayName":"Prikshit sharma","userId":"07819241395213139913"},"user_tz":-330},"id":"YKFvMs0RGHO7","outputId":"3a0ed33b-aa59-4e98-86d0-8d407391b0e4"},"outputs":[{"data":{"text/html":["\n","\n","
\n"," \n"," \n"," | \n"," category | \n"," test_type | \n"," fail_count | \n"," pass_count | \n"," pass_rate | \n"," minimum_pass_rate | \n"," pass | \n","
\n"," \n"," \n"," \n"," 0 | \n"," robustness | \n"," image_noise | \n"," 3 | \n"," 2 | \n"," 40% | \n"," 50% | \n"," False | \n","
\n"," \n"," 1 | \n"," robustness | \n"," image_rotate | \n"," 2 | \n"," 3 | \n"," 60% | \n"," 50% | \n"," True | \n","
\n"," \n"," 2 | \n"," robustness | \n"," image_blur | \n"," 2 | \n"," 3 | \n"," 60% | \n"," 50% | \n"," True | \n","
\n"," \n"," 3 | \n"," robustness | \n"," image_resize | \n"," 2 | \n"," 3 | \n"," 60% | \n"," 50% | \n"," True | \n","
\n"," \n","
\n","
"],"text/plain":[" category test_type fail_count pass_count pass_rate \\\n","0 robustness image_noise 3 2 40% \n","1 robustness image_rotate 2 3 60% \n","2 robustness image_blur 2 3 60% \n","3 robustness image_resize 2 3 60% \n","\n"," minimum_pass_rate pass \n","0 50% False \n","1 50% True \n","2 50% True \n","3 50% True "]},"execution_count":15,"metadata":{},"output_type":"execute_result"}],"source":["harness.report()"]},{"cell_type":"markdown","metadata":{"id":"bSP2QL6agTH_"},"source":["Called after harness.run() and it summarizes the results giving information about pass and fail counts and overall test pass/fail flag."]}],"metadata":{"accelerator":"GPU","colab":{"machine_shape":"hm","provenance":[],"toc_visible":true},"gpuClass":"standard","kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.8.10"}},"nbformat":4,"nbformat_minor":0}
diff --git a/langtest/datahandler/datasource.py b/langtest/datahandler/datasource.py
index 4de9999f4..c12a11662 100644
--- a/langtest/datahandler/datasource.py
+++ b/langtest/datahandler/datasource.py
@@ -95,6 +95,12 @@
"anti-stereotype": ["anti-stereotype"],
"unrelated": ["unrelated"],
},
+ "visualqa": {
+ "image": ["image", "image_1"],
+ "question": ["question"],
+ "options": ["options"],
+ "answer": ["answer"],
+ },
}
@@ -183,7 +189,7 @@ def __init__(self, file_path: Union[str, dict], task: TaskManager, **kwargs) ->
raise ValueError(Errors.E024)
if "data_source" not in file_path:
- raise ValueError(Errors.E025)
+            raise ValueError(Errors.E025())
self._custom_label = file_path.copy()
self._file_path = file_path.get("data_source")
self._size = None
@@ -1246,6 +1252,7 @@ class HuggingFaceDataset(BaseDataset):
"summarization",
"ner",
"question-answering",
+ "visualqa",
]
LIB_NAME = "datasets"
@@ -1709,6 +1716,7 @@ class PandasDataset(BaseDataset):
"legal",
"factuality",
"stereoset",
+ "visualqa",
]
COLUMN_NAMES = {task: COLUMN_MAPPER[task] for task in supported_tasks}
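The `visualqa` entry added to `COLUMN_MAPPER` above lists the accepted aliases per field (MMMU exposes the image under `image_1`). Here is a minimal sketch of how such an alias map resolves against a dataset row; `resolve_columns` is a hypothetical helper for illustration, not langtest API:

```python
# Hypothetical illustration of alias resolution for the mapping added above.
VISUALQA_COLUMNS = {
    "image": ["image", "image_1"],
    "question": ["question"],
    "options": ["options"],
    "answer": ["answer"],
}

def resolve_columns(row_keys, alias_map):
    """Map each canonical field to the first alias present in the row."""
    return {
        field: next((a for a in aliases if a in row_keys), None)
        for field, aliases in alias_map.items()
    }

print(resolve_columns(["image_1", "question", "options", "answer"], VISUALQA_COLUMNS))
# {'image': 'image_1', 'question': 'question', 'options': 'options', 'answer': 'answer'}
```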
diff --git a/langtest/langtest.py b/langtest/langtest.py
index d7a1f15cd..09df1b57d 100644
--- a/langtest/langtest.py
+++ b/langtest/langtest.py
@@ -605,6 +605,7 @@ def generated_results(self) -> Optional[pd.DataFrame]:
"model_name",
"category",
"test_type",
+ "original_image",
"original",
"context",
"prompt",
@@ -613,8 +614,10 @@ def generated_results(self) -> Optional[pd.DataFrame]:
"completion",
"test_case",
"perturbed_context",
+ "perturbed_image",
"perturbed_question",
"sentence",
+ "question",
"patient_info_A",
"patient_info_B",
"case",
@@ -838,6 +841,7 @@ def testcases(self, additional_cols=False) -> pd.DataFrame:
"model_name",
"category",
"test_type",
+ "original_image",
"original",
"context",
"original_context",
@@ -863,7 +867,9 @@ def testcases(self, additional_cols=False) -> pd.DataFrame:
"correct_sentence",
"incorrect_sentence",
"perturbed_context",
+ "perturbed_image",
"perturbed_question",
+ "question",
"ground_truth",
"options",
"expected_result",
diff --git a/langtest/modelhandler/llm_modelhandler.py b/langtest/modelhandler/llm_modelhandler.py
index c65387402..968928e12 100644
--- a/langtest/modelhandler/llm_modelhandler.py
+++ b/langtest/modelhandler/llm_modelhandler.py
@@ -13,6 +13,7 @@
import logging
from functools import lru_cache
from langtest.utils.custom_types.helpers import HashableDict
+from langchain.chat_models.base import BaseChatModel
class PretrainedModelForQA(ModelAPI):
@@ -452,3 +453,57 @@ class PretrainedModelForSycophancy(PretrainedModelForQA, ModelAPI):
"""
pass
+
+
+class PretrainedModelForVisualQA(PretrainedModelForQA, ModelAPI):
+ """A class representing a pretrained model for visual question answering.
+
+ Inherits:
+ PretrainedModelForQA: The base class for pretrained models.
+ """
+
+ @lru_cache(maxsize=102400)
+ def predict(
+ self, text: Union[str, dict], prompt: dict, images: List[Any], *args, **kwargs
+ ):
+ """Perform prediction using the pretrained model.
+
+ Args:
+ text (Union[str, dict]): The input text or dictionary.
+ prompt (dict): The prompt configuration.
+ images (List[Any]): The list of images.
+ *args: Additional positional arguments.
+ **kwargs: Additional keyword arguments.
+
+ Returns:
+ dict: A dictionary containing the prediction result.
+ - 'result': The prediction result.
+ """
+ try:
+ if not isinstance(self.model, BaseChatModel):
+ ValueError("visualQA task is only supported for chat models")
+
+ # prepare prompt
+ prompt_template = PromptTemplate(**prompt)
+ from langchain_core.messages import HumanMessage
+
+ images = [
+ {
+ "type": "image_url",
+ "image_url": {"url": image},
+ }
+ for image in images
+ ]
+
+ messages = HumanMessage(
+ content=[
+ {"type": "text", "text": prompt_template.format(**text)},
+ *images,
+ ]
+ )
+
+ response = self.model.invoke([messages])
+ return response.content
+
+ except Exception as e:
+ raise ValueError(Errors.E089(error_message=e))
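`PretrainedModelForVisualQA.predict` formats the prompt text plus one `image_url` part per image into a single LangChain `HumanMessage`. Below is a standalone sketch of the same payload shape; the base64 data-URL encoding of a PIL image is an assumption about how image objects are serialized for the API, and `chat_model` stands in for any `BaseChatModel` such as `ChatOpenAI`:

```python
import base64
from io import BytesIO

from PIL import Image
from langchain_core.messages import HumanMessage

def to_data_url(img: Image.Image) -> str:
    # Encode a PIL image as a base64 PNG data URL (assumed transport format)
    buf = BytesIO()
    img.save(buf, format="PNG")
    return "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()

img = Image.new("RGB", (64, 64), "white")  # placeholder image
message = HumanMessage(
    content=[
        {"type": "text", "text": "Identify the following rhythm:"},
        {"type": "image_url", "image_url": {"url": to_data_url(img)}},
    ]
)
# response = chat_model.invoke([message])  # chat_model: any BaseChatModel
```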
diff --git a/langtest/tasks/task.py b/langtest/tasks/task.py
index 93af99114..0e5134eae 100644
--- a/langtest/tasks/task.py
+++ b/langtest/tasks/task.py
@@ -851,3 +851,44 @@ def create_sample(
class FillMask(BaseTask):
pass
+
+
+class VisualQA(BaseTask):
+ _name = "visualqa"
+ _default_col = {
+ "image": ["image"],
+ "question": ["question"],
+ "answer": ["answer"],
+ }
+ sample_class = samples.VisualQASample
+
+    @classmethod
+    def create_sample(
+ cls,
+ row_data: dict,
+ image: str = "image_1",
+ question: str = "question",
+ options: str = "options",
+ answer: str = "answer",
+ dataset_name: str = "",
+ ) -> samples.VisualQASample:
+ """Create a sample."""
+ keys = list(row_data.keys())
+
+ # auto-detect the default column names from the row_data
+ column_mapper = cls.column_mapping(keys, [image, question, options, answer])
+
+ options = row_data.get(column_mapper.get(options, "-"), "-")
+
+ if len(options) > 3 and options[0] == "[" and options[-1] == "]":
+ options = ast.literal_eval(row_data[column_mapper["options"]])
+ options = "\n".join(
+ [f"{chr(65 + i)}. {option}" for i, option in enumerate(options)]
+ )
+
+ return samples.VisualQASample(
+ original_image=row_data[column_mapper[image]],
+ question=row_data[column_mapper[question]],
+ options=options,
+ expected_result=row_data[column_mapper[answer]],
+ dataset_name=dataset_name,
+ )
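`VisualQA.create_sample` accepts options either pre-formatted or as a stringified Python list, which it parses and renders as lettered choices. A self-contained illustration of that branch:

```python
import ast

# MMMU stores options as a stringified list, e.g. from the notebook's examples
raw = "['Monteggia', 'Bennett', 'Jones', 'Smith']"

if len(raw) > 3 and raw[0] == "[" and raw[-1] == "]":
    parsed = ast.literal_eval(raw)
    options = "\n".join(f"{chr(65 + i)}. {opt}" for i, opt in enumerate(parsed))

print(options)
# A. Monteggia
# B. Bennett
# C. Jones
# D. Smith
```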
diff --git a/langtest/transform/__init__.py b/langtest/transform/__init__.py
index 3cb59ebd6..0c4f41c9b 100644
--- a/langtest/transform/__init__.py
+++ b/langtest/transform/__init__.py
@@ -22,6 +22,8 @@
from langtest.transform.grammar import GrammarTestFactory
from langtest.transform.safety import SafetyTestFactory
+from langtest.transform import image
+
# Fixing the asyncio event loop
nest_asyncio.apply()
@@ -47,4 +49,5 @@
SycophancyTestFactory,
GrammarTestFactory,
SafetyTestFactory,
+ image,
]
diff --git a/langtest/transform/image/__init__.py b/langtest/transform/image/__init__.py
new file mode 100644
index 000000000..f02586ce0
--- /dev/null
+++ b/langtest/transform/image/__init__.py
@@ -0,0 +1,3 @@
+from .robustness import ImageResizing, ImageRotation, ImageBlur, ImageNoise
+
+__all__ = ["ImageResizing", "ImageRotation", "ImageBlur", "ImageNoise"]
diff --git a/langtest/transform/image/robustness.py b/langtest/transform/image/robustness.py
new file mode 100644
index 000000000..3444abfe9
--- /dev/null
+++ b/langtest/transform/image/robustness.py
@@ -0,0 +1,286 @@
+import random
+from typing import List, Tuple, Union
+from langtest.logger import logger
+from langtest.transform.robustness import BaseRobustness
+from langtest.utils.custom_types.sample import Sample
+from PIL import Image, ImageFilter
+
+
+class ImageResizing(BaseRobustness):
+ alias_name = "image_resize"
+ supported_tasks = ["visualqa"]
+
+ @staticmethod
+ def transform(
+ sample_list: List[Sample],
+ resize: Union[float, Tuple[int, int]] = 0.5,
+ *args,
+ **kwargs,
+ ) -> List[Sample]:
+ for sample in sample_list:
+ sample.category = "robustness"
+ sample.test_type = "image_resize"
+ if isinstance(resize, float):
+ sample.perturbed_image = sample.original_image.resize(
+ (
+ int(sample.original_image.width * resize),
+ int(sample.original_image.height * resize),
+ )
+ )
+ else:
+ sample.perturbed_image = sample.original_image.resize(resize)
+
+ return sample_list
+
+
+class ImageRotation(BaseRobustness):
+ alias_name = "image_rotate"
+ supported_tasks = ["visualqa"]
+
+ @staticmethod
+ def transform(
+        sample_list: List[Sample], angle: int = 90, expand: bool = True, *args, **kwargs
+ ) -> List[Sample]:
+ for sample in sample_list:
+ sample.category = "robustness"
+ sample.test_type = "image_rotate"
+            sample.perturbed_image = sample.original_image.rotate(angle, expand=expand)
+
+ return sample_list
+
+
+class ImageBlur(BaseRobustness):
+ alias_name = "image_blur"
+ supported_tasks = ["visualqa"]
+
+ @staticmethod
+ def transform(
+ sample_list: List[Sample], radius: int = 2, *args, **kwargs
+ ) -> List[Sample]:
+ for sample in sample_list:
+ sample.category = "robustness"
+ sample.test_type = "image_blur"
+ sample.perturbed_image = sample.original_image.filter(
+ ImageFilter.GaussianBlur(radius)
+ )
+
+ return sample_list
+
+
+class ImageNoise(BaseRobustness):
+ alias_name = "image_noise"
+ supported_tasks = ["visualqa"]
+
+ @classmethod
+    def transform(
+        cls, sample_list: List[Sample], noise_level: float = 0.1, *args, **kwargs
+    ) -> List[Sample]:
+        try:
+            if noise_level < 0 or noise_level > 1:
+                raise ValueError("Noise level must be in the range [0, 1].")
+
+            # Apply Gaussian pixel noise to each sample's image
+            for sample in sample_list:
+                sample.category = "robustness"
+                sample.test_type = "image_noise"
+                sample.perturbed_image = cls.add_noise(
+                    image=sample.original_image, noise_level=noise_level
+                )
+ return sample_list
+
+ except Exception as e:
+ logger.error(f"Error in adding noise to the image: {e}")
+ raise e
+
+ @staticmethod
+ def add_noise(image: Image.Image, noise_level: float) -> Image:
+ width, height = image.size
+
+ # Create a new image to hold the noisy version
+ noisy_image = image.copy()
+ pixels = noisy_image.load() # Access pixel data
+
+ # Check if the image is grayscale or RGB
+ if image.mode == "L": # Grayscale image
+ for x in range(width):
+ for y in range(height):
+ # Get the pixel value
+ gray = image.getpixel((x, y))
+
+ # Generate random noise
+ noise_gray = int(random.gauss(0, 255 * noise_level))
+
+ # Add noise and clip the value to stay in [0, 255]
+ new_gray = max(0, min(255, gray + noise_gray))
+
+ # Set the new pixel value
+ pixels[x, y] = new_gray
+
+ elif image.mode == "RGB": # Color image
+ for x in range(width):
+ for y in range(height):
+ r, g, b = image.getpixel((x, y)) # Get the RGB values of the pixel
+
+ # Generate random noise for each channel
+ noise_r = int(random.gauss(0, 255 * noise_level))
+ noise_g = int(random.gauss(0, 255 * noise_level))
+ noise_b = int(random.gauss(0, 255 * noise_level))
+
+ # Add noise to each channel and clip values to stay in range [0, 255]
+ new_r = max(0, min(255, r + noise_r))
+ new_g = max(0, min(255, g + noise_g))
+ new_b = max(0, min(255, b + noise_b))
+
+ # Set the new pixel value
+ pixels[x, y] = (new_r, new_g, new_b)
+
+ else:
+ raise ValueError("The input image must be in 'L' (grayscale) or 'RGB' mode.")
+
+ return noisy_image
+
+
+class ImageContrast(BaseRobustness):
+ alias_name = "image_contrast"
+ supported_tasks = ["visualqa"]
+
+ @staticmethod
+ def transform(
+ sample_list: List[Sample], contrast_factor: float = 0.5, *args, **kwargs
+ ) -> List[Sample]:
+ from PIL import ImageEnhance
+
+ if contrast_factor < 0:
+ raise ValueError("Contrast factor must be above 0.")
+
+ for sample in sample_list:
+ sample.category = "robustness"
+ sample.test_type = "image_contrast"
+ img = ImageEnhance.Contrast(sample.original_image)
+ sample.perturbed_image = img.enhance(contrast_factor)
+
+ return sample_list
+
+
+class ImageBrightness(BaseRobustness):
+ alias_name = "image_brightness"
+ supported_tasks = ["visualqa"]
+
+ @staticmethod
+ def transform(
+ sample_list: List[Sample], brightness_factor: float = 0.3, *args, **kwargs
+ ) -> List[Sample]:
+ from PIL import ImageEnhance
+
+ if brightness_factor < 0:
+ raise ValueError("Brightness factor must be above 0.")
+
+ for sample in sample_list:
+ sample.category = "robustness"
+ sample.test_type = "image_brightness"
+            enhancer = ImageEnhance.Brightness(sample.original_image)
+            sample.perturbed_image = enhancer.enhance(brightness_factor)
+
+ return sample_list
+
+
+class ImageSharpness(BaseRobustness):
+ alias_name = "image_sharpness"
+ supported_tasks = ["visualqa"]
+
+ @staticmethod
+ def transform(
+ sample_list: List[Sample], sharpness_factor: float = 1.5, *args, **kwargs
+ ) -> List[Sample]:
+ from PIL import ImageEnhance
+
+ if sharpness_factor < 0:
+ raise ValueError("Sharpness factor must be above 0.")
+
+ for sample in sample_list:
+ sample.category = "robustness"
+ sample.test_type = "image_sharpness"
+            enhancer = ImageEnhance.Sharpness(sample.original_image)
+            sample.perturbed_image = enhancer.enhance(sharpness_factor)
+
+ return sample_list
+
+
+class ImageColor(BaseRobustness):
+ alias_name = "image_color"
+ supported_tasks = ["visualqa"]
+
+ @staticmethod
+ def transform(
+ sample_list: List[Sample], color_factor: float = 0, *args, **kwargs
+ ) -> List[Sample]:
+ from PIL import ImageEnhance
+
+ if color_factor < 0:
+ raise ValueError("Color factor must be in the range [0, inf].")
+
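+ # PIL semantics: a factor of 0.0 gives a black-and-white image, 1.0 returns the original; the default of 0 removes all color.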
+ for sample in sample_list:
+ sample.category = "robustness"
+ sample.test_type = "image_color"
+ enhancer = ImageEnhance.Color(sample.original_image)
+ sample.perturbed_image = enhancer.enhance(color_factor)
+
+ return sample_list
+
+
+class ImageFlip(BaseRobustness):
+ alias_name = "image_flip"
+ supported_tasks = ["visualqa"]
+
+ @staticmethod
+ def transform(
+ sample_list: List[Sample], flip: str = "horizontal", *args, **kwargs
+ ) -> List[Sample]:
+ if flip not in ["horizontal", "vertical"]:
+ raise ValueError("Flip must be either 'horizontal' or 'vertical'.")
+
+ for sample in sample_list:
+ sample.category = "robustness"
+ sample.test_type = "image_flip"
+ if flip == "horizontal":
+ sample.perturbed_image = sample.original_image.transpose(
+ Image.FLIP_LEFT_RIGHT
+ )
+ else:
+ sample.perturbed_image = sample.original_image.transpose(
+ Image.FLIP_TOP_BOTTOM
+ )
+
+ return sample_list
+
+
+class ImageCrop(BaseRobustness):
+ alias_name = "image_crop"
+ supported_tasks = ["visualqa"]
+
+ @staticmethod
+ def transform(
+ sample_list: List[Sample],
+ crop_size: Union[float, Tuple[int, int]] = (100, 100),
+ *args,
+ **kwargs,
+ ) -> List[Sample]:
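+ # A float crop_size is treated as a fraction of the original width and height;
+ # a tuple as absolute pixel dimensions. Cropping always starts at the top-left corner.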
+ for sample in sample_list:
+ sample.category = "robustness"
+ sample.test_type = "image_crop"
+ if isinstance(crop_size, float):
+ sample.perturbed_image = sample.original_image.crop(
+ (
+ 0,
+ 0,
+ int(sample.original_image.width * crop_size),
+ int(sample.original_image.height * crop_size),
+ )
+ )
+ else:
+ sample.perturbed_image = sample.original_image.crop(
+ (0, 0, crop_size[0], crop_size[1])
+ )
+
+ return sample_list
diff --git a/langtest/transform/utils.py b/langtest/transform/utils.py
index 4540155bf..0fc2dcd23 100644
--- a/langtest/transform/utils.py
+++ b/langtest/transform/utils.py
@@ -397,6 +397,8 @@ def filter_unique_samples(task: str, transformed_samples: list, test_name: str):
no_transformation_applied_tests[test_name] += 1
else:
no_transformation_applied_tests[test_name] = 1
+ elif task == "visualqa":
+ return transformed_samples, no_transformation_applied_tests
else:
for sample in transformed_samples:
if sample.original.replace(" ", "") != sample.test_case.replace(" ", ""):
diff --git a/langtest/utils/custom_types/__init__.py b/langtest/utils/custom_types/__init__.py
index 41d60e870..82e3e62f0 100644
--- a/langtest/utils/custom_types/__init__.py
+++ b/langtest/utils/custom_types/__init__.py
@@ -22,6 +22,7 @@
CrowsPairsSample,
StereoSetSample,
TextGenerationSample,
+ VisualQASample,
)
from .helpers import Span, Transformation
from .output import (
diff --git a/langtest/utils/custom_types/sample.py b/langtest/utils/custom_types/sample.py
index 8477fb9bb..f6e088b39 100644
--- a/langtest/utils/custom_types/sample.py
+++ b/langtest/utils/custom_types/sample.py
@@ -3,6 +3,8 @@
import importlib
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union, Callable
from copy import deepcopy
+
+from langtest.modelhandler.modelhandler import ModelAPI
from ...errors import Errors
from pydantic import BaseModel, PrivateAttr, validator, Field
from .helpers import Transformation, Span
@@ -2751,6 +2753,320 @@ class FillMaskSample(TextGenerationSample):
pass
+class VisualQASample(BaseModel):
+ """
+ A class representing a sample for the Visual Question Answering task.
+
+ Attributes:
+ original_image (Image): The original image used for the test.
+ perturbed_image (Image): The perturbed image generated by the test.
+ question (str): The question asked about the image.
+ ground_truth (str): The ground truth answer to the question.
+ expected_results (str): The model's answer on the original image.
+ actual_results (str): The model's answer on the perturbed image.
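+
+ Example:
+ A minimal sketch with hypothetical values:
+
+ sample = VisualQASample(
+ original_image="https://example.com/cat.png",
+ question="What animal is shown?",
+ )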
+ """
+
+ from PIL.Image import Image
+
+ original_image: Union[Image, str, Any] = None
+ perturbed_image: Union[Image, str, Any] = None
+ question: str = None
+ options: str = None
+ ground_truth: str = None
+ expected_results: str = None
+ actual_results: str = None
+ dataset_name: str = None
+ category: str = None
+ test_type: str = None
+ state: str = None
+ task: str = Field(default="visualqa", const=True)
+ ran_pass: bool = None
+ metric_name: str = None
+ config: Union[str, dict] = None
+ distance_result: float = None
+ eval_model: str = None
+ feedback: str = None
+
+ class Config:
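+ # PIL.Image.Image is not a pydantic-native type, so arbitrary types must be allowed.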
+ arbitrary_types_allowed = True
+
+ def __init__(self, **data):
+ super().__init__(**data)
+ self.original_image = self.__load_image(self.original_image)
+
+ def to_dict(self) -> Dict[str, Any]:
+ """
+ Converts the VisualQASample object to a dictionary.
+
+ Returns:
+ Dict[str, Any]: A dictionary representation of the VisualQASample object.
+ """
+ self.__update_params()
+
+ result = {
+ "category": self.category,
+ "test_type": self.test_type,
+ "original_image": self.convert_image_to_html(self.original_image),
+ "perturbed_image": self.convert_image_to_html(self.perturbed_image),
+ "question": self.question,
+ }
+
+ if self.options is not None:
+ result["options"] = self.options
+
+ if self.state == "done":
+ if self.expected_results is not None and self.actual_results is not None:
+ result.update(
+ {
+ "expected_result": self.expected_results,
+ "actual_result": self.actual_results,
+ "pass": self.is_pass(),
+ }
+ )
+ if "evaluation" in self.config and "metric" in self.config["evaluation"]:
+ if self.config["evaluation"]["metric"].lower() == "prometheus_eval":
+ result.update({"feedback": self.feedback})
+ elif self.config["evaluation"]["metric"].lower() != "llm_eval":
+ result.update({"eval_score": self.distance_result})
+
+ return result
+
+ def run(self, model: ModelAPI, **kwargs):
+ """
+ Run the VisualQASample test using the provided model.
+
+ Args:
+ model: The model used for VisualQASample testing.
+ **kwargs: Additional keyword arguments for the model.
+
+ Returns:
+ bool: True once the expected (original image) and actual (perturbed image) results have been generated.
+ """
+
+ dataset_name = self.dataset_name.split("-")[0].lower()
+ prompt_template = kwargs.get(
+ "user_prompt",
+ default_user_prompt.get(
+ dataset_name,
+ (
+ """You are an AI Vision bot specializing in providing accurate and concise answers to multiple-choice questions. You will be presented with a question and options. Choose the correct answer.
+
+Example:
+
+Question: What is the capital of France ?
+
+Options:
+A. Berlin
+B. Madrid
+C. Paris
+D. Rome
+
+Answer: C. Paris.
+
+Example 2:
+
+Question: What is in the image ?
+
+Options:
+A. Dog
+B. Cat
+C. Elephant
+D. Ear
+
+Answer: Unrecognizable.
+"""
+ " Similary \n Question: {question}\nOptions: {options}\n Answer:"
+ ),
+ ),
+ )
+
+ server_prompt = kwargs.get("server_prompt", " ")
+
+ text_dict = {
+ "question": self.question,
+ }
+ input_variables = ["question"]
+
+ if self.options is not None:
+ text_dict["options"] = self.options
+ input_variables.append("options")
+
+ payload = {
+ "text": text_dict,
+ "prompt": {
+ "template": prompt_template,
+ "input_variables": input_variables,
+ },
+ }
+
+ # convert the images to base64 data URLs
+ orig_image = self.convert_image_to_base64_url(self.original_image)
+ pert_image = self.convert_image_to_base64_url(self.perturbed_image)
+
+ self.expected_results = model(
+ **payload,
+ images=(orig_image,),
+ server_prompt=server_prompt,
+ )
+ self.actual_results = model(
+ **payload,
+ images=(pert_image,),
+ server_prompt=server_prompt,
+ )
+ return True
+
+ def transform(self, func: Callable, params: Dict, **kwargs):
+ """
+ Transform the original image using a specified function.
+
+ Args:
+ func (Callable): The transformation function.
+ params (Dict): Parameters for the transformation function.
+ **kwargs: Additional keyword arguments for the transformation.
+
+ """
+ sens = [self.original_image]
+ self.perturbed_image = func(sens, **params, **kwargs)
+ self.category = func.__module__.split(".")[-1]
+
+ return self
+
+ def __load_image(self, image_path):
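+ """Load an image from raw bytes, an HTTP(S) URL, a data URI, a PIL image, or a local file path."""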
+ # check whether the image path is raw bytes, a URL, a data URI, a PIL image, or a local path
+ import base64
+ import io
+ import re
+ import requests
+ from PIL import Image
+
+ if isinstance(image_path, dict) and "bytes" in image_path:
+ image = Image.open(io.BytesIO(image_path["bytes"]))
+ elif isinstance(image_path, str) and re.match(r"^https?://", image_path):
+ response = requests.get(image_path)
+ image = Image.open(io.BytesIO(response.content))
+ elif isinstance(image_path, str) and re.match(r"^data:image", image_path):
+ image = Image.open(io.BytesIO(base64.b64decode(image_path.split(",")[1])))
+ elif isinstance(image_path, Image.Image):
+ image = image_path
+ else:
+ image = Image.open(image_path)
+ return image.convert("RGB")
+
+ def convert_image_to_html(self, image: Image):
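+ """Return the image as a 200x200 thumbnail embedded in an HTML <img> tag."""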
+ import io
+ import base64
+
+ if image is not None:
+ image = image.copy()
+ buffered = io.BytesIO()
+ image.thumbnail((200, 200))
+ image.save(buffered, format="PNG")
+ img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+ return f'<img src="data:image/png;base64,{img_str}" />'
+
+ def convert_image_to_base64_url(self, image: Image):
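+ """Return the image as a 400x400 thumbnail encoded as a base64 PNG data URI."""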
+ import io
+ import base64
+
+ if image is not None:
+ image = image.copy()
+ buffered = io.BytesIO()
+ image.thumbnail((400, 400))
+ image.save(buffered, format="PNG")
+ img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+ return f"data:image/png;base64,{img_str}"
+
+ def __update_params(self):
+ from ...langtest import HARNESS_CONFIG as harness_config
+
+ self.config = harness_config
+ self.metric_name = (
+ self.config.get("evaluation", {}).get("metric", "llm_eval").lower()
+ )
+
+ if self.state == "done":
+ from ...langtest import EVAL_MODEL
+
+ if (
+ "evaluation" in harness_config
+ and "metric" in harness_config["evaluation"]
+ ):
+ if harness_config["evaluation"]["metric"].lower() == "llm_eval":
+ model = harness_config["evaluation"].get("model", None)
+ hub = harness_config["evaluation"].get("hub", None)
+ if model and hub:
+ from ...tasks import TaskManager
+
+ load_eval_model = TaskManager(self.task)
+ self.eval_model = load_eval_model.model(
+ model, hub, **harness_config.get("model_parameters", {})
+ )
+
+ else:
+ self.eval_model = EVAL_MODEL
+
+ def is_pass(self) -> bool:
+ """Checks if the sample has passed the evaluation.
+
+ Returns:
+ bool: True if the sample passed the evaluation, False otherwise.
+ """
+
+ if self.ran_pass is not None:
+ return self.ran_pass
+ elif self.expected_results.strip().lower() == self.actual_results.strip().lower():
+ self.ran_pass = True
+ return True
+ else:
+ self.__update_params()
+ try:
+ metric_module = importlib.import_module(
+ "langtest.utils.custom_types.helpers"
+ )
+ metric_function = getattr(metric_module, f"is_pass_{self.metric_name}")
+ except (ImportError, AttributeError):
+ raise ValueError(f"Metric '{self.metric_name}' not found.")
+
+ if self.metric_name == "string_distance":
+ selected_distance = self.config["evaluation"].get("distance", "jaro")
+ threshold = self.config["evaluation"].get("threshold")
+
+ elif self.metric_name == "embedding_distance":
+ selected_distance = self.config["evaluation"].get("distance", "cosine")
+ threshold = self.config["evaluation"].get("threshold")
+
+ if self.metric_name in (
+ "string_distance",
+ "embedding_distance",
+ ):
+ self.distance_result, result = metric_function(
+ answer=self.expected_results,
+ prediction=self.actual_results,
+ selected_distance=selected_distance,
+ threshold=threshold,
+ )
+ self.ran_pass = result
+ return result
+ elif self.metric_name == "llm_eval":
+ if isinstance(self.eval_model, dict):
+ self.eval_model = list(self.eval_model.values())[-1]
+ result = metric_function(
+ eval_model=self.eval_model,
+ dataset_name=self.dataset_name,
+ original_question=" " + self.question,
+ answer=self.expected_results,
+ perturbed_question=" " + self.question,
+ prediction=self.actual_results,
+ )
+
+ self.ran_pass = result
+ return result
+
+ else:
+ raise ValueError(f"Metric '{self.metric_name}' not found.")
+
+
Sample = TypeVar(
"Sample",
MaxScoreSample,
@@ -2772,4 +3088,5 @@ class FillMaskSample(TextGenerationSample):
LegalSample,
CrowsPairsSample,
StereoSetSample,
+ VisualQASample,
)
diff --git a/poetry.lock b/poetry.lock
index 3526d8014..b3655893c 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -3344,7 +3344,7 @@ files = [
name = "pillow"
version = "10.0.0"
description = "Python Imaging Library (Fork)"
-optional = true
+optional = false
python-versions = ">=3.8"
files = [
{file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"},
@@ -5753,4 +5753,4 @@ transformers = ["accelerate", "datasets", "torch", "transformers"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
-content-hash = "f43231a0fd18c0d2b740ccad37045fd68294240109c0744b13973dc3ec2f445d"
+content-hash = "7c8dc3eabf8a4d28f97b9be0f2a9fb70261baef10e3d2ef996fe56a906c36a45"
diff --git a/pyproject.toml b/pyproject.toml
index 724645aab..074ef4b92 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -82,6 +82,7 @@ importlib-resources = "^6.4.0"
click = "^8.1.7"
openpyxl = "^3.1.5"
tables = "3.8.0"
+pillow = "10.0.0"
[tool.poetry.extras]
transformers = ["transformers", "torch", "accelerate", "datasets"]
diff --git a/tests/test_robustness.py b/tests/test_robustness.py
index 70e6bd78f..8b332db87 100644
--- a/tests/test_robustness.py
+++ b/tests/test_robustness.py
@@ -469,7 +469,10 @@ def setUp(self) -> None:
test: list(scenarios.keys()) for test, scenarios in test_scenarios.items()
}
- self.perturbations_list = self.available_tests["robustness"]
+ self.perturbations_list = [
+ i for i in self.available_tests["robustness"] if not i.startswith("image_")
+ ]
+
self.supported_tests = self.available_test()
self.samples = {
"question-answering": [