diff --git a/docs/cassettes/chatbot_16.msgpack.zlib b/docs/cassettes/chatbot_16.msgpack.zlib
new file mode 100644
index 0000000000000..ee5287d564f0c
--- /dev/null
+++ b/docs/cassettes/chatbot_16.msgpack.zlib
@@ -0,0 +1 @@
+eNptVQtsE3UYL0+oiUpIRAGNoQzEV+9613dBhK5rWTe2butgDwP1evdve/Tu/rd79DHCECRBMRGOh4+YkLCVlizb2ITAQIeCEdAIaHTiUDSCgkFQgxLAGPF/XSdb4JI+/t/3/X/f6/d9tzafBJLMQmFcFysoQKJoBR1kbW1eAi0qkJV1OR4occjka4Kh+g5VYoeeiSuKKM81mymRxaEIBIrFacibk6SZjlOKGf0XOVCAyUYgkxl6aWUJD2SZigG5ZK7xhZUlNESuBAUdSsrZmcbAk7yxFEbwEpOxRIIc0OWqDKSSVcuQhIcM4HRRTFQwG8R4VmB1SwHJSPQrKxKgeHSIUpwMkEABvIgSUVRJRyJw56p8HFAMSvN7w+RsHMqK1jM29N0UTQOEDgQaMqwQ07pjraxoMjIgylEK6ETxCqBQGK0zAYCIURybBLnhW1ovJYocS1O63rxChkJXMT9MyYjgTnWnnhuGqiEo2t4gCsITMNdkUI0FI4nbXDjRm8ZkhWIFDhUN4ygUT04s6N8brRApOoFAsGL/tNzw5Z7RNlDWdlZRdDA0BpKS6Li2k5J4h23PaLmkCgrLAy3vrbnTXVF5250VJ0nc2TcGWM4ItLaz0Ij9Yy4DRcpgNEQY2g4iR0OYYIE2dDUcpqPhCD+/MhX3NvjcvkQgE/Evaom6oKXBllGVQBR4EklHxJKmrZ5YY8a7JIGRTovT5nDYSBdG4gRO4iRWQTjgoromni+TGKLG77QmZWFpNFjqaFWZtAf3lkb9ajAN1DIcOJsbY4FUucXS0lRaugJvttaHV0hKeUuSteLVcqiijqpM+SvCjb5kap4RRacmWWZ+uZ3ylfPxeHiJB/IysaQqmai2OJx1lYnWJGyx2JdCAvf5M77mitio8AirFSOKEToIm4vQn54RbnBAiClxrYO0uHZJQBbRtICXc6hkiiqvzSIegs+O54tj0x6svE3hh7NliJPagF9iTUaL0xgCotFCWGxG0j3X5pxLEMZFVfVd3qKb+rtSsK9eogQ5imjoG6F8no6rQgIwnd67kn1AJzvqpB4+mlEMpEUoA6wYldbViNUN7wssULZneLIwKMUogW0tuNUGCqxPtaZTDK0yTDyZ4gl3q83KRoBKR/cWr4gS1N2ggDBe1jqsbqKnqBnhXSfKlcBIAiPIA2j0WRqNmZ6MCCUFkwGNNpSS0YZMPJXWZ2y+lbRbHajw84ysQHMqA0JqpAzyiJnyPKMoAQ5SzME0hvYF4FieRY0pfBe3n6xl7ehy/50GCkwAtCfztkJbiUOjLSSg4+tJ3Iaxud3u9+9uNAJlRSZup/vgWCsZjI6GtPBy/50GRYh2Qu5Kj1hjLKMNzUaHsJ2hKYsjQtppB0m5iQjDRJy0Gx1pO2F1MfRurx/zUnQcYKEC/7R8WVO1pyrg3deIjSYSFhSH3w55AcoCG43mQkBCjdE6aQ6qDFqWEsghrDpPk7bXRbsBQdAWwk1H3ITDjpWiNTSC9j/tsvqmLbwm1uT0dgqxj8cJM167z1B4JqDPrVvKphPCt8Tkdb+3briyvfvII+t2DT7RG5l9cubCoG9N++SnNrzh3NH/wAH/pXvarpduviUd962f3tYxlAkaNjWbxl1b+upvA1VvpgYvnb7adzC/xO/7ZkH3jOXPdv4b8CxoO8cZ+zOO5uqvGk4M7Pe/cpb59cWtb81Uaxlfm7zFfGij/1R334SvMwG5eqW0NfqnKXjRd/BGMrt/8Zky04+p8YY/nm9bv73Hvrpv3c3L2b/m+Aenm05tNBC9j0+pJ+um/DwxNjW056HzRy8l1m54x9x7wfn5p4dnLZ54OXPvke4zPyzfcphL1j545ezJMx1troa+725+WPXc37FV3tfP7Zvk9yU/gh29x76YP9uw+uiW1IyehTP6r9fd/8E050T83acnmb3ht4+5axq3DvwylK85+2gtc36Qc12bdXrZ+AsXN5/fNu167eIwPK7OubHwyw2PhadmOq62f7Lt6E+b/rlXL+oEQ2xNDRkabzD8B46XMHk=
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_17.msgpack.zlib b/docs/cassettes/chatbot_17.msgpack.zlib
index 675c29b7ccbee..0121d6017bb95 100644
--- a/docs/cassettes/chatbot_17.msgpack.zlib
+++ b/docs/cassettes/chatbot_17.msgpack.zlib
@@ -1 +1 @@
-eNptVWtsFFUULtYQJJpQE1ESHuMKgZDOdGb23aZqu6UPYNvSXboUYjZ379zdne7O3Ok82t1FUBBBEJAJPkhQCKXsam15hFIeUjEQowKigtEUgR8o0UQFAaMGCXh3u5U2MMk+7jnnfuf1nTMrM+1I1UQsj+kRZR2pAOrkoJkrMypqM5Cmr0pLSI9ioauxweffaaji4OyoritaaUkJUEQGK0gGIgOxVNLOlcAo0EvIfyWOcjBdISwkB1cstUhI00AEaZZSaslSC8TElayTg6VWfJqqmylRlThkKaYsKo6jrNjQkGpZ9gKRSFhA8awooui0lbHTuqGGcNZWJlKO/Gq6ioBEDmEQ1xAR6EhSSCbEMIvFMs5lmSgCAsnzUsGErijWdHP36Nj3AAgRwUcyxIIoR8zeSEpUiikBheNAR90kYBnlKmN2xxBSaBAX21F66Ja5FyhKXIQgqy9p1bDck0+Q1pMKul/dnc2OJuWQdbOvgQRRUVfSmCRFlimOsbkYdm+C1nQgynFSNToOSDxpJaf/aKRCATBGQOh8A8300OXdI22wZu7yAtjgGwUJVBg1dwFVctj2j5SrhqyLEjIznsb73eWV99xZGY5jnPtGAWtJGZq7co04OOoy0tUkDTHBMHewaYhxTETm4I1gEIaDIalcrEoGXHWsKiwKIWtUUoWKOczCqnom5bQ3zautbU44YIvuratvd0Oac/JOm51321maY1iGYzg6CZy+KK+wAc1wArbVmhJBKiDH2oJigAmwsZhtkTjXb8MLbWExZWs1HLFGX62gR6QgCFS2yj6/p8kXYRa0ekONNbHUYjExN2ibgyNlFInOaBeF8jrWUW1vrNZc9bWVzWHF1eGtku1aBT8/mJi3ACt6m1evYRK+igUuPCI8h9VBs/kIHazNxWaf3cPciCM5okfNnRzvfl9FmkLGBb2SJiXTDW1lF+EhOv15Jj83nQ3z7lF4YlcV4aQ5UK2KxRTvpHxIoXiWt1Gco5TlS608VeP193jybvwPpOA+vwpkLUxoOGeY8hkYNeQYEro9DyT7QJbspJPZ8MmU0iihYA3R+ajMnkV009DCoOuq9g9NFo3VCJDFVM6tOZBjfUcq0SFAQxCi7R0S607ZrGIIGTDcl7+iqDjrhgRES5q5k+ccu/OaYd51k1xJ51ma5Q6T0RchGbNsMgpWdVpDkKwoPWkOFksgkZ2xcitntzpI4csoUYZxQ0A+I1SFJcJMrYxSVBTHQDiSoMm+QHFREkljct/59aeZXXZy+dD9BjqOIbIoM7ZcW9mPR1qoKIufTeIejM3tdh99sNEwlJWYuJ3uI6OtNDQyGo6XtEP3G+QhOlmtJzFsTYuCOTidHIKAFTi3y40EIRQK8XaEoBO5nA6BF0JOwQWFPZ5q2gNgFNG+HP/MTFVLfYW3ztO/iB5JJLpBGXo9ZGSsyWI4nPYhlTTG7IZxbAhkWaooTbCaKlrMPhd0w5Az7ASQ5dysw01XkjU0jPY/7bqymzYD4qR37dDcH7WWW0ptNquljJJAuctBqpt7iaxIZ3stRz4dA6a9Pq4g9xSSz927+qZ5+Dg7YdW12+t6v3p4hljUyiyc9rj/3dltFQufmBR5RJ6CBeyfdflYwPHG154S9+l/tp4+s7yA0meMn9D6TIu9t27q9pmpO+H+c1c6txafvbXm7K7zp/m/+fecoafGLV7Wedlqb/q2b2zL5vLzJ3cIj34wOH37hQ03/62bumH1bNuYD5fUyzU/rio72Dn9yxvX117Y1PzLnDHXCgteXnZ79cb+472zqCnb3ho78I593eUb45538RVFZ5f+3rJ44olLfmGS1FPugeO2RY1C662xM+ijx4tWTjE+8X9x8YT9N+VictLJH85s2bz8UB/nP7ix+c5PwT8P9J/Hc9ec3fzYS3/N733tKij4o2jP1TuTLYtrvKdmnHvy/IQtVz5zfXPtRbRtYtH8Yu+en+9WoleLD1wPnFoLn5N+fZYaeHN9YPyxm+u9h7/rr/6+dMqa9Usmjz2Wq2NhwfIg+3bNQwUF/wHhwSyT
\ No newline at end of file
+eNptVQlsFGUULiURa5RgIgJBw1A5EuhMZ3ans91y1O0e7da2rZSWtoBon5n5d3e6c/3zz+wFCHKoAa8BTYzhCLTdNbW2HBUQBUWDJ55RoAkxUWNUqhGRgGAI/rPdSkuZZI//Hd97/3vfe7MhEwMakqAypltSdKDxgo4PyNyQ0UCbAZC+KS0DPQzFjrpAQ2O7oUn9c8K6rqKy4mJelSioAoWXKAHKxTGmWAjzejH+r0ZBFqajFYrJ/qurC2WAEB8CqLCMWL66UIA4lKLjQ2GVNJ3wz5aJCthKFRYRhRqMAktuIKAVri0ibjG27KYTVTBOCLxC+AkeIQnpRBIahA5FPlk+HGNQyWPfW4GW4jxnI0JOEgovg/LRgR/DEhmKIGqJQqpOspCUJUWyLBUsY/Av0jXAy/gQ5KMIYIEOZBVXUDc0C4mmHGszYcCLuL7f503oCEOkmz0ja9bLCwLA6EARoCgpIfONUEpSiwgRBKO8DrpwygrIdsTsigCgknxUioH0oJe5j1fVqCTwlr54FYJKd+6KpJ5UwWh1l3U3ErdB0c2+AE7C5S+uS+LmKgRDsaUUvS9B4npJShR3i4zyOJ+0mtW/PVyh8kIEg5A54pjpQeee4TYQmZ21vBBoGAHJa0LY7OQ1mWMPDpdrhqJLMjAz7rrR4XLKm+HsFMNQjv0jgFFSEczObCMOj3AGupYkBYgxzD10WoAwIgGz/2JLixBsaZUX1MTD7qVepzfiT7b6KtuCpdC2lE0auj8IXJEY12pLCHZXqCnpXhwhGYfNwXIcy5SSDEVTDMWQ1TQHK+ubZdmjiXSdz2GPIWVJMFDBpQwx4aLcFUGfEUgAw0MBx7KmkD9eZbO1NVdUrKKW2RtbVml6VVtMslOLUEN1PV8T91W3NHlj8XkEzs6ISeKCqhLeWyWHwy2LXVBG9OLaWGSRjXPU10RSMdhmK1kCacrrS3qXVYeGpUfb7SSdy5Cj2VLaenqGuBEFSkgPm+021vmaBpCKxxRsTOOS6Qba0IF5CE59nMnN695AzU0KT+zwYE6ax3yaVETYHEQDUAkbbWMJxlnGOspomqisbex258I03paC+xs1XkFBTEPvEOUzQthQIkDsct+W7McssuNOWunjGSVBQoUIkLmszO4msn5wUZF+z8HBySKhFuIVKZUNax7Lsj6eSsRFwRDFcCwu084Ua5dagSEE+3IuqgatMDghUkZmO+uw9+Q0Q7zrwnelSYYmaeYtPPqSgMfMuowKNZ1EQMCrUU+a/UUyn7BmbIGdKbFzuPDzCEkRooYIGoxWD5QxM9E8QtVAFPLi0QSJ9wWISrKEG5P9zq1dZHaUYOcjow10GAF4QWfYbFvp48MtNGDhW5e4CcM6nc53bm80BGXHJk6OPTrSCoHh2TA2GR0ZbZCD2Euj7sSQNSmJZv8MfGhhgw5At5ZwzmApzTEcC4CDc7I4I04oLWEFsdftI928EAZkQ5Z/ZsbTvMhV63cfaiKHE4kMqIOvpYwCkSIFg+kGoOHGmF1CFBoiXpYaSGOselez2VcqOAFNC5wNE99JcyVkBV5DQ2j/067D2rTZ99OTaaudSujkmNi0rXfmZZ+x+HPjhv7iiUUf0BM8f17dcvKXO96c3rdnkmdMwaZv53ZN2LJD3H7k7MrPYuu7zr/7UEbOH0ctPPX3s8HYtDH0o/tn7jvw8/FG34fxK68//VwisWLdF+Urfy9b++lKePzwC5uPLR2YxBWMn3Oo6drBgQMzwIyU69ozNVvPCVU7yW3est2ZT/rGbZnz0onTk6dl7pu7x7vjke3sLurH5ph2eEa1Hi2A9+a9d3ydO/DYD47lFbu/m3vXzp/uOfDzhYL1zdtc/DnnD/PP7Kr1NW27/Nq/56eeLwr3TNi0ceLESRc8LXXaPV9+tZfd1z5+/uI1joJ/LsefPXkUzX/k650vzb5OX9r27eSazfP+cvVuvr/nc1KrLNPVXXcP/DY9P/Hnlfi6WYzgn7J79bWHH8zv/UZ4YGrvwvf7p6Qurng+9er10KdzL62oujLw1O9nA78G735l/Mx/Xz71x47Kl89UOMLPTfxo++lZvQcH8q+uMTuLyjzl2eKOzfM9UR57PD8v7z9er1OE
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_18.msgpack.zlib b/docs/cassettes/chatbot_18.msgpack.zlib
deleted file mode 100644
index 4958beda43294..0000000000000
--- a/docs/cassettes/chatbot_18.msgpack.zlib
+++ /dev/null
@@ -1 +0,0 @@
-eNptVX1sE2UYB1EDRBE/EJ1Dj4liyO561+8ykWwd2zroVtZBh0Sbt3dv21vv7j3uvdvaDglfKhFCcjFGifwho7SyzLEBCoKoifELTRDRxCYMFRNFiESMYogovtd1sjmatL33+fg9z/s8v+e5jYUuqGERKZP7RUWHGuB1csDmxoIG1xgQ65vzMtSTSMiFWsPtuw1NLC5I6rqKF9psQBUZpEIFiAyPZFsXZ+OTQLeRZ1WCJZhcDAmZ4t89VTLEGCQgrlpIre6p4hEJpejkUNUkzqUC82WqDsWqqqkqDUnQEhsYalXPVFPjbaEkIctyLtWEuikeKFSAAhiLWKcyyKB0JIDM4rEwI0pA3P+PFSGZzseUnKEUIMPFE2M/RSQyEqBkiRKqTjsYF60bWgxZtgqRcuQf6xoEMjnEgYQhEehQVkkViaGFxTKeZwpJCARS4zOTZuaSCOvmwPi67QM8Dwk+VHgkiErCfCORFdVqSoBxCeiwjyStwFJXzL4UhCoNJLEL5ke8zEGgqpLIA0tv68RI6S9fktYzKpyo7rNuR5NWKLp5sJUkURuwhTKkwQrFMU4vww6maVIxUZFIx2gJkHzyakl/dKxCBXyKgNBl8pj5EeeBsTYIm3uCgG8Nj4MEGp809wBNdjsPjJVrhqKLMjQL/tDEcGXl9XAOhuMYz9A4YJxReHNPqRGHxjlDXcvQPCIY5i42zyOUEqFZ/C0a5ePRmLxIrM9EvAFWEzpi0JGUNaF2CbOivoXJelxtS5uaVqbd/Co9GGjp8vE057F7nC67z8XSHMMyHMPRGeAJJ+0qG8GGB7CdjqwIshEltSYqRpgIm0o5O8Tmdida4YyLWWen4U6Fwk2CnpCjIFLXqYTb/W3hBLO8MxgLNaayT4rp5qhzCUrUUCQ7o0sUFgVYd4Mr1IC9LU11K+OqtztYr7hwrX1ZNL10OVL1NUG9kUmHa5d70Zj03A43zZYzdLNOL2t9Bka5IUEloSfN3XaX43UNYpWMKtyUJyXTDbwxR3gIP/+kUJ7Z3tal1yk8K1dPOGkea9DEasruocJQpeys3Ulx7oWsfaHDQTUG2/v95TDtN6TgULsGFBwnNFwySvkCnzSUFBT6/Dck+zGL7KSTVvpkSmmYVhGGdDkrs7+DbhtZVnSg/sDIZNFISwBFzJbCmsdKrO/OprsF3hCEZFe3zPqyTocYgwYfP1h2UTVkhSEJ0TImxWHdA2XNKO/6yF1J51ma5d4moy/yZMysy6hI02kMebIe9YxZrJZB2pqxRQ7O5XCTwtdQosJLhgDDRqweyYSZuIZSNSghIBxJ02RfQEmURdKY0m959WIz5yLOhyca6CgFyZIuOEttZd8da6FBC9+6xHUYp8/ne+fGRqNQDmLic7uOjLfCcGw2nF3GhycalCF6WdyfHrWmRcEsziOHqNfpsQMHgWcFzg49LiB4WBfv5liHN84JLNznb6D9gE9COlzin1moX9VSGwz43+qgxxKJblVHXk0FBWFFjMfzYaiRxph9vIQMgSxLDeYJVlvtKvOgl/fxMU88xnl9wMe6fXQdWUOjaP/RLmdt2tI7akPeaqeS+HBy+KGtUyeVPlPI99o1PTSkbGJnDl+Z/hneMvXmXftd7b98MePnjhWz7OrMwkeJYuPXuePvRbzTcv6XHzhz9M+Lw39Nu++H/d/7fvijJTT8wKwzs3esXZe+suDpQ69+8NZjl1d9Oe9jPnVH6/DD0W3T+y+HTq14s3/IM72p94lTb95Sm62lfcvm1Xu9H1nzu6pPsXXXgW8e/Wn8u19b4XHz77eeX5yp9OO1R6efa3rlyOWRx0dnbb/E7bD2/+dMfvVxd4c9m9vmHzq98LbWuTzxzZsfdz5yaPPylpyc7ueGpl94Wbq79TpbW/bWf83z/guX57Z/sv+S8PXrxt/ucz7Y/vj0oYUL/wPTclOr
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_19.msgpack.zlib b/docs/cassettes/chatbot_19.msgpack.zlib
new file mode 100644
index 0000000000000..50204dbbb689a
--- /dev/null
+++ b/docs/cassettes/chatbot_19.msgpack.zlib
@@ -0,0 +1 @@
+eNptVXlsFFUYLxCOYBHkUIiJrhuUKLztzO7sWRC225YeltYe9DBQpjNvd4fdmTed4709LKJE5A+JyEQiRhMVyu6atRRbUBGPoGLURGIwaGNEpaIGBQQVDPjNdiutMMm+7jvf9/uO93vfDBavIF0VsDhlSBA1pDOcBhN1sHisowEDqdq6Qg5pSczn13RFe3cYuvDT/KSm5dW62lomL9BYRiIj0BzO1XbwtVySUWvhdz6NCjCFKOb9I79M7arJIVVlEkitqaWe6KrhMFgSNZjUbMUGxeiIYqgkAn8VLDvTdUSxqYzqIYkelNJmpIIEAZiqU8rq6NEaimeGKLACPyrVpMHDYpF6nCMk4aIkgTeoZ3dTCU1LO7EO5IhjO2JYFOksZgOiqgEcP4Y4GzglzmVZluSTyOzltA1XeQuZsCBJgsjBujVNxeLQaIbkfB6V3z3u3B1OqjvD5AUJpYDgKIJjUTaQ7rTbbFSl1kjfzqg1x5SsChjUOJMDeW7M1osLleVuhuuKTbDGT2aZfD4tcIwpr01VsTg0BhKl9eXRzfFSaigIStY0l4QAhKGdtfcVIPcKeE7qJocQ6Jl1wY9pmWEEscDkE0qIYy8o5IvkJJAZyZ8X0oVmTi07TTFXTCSTSIdDKBpJZbiZ8QbKWjSVZ0RzEOrgnKsFFI9lsL/kGI1PtwvgZHOQwSqqHYXJ3vsPnrdBmeb2NoaLxi6ABIZMmtsZPRfyHbpwXzdETcghsxjuuNJcefNqZNh53mYb+GOCYvNCmmsqFWLsvwijZmdoDgOG+YtzkMM4LSCz90w8zsXjGXkxb3HtibAPHdycZxv4QJxv7FiVWRVg10VgX7ClsqbG5VjXGbTZjK4gZXXqGPI5KDJKllKEHCwsqdEVjHNBpNB62q1Zm8YU0g6cm7Il3HQsFFqrwKVR++SYdIpnpTRyOhNOOsiGJHqNxC/ZvHmj3xNtC8a4pUsbm1dvSoVD4bg9RUvNDWIo6rK1tSyV+nzMKj7SaLtxc+vyAaU5GZV8dBOJtq5hGgebljWua23YsLZeqUc5xhNtScy9oznV5s7E1/AmJzjWPbOqWuBRyoQG5CqVmO6ig14IuWkcHIZT9BCXDkHu3lYoN6uj4dBFCN891AS5NA/X64JC+Vs1TS5Yad5HsTavw+F1eamlLe0DwUqY9ssy8GK7zoiqAJlRPc75Ii9pmWHEZ4KXJftpmewwuLA/hNIopX/UqgQLh4jFrOyDBCYjoplkxk+u1X/1NKgUq2WxOuOGg+8oUFdO5dQbAnnwnrqvaeXAmKZEY6mnjg3fOHVKeVQt+4ZmnqYlR7eOT7T7r/txonN6onr1TebzFKmuWVg37aeuyq2fHvvm4dd3D3u+/P25/Oov7uj6TZ1z+PSzoeQNNz4zbVDMjUjdVZ8/cXo2Wvb9zp93HXi/IXHnqFM+9PTdr9Kv/mvZkUNn7+rbOTo6ffjAk29+fUvv0K+Ljn1Xtd3+6Nfi52vn+97afuLAwhMR8+SPIyPj7XM+ePBM8UQvPXJWMJ7/m73p9g/vnLe4rvUI2w9Dy5bt73mo+Hcn3bHzwIyK0lVDlQdVb9M99z/gsv4LP5w0gA==
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_20.msgpack.zlib b/docs/cassettes/chatbot_20.msgpack.zlib
deleted file mode 100644
index 7e0015e571f05..0000000000000
--- a/docs/cassettes/chatbot_20.msgpack.zlib
+++ /dev/null
@@ -1 +0,0 @@
-eNptVQtsU2UUHhAFgghGIaATmgkhkt32PvrcnDi69+heLXRzQPf33r/tXXsf3Hu7tZ0gGwpBInJDTJgxZi9a2MbGHPIYm+IDIg+NKAlOEwUVDAR8gBEDhvm362TLuEkf/3/O+c7rO+c2xeqgJLMCP6Wb5RUoAVpBB1ltiklwQxDKymtRDio+gekoK7U72oMSO7zcpyiinKHTAZHVCiLkAaulBU5XR+hoH1B06L8YgAmYDrfAhIdfb0jjoCwDL5TTMjTVDWm0gFzxCjqkOZHFMlnDhTU84OCKtHRNmiQEYFwUlKGUtnEduuEEBgbiV15RwSitAVOCkluI6/LolkC/siJBwKGDBwRkiC4UyIkoG6QYx8K1po0xHwQMyvWHlLkdPkFW1J6J8fcCmoYIH/K0wLC8Vz3gjbBiuoaBngBQYCcKmoeJ6qidfghFDATYOhgdtVIPAlEMsDSIy3W1ssB3J5PElLAIJ4s749lhqCS8oh4qRUFkF+rKwqjQvIbQ6s1a/GAIkxXA8gFUOSwAUDxRMSE/Pl4gAtqPQLBkE9XoqHHPeB1BVvfaAF1qnwAJJNqn7gUSZ9T3j7+XgrzCclCNWcsmu0sKH7ijtAShNfVNAJbDPK3uTTTiyARjqEhhjBYQhtqKR2lB8LNQHb7lctEel5vLYnPCTnMhLjGVbkj5OInJztWuzinRRkyGiuKCgjUhI12l2ApL6iw0RphIk95AWgw4RmhxLaElsDAw2X2kiDvloAngtVSEBREn79/gYp1aJ+736yvZIodeWK33sBF9bdDoL7MXMIqXcwHnylre7rBW2L3a8lqbuyzfH3mZDRW59LmCN1ODogvWsUxWIW7MM5TlyeaSgpVrPKK53pbDG+RscpUrVFwuiMoGm5KvDdmzy83CuPCMlBHDkxEacb0Zjz89Y9wIQN6r+NR2gqL2SVAW0cjALVFUMiUoN3UgHsJzn8eSs9NWWvyAwvM6chAn1aE8iU3XkCaNHYoaEif1GsKYgZMZFKXJtzm6rUk3jodSsM8hAV72IBrmjlE+RvuCvB8yndaHkn0oTnbUyXj4aEoxGBIFGWLJqNTuSqxidGlghTn9o5OFCZIX8Gwk4VYdSrC+PhKqZ+ggw/jq6jncEtFTrBsGac+hpIkoCXE3KCCMk9V2PYX3JCVjvOtEuaLO4xhOHEOjz9JozOLJiIKkYDKk0ZpSwupwOgdC8RnLoggDZUSFz9SwPB0IMtAedOcIHGKmnKkRJRgQADMQwtC+gAGWY1FjEt/JFSirHQZkfHSygiL4IVqWMX2irfiH4zUkGMePJ/EARm+xWAYfrjQGRSEVi8kyMFFLhuOjIUhOPjpZIQnRhsvdoTFtjGXU4SXo4AIMIE1m6KYMNGM0EJQJZ0jcbKDceo+BNHmYXmseZgW0D2L2BP/UWE5VSbat0Hq4EhtPJKxUHH1FxHhB5lmPJ2qHEmqM2kkHhCCDlqUEowirIrtKPWSmLbQbgZvcHr0FN1qwlWgNjaH9T7uO+KZNvCsao/F28t6TUzYv3jEjJfFMQ5+REaX845KpxNyh2/N2rx9MaTw5W91RPqeobXr79u2ndr2feWP+9782XbLn8jBF+QLNjus52d/2P5xp6jy/aTB57p/FMEdbcdavLyk7vOvDT7asefOl3cVdF7Qjuetq2PCGYwXX55+w5ZOlmaufprE58sXXHvI1sWLJ5645NmHOBLuQeO2xY1dK63xs6gjx4vWjnF+MT/xcUT9t+VictLJH85s2bz8UB/nP7ix+c5PwT8P9J/Hc9ec3fzYS3/N733tKij4o2jP1TuTLYtrvKdmnHvy/IQtVz5zfXPtRbRtYtH8Yu+en+9WoleLD1wPnFoLn5N+fZYaeHN9YPyxm+u9h7/rr/6+dMqa9Usmjz2Wq2NhwfIg+3bNQwUF/wHhwSyT
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_21.msgpack.zlib b/docs/cassettes/chatbot_21.msgpack.zlib
new file mode 100644
index 0000000000000..681a79dce4038
--- /dev/null
+++ b/docs/cassettes/chatbot_21.msgpack.zlib
@@ -0,0 +1 @@
+eNqtVWtsHFcVtuOSoraogRIaXmViQEaRZzyzM7vjdRKi9Xq93liJHT8SOyFs787c3Z3dmbnjuTO7Ow7BqQsqNAE6FfwhQNrY2S2WYzet1QanjtJWfVhqi0QMqus2lD6UIiigkKC0RAp31mtiJ5H4w0j7uOfxne/e850zw6UcNLGC9OpxRbegCSSLLLA7XDLhgA2x9b2iBq00kkc7O7p7RmxTmd+QtiwDNzU0AENhkAF1oDAS0hpyXIOUBlYD+W+osAwzmkCy80b10L5aDWIMUhDXNlF79tVKiNTSLbKobVPWU7E6jWpGCaa2nqo1kQo9u42hWbu/nrou2ItbT7WhPCUBnYpRAGMFW5SDbMpCMnC2LMdYdAKSez3QLkK0DlOaQ+lAg1v+Z+F+ZJvlUErB11NIQ9W4RqDec/+/WOwlFg3JUPVMKcOiBURriq54kTqxceQXWyYEGlkkgYohMVhQM0gjLdv0kFhG3F9KQyCTNp+rWjOaRthyJ1a2bhJIEiToUJeQrOgp93hqUDHqKRkmVWDBMUJZh2VhuGNZCA0aqEoOFhez3MeBYaiKBDx/QwYjfbyyRdpyDHije8zbG03EoFvuVAchEYo1dDpEYzrFMUIjwz5eoMl5KbpKNEOrgPApGmX/qeUOA0hZAkJX9OsWF5Mnlscg7B7bBqSO7hWQwJTS7jFgagHhyeV209YtRYNuKdx5Y7mK81o5nuE4RjyxAhg7uuQeKzfi6RXJ0DIdWkIEw32ULUoIZRXozl+Ix6VkPKFtbs+nw7siwUg25iRaowPJRuTbJTi2FUvCUDYXSPgKEh9K9Tnh3izNiT5RCAQErpHmGJbhGI7eygZQtKtf01pMme1sFfkc1ncmO5oDg7ZcCDHh5mSr3VGAdgsDxd19qVi+zecb6G9uzjC7+Z54xrTaBnIKz2zH3Vu7QHu+dWu8L5LLb6QIOzunyJvb/CDSpqXT8d4Q0jDbuy2X3e4LiF3t2cEcGvD5dyKWibQ6kd1bU8vosTxPsxWGAVZoZL1nYkkbKtRTVtod4Rv5x0yIDTIt4P1FcmSWjYdHiQ7hKy+XKlPjaEf7NQmvHW0hmnRnWk2lnvKJVDc0KB/rEygu2CSITayPim7rGQ9XyvTcVIInekyg4ySRYWRJ8iUpbetZKI+Fbyr2GU/spJMefXJHaVgwEIZ0hZU73kd3Lc5LOtby5OLNopGZAroyWC7rzpRVnx8s5GXJluV0Lq+xwUGBVxLQlpJTlRTDRF4ZQojWsDsiCvxExbOkuzGyV5bmWJrlfkOuviKRa+ZtxkCmRWMokQltOe58vQYK3h3bzHN+PkAOfiOl6JJqy7DbTrQgjSgTb6QME6oIyNMFmswLqCqaQhpT/q5Mf+yO+knyyRsDLJSF5D1REsptZU8vjzChh+9t4hqMEAwGn7l50BIUT0KCgji9MgrD5Ww4n4ZP3hhQgTjK4vHCUjStyO7818giDllWhEno5/2JhD8pczKAAAQkISFLjaLo4ybDrXQYSGlId5f155Za+reHtsXCT/XRy4VEdxiLb8eSjrCuJJPFbmiSxrhjkopsmQxLExYJVleo351qlIKkrpRkE4IvyAb8dDMZQ0to/5XdqDdpy6/J+4peO/XUC9WHvnLwk1Xlp4Z8rl613BCaY9d8/+8frf3qEfH3ybWfef32zO1tP6hp+1b60w9+KfXwHX8pPPOvFqv3w6HobPGw+fY7z//7yovoV0fuqor86I37fTOf4wuHz5374/7CptPOlYsDh+I/iX/3TfEl9PTF906fmnY6D88+ewkJF96Z5jYVDjb/lXn9lrnhs3Pjb/7M2fHeKXV1jZiu23P+668Gv/zyjumFv12w9hTnF154i//8ocydW+6qOnD+o1+Y06uPzN3z1OG74Zl19gOZoZlbHlm1ENvR1Prg0V8+9uptn9oYEqNDb3MHztasipx89+PV34w+dE/NcK7uz7P2ms1fyMye+WH08vMj2pnfVYfWtSzUfPDzqHP14/sOfLju0tToF4/vvfhP/4n3X7t14t3nJgfbf5yIf2LOfeunzG37/Gfxqn9k9eSVVP/7NXd+e8OfaqaVe098duQPV/vAzvrOvqm9l564fOTXmU5h/vLYXns2OrHnVnVy/Xm6d1Pk2Uczs/a67zzSPn988onp1w6Of2MhvyFYN1TtHTk5+Ma1vx1cVVX1Hzp3kQM=
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_22.msgpack.zlib b/docs/cassettes/chatbot_22.msgpack.zlib
deleted file mode 100644
index 19eb6bf7a4587..0000000000000
--- a/docs/cassettes/chatbot_22.msgpack.zlib
+++ /dev/null
@@ -1 +0,0 @@
-eNqtVW1sFEUYLqJoSvzAxIgadbkYMNDd7t7tfVFQS4/SE653cKUtohxzu3N309vdWXZ27sPloRBBEJAJPkhQCKXsam15hFIeUjEQowKigtEUgR8o0UQFAaMGCXh3u5U2MMk+7jnnfuf1nTMrM+1I1UQsj+kRZR2pAOrkoJkrMypqM5Cmr0pLSI9ioauxweffaaji4OyoritaaUkJUEQGK0gGIgOxVNLOlcAo0EvIfyWOcjBdISwkB1cstUhI00AEaZZSaslSC8TElayTg6VWfJqqmylRlThkKaYsKo6jrNjQkGpZ9gKRSFhA8awooui0lbHTuqGGcNZWJlKO/Gq6ioBEDmEQ1xAR6EhSSCbEMIvFMs5lmSgCAsnzUsGErijWdHP36Nj3AAgRwUcyxIIoR8zeSEpUiikBheNAR90kYBnlKmN2xxBSaBAX21F66Ja5FyhKXIQgqy9p1bDck0+Q1pMKul/dnc2OJuWQdbOvgQRRUVfSmCRFlimOsbkYdm+C1nQgynFSNToOSDxpJaf/aKRCATBGQOh8A8300OXdI22wZu7yAtjgGwUJVBg1dwFVctj2j5SrhqyLEjIznsb73eWV99xZGY5jnPtGAWtJGZq7co04OOoy0tUkDTHBMHewaYhxTETm4I1gEIaDIalcrEoGXHWsKiwKIWtUUoWKOczCqnom5bQ3zautbU44YIvuratvd0Oac/JOm51321maY1iGYzg6CZy+KK+wAc1wArbVmhJBKiDH2oJigAmwsZhtkTjXb8MLbWExZWs1HLFGX62gR6QgCFS2yj6/p8kXYRa0ekONNbHUYjExN2ibgyNlFInOaBeF8jrWUW1vrNZc9bWVzWHF1eGtku1aBT8/mJi3ACt6m1evYRK+igUuPCI8h9VBs/kIHazNxWaf3cPciCM5okfNnRzvfl9FmkLGBb2SJiXTDW1lF+EhOv15Jj83nQ3z7lF4YlcV4aQ5UK2KxRTvpHxIoXiWt1Gco5TlS608VeP193jybvwPpOA+vwpkLUxoOGeY8hkYNeQYEro9DyT7QJbspJPZ8MmU0iihYA3R+ajMnkV009DCoOuq9g9NFo3VCJDFVM6tOZBjfUcq0SFAQxCi7R0S607ZrGIIGTDcl7+iqDjrhgRES5q5k+ccu/OaYd51k1xJ51ma5Q6T0RchGbNsMgpWdVpDkKwoPWkOFksgkZ2xcitntzpI4csoUYZxQ0A+I1SFJcJMrYxSVBTHQDiSoMm+QHFREkljct/59aeZXXZy+dD9BjqOIbIoM7ZcW9mPR1qoKIufTeIejM3tdh99sNEwlJWYuJ3uI6OtNDQyGo6XtEP3G+QhOlmtJzFsTYuCOTidHIKAFTi3y40EIRQK8XaEoBO5nA6BF0JOwQWFPZ5q2gNgFNG+HP/MTFVLfYW3ztO/iB5JJLpBGXo9ZGSsyWI4nPYhlTTG7IZxbAhkWaooTbCaKlrMPhd0w5Az7ASQ5dysw01XkjU0jPY/7bqymzYD4qR37dDcH7WWW0ptNquljJJAuctBqpt7iaxIZ3stRz4dA6a9Pq4g9xSSz927+qZ5+Dg7YdW12+t6v3p4hljUyiyc9rj/3dltFQufmBR5RJ6CBeyfdflYwPHG154S9+l/tp4+s7yA0meMn9D6TIu9t27q9pmpO+H+c1c6txafvbXm7K7zp/m/+fecoafGLV7Wedlqb/q2b2zL5vLzJ3cIj34wOH37hQ03/62bumH1bNuYD5fUyzU/rio72Dn9yxvX117Y1PzLnDHXCgteXnZ79cb+472zqCnb3ho78I593eUb45538RVFZ5f+3rJ44olLfmGS1FPugeO2RY1C662xM+ijx4tWTjE+8X9x8YT9N+VictLJH85s2bz8UB/nP7ix+c5PwT8P9J/Hc9ec3fzYS3/N733tKij4o2jP1TuTLYtrvKdmnHvy/IQtVz5zfXPtRbRtYtH8Yu+en+9WoleLD1wPnFoLn5N+fZYaeHN9YPyxm+u9h7/rr/6+dMqa9Usmjz2Wq2NhwfIg+3bNQwUF/wHhwSyT
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_27.msgpack.zlib b/docs/cassettes/chatbot_27.msgpack.zlib
deleted file mode 100644
index ea21ee9070f5d..0000000000000
--- a/docs/cassettes/chatbot_27.msgpack.zlib
+++ /dev/null
@@ -1 +0,0 @@
-eNptVWtsFFUUrhKDwQioaBqiOK7GB+lMZ3ZnX20QypayCywt3WJblCx3Z+7uTndm7nTunXZ3CRp5BER/OFHRKKCUsmtqpfJQeVgUomiUxGiE2BDAR4yCKEgMGoLine1W2sD82J17z3e/c+453zmzstgFTawg/YZ+RSfQBBKhC2yvLJqw04KYrC5okKSR3NvUGGvZapnK0PQ0IQauqa4GhsIhA+pA4SSkVXcJ1VIakGr6bqiwRNObQHJu6MJylwYxBimIXTXM48tdEqKudEIXrnZkMcCEDGDSUDWSlsoAjBVMgE44pk7H3dBkgKoypVgcSoYghqQhk6BrBiWZHLIoIqGoCslxrirGZSIVOsw4hwnUXCuqmDEO08p9TOQhjUmgxGi0haHpWrGU7mhIhqqzlTII6+G8LLHMBHKwOt0V6D8mJgQaXSSBiiHdoH4MmjoKdLh4zr+imIZApok9WTG5N40wsbePTdYAkCRI+aEuIVnRU/bbqbxiVDEyTKqAwD4asA5LpbD7MhAaLFCVLlgYPmW/AwxDVSTg2Ks7MNL7yxdkSc6A15r7nNuxNP86sXc30iDqItVNOVpVnRE4McDx72RZmnJFV2mZWBXQeApGyb5/tMEAUoaSsGXF2IXhw9tHYxC2t0WB1BgbQwlMKW1vA6bmE3eN3jctnSgatIuhpmvdlY1X3Xk4QeD8O8YQ45wu2dtKhXh/zGFIzBwrIcphb+ELEkIZBdpDF+JxKRlPaDOU+lxrIMKbclsCetKaKdfN4RbXL+Tyfm/z/HD4saxPaifRyMKuoMQKfrdf9LqDXp4VOJ4TOIHNAX8s7Tb4Vmz5Ad/hySsg36pnOuNKK9fKZzJimzKvRUSLxaSSFzssX6YpFpZJSouD1tkdeqwl1BxLcYs6oommuZn8EiU7Ly7OQalahkZndSnyjAjva/A2NeDAwvDsx5JGoDtar3txnXtBPDt/ETJIZ5TM5bKxukUBNCo8n8fH8uUIfbwY4J1n+4g2VKinSNre6hbdb5oQG7SZ4KoCTRmx8MpeqkN45LNiuVF7GudflfCdvfVUk/Zgg6lUMW4/E4MG4+bdIiP4anh3jUdk5kZb+kNlNy3XleCOFhPoOEllOGdE8kUpbekZKPeFriv2QUfstJJO+LRLWZg1EIZsOSq7v41tHp5QbKR+13BnschMAV3Jl9zagyXVd+ez3bJkyXK6q1vjg3nRoySgJSV3l48YJnLc0IBYDdtbhYB/e9kyors+eldaeZ7lhb209RWJtplzGQOZhMVQojOR5OyhKg1knR6b4RG8Hh9NfC2j6JJqyTBmJeqRRpWJaxnDhCoC8r4sS+cFVBVNoYUp/ZbnLbZ7vfTwnmsBBGUgncxFsVRW/sBohAkdfucSV2nEYDD4wfVBI1QeCgl6g/vGojAcHY3g1vCeawFlih4e92dH0Kwi20MP0EU8CX2eRNIneqHb4+eTHi8Q3B6vIHgTohiQeO9AqIENASkN2VhJf3axvn1hXTQSeq+NHS0kttEY/h4VdYR1JZksxKBJC2P3SSqyZDosTVigXM117fbugBSUEgHeyyf9IMj7guxsOoZG2P6XXa8zaUsfpqcLTjn11Cc3PHHvszdXlJ5x5Pk6tHLW5DXnLq8/vfngw+HwXvHYNHXBHeeiobsqO3/bGepe25a5fO7WdYlJg0b21IFPX+u+ULF6+pwJPd9GuDMN45+FPSe7Fu/899Ijk75IX9k5VDXp1zXChWUvnq8d/9Pyqr/PvDqlxT+13d773aEe+WxzKvLKkR/PC5v/eqNwu5V6a+rEnsjdRgZPKOzip22sbb6pxj60f3yFcebDpXNjP7wfBmfiL03d9MKd6/+8ePOstq83uXrx5zvEqsu33SVXRuPL1yx5YdKXL878aP3EpvC4AaPzoL1k2fSf/jh9Uus8XjP0rv/tY6uePHD0qxbvBtN//rUDixv2Lk0uqnGfGLgndn8+f7Hi0Uu7js98ed3h5hPG4T+fKdzy3KH5GyvXTvte9c16cEP+m5+/eqrQX79h3v79pz/e9rr7aCU/+NLZgSm//1L5q/rWgjXv7eiL7zz1w9l/bqyouHJlXMWMYGBLmL7/B9ecUAI= \ No newline at end of file diff --git a/docs/cassettes/chatbot_28.msgpack.zlib b/docs/cassettes/chatbot_28.msgpack.zlib new file mode 100644 index 0000000000000..85059e884e01e --- /dev/null +++ b/docs/cassettes/chatbot_28.msgpack.zlib @@ -0,0 +1 @@ 
+eNptVQ1sFGUaLiJ6EjgwShRPcFqiBOxMZ3b/2qbGbbddWkq72C20Bal3dr7dne7MfNv5me52wZ8DNYhEHZWoEZS/bXdjLZWfIsoPeiRqLqIJOQMeQdTzEuU4w5/xkPObbbfSBibZn+973+d93+993vfdlRmCqiZB2dItyTpUAa+Tg2auzKhwwIAafjwtQT0BhcGWQGt7n6FKI/MSuq5oFeXlQJEYqECZlxheh1I5LJXzCaCXk/+KCAsw2SgUMiO/LWwtlqCmgTjUiiuoJVuLeUhCyTo5UM1AIwNUSAFKAqISNUQKaJqk6UDWGaqNJGNSUKSAKFJ5MJYnSaGeAJQOJQXqVBIaBFFICtkZKwUMXQ8JNi6DOqhwAqwIqY0r10YhUGggSilYGA3VvhEoiihxIGcvT2ood3j8hLSuKPBGc3/udjQph6yb/QGSRGlNeUuaVFSmOMbuZtgnk7SmA0kWSWFoEZB8MsqofHq8QAF8nIDQhWKY2RHnofE2SDN3NQK+pXUCJFD5hLkLqJLXuW+8XDVkXZKgmfG1TA5XUF4P52IYhnE9PwEYp2Xe3JVvxL4JzlBXs6R+BMMcpjJ8FMGkBM2R74JBPhoMS1UNqYR3ZZ2rLu5P1npqByIe6FhTnTT02gj0JoZcYWeKd/iifRnf5iTNuW1um8vlsnloG2NlWMZGL2NdqHZNpyTVqAK3pcJtH9Tk+yMtNa4hQ0h5mcaaSIMR6IdGPQPdXWvjgfRym21gfU3NCuoxe3vvClWvT8kOJqi11i8DgZSvPrI+6E+lF1IkOiMlCdWNTlCzUEokIj4vlDTWuyTZE6FddHtjMjUIB2yOJYhl6rxpX3t1fFx4NqeTZgsRuhjGzVhznyOj3JCgHNcT5qDNaX9ZhZpCJgVuSJOU6Ya2cpDwENny4/RYbweaGieIem14KeGkcY91xywmbC6yFbdYnTaKdVXYnBWcm6pvah+pKbhpvSoFe9tVIGtRQkNgnPIsnzDkJBSyNVcl+22O7KiTVvh0RmmYVqAG6ULFzHyN7R3qXMuS2dzoZNHYigNZGsy5NY/mWO8bzAwKvCEIiYFBifWmaNLOnWxEsETH4ilBFKcYBtCSRrKDyn6T/bTfWeg4uZ1WOZqkmXfI6Es8GbMcgIGUTmuQJ6tKT47DbamSQFO8fe9vEI/TZGBOkhEXDQH60NKYLtAMNQoBQaLSGhWXUBQQX6BoIAl5yYgm/jt/d2LaS7Eh1lwWDIxdeV4q3FkvpawFzIQdNI7x4LHK1QS7ZRaTR2BYl8t14M5Ko1AWYuJyug5OtNLg+Gg4u4T3blUoQOwVyER2zBoXBXPkQXLwQdbOebwc77Vz1SzH+0U/z3GiyEWre6BdDAM25OdBrbfJC4kcG4qeWS1KqbM24Guu+WsbPZ5IOK88+opI88IyC4tohbTG7BMSyBBIW6owi7B68+vN0Y6QoZWHAsd4HYzdbne5rNX1Ye+qBuj1BXgWeKPAV1MNHdEIw0WjLPBWuzqq/UxVtdjZvZQOgKCFKrzTRh62eBoX+ZaHfNtXWiY2qWVpq5nNp0qSZVmMhtug6gqagzxCrYSDNk3TUb/NLFTz4XG3lDZr/D2dhMsV7G62ky5nOYp4mcyicbT/6c7qkeyBj9ctH31Ul/vnsuW2S5Mt+efH4IRYPO3CsjOelQ9vtD+16lbw2V2cmZp0cOaSG9KelQcObg+X3hf7QPpoinI0UWdwT7z6XWPjgYV1rTNXnpu2edYJ56tvNt7ga3niC+9r9rMv3F5Vv+HTUwdOL5q7Y/m8szM+u7ej9LXTs3bNXXZzn2vVlyu2nP7pm1emeQ5vem/0DvOlX5sePXTT8MVTN/75LzOvXDz31Ru7H15yr2n54yPzV/1jaunO21e+5J723Y/f/x29vnvuutW/3Hnu2Vmpc0tqSkrGrntKPg/cOfP8E2+fnPfYr3tvenTN93ODkYsnX9yz9YMv/nT+q6llh//9k3Hoge8+eOYWsHHO3NuYv6Wnkq2IkTlzNoPyXzLvyi/7NqevPGP8XHZsxtG5d2kfXbix9OMS66YvL97zwbGXWx7a4dgoXTjmAY9PuRCquHQDvXt0z/Z7pvzI/Oe/m0qMlUvtFd1s09HIycun2fiFdV9evrSYqT6z79j+1sSpP0aG/xet+PrI5R0trycff/2WIy9MY3pPzbnvpQtvLZk5ye0/NW3JT1ueW/fLkU+2XXh/rvPYtmW9a6f82LpJHP5+89Cku+cvLitc/LrW/zxvn1VS8n+7wJgA
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_29.msgpack.zlib b/docs/cassettes/chatbot_29.msgpack.zlib
new file mode 100644
index 0000000000000..f527c3ad040b5
--- /dev/null
+++ b/docs/cassettes/chatbot_29.msgpack.zlib
@@ -0,0 +1 @@
+eNptVW1sHEcZtmMiDFQlrWgoQZDJFclV8a539873kchY5/M5/mhs43MS2xTM3O7c3eR2d9Y7u+e7C1HapAVUq4qXtkgkLVWwfZecTNIPK23quBESbhtSmhaIKicFEkT5UKlStWkDKqXMns/Exrkfdzczz/u8zzvzvDv7ihlkUkz06mmsW8iEssUG1NlXNNGIjah1f0FDVoako509ff2btj43Y35v2rJM4q2tlU2sYBOZmCgyNrRsLVermFqt7L+pohLNaBKp+fNNHdtqNEQpTCNa46U2b6uREYul62xQ08Xy9RjMl6k6FKuqpqo0JEFLbGCoVT1Tg9uFJAX93CvqkIT+jmoQHtfuM5SC+DhWuKVMJByixs+3CMQYVLKkClLBMgZ/I10DvIwnQT6KABboQFZxBXVDs5BoyrE2Ewa8iOv7fd6EjjBEutkzsma9vCAAjA4UAYqSEjLfCKUktYgQQTDK66ALp6yAbEfMrggAKsnHpBhID3qZ+3hVjUoCb+mLVyGodOeuSOpJFYxWd1l3I3EbFN3sC+AkXP7iuiRurkIwFFtK0fsSJK6XpERxt8goj/NJq1n928MVKi9EMAiZI46ZHnTuGW4DkdlZywuBhhGQvCaEzU5ekzn24HC5Zii6JEMz46sbHS6nvBnOTjEM5dg/AhglZd7syTbi8AhnoGtJUoAYw9xDpwUIIxIw+y+2tAjBllZ5QU087F7qdXojaWb3NyEm5y4c8pLjzYyEkfmumSdpljtMRl/kyZjZySiGajmtqPkYqrh8hcurbfgpGVDg0QEv6CXw3hSRsP7nXWbfVG0TlaXxhsonn+wm+h6HDBuYWDgJjmiJ/TMz1Th1qJUFvlFJdOZrTPwHiNAvOnWH9rqj7R1EDIiIZbLzBppxijAqsi/fZH9QAJIC7FY9z4W2AfM9nvKn6ZwS3JDpyKtpELBBhzzqH7ym7aNpAAIcKmkp60d4v5fW2ndXRTdoo6TYDvM0vq5kFdWZ0D0UQnTI+Mnfx0RQnwkFbqQx0z4Nn7pzoEXhDENLtW8GWVFMsA50MMJP0EuvRfFtvStolxGoJ9KclmUVTyEM1Vjnh/fAX+hhIVXrJgZeDXzjgKW1Lj5OjNKBFfUVJwCHUS7Ewn/HgWALFk5CgID64x6gNiKenw2+o+frBROHXj4eh0tJ3Lm3TMX3PRKnGZtM+ThSZp8YT5zMC7iJZgpn3rjZusmDPtTuPt7CJVsLGxsbXby40jsojBhG9zvdG25fqZXWxelsYh36gJqVY8cOTTRFvWCG/GwvaA5wu4y03G7UxKYtXDdo7B45sEkCQCxn3C6jlM25yZ1hCn6c5VYbU9elKkQcnTBUbtYWOBfZt+/5fazHHk0MYS64Ni1dKo3OtC0cfm5ly5/N3Xz9zDH3xueGjroKq44NBTu4e+u2O8cTZ0oJfJ73fe8Pb0/aFl8//7c4+Ecvcs2vt52TMbD39MKnd7jHjYt/+uZ3bv3b8j9tmjG/jvvooTdtLnzBS+f8zG4vn/VYW3bq/vZzBhYGD2xyqCXw1M2VzKF7u1Tvftk+QK82pqqUKD85vfn75l1pxGHurGwHdfU6ZO2Rh88NP+Xa9OlwpDIW0fafnNdnTrjXvNLdcu6j8n2fu1X8ZRbj71dfmWr5fqb8f97Ntt8xt+Hq5rNvdiw6zH9p7+HXT34XdrYQ5vl9S9/1ltxVNLT996n9df2DKX5X3Em1/RfLPrl52IcvV5wuFw9v3XpPvbNuc2H2zXX18wuPrb8jdv03s7Z1cPrxE85Vfzzyyd2jO9HBgwfd+5o+/ctvPvr0m+X4F9zClqc6Gz6g939y+5L9vtH9i3/Y/vm30tM/+/WRB8/zf6+eP/DorndO/HIw1Hjy9/C3PNixv/L2mSzs2v/LX01MTf0PvICpcg==
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_31.msgpack.zlib b/docs/cassettes/chatbot_31.msgpack.zlib
deleted file mode 100644
index c8d8fe9ca3142..0000000000000
--- a/docs/cassettes/chatbot_31.msgpack.zlib
+++ /dev/null
@@ -1 +0,0 @@
-eNptVWtsFNcVXpI0bV6KKxV+pKhMLQgh9czO7M6+DE5ir9/ED7wLhqKwvTtzd/d6Z+YOc+/Yu2tZEZg8lEiEIXLCj1ZKsdlNHEN4GMy7rfJQQ4La1BKtjRIlaSXUpkoUFKio1KR31utgC+bH7tx7vvudc8/5zpmdpX5oEYSNJRPIoNACCmUL4uwsWXC7DQndVdQhzWB1rLsrFh+1LTTzaIZSk9R6vcBEAjahAZCgYN3bL3mVDKBe9m5qsEwzlsRqfubqYLUOCQFpSKprua2D1QpmrgzKFtVbsM0BC3KAy0DNTNkaBwhBhAKDCly9QQagxQFN48qxuJQcxRzNQC7J1hxOcXlsM0QSaYjmheoartrCGnSZSZ5QqFcP1XCLHLain3Ntq3WuHekL0TaBVvXQU2xHxyrU3K20SXm/EOCpbSWxizXYrsT+CbUg0NkiBTQC2QbzY7LUMaDLJQqhoVIGApUl9hNP1VgGE+ocWpyst4CiQMYPDQWryEg7B9MFZNZwKkxpgMJxFrABy6VwxrMQmjzQUD8szp1yDgPT1JACXLu3j2BjonJBnuZNeKt53L0dz/JvUGeyiwVR3+btzrOqGpwkyGFBPJzjWcqRobEy8Rpg8RTNsv3MQoMJlCwj4SuKcYpzhw8txGDiHOgASldsESWwlIxzAFh6UD62cN+yDYp06JSi3be6qxhvuvMLkiSEjiwiJnlDcQ6UCzG16DCkVp5XMONwfisWFYyzCDozVxMJJZVI6nWoMd8bbhMtdXMS+jO6pdY3CRsbO4VCKNCzvrV1Uy6obKEdbZ39EYWXQr6QHPBFAiIvCaIgCRKfB6FYxmeKvcQOAbHPX0Cg0GtktydQr9ArZrPyZtQel/FGOYUKcp8dzHbHWlWa1hOgt6HPiMWjPbG0sKGvI9ndki38EuXaE3ITTq/lWHR2P1Lr2sRgc6C7mYQ7Wxs2pczwQEejESD1vicTufUbsEm3d9AWIRer3xDGC8IL+oO8WIkwKMph0X0OzWtDg0aaZpxRn+x73YLEZM0Eh4ssZdQmO8eYDuGHfyxVGnV/1/qbEl461sg06ZxrtlAN5wtxMWhyPtEnc1KwVvTV+gNcS0d8IlpxE7+tBI/ELWCQFJNh07zkS0rGNrJQHY/eVuznXLGzSrrhsy7lYc7EBPKVqJyJzXzP3ITi2xqPzXUWj600MFCh7NY5V1b9QCE3oCq2qmb6B3QxUpD9KAltJTVZOWJa2HXDAuJ1Uk7OoYplXnfj7K6s8iIvSqdY6yOFtZl7GRNblCdQYTOR5p2ZGh3k3B6r80sBf5Alfi2HDEWzVRizk41YZ8okaznTghoG6ukcz+YF1JCOWGHKv5V5S5yxADt88lYAxVnIJnNJLpdVPL8QYUGX373ETRo5EomcvT1onsrPIJFA5PRiFIELo5F8Ojl5K6BCsV8kE7l5NI9UZ2YlWyTCAVFNQVENgUhIDkUiUJLDEIhSIOVPplQ58Fa0mY8CJQP5WFl/TqlxS2d9R1v0xGZ+oZD4LnPue1QyMDFQKlWMQYsVxhlXNGyrbFhasMi4euq3OJNhJaIkw2IopQQiETEY4RvYGJpn+152Y+6kLX+YdhTdchrpd5ckVrz4I0/5uZPuudj5tli166v8C+9eufs4Gn1+W/eO6NaVe/Z86mkY+UKLDf/9gevNVRuHBv6x8n+f/X7VczfWvTwz0+RpuJK8t0Ff9eZR39f/Dk4/eO3l0+9/eSMy+FRw9uOL25bdU/cv77LhT1+79sNHHzspLB0Zqb+krp1Y3v7entoLe7W9D/ei2Z17LfkPm+LLX/JO78tOFXPh9r98cmrVwT+/uOZsk8dz5vpPa2eXP/3OT54dHu7Z/5+jT0h1j91RtfvJ7l3fbBt85cQXl6NNr3x04viVzP0P/OqND+7bNx16BIElv7nvofEda169eO3U5c+K132TU8elc+cfvyTF47svnb/SMjR68Prlkeg/3382MVhsn6QrlpydSv/1aeHHI6NHP186vXq2Ktx5Orz9of/e1TL6M98RefWQ79s/TV5wDgS/1Wo/ujZ9IvyD+19adfkXz+B1yqnZzNbX3vy18ao29beW37FMfvfdnZ7nlccvtN7h8fwfIDVPpQ==
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_32.msgpack.zlib b/docs/cassettes/chatbot_32.msgpack.zlib
deleted file mode 100644
index 92705b16bacbf..0000000000000
--- a/docs/cassettes/chatbot_32.msgpack.zlib
+++ /dev/null
@@ -1 +0,0 @@
-eNptVW1sHMUZPoOiRialpHy0FW2ysZACxbu3e7f35SjQiy/+xJ9ncw4ksuZ2527Ht7uz2Zm17y51WkIDbUEKSyNQSioV4twFyziJMMEJMaJVqUwVoSJTkBspaqUK10WtaJGafqRKZ89ncpazP+52Zp553mfmfd53D5VHoU0QNuumkEmhDRTKBsQ9VLbhfgcS+oOSAamG1YnenuTACcdGi9/WKLVIk98PLCRgC5oACQo2/KOSX9EA9bN3S4cVmok0Vgu/r8MHGgxICMhC0tDEPX6gQcEslknZoGEPdjhgQw5wGtStjKNzgBBEKDCpwMVNMgZtDug6VxHjcXIUc1SDXJqNOZzhCthhiDTSES0IDY1cg4116DGTAqHQaBhv5NYEbEPbuPbtBteBjFq0Q6C9Hgt1HTd60G1cGx7jFGBy7VWBXmCmRQWFh2t5vlC/jizFLmc74YwCZwIDPrw++D42Y2AV6t5U1qJ8UAjx1LHT2MOabFZi/4TaEBhskAE6gWyCHdJiiWNAj0sUIuNlDQKVpfWK744JDRPqTq9N1WmgKJDxQ1PBKjKz7mvZIrIaORVmdEDhJBNtwooR3MkchBYPdDQKSyu73DPAsnSkAG/dP0KwOVU9JE8LFly/POmdjmfJN6k708NExNv9vQXmKZOTBDkqiGfyPLsxZOrMI7wOmJ6SVVl/q3bBAkqOkfBVv7qllc3TtRhM3JNdQOlJrqEEtqK5J4FthOXXa+dtx6TIgG65uXd9uOrijXBBQZKEyNk1xKRgKu7JSiLeXLMZUrvAK5hxuC+LJQXjHILu4j+Gh5XMcNrYiRKFVLRdtNWhNAxqhq3GdwuDiW6hGAn1d7a1PZoPK3toV3v3aEzhpUggIocCsZDIS4IoSILEF0AkqQUsMUWcCBBHgkUEiikzt38YpYSUmMvJQ6hjQMaDcgYV5REnnOtNtqk0awyD1K4RMznQ3J/MCn0jXene1lzxMZTvGJZ34+wOjqlzRpG6s10Mt4R6W0i0u23XoxkrOtaVMEMkHnhkON/Zhy26v4u2CvlkvC+Ka+SFg2FerCoMi3JU9J7pVW/o0MxSzT0RDEdO2ZBYrJLhkyV2ZdQhhyaYD+Gl+XK1S7zS03nDwndPJJgn3bkWGzVygQiXhBYXEAMyJ4WbxEBTMMS1dg1MNVfDDNzUgmcHbGCSDLPh7lXLlxXNMXNQnWy+qdnnPLOzTHryWZXyMG9hAvmqKndqiO9f6Y98e+L1lcrisZ0FJipWwrpzFdePFfNjquKoqjY6ZoixohxEaegomZnqFsvGXhgmiDcIuxwxMF1dWfXdJDsry7zIi9J5VvpIYWXmHcbCNuUJVFhHpgV3sdEAea/GdgalUDDMLn4Hh0xFd1SYdNIJbDBnkh2cZUMdA/VCnmf9AurIQCwxld9qtyfuRIhtnl0PoDgH2XehLFfSKr5di7Chx+8d4gaNHIvFLt4ctEoVZJCYHLywFkVgrRopYJDZ9YAqxSsimcqvonmkuov3scGwGoEwGgxDWQqJSiaaCYpSFEQDqhwLSZFITD3d3MI3A0WDfLLiP7ec2NMd72pvPjfE1xqJ77FWvoZlExMTZTKlJLRZYtxJRceOypqlDUuMqz++x52JKjElHRXTclSFMTEc43exNrTK9oXtJrxOW/ksPlHy0mlm363LbH1mo6/y3Eqf6+y5Rbrr8NVrP35p+olNR+qA8uy9oacSqYfSbQ+8eCx9+fHLOxq2XRtb+KM0ay28++9i9/s737rn+7/tlDa83PHeb4oL589/69PFhU/gzy77v3tpV+zAfw9u2fvqx19tvfOD7tsfm+r7+3u/G1Tn4h/96fahDUeffeCNyN+OPF343vNLoTMziZ/oh5fe/3l7j5v+8SHj8zduVsE1R9lFmXjx/v+vfPr/g3rCZ5H5j9vPp/4OB3Pnzv75wcH6B5duXMR/fqo+vu7F7Z87fbf7Gv6qXT8zunVtOKOw7du/TVq6rdW/zWv7j9pa/eP7Xx6LnlT5YfvPRZuj52+MKy1s///HB34+JiY9Pys5Pp3z8t/HhkdP/nS6dXz1aFO06Htz/0n7taRn/mO6I8e9j3Sd3GC/UDwW+12g+vTZ8MPz7lTm+fveKXVa1P7lz3nWVVVf8HsbVQlw==
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_34.msgpack.zlib b/docs/cassettes/chatbot_34.msgpack.zlib
new file mode 100644
index 0000000000000..b5cd2131abbd4
--- /dev/null
+++ b/docs/cassettes/chatbot_34.msgpack.zlib
@@ -0,0 +1 @@
+eNptVWtsFFUUXh4qGt8immB03NQo2JnOzM7OdtrwY7t9bWu72C3SYkydnbm7O+zM3OncmX0UMAhIIkZ0UKL4AvrYJU2tGApFXlYJ8YVRiBL7Q40xPoIiBuMjVNE72620gfmxO/ee737n3HO+c2ZdIQ1MpEB91pCiW8AUJQsvkLOuYIJuGyBrQ14DVhLK/Usj0fY+21TGFycty0BVFRWioVDQALqoUBLUKtJMhZQUrQr8bqigSNMfg3Ju/O9VXg0gJCYA8lYRD6/yShC70i288HZCmxBNQIhEEqhG3FYJESEFWaJuUURQRxlgEqKqEsVYXErCgoSVBEQMrwkYJ3LQxoiYoipWjlB0ImqIuoKSlLec8JpQBa4TlEMW0LxryokZvhuVu4nwvRpRA2Mz4DYCpnfNI3hHgzJQ3a2EYZEcJDVFV1ykjvcY/I8sE4gaXsRFFQG8gd0YOImWbbpMNBVYU0gCUcYp/spzY38SIssZnpm2N0RJApgd6BKUFT3hvJ7oUYxyQgZxVbTAII5XB8WiOIMpAAxSVJU0yE+ecnaLhqEqkujaK1YiqA+V7kdaOQNcah5070biSuiWMxLBQQTDFUtzuL46wVBcJUXvzpI4+Yqu4oKRqojjyRtF+8HpBkOUUpiELGnHyU8eHp6OgcgZaBGlSHQGpWhKSWdANDWe2zN937R1S9GAUwgtvdRdyXjRnY9iGCrw5gxilNMlZ6BYiNEZh4Fl5kgJYg5nJ52XIEwpwBk/19Ulxbti2pLmTDK0vE6oS4VzsfqG7nglZJdzOdsKx0EwleZjbFbyBRMdudCyFMkE2ADH8xxTSTIUTTEUQzbRPGxo69S0WlOml9YHfGmkPxSP1PA9tpwNUqGaeL0dyQK7lgKBFR2JcKaRZbs7a2pWUit87V0rTauxO634qFYUbWoTmzP1TV0ddelMNYGjs9OKvKTRL9Y1aslk17Ig1BC9rCWdamX5QFtzqicNu1n/Q5Cm6upzdSuaEtPCo30+ki5FyNNcJe0+w1PaUIGesJJOH+tnd5kAGbitwPo8Tpllo3X9WIfg+PuFUsv2RpovSvjW/lqsSedwvamUE2yAiAKDYGmWIxihigtU0RzR0NI+FCq5ab+sBN9sN0UdxbEM66YkX5CStp4C8mDosmI/7IodV9INH/coCbIGRIAsReUMdZBtk7OKDNfumewsEpoJPAd6im6dw0XVZ3qyGVmyZTmZzmi00MP5lBiwpfhI6YhhQtcNDojUkJscfrhkmdLdIL4rTTI0STNv4dZXJNxm7mUMaFokAhKejlbOGS/XxKzbY0t8jN/H48RX48EkqbYMonasFmpYmaiaMEygQlE+kCXxvACqoim4MMXf0uRFTr8fH95/KcCCKYBndIErlpU+Mh1hApffvcRFGk4QhEOXB01R+TBE8PsPzEQhMD0ahtXQ/ksBJYpeGg1lp9CkIjvjZXjRxYHKSpblBF6SfBwdkCV/wBcXBA6wYkBgZfGNUD0ZEqUkIKNF/TmF2s7WYEs4tK+DnC4kMmJMfpkKOkS6Eo/no8DEhXEGJRXaMh6WJshjrrZgpzNSKQmApgHNCTFaoHk/WYPH0BTb/7Lrdydt8RP1eN4tp544Nitz11PzPMVnjvVsS+tRev6GP4VjR3+6cu+xf3YurP2od2z2k7feXOMsfLssv3k7qG5r/vyoceX1a9upjVsu9Jd/ITPbE+xv/E2e6JaWtQNPzFMUe3X1y18NpBYtP7Z+ouNcSnjspch24b0XJhreqdq7fsHZFzf2/cQOnFx57abjdxaurxur2hs4/cxfxvsTh2bdE/5sLzl69/fzX/hw1/6vz/y2v3rTqm/XHyS29iZuyT7t8fAfj/6ilA1/MffAq31tvX96G3hhyexHN1uv098cPhd8cayKfOCDuVzL71/+8MrIj2PPL7ht98Rr/6zeOndfTZg4tXU+RcwfOSKMVh9fXHH66vzJ8ztgZA333akT7971V/iT5x880njHNZH8hdEb5M7Q6E1jD+85eN73RGfDwHXhHfe8VPXrprPfdqw6s+CPnqtuv2/el6dX/7p9kXP7hpMXfk6rX0e2Hbrux9ea+Pu3dU2sBVsWPde3p/VMZNt42cIbFty3sY9JnODPX+Hx/PvvHE/Pp9wjHbM9nv8AoSZdwQ== \ No newline at end of file diff --git a/docs/cassettes/chatbot_36.msgpack.zlib b/docs/cassettes/chatbot_36.msgpack.zlib index 0d10f19b16b5d..39bd881487594 100644 --- a/docs/cassettes/chatbot_36.msgpack.zlib +++ b/docs/cassettes/chatbot_36.msgpack.zlib @@ -1 +1 @@ 
-eNptVWtsFFUUXvEBGh8YUBIfcVxFBDrT2dk3FbFsn9SlS7dQCsH17szd3dudmTude6fd3VIf+H4ljhJj/IFK211tarVCfFI0qMQHP8RQTWM0YEx8kPoDfOAPxTvbrbSBSfZx7/nud8495ztndpR6oEkQ1s8bQTqFJpApWxB7R8mE3RYk9MGiBmkGK4Ox1nj7gGWiyRUZSg2yqroaGEjABtQBEmSsVfd4quUMoNXsv6HCMs1gEiv5yX/63BokBKQhca/itva5Zcxc6ZQt3J3Y4oAJOcBloGqkLJUDhCBCgU4FrlYnvdDkgKpy5VgcSo5ijmYgl2RrDqe4PLYYIolURPMc0rm4AXREMoK7inObWIWOE5InFGru/ipuju8MupFrXqZxSZycjbYINN3929iOhhWoOltpg/Jewc9Ty0xiB6uzXQ/7JdSEQGOLFFAJZBvMj8GyyIAOlygE+0sZCBSW4+9dCwczmFB7dG7eXgeyDBk/1GWsID1tv5YuIKOKU2BKBRQOs4B1WK6KPZyF0OCBinpgcfqU/QYwDBXJwLFXdxGsj1QuyNO8Ac82Dzu341kpdGrvbWVB1DZXx/KswDrnEXwhQXwjx7PsI11lFeNVwOIpGmX7+7MNBpCzjISviMcuTh8enY3BxB6KArk1PocSmHLGHgKmFvDtmb1vWjpFGrRLkdjZ7irGM+68gscjBMfmEJO8LttD5UK8PecwpGaelzHjsF8WizLGWQTtyROJhJxKJLXVqC7fEWoWTWVzEnozmqnU1gsb69YLhaC/raWpaVMuIHfSaPP6nrDMe4JS0OeXwn6R9wii4BE8fB4E4xnJEDuIFQRil7eAQKFDz3YnUIfQIWazvs1oXbsPb/SlUMHXZQWysXiTQtNaAnSs7dLj7ZG2eFrY0BVNxhqzhS0oty7hq8fpGo5FZ/UgZXWzGGjwxxpIaH3T2k0pI9QbrdP9pFa6M5Fr2YAN2h2ljUIuXrshhGeFF/AGeLESYUD0hUTnGZ3Rhgr1NM3YA5Lf+4oJicH6Cj5QZCmjFtkxyHQID31aqvTs7taWMxK+arCOadIebzBRFScFuTg0OEmUfJwnsEqUVnkDXGO0fSRScdN+TgmOtZtAJykmw/oZyZfkjKVnoTIcOafYxx2xs0o64bMu5WHOwATylajskc182/Sw4pvr9kx3Fo/NNBsEhbJbe7ys+t5CrleRLUXJ9PRqYrjg86IktOTU3soRw8SOGxYQrxF7wBsOjlYsM7obZndllRd50fMua30kszZzLmNgk/IEymw80rw9WaWBnNNjq70evzfAEl/DJpOsWgqMW8k6rDFlkhrOMKGKgfJejmfzAqpIQ6ww5e/K6CX2oJ8dfudsAMVZyIZ0yVcuq7h/NsKEDr9ziTM0vnA4vO/coBkqL4OE/YH35qIInB2NR9LIO2cDKhS7RTKSm0HzSLEnb2aLhJIKBcSgXwoqSZjyJb0gCCRvAEoB2e9RvJ7U65EGPgLkDOTjZf3ZpbrO9bXR5shbm/nZQuJbjelXU0nHREepVDEOTVYYe1hWsaWwYWnCIuNqq+2094bksJwMiVCB/mBYDIT5tWwMzbD9L7tBZ9KW31H3F51y6ulPzuu+4YkFrvJzPvucPk1j0dYDdyzed3Llrj5hcPuRBuGiyy5+bP4P9e6DTw/175yY2m/9NdG0YM3BgQ8OXHD6ebR88RV3/SmeTPWk3uJdL7d5LrBHli57d2LqsDZS3HBbS6Tvp+Ke7utObP/91G+3nJz4aHzBugv5hHhsYPjZmO5/001eWvTIM2N06uB4buEfo7u/ePzT5fXbPpe+mLpnWRc59tyRy98ff2XTaPPSnz9ZuGuLy/Xtr7z3hcVrPl544OijP/48fih2aWuj64AtXHnjeMeuIXClT1n78OH2sb9eXPSVdN+1W+768N8+9ehrp/6et/XudVywfsX1N40g0q8fNTbmOq8WH/rav79m6Vc7OxtbEieXbR968sTxhyK/fLbzg75bVq7o+WZebvSFJfd+vuD4rW/vu7tQH7/mz/nzaw6r3z393T1RaezIZbc/enoR36a8unfN1tbrTix/1n/RseXKRNupp9KHlqyEEv361a5DXU9sO77Eyev5rqlLvpxqmedy/QdG0leg \ No newline at end of file 
+eNptVX1sHMUVt2uhlLamVmkkJKRkYlVtFN3u7d7eh8/BpOc7n7+wz/FHYqc4x9zu7O3mdnfWO7v3laZqkqKgJkXZPwCJIgXiy13lGoeQiEKIKRVUFERLPyCRGxSpRUSE0oCaFlWt1HT2fCa2nJXudmfee7/33rzfe3OolkcWUbHRPK8aNrKgaNMFcQ/VLDTjIGL/qKojW8FSZSQ1Nj7rWOrSNsW2TdLp90NTZbGJDKiyItb9ed4vKtD2029TQ3WYSgZLpT83H9nfriNCYBaR9k7wvf3tIqa+DJsu2qewA6CFAAQK0kzZ0QAkRCU2NGwWxAxSQBaAmgbqwXiYwMbAVhDI0DXAMihhh2pkVE21S0A1wJgJDZUobLsPtFtYQ54TUiI20tsP+MAa333qFtD/HR1048wadYcga53ygw7HQb4Pa9DnGWwB3kZGjnsvWdAxMB0kYQBLjgQtGwEFl3asBv0irXXIu+mpAZUAvQQMqKMd60OZpjs6lpDmbWVNmwliRlcN1dM06B5P38S2ENTpQoYaQXSDZmzSgtqO5SFxbORATUFQouW+0tRWUTCx3YW1JTwNRRFRdGSIWFKNrPtstqyaPiAhWYM2mqMxG6hOEHcuh5DJQE3No+qylfscNE1NFaEn9+8j2Jhv5MjYJROtF895uTGUFIbtnkvRIGL9/pES5ZoBeDbYwXLPFRl6YKqhUe4wGqTxVM26/OXVAhOKOQrCNHjsVpeNF1brYOKeGoJiamwNJLRExT0FLT0cPLt633IMW9WRW4uPrHfXEN5yJ7A8z0bOrAEmJUN0T9UL8Ys1xsi2SoyIKYb7DFcVMc6pyF36RzotyumM3jVYUOK7e6I9uf5SJtk7I3fgwO5gybH7ZRTL5cOZQFEUYtnJUnwix/CRQCQYDgf5DoZnOZZneWaAC+Pe0SldT1gSN5KMCHli7JJT3eGyIxVjbLxbTjqpInISLIrsmcz2F/oCgZmp7u597B5hPL3Psvtm8qrADpOxgVE4WEgOpCd78oXtgEbn5FWpqy8Ee/p0RUlPxLBOuImhfG44EI6MDubKeTwTCO3CHNuTLPXsGciuCo8TBIZrRBjmgh2c9yyscENDRtZW3Fkhyv/MQsSkHY4OV+mR2Q45VKE8RG//ptaYHidTg7covLGSoJx0F5OW6gOBCBhDJghwgSDgo53BSCcXAr1D4/Pxhpvx21LwzLgFDSJTGvasUL4mKo6RQ9Jc/LZkX/TITivphU97lEFFExPENKJy5yeZ0eW5yfQnzi53FoOtLB1J5bpbd7HO+kK5WJBER5KUfEHnouWgoGaQI8rnGiamhT03NCBGJ+4sz0cXGpIV3s3RXDmG5xiOf4m2virSNvOSMbFlMwSJdFLbJXfJp8Oi12NdAh8SwvTgt9MZKWqOhMacTALrlJlkOzAtpGEonS8ydF4gTdVVWpj6f+MWIG4lRI1fXK9g4xyi90UtWC8r98pqDQt5+F4St2CC0Wj0wu2VVqAEqhIVoufXahG0Oho+oJMX1ys0IE5yZL64os2okrv0LbpIiyiARFEKR7hQJixBIQzlEBeBfFgKyx1cSDwdTzJxKCqIGavzz60lpoZjQ/3xFyaZ1URiUubyLVkzMDFUWa6OIYsWxp0TNexIdFhaqEqxRmNT7rkOMYo4DglhKSNEuXCI6aZjaAXtC9pVvElbvy4PVr1yGtlfN+/dfPTLTfWnhf5u3rSP8/gw1/Zw4QcPfxhNHPzG00tP7nrr4qnHOsc3Bq6CYf3Eh6SS3Pm//XdsOX7X9H0XPr1y4+Xtvcdm3vus9QPLfyg4/EbqnctPfPva8Obf/vz9Qf/QK3eHf/Xa9Z985PrLGy5uOP6Xq3sGgsPxEeX0ZLmt+T/vVvqO+t45a3c1HW796MrFl9qy99yLJp5a+KNv78aZ0Uevd2278/XIJ0Lm2Jkbn33epbfs+P5ru47+NPPDPvvVyJ/axPdnr29kH/nmHbAl/fbIydaHjnz8aa/8h3c33dN2ObN44cSN32/q/+f033f+becvH9ic++SpN55M3n/t2ed//PljG2LKf1Nbn/6gZTFx7EDrVzfdtK/+bnbmma/8u/WFrz/4xKWO+0/8665HfQ+1lR+/ceTmfRMtsa1DhchfL5XfypNX933c4XvzWqL5gfe2XT5Y2Dudunh+jrkUeuTCVmHT9Ml0/RBbmh5//muvf/dLTU3/B/U9eXg= \ No newline at end of file diff --git a/docs/cassettes/chatbot_40.msgpack.zlib b/docs/cassettes/chatbot_40.msgpack.zlib deleted file mode 100644 index 35d91ee4c77e6..0000000000000 --- a/docs/cassettes/chatbot_40.msgpack.zlib +++ /dev/null @@ -1 +0,0 @@ 
-eNptVQ+MFNUZPzRpLVHRRGNr/TOuNpb2ZnZmdvYfcI13txy3HPd3D+4o0e3bmTc773Zm3jBvZm93kSInRA1NuEnR0rTGCHu7cDkPKedhLRgxrYqxUTGBHIlVQqJEjBpRQ0qLfbO3V+4Ck+yf977f+33f+77f981INQ9tgrC5aAKZDrSB7NAF8UaqNtzoQuJsqxjQ0bBS7ulO9e91bTTzC81xLLIsGAQW4rAFTYA4GRvBvBCUNeAE6X9LhzWacgYrxZnLmwIGJARkIQksYzZsCsiYujIdugisxy4DbMgARoO6pbo6AwhBxAGmwzHNJhmGNgN0nanF4lMyDmYcDTIZumawyhSxSxEZpCOnyCCTSVnARETjAo1MwMY69J2QInGgEdjcyCzwraH7mOSDBmVUlPlwl0A7sPlhumNgBer+VtZy2BAXZh3XzmAfa9Jdgf4Sx4bAoAsV6ATSDerIommkQJ+L56KbqxoECk3yvxpuKWuYON7kwsQdALIMKT80ZawgM+u9kC0hq5FRoKoDB47TiE1YK4s3noPQYoGO8rAye8p7EViWjmTg24NDBJsT9RuyTtGCV5vH/duxtBam40110yCak8GeIq2wyQicFOP4FwssTT8ydVoyVgc0nopVs/9tvsECco6SsHX1eJXZw5PzMZh4Y51A7k4toAS2rHljwDYi0qH5+7ZrOsiAXrW152p3deMVdyFOELjowQXEpGjK3litEIcXHIaOXWRlTDm85/mKjHEOQW/m63RaVtMZowkligOxJG8rgxkY0gxbaV7JrU10caVouK+jvX1dISKvdzqTXfm4zApRMSqFxXiYZwWO5wROYIsgmtJEix8gbhTwQ6ESAqUBM7cxjQa4AT6XkwbR6n4Jr5VUVJKG3EiuJ9WuOFkjDQZahsxUf2tfKsv1DnVmelblSr9GhdVpaSXOLmdodG4eKU1JPtIW7mkjsa72lnWqFRvuTJhh0iyuSRc6erHlbOx0VnGFVHNvDM8LLxKKsHw9wggvxXj/mZzThg7NrKN5e8WwtM+GxKKNBR+v0JQ5LhkpUx3Cd96q1pt2T3fHFQnfXk5QTXpH22zUyIhRJgUtRuRFiREiy3hxWSjKrOrsn2itu+m/pgQP9tvAJCqV4co5yVdlzTVzUBlvvabYj/pip5X0w6ddysKChQlk61F5E4Ns3+y0YpOJQ7OdxWI7SydBqebWO1pT/XCpMKzIrqJo+WGDj5ekEMpAV1an6kcsG/tuaECsQby9oTA/WbfM6W6c3pVWnmd54a+09ZFM28y/jIVthyVQpvPRKXozjQYo+D3WFBLCoQhN/HI6mmTdVWDKzSSwQZVJljOWDXUMlFcKLJ0XUEcGooWpfddnL/HKYXr45asBDs5BOqWrUq2s/KvzETb0+f1LXKGR4vH4kWuD5qhCFBIPR15ZiCJwfjSCaJCXrwbUKfbwZKIwh2aR4s08QBfpuCqooiSEI2pYyoQUmaZVAqIUEtWQqITjwoHWNrYVyBpkUzX9edXE+q7mzmTr9CA7X0hstzX7bqqamJhIVSspaNPCeOOyjl2FDksbVihXX/N6byomx+VMTBCVTESM85E420LH0Bzb/2VX9idt7SW1teKX08z+Y5F5744bGmrP9fTz/fdOz7Gu1x+67ciF299tenzp9paPM7t27/7NyA/xG6Njl7Q7Dl86+eGpRMvFH+/87fYfXd6Nlt625JHv+AtqXs3f3fD807tH2slxdGbNl/nIyR2ZX33U9cWj2/701Tm4ZseD8Y7Ayj0blmovHdvVPX5+1+LsaHjm/mdLu373dG7LXTufSK5ItoV7R1tSx8U3jx5zT5w9cWDfHd+eLfd+Uk7dmFkcvLvhsYf/u+4nL7y+RNi+b1PL4hub1Z9XPr2/4czx/idH7zv32om/nJtWVj/z/smDS55bojyw4vd9A9bFVw9/9cjlR7f+svfWt4z95ey2P09H//CDe07B0bWLNtz5wceXSKD1jxemFt+05Y1Tpz8bDTa9/dD0v5v4eO+hSze/lvvwyH9SbbHu7Wd/9sFPT79t8StOP5U//8znb950/O8npi5a3zyn7R8di5zJNG18p2qcvDPx6ajbcev01oODky/9873lB84L59nvnk3Xknp9w/7hLV7HdQ0N/wN40l3V \ No newline at end of file diff --git a/docs/cassettes/chatbot_41.msgpack.zlib b/docs/cassettes/chatbot_41.msgpack.zlib deleted file mode 100644 index 3750672f2c956..0000000000000 --- a/docs/cassettes/chatbot_41.msgpack.zlib +++ /dev/null @@ -1 +0,0 @@ 
-eNptVX1sFMcVP0gTLKhKUZvQNmm7uZAPIu/e7t5+2sQlPp+Nhc+fgE2b1MzuzN6NvbuzntnFdw4kgbRJG6niSlWlSj8kgu8OXR1DCB+BoBaKWqlVokZFNKhKUiVVW1WtShXURE1Lunu+S2zBSnc38+b33vvNe783O0o90CQI61dOId2CJpAtuiDOzpIJJ21IrEeLGrQyWBnr7UkM7bFNdOGujGUZpLmxERhIwAbUARJkrDX2SI1yBliN9N9QYZlmLInVwszF4W11GiQEpCGpa2Yf2lYnY+pKt+iiOY5tDpiQA1wGakbK1jhACCIU6FTgmnQyAU0OqCpXjsWl5CjmaAZySbbmcIorYJsiUkhFtMAhnRswgY5IhnM3cO0mVqHnhBQIhVrjSAO3wjc1vaPmOp2huuz3BixyLFnC+ZW7Dm0RaLpbaYPyYcHPW2YSe1id7YbYL6EmBBpdpIBKIN1geSyWRQb0uEQhsr+UgUCh+f27mrv3ZDCxnOmV5dsPZBlSPqjLWEF62jmcLiCjgVNgSgUWPEkz12GlTc5sFkKDBSqahMX5U84RYBgqkoFnbxwnWJ+qnJOnlRPemp70suNo/XRrbw9NIrqtsbdAW0NnBE70c/yzeZbWH+kq7Q6rAJpPwarsP7fcYAA5S0nYs83sLJ7EK8sxmDgHu4Dck1hBCUw542QDpubzHV9pN23dQhp0S7HeG8NVjZ+GE3we88DMCmJS0GXnTLkRz644DC2Tz8qYcrjP8wUZ46wEncl/xuNSKpnUNsYSidb2OFPuD0prHnk/MdfONpltbW26tKlj+66JWI+M9e5CbNtAWwsfkvFOLhUbxZFsJrKBb/Z0uNk+Pk+Exc7u1nU+LWzZhcWt0XhEDqEdTUpyaLBnJNI5EYHbNkg41rF5wAr1Tk4isalfGNrQDQaOzG8w9w7BXDSv7KKqPdLtjEh8OBEVsUXJRyMSL1Y9DPGKPAJYWIf9ksKGdNM4Ha0WeHW0mruBIaJbxZRbpDVzTNsZK1Jdwo8/KlVHnunted+G954aGEeuw9RqLkoSH+ZEiROk9bzYJIhcR8fQVNTXhh/vUHhvyAQ6SVEZttZlX5Yztp6FymT0hmI/5IudVtILn81jA81hArkKK3dqhOuvTk0u3lmcqowWh600DSa66nrW5VnfU8hPK7KtKJnJvMaFpgURJaEtpw5XtlAl+W7oMIz5qGgXxHBxvroy7bspmitvulLN/JhefWWm1jMGNrFJcXtwkXO66jQwVWq1dTwkBTVB1dtbO2CRQqxZbTAiJDF3wBoaQ2BCNR7L07S6UEUaouWs/HPePaTAcIguPnsjwMJZSO+F5XBVU/7U4riX8gzlUGfYsCwfvjmgBqEa0tCb+VyECoQ4s/NotO7J0URnXmIN9rBV7ezhRW0onvcqV7Y+u8i6vKk20F+cIV/UrS7PKcc5cbM8A9JSkG1oIAZoMEoKUtDshPJUUeH/gKdJyr+6xK89dSXdhQcLfjvNqLsEZl//vqv86clkkh5fpKNlRzm7jx9asb+yPmeIxTe4xWv5g8u3XTmqHnrn6qWX0Lc3rRl+6MaGvyaee+2tc9vTb25bd+1Y/Ifi3ede2VwJ93+4aviJ4eHhA9dX/aln7ZmG++sf/OD+T731gz9/fPV/x3d9dm9i25VT76ib9h49fWnuhQdoZ+Hsmf0XfrPql62DL84/M/rKzJmGueZ7/37b/bnvPLDjnt+98/T8qffv6Xx78L4vHZc+fO+uLZfWbr2vzN3b+Hps/trcMw9Hfn+Fu379hbHQ6dHz8sobL3/3vzjyfO/0j3/59/3nr/72F/bzN+Of3r0Xdv7tu/TWZ7r7XvLXHPyTTvxCXdrcFGocSB3cVHv9vK34x/YTty5/8YUu/M7aRUMcXMpVNt1hZFm5taPvXz8f+M25TZkaz5YlKGzfN7DUs6eTgWT6i5cd//z6V6/+5pXDr55eNL92QtNGT37j3vT9B/d7/eCPudA0fBgf9g7fTNH6/3jlZ/8HHSlhhA==
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_43.msgpack.zlib b/docs/cassettes/chatbot_43.msgpack.zlib
new file mode 100644
index 0000000000000..fa62ee2c5d484
--- /dev/null
+++ b/docs/cassettes/chatbot_43.msgpack.zlib
@@ -0,0 +1 @@
+eNqNVQ1sG9Udd9sxdbRDlRBsAgE3b1ABucudzz7HrbrOcZw0zVKXfDRJW5Y+3z37Lr67d773zh/pso6mWjSIEKeyTEMMVJLYNEsDoVFhlBa2roWxCTQ0TYRKaNpQ+VgntI7SbdLG3jnO4iyVqCU7ee/9/r//x/v9/+9gOQdtrCFz1bRmEmgDmdAFdg+WbZh1ICaHSgYkKlImdiY6u8YdW5u/RyXEwpvq64GlcciCJtA4GRn1OaFeVgGpp/9bOqzQTCSRUnxn9eb9fgNiDNIQ+zcxe/b7ZUR9mYQu/H3IYYANGcCoULdSjs4AjDVMgEk4JmriPLQZoOtMJRiPkyGIISpkknTNoBRTRA5FJDVdI0VGM5m4mdY1rHL+OsZvIx16TnARE2j4h+qYZb6p6caK6zRCypLfa7DM00QxE2DuZQK1aAdDewU2WItYcvL/MKICM4M/l81EjGWjpA6Nr10brwpymplmUo659XPJixBfI2sPLQCjYcYoMiYw4FWo76c7BlKg7m2lLcIGEWtopuYhTbon0L+Y2BAYdJECOoZ0g9baoiIkju0x8Vx4qKxCoFCJvuvbMKEiTNyZ5bJ7BsgypOzQlJFCE3WPpQc1q45RYEoHBE7RmE1YEbU7lYHQYoGu5WBpwcp9FliWrsnAO68fwMicrubIkqIFVx5PebmxVMgmcecSNIhoa/3OIu0PkxG4YAPHP1tgacE0U6d6Z3VA4ylZlfOTtQcWkDOUhK32nltaMJ6pxSDsTrYDOdG5jBLYsupOAtuQgsdr923HJJoB3XJs50p31cMldyInCFx4dhkxLpqyO1m5iOeXGUNiF1kZUQ73CF+SEcpo0J2/1N8vp/qTxpa2vBrriUfimdZisrklm2pAgZ5g0SGtKRjN5KRkoCCL0XRvMdadYYVwIByUpKDQwAoczwmcwG7nJdTS0WcYTbbC72wOizls7kolGqVBRylEuVhjqtlJFKDTxMHw7t50a35bIJDta2wc4HaLXf0DNtmWzWkitwN3bu8Abfnm7f298Vx+M0Ojc3KasmVbCMS3Gara3x1FBua723OZHQEp3NGWGcyhbCC0C/FcvLkY3709XRMeL4osX41Q4oMNvPeZWdSGDs00Ud3xUFh42obYolMJDpdoyYiDD05QHcLfvlauTrynEm1LEr5poolq0j3VbGt1TCDMdEKLCfCBICNENgXDm3iJaWnvmo5V3XRdVYKzXTYwcYrKML4o+bKsOmYGKlOxq4r9lCd2epNe+LRHWViwEIZsNSp3upftWJj1bGvT8YXOYpGdBqY2WHHrnqqoPj9YyCuyoyhqLm/wkcGgqCWhI6fmqiZ0LHluaECsgd1xMczPVE8WdTdFc+VZgWd54ee09TWZtpmXjIVswmIo09eFFN35OgMUvB7bIgohUaKF30znuqw7Cux0kk3IoMrEm+kUhDoCyosFls4LqGuGRi+m8lt9ubA7EaLGL6wEEJSB9I0rByvXyp+uRdjQ4/eSWKIJRiKRl64OWqQSKSQiCi8uR2FYG40QMPALKwFVinHBwNOFRTirKe78N+iiXwlGBF6MJMOCICmiBIOhgCgCmaelkQJiSHom1szGgKxCtrMiQLfc1Lcj2t4aO9HL1iqJTVgLT3vZRNjUUqlSJ7TpzbhTso4chU5LG5YoV0e0z51rkCOQ52E4nKSueSnENtI5tMj2P91NeKO28sY/UPLu00yfXZW746G1vspnDf1+9hl5ZGviDL/h7JXvPnhmXdvwO49Ic7ceuPnQ21/91tq4abd/+cSbb/947PrLJ79e1taseWjkyt/3D358+kbfkYFZ333H7uw59+En//r0wswN+ZeKBwYuPLnrjY3n//r+n88cP7/3B98efWp47OPev/3Ouc/N3tLnbvnj6+PK6NHD/nsvzde9WbqN7xp/+MS57GOdA492a9Ibl8jeicM/e/fUXd1gw7pCu893+mT407nQ93543WhLy9l9a5+LzjYMrd7wBOk6tD563djYWfX7N/U1n+9+eeSxi5EzI5Psxacv75v8xbrCkZv/YA+7woe3rP/lDf+8mB99tftW5fHknmP7hw58NHk0+8TYkfc3Dk1OX/+Xt8bXK/LIK+81tVz50it7nn/532JrQ+Jc9tefPCB98Tv33/6nX1mXV+147vbA7O+Hv/nkf9L3ZJSj5B9OLPtBWRe/MJq48+SP/BdXj9/d87r0wU97Rg7P7O17r+0nX/nN3l3++Y1bK8Vd44tfqEvtW+3z/RfE1cHo \ No newline at end of file diff --git a/docs/cassettes/chatbot_45.msgpack.zlib b/docs/cassettes/chatbot_45.msgpack.zlib new file mode 100644 index 0000000000000..85d17e80065a1 --- /dev/null +++ b/docs/cassettes/chatbot_45.msgpack.zlib @@ -0,0 +1 @@ 
+eNqNVXtsHEcZP0wTLKhKUZvQNmm7uZAPIu/e7t5+2sQlPp+Nhc+fgE2b1MzuzN6NvbuzntnFdw4kgbRJG6niSlWlSj8kgu8OXR1DCB+BoBaKWqlVokZFNKhKUiVVW1WtShXURE1Lunu+S2zBSnc38+b33vvNe783O0o90CQI61dOId2CJpAtuiDOzpIJJ21IrEeLGrQyWBnr7UkM7bFNdOGujGUZpLmxERhIwAbUARJkrDX2SI1yBliN9N9QYZlmLInVwszF4W11GiQEpCGpa2Yf2lYnY+pKt+iiOY5tDpiQA1wGakbK1jhACCIU6FTgmnQyAU0OqCpXjsWl5CjmaAZySbbmcIorYJsiUkhFtMAhnRswgY5IhnM3cO0mVqHnhBQIhVrjSAO3wjc1vaPmOp2huuz3BixyLFnC+ZW7Dm0RaLpbaYPyYcHPW2YSe1id7YbYL6EmBBpdpIBKIN1geSyWRQb0uEQhsr+UgUCh+f27mrv3ZDCxnOmV5dsPZBlSPqjLWEF62jmcLiCjgVNgSgUWPEkz12GlTc5sFkKDBSqahMX5U84RYBgqkoFnbxwnWJ+qnJOnlRPemp70suNo/XRrbw9NIrqtsbdAW0NnBE70c/yzeZbWH+kq7Q6rAJpPwarsP7fcYAA5S0nYs83sLJ7EK8sxmDgHu4Dck1hBCUw542QDpubzHV9pN23dQhp0S7HeG8NVjZ+GE3we88DMCmJS0GXnTLkRz644DC2Tz8qYcrjP8wUZ46wEncl/xuNSKpnUNsYSidb2OFPuD0prHnk/MdfONpltbW26tKlj+66JWI+M9e5CbNtAWwsfkvFOLhUbxZFsJrKBb/Z0uNk+Pk+Exc7u1nU+LWzZhcWt0XhEDqEdTUpyaLBnJNI5EYHbNkg41rF5wAr1Tk4isalfGNrQDQaOzG8w9w7BXDSv7KKqPdLtjEh8OBEVsUXJRyMSL1Y9DPGKPAJYWIf9ksKGdNM4Ha0WeHW0mruBIaJbxZRbpDVzTNsZK1Jdwo8/KlVHnunted+G954aGEeuw9RqLkoSH+ZEiROk9bzYJIhcR8fQVNTXhh/vUHhvyAQ6SVEZttZlX5Yztp6FymT0hmI/5IudVtILn81jA81hArkKK3dqhOuvTk0u3lmcqowWh600DSa66nrW5VnfU8hPK7KtKJnJvMaFpgURJaEtpw5XtlAl+W7oMIz5qGgXxHBxvroy7bspmitvulLN/JhefWWm1jMGNrFJcXtwkXO66jQwVWq1dTwkBTVB1dtbO2CRQqxZbTAiJDF3wBoaQ2BCNR7L07S6UEUaouWs/HPePaTAcIguPnsjwMJZSO+F5XBVU/7U4riX8gzlUGfYsCwfvjmgBqEa0tCb+VyECoQ4s/NotO7J0URnXmIN9rBV7ezhRW0onvcqV7Y+u8i6vKk20F+cIV/UrS7PKcc5cbM8A9JSkG1oIAZoMEoKUtDshPJUUeH/gKdJyr+6xK89dSXdhQcLfjvNqLsEZl//vqv86clkkh5fpKNlRzm7jx9asb+yPmeIxTe4xWv5g8u3XTmqHnrn6qWX0Lc3rRl+6MaGvyaee+2tc9vTb25bd+1Y/Ifi3ede2VwJ93+4aviJ4eHhA9dX/aln7ZmG++sf/OD+T731gz9/fPV/x3d9dm9i25VT76ib9h49fWnuhQdoZ+Hsmf0XfrPql62DL84/M/rKzJmGueZ7/37b/bnvPLDjnt+98/T8qffv6Xx78L4vHZc+fO+uLZfWbr2vzN3b+Hps/trcMw9Hfn+Fu379hbHQ6dHz8sobL3/3vzjyfO/0j3/59/3nr/72F/bzN+Of3r0Xdv7tu/TWZ7r7XvLXHPyTTvxCXdrcFGocSB3cVHv9vK34x/YTty5/8YUu/M7aRUMcXMpVNt1hZFm5taPvXz8f+M25TZkaz5YlKGzfN7DUs6eTgWT6i5cd//z6V6/+5pXDr55eNL92QtNGT37j3vT9B/d7/eCPudA0fBgf9g7fTNH6/3jlZ/8HHSlhhA==
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_46.msgpack.zlib b/docs/cassettes/chatbot_46.msgpack.zlib
deleted file mode 100644
index 28611c6f70f93..0000000000000
--- a/docs/cassettes/chatbot_46.msgpack.zlib
+++ /dev/null
@@ -1 +0,0 @@
-eNqNVWtwG9UVdh6Q0lICJH0wbSeLS5Nx4l3vavW06zKyZDuOo/ghJbZpE8/V7pV2rd29m713bUlJaEkggaTDsAOktDxmSGwpoxrHISEQ8gCGhtIhdAol0wZKOrRDfjTNAKVlptOm6V1ZruU6M0Q/JN17v/Odc+75zrnbi8PQwioy5o2rBoEWkAhdYGd70YKbbYjJvQUdEgXJo91d8cR+21LPrVQIMXFjQwMwVQ6Z0AAqJyG9YVhokBRAGuh/U4NlmtEkknPvzv/ullodYgzSENc2Mt/fUish6ssgdFE7gGwGWJABjAI1M2VrDMBYxQQYhGPCBh6BFgM0jSkH43IyBDFEgUySrhmUYnLIpoikqqkkx6gG02qkNRUrXG09U2shDbpOcA4TqNduq2dm+aamK8qu0wjJM36vwXKEJooZD7OK8VSjbQytOVhvNWLGyf/DiAKMDP5cNgMxpoWSGtRvvzZeBQyrRppJ2cadn0ueg/gaWd0LWIEZPccYQIdXYd5Id3QkQ83dSpuEFTkfS2wriVysQXcF+ouJBYFOFymgYUg36GWbVIUU6HLxXGBbUYFApho9X3PzqIIwcSZm6+4gkCRI+aEhIZlm6jyTzqtmPSPDlAYILNGgDVhWtVPKQGiyQFOHYWHKypkEpqmpEnDPG4YwMsYrSbIkZ8K5xyU3O5Yq2SDOkS4aRLijoTtHG8RgBM4b5PjJLEtvTDU0KnhWAzSeglk+P159YAIpQ0nYSvM5hSnjiWoMws5YDEhd8VmUwJIUZwxYut97uHrfsg2i6tApRrrnuqsczrgTOUHgAodmEeOcITlj5UI8P8sYEivHSohyOE/zBQmhjAqdc38bHJRSg0m9WY3m+oIdvCX3J6Go6JYcbuXWR9dx+YCvt3P16g1ZvzRAYh3rhkMSKwQ8Aa/PE/LxrMDxnMAJbA4E4orH5PuwHQD8kJhXQb7PyGweVPu4Pj6T8faraxJetN6bUvPeIduf6Y6vlklaHwR9LUNGPBHpjae5nqFYsrs9k79Lza4Z9LaidBNDo7OHVbm5g/e3+brbcHDd6pYNKTM4EosaPhz2rB3MdvYgk2yOkXYuGw/3BFFVeH7Rz/KVCP28N8i7n4lpbWjQSBPF2e8LeA5YEJt0LMEdBXplxMbbR6kO4ZnXi5WRt6+rc0bCXxmNUk06J9sstZ7xBJg4NBkP7/Eygr+R9zSKQaY9lhiPVNwkrirBQwkLGDhFZdg6LfmipNhGBsqlyFXFftIVO62kGz7tUhZmTYQhW4nKGe9ne6eGPdsRPTzVWSyy0sBQ82W3zsmy6kfy2RFZsmVZGR7R+VDeK6pJaEupIxUTOpdcNzQgVsfOftEnTlROpnVXornSyvMsLxyjra9KtM3cZExkERZDiT4vJOecq9dB1u2xZlHwiX568U10sEuaLcO4nYwinSoTN9ExCDUE5BezLJ0XUFN1lRam/F15urAz6qPGL8wFEJSB9JErestl5U9VIyzo8rtJzNB4Q6HQiauDpqlECgmJwouzURhWRyN4dPzCXECFYr+g4/HsNJxVZefcHXQxGAz5gyFJCAE/TCaTvqBHDPJCAMoiBB4oB3wHI21sBEgKZONlATrF6MC6cKwjcrSfrVYS22VOve1FA2FDTaUKcWjRyjglSUO2TKelBQuUqzc84BwJSiEpGRSSqVQQhHh/iG2hc2ia7X+6G3VHbfmRv6fg1tNIn56XW7bnCzXlzwLSE+uaLyw98emqTflP5i/ccMPrp7++5JHsW794+LWH3t5W/+jRg5+dXP/ABy89zi37+aKG5jOkIP72o5t+9OzYz7Z7Muxr7b87fuq2936567bY7jebz/7k8PMffvrMiS23pJ/69frAKuWfO9seLJ26fdm3do9NFmMt7GMbd/05uVVYybU/vPPjxL43nl255qMTd/6l48m973zjxj1vhY8tzzx0dummxpqaP775U3HnrS+9s/T697/9J2eg70Ch2Fmz0nnj/rfP7G1d1PP+Xu/ShZElW+6+cE/dmP61C929wSX9Fxcv1G/07Lg5unZtuGkStP7q4h8mFo/8/Ya2RY+9+p/rft/75Zax85su3133QSn15OM73z224FRT60jidLTzex+v2Ap3fPiP2qdTXwwt/1Lr2cnjN72cEXsuF5/b/cDWR+77zfUR/xM/+OulxL/q2IFvXoi37Nu4Z/GiK6Wvfmf30Qd/eGn8s3n6gbt2vVIH4/FP9l53OQkj99Utf+/M+R8fuvhvesdXriyoee7VS/fa82tq/gsxlsAl \ No newline at end of file diff --git a/docs/cassettes/chatbot_48.msgpack.zlib b/docs/cassettes/chatbot_48.msgpack.zlib index 29393494b03d8..fcb5b94524b87 100644 --- a/docs/cassettes/chatbot_48.msgpack.zlib +++ b/docs/cassettes/chatbot_48.msgpack.zlib @@ -1 +1 @@ 
-eNqNVWtsHNUVXpI0bV6KKxV+pKhMLQgh9czO7M6+DE5ir9/ED7wLhqKwvTtzd/d6Z+YOc+/Yu2tZEZg8lEiEIXLCj1ZKsdlNHEN4GMy7rfJQQ4La1BKtjRIlaSXUpkoUFKio1KR31utgC+bH7tx7vvudc8/5zpmdpX5oEYSNJRPIoNACCmUL4uwsWXC7DQndVdQhzWB1rLsrFh+1LTTzaIZSk9R6vcBEAjahAZCgYN3bL3mVDKBe9m5qsEwzlsRqfubqYLUOCQFpSKprua2D1QpmrgzKFtVbsM0BC3KAy0DNTNkaBwhBhAKDCly9QQagxQFN48qxuJQcxRzNQC7J1hxOcXlsM0QSaYjmheoartrCGnSZSZ5QqFcP1XCLHLain3Ntq3WuHekL0TaBVvXQU2xHxyrU3K20SXm/EOCpbSWxizXYrsT+CbUg0NkiBTQC2QbzY7LUMaDLJQqhoVIGApUl9hNP1VgGE+ocWpyst4CiQMYPDQWryEg7B9MFZNZwKkxpgMJxFrABy6VwxrMQmjzQUD8szp1yDgPT1JACXLu3j2BjonJBnuZNeKt53L0dz/JvUGeyiwVR3+btzrOqGpwkyGFBPJzjWcqRobEy8Rpg8RTNsv3MQoMJlCwj4SuKcYpzhw8txGDiHOgASldsESWwlIxzAFh6UD62cN+yDYp06JSi3be6qxhvuvMLkiSEjiwiJnlDcQ6UCzG16DCkVp5XMONwfisWFYyzCDozVxMJJZVI6nWoMd8bbhMtdXMS+jO6pdY3CRsbO4VCKNCzvrV1Uy6obKEdbZ39EYWXQr6QHPBFAiIvCaIgCRKfB6FYxmeKvcQOAbHPX0Cg0GtktydQr9ArZrPyZtQel/FGOYUKcp8dzHbHWlWa1hOgt6HPiMWjPbG0sKGvI9ndki38EuXaE3ITTq/lWHR2P1Lr2sRgc6C7mYQ7Wxs2pczwQEejESD1vicTufUbsEm3d9AWIRer3xDGC8IL+oO8WIkwKMph0X0OzWtDg0aaZpxRn+x73YLEZM0Eh4ssZdQmO8eYDuGHfyxVGnV/1/qbEl461sg06ZxrtlAN5wtxMWhyPtEnc1KwVvTV+gNcS0d8IlpxE7+tBI/ELWCQFJNh07zkS0rGNrJQHY/eVuznXLGzSrrhsy7lYc7EBPKVqJyJzXzP3ITi2xqPzXUWj600MFCh7NY5V1b9QCE3oCq2qmb6B3QxUpD9KAltJTVZOWJa2HXDAuJ1Uk7OoYplXnfj7K6s8iIvSqdY6yOFtZl7GRNblCdQYTOR5p2ZGh3k3B6r80sBf5Alfi2HDEWzVRizk41YZ8okaznTghoG6ukcz+YF1JCOWGHKv5V5S5yxADt88lYAxVnIJnNJLpdVPL8QYUGX373ETRo5EomcvT1onsrPIJFA5PRiFIELo5F8Ojl5K6BCsV8kE7l5NI9UZ2YlWyTCAVFNQVENgUhIDkUiUJLDEIhSIOVPplQ58Fa0mY8CJQP5WFl/TqlxS2d9R1v0xGZ+oZD4LnPue1QyMDFQKlWMQYsVxhlXNGyrbFhasMi4euq3OJNhJaIkw2IopQQiETEY4RvYGJpn+152Y+6kLX+YdhTdchrpd5ckVrz4I0/5uZPuudj5tli166v8C+9eufs4Gn1+W/eO6NaVe/Z86mkY+UKLDf/9gevNVRuHBv6x8n+f/X7VczfWvTwz0+RpuJK8t0Ff9eZR39f/Dk4/eO3l0+9/eSMy+FRw9uOL25bdU/cv77LhT1+79sNHHzspLB0Zqb+krp1Y3v7entoLe7W9D/ei2Z17LfkPm+LLX/JO78tOFXPh9r98cmrVwT+/uOZsk8dz5vpPa2eXP/3OT54dHu7Z/5+jT0h1j91RtfvJ7l3fbBt85cQXl6NNr3x04viVzP0P/OqND+7bNx16BIElv7nvofEda169eO3U5c+K132TU8elc+cfvyTF47svnb/SMjR68Prlkeg/3382MVhsn6QrlpydSv/1aeHHI6NHP186vXq2Ktx5Orz9of/e1TL6M98RefWQ79s/TV5wDgS/1Wo/ujZ9IvyD+19adfkXz+B1yqnZzNbX3vy18ao29beW37FMfvfdnZ7nlccvtN7h8fwfIDVPpQ==
\ No newline at end of file
+eNrtWVtv20YW3r7mqSj2dQGWKFBgIdLU/RIEC1mS40sdBbaT2GkCYTQ8FMciOTRnqIsDPzTtH+BPaOpIheGmLRrs9rLZ533YP+A+7I/YX7CHIhXLSIG+F9SD5Zk558w53zlnvkH2qBxBBHNMbd+7ZLaFLCw5XOD5UVmCEwew+KKqQ2xCdb7f7R98VYrY/K82Y1bY0tYGLSCMBQgAW2pr1Gwz1TRsteG/YcAizVgaq8X5a/d9ccCQZWhAtLWJ27HfZ2Dm2mDRoXoHtwQQAQ44E6pWypYEhCNLAlPgGg0yBpEAhiGUY/E4OUoEpkEhxdYCSQt57DBEBhmIFwSoa7hmC+vgOiEFzKBZv90gLPHNTG+quc5grC76vQ7LHEuUciFupQK1aAdDewU2WItYcvL/MKIKM4M/l81EjGWjlAHNb1ybrwbGdJwR0g669XPJC5BcI2t3AZoDhgQTXsH4LnZgYQ0G1a2MRXm/GOCpbaewh7XZroT9MmpDZNJFGhgE0g3CtcUkSJ3aJRKl8HyhQ6DShP+rqRVcYYz7k0tl9wyQZUjZoSVjlcZrH0wVFLOZUWHSQBSO0YANWC6FO5lD0OSBoY5gfuHK3Q0sy1Bl5No7OxBsnV86Em9aFlxtHnJPx7LSG8S5cFIg6lu8v0DzbXASL4cF8XCOpyeGDJ3WL28gGk/ZqJ4/vNpgAjlLS0DGvvJBW1Ow+nwuBmF3sh3Iic5llMCWVXcS2IYUPD5/37J0imzolqLdN7urGG+6CwmSJISOLiImeUN2JyuJeG7RYWiZAitjxuE+z5dkjHMIurP/jMelVDypN6CW/EC4XbTUzUkYyOiW2tAsbGzsFAqhQM/61tZNuaCynXa0dfZHFF4K+UJy0BcJirwkiIIkSPwsCMYyPlPsJXYIiIP+AgKFXiO7PYEGhAExm5U3o/a4jDfKKVSQB+1gtjvWqtK0ngA9DX1GLB7tiaWFDX0dye6WbOEhlGtPyE04vZZj0dn9SK1rE4PNge5mEu5sbdiUMsO7OxqNAKn3PZnIrd+ITbq9g7YIuVj9hjCeF17QH+TFSoRBUQ6L7jM4pw0V6mma2RXeLhPicCFeKbIro564LcFaCc/8XKpM6IGu9TdMdPlEC1Okk2y2sIbzhbgYNDmf6JM5KdggXicFa0VflW6OV7L03FSCJ3tMYJAUk2LzvORLStY2slAdj95U8EdcvbOUehNoUcqjgokJZCtZuRObeXtuVvFtjcfnpoqnVhoYqFDO6tEriu8v5Hmb+RxGfCb5WQ8ajNKKu1eFV1vOyHgnB7A8u7jWBBBv8MaqgemO7w6rhyz0M5VDXqFhTFmZ2Vb/1VOnUrCWq/XJMekUz0pp5EQmnHSQDUn0KIlfsnnzRr8n2haMcUuXNjav3pQKh8Jxe4qWmhvEUNRla2tZKvX5mFV8pNF24+bW5QNKczIq+egmEm1dwzQONi1rXNfasGFtvVKPcownmk7M/aM51ebOxNfwJic41j2zqlrgUcqEBuQqlZjuooNeCLlpHByGU/QQlw5B7t5WKDero+HQRQjfPdQEuTQP1+uCQvlbNU0uWGneR7E2r8Phdbuopa2tA8FKmPbLMvBiu86IqgCZUT3O+SIvaZlhxGeClyX7aZnsMLiwP4TSKKV/1KoEC4eIxazsgwQmI6KZZMZPrtV/9TSoFKtlsTrjhoPvKFBXTuXUG4J58J66r2nlwJimRGOpp44N3zh1SnlULfuGZp6mJUe3jk+0+6/7caJzeqJ69U3m8xSprllYN+2nrsqtnx775uHXdw97vvz9ufzqL+7o+k2dc/j0s6HkDTc+M21QzI1I3VWfP3F6Nlr2/c6fdx14vyFx56hTPvT03a/Sr/5r2ZFDZ+/q2zk6On34wJNvfn1L79CvCo99V7Xd/ujX4udr5/ve2n7iwMITEfPkjyMj4+1zPnjwTPFELz1yVjCe/5u96fYP75y3uK71CNsPQ8uW7e95qPh3J92x88CMitJVQ5UHVW/TPfc/4LL+Cz+cNIA=
\ No newline at end of file
diff --git a/docs/cassettes/chatbot_5.msgpack.zlib b/docs/cassettes/chatbot_5.msgpack.zlib
index e5aec1c68abc3..983518a3a0ec4 100644
--- a/docs/cassettes/chatbot_5.msgpack.zlib
+++ b/docs/cassettes/chatbot_5.msgpack.zlib
@@ -1 +1 @@
-eNqdVWlsE0cUDiVFUSu1oKIU0Ygs7oEK2fXuen1GVpvEuQiOExxIOM14d2xv7D3YHQfbEMTRFqmtShcBhV6i4NjgpgFExNUGFVEEPX5UKqEKIP5AS6noQaUK0T/p2HFKIvjVlezdmffme8f33pst2R6o6aIiT+kXZQQ1wCO80I0tWQ2ujUMdvZaRIIooQrrN5+84ENfEkfkRhFTdZTYDVaQUFcpApHhFMvcwZj4CkBl/qzFYgEkHFSE5snm9SYK6DsJQN7mIFetNvIJNyQgvTE3iXKJ5nkTUKkFTFWHSlBjMb8d1qJl6V+EdSRFgLL8VVhFpoawkimtBJa8r410Gv3WkQSDhRQjEdIg3EJRUHAlWzGPRlL03G4FAwHFuT0cUHRkDkz0/DHgeYnQo84ogymHjs3BKVKsIAYZiAMEcdleGhbwYuSiEKgliYg/MjJ0yjgBVjYk8yMvN3boi9xfDI1FShQ+Lc/nYSJwMGRmDPuxETbO5LYlTLBMMxTko+kiC1BEQ5RjOGRkD2J+MWpB/PlGgAj6KQcgifUZm7PDARB1FN/q8gPf5J0ECjY8YfUCTbNyxiftaXEaiBI1sXdvD5orCB+YsFMNQ9qOTgPWkzBt9BRpOTDoMkZYkeQVjGJ/QA+P5iUE5jCLGAYZ1HtSgruKCgVsz+BiK61vSmAv43cVssXL2+1rGSbxeUp72YF6MoQZNrCJYO+GHKsHSLEcwNhfNuiw00ejt6K8rmul4JA1HOzQg6yFMRf047Vk+EpejUMjVPZLwoTzhOJq8+7hOSZhQFR2SRa+M/i5y8VjLkM2eY2PVRSpaGMhiqmDWGCowvy6VWCfwcUGI9KyTaGeKs4hBGOdDg8UjqqbkzWCHSEk3DrCsdaAoGc99DsdKkwxN0swpXPwij0stH4yqaIjUIY+bFCWNkSoJJPJ15rYwVouNpulqQpT5WFyA/njQo0iYHb2aUDUYU4BwOkHijoExURIxMYX/4gDQjbQVHz75sAJSohCPiixHF54zEzU0mMfPB/EAhnM6nV88WmkcyoJVnHbn6claOpzoDcNK+smHFYoQ+2m9PzGuTYqCMfICXgQYwcoDmmO4oJUJ2aBdsDMhK2RYxs5Cu90ODtc1kHWAj0DSX6g/I+tZ1lrjba7L+TF2naJERbjjypSpgQAfCgQlt+hJdjqaaU3oCkJLRNKEmnpqiaeVStmti1uampYmbPwy5G1u7XHyJLZh56ys04pJo2iKoRgyCez+CKvSnXrcDuhuS0oEqU45ujYgdlKddDTKdYkLOzhlCRcSU1x33BZt8zcJKCwFQGdtt+zvqFvsD1Pt3d5gW2M0tVxMLAxw9UoYswlQxG2uJnBtijgt7mKHkLhDyLH+sIz3RzUhFGrATU2ehtVEE57vPjmWrMaNhYsJ4jeQoF9E0N2qyHBkJ85BvEcU3M20rcHa1qA7Wptql4ZUxzqvR7bqNeyiQKKlXVHRWi9qpBL+mnaHMiEJNouNpIt5sNGco1A8D1z/n14d7yInNjzpU8cusqys6LIYCmX8UMMNZOT4mBIX8GDXYAZzvrhmmTHo4J180A6B0xmyOGmbk6zFI3Mc7b/xkM7fCoUbbXMm33Zy+PyUNZVvlZUUnqn4NzqK3j3beo6e7vnj/pvnb02zbj7IPVtadmnOjH0LiZnOr21Wx/WL18tyudF37rwxWHHyJn3DvvejVaVEB6lNH27QhntcZ059cOinGxt2Vb33ZDTT+fS9w3uGBn2f3todZe6W7qrsumuOr26/Kgr1P6/pLH/xgM8bun2Hbbk1ws0ihw+6L6OKwIdh5t7xdOL99MirctOSXzxPKTNKvqQ29Db6b5y4OPdmaV/9LWGrqbJySlnLlQW1rmxFy7mgxi1g337p0up9l0tPdHx8dsuZfTum7VY3rWlx08Pa+t4nVqy81/rPX5kfU9/f45/zesOH0NZXfo3CnVdnlvc9f2P0zh7XjGu9pX/OGfh9tMLkapEuPP7DvNmVe9LbHGuv3f+qonzGoqq/232jr785e9n831p3P7Nt4zfchU23u9iVL1+7zBw/v3zvNCm8ZBYbO7p9YyGNU0ss34b1xsdKSv4FMhxWSA==
\ No newline at end of file
+eNqdVWtsFFUU3ooajCBqIiQKOlQFA72zM/uY3W1dsd1u6RbbxW5b2qI0szN3d6e7M3c6d2YfrcZQBCNqdHyExDey3TWlFBBEXkWEoPiIrxqlKiTGgI/EaJRgFAze3W6lDfxykt2Ze86533l859zbn09CDUtIKRuSFB1qvKCTBTb78xrsMSDWH87JUI8hMbs8GGrZZGjS2KKYrqu40mrlVYlGKlR4iRaQbE2yViHG61byrSZgESYbRmJm7KG+chlizEchLq+kVvaVC4i4UnSyKK+X5lOBhTJVg8LlFVS5hhKwIDYw1MofvJ9IZCTCREEUVXXgQECWFKlgqRAZS95Y1yAvk0WET2BIBDqUVZKHbmgFJIZ2PZiPQV4kWT6ZjSGsm8NT497KCwIk2FARkCgpUXNLtFdSKygRRhK8DgdJsAosVsUcjEOoAj4hJWFufJe5jVfVhCTwBb21GyNlqJQc0DMqvFg9WMgMkFIourkzSIKoDliXZ0iBFYqlHW6a2ZYGWOclJUEqBhI8iSenFvX7JitUXogTEFAiz8yNbx6ebIOwOdDIC8HQFEheE2LmAK/JnGPHZLlmKLokQzPvW36xu5Lygjs7zbK0a/sUYJxRBHOgSMNbUzZDXcsAAREMcyMzPFGfBFSieszcxNpcr2sQq6Rd4Joc2aYbuD9LuIAfHc2X+ua14LIJEk9YZmdrCS/mSJ0mVVA2FxWCKmVjbA6K9VQ6uEqnm1ra2DLkK7lpuSQN21s0XsERQoV/gva8EDOUOBQHfZckfKRAOMmmED7pUgDTKsIQlKIyh9pB8/jAgEDtjvHuAkiL8orUW3RrjhSZT/WmU6JgiGIsmZIZT6/DLoWhIUR2lraoGiq4IQEBGZPieNzDJc1E7QdJrgxgGcCwe0jzSwJptUIyKtJ0gKFARlTPmGMVMp8u9JnXzjrtHMMwVZSkCAlDhCEjXItkwg6uolQNJhAv7k0DMjEwIckSIab4Xxp/bGadZPPuiw10FIfkoMg7mOJzYLKFBgv4hSQuwDg8Hs/+SxtNQNmJicfl2TvVCsPJ0bA2Ge++2KAE8RqDh9IT1kASzbHbyKJL5MWI4PY4BUZw2p0ujol4nC43B+2RiOjkbexWXx3w8UIMglCx/8x8bUdTdWPANxgi2D6E4hJ8+uuyaV1dQqQrLHuXpWK+FX6PPx7IhOuW9kTcyLbCkTH0QARWx5Nc2JYW7NXR9oyvNQ5Yl83l4DgH6wYszdAszYIGhkNLmztkuVYTmeV1LnsSK22RYA3Xa4jpatpXE6kzgmlo1NLQ1dkeDaTqbbaejpqabrrT3tLVren1PUnJTjfhUEMzvyxV19DV7k+mCJu8HvNaqyjSmxIpi7c0IYBMCCDzYWMq2Yn5qKLEYg946amnYRVVT073oJLIVJHBIs0EyZuXYUjSobcJKXDsWVIDIymJ3non76+XY7Gu1mokY6a1MRlvsnGu5mXx3iTqsTnbEEP76zL+zobopCIwdjtgSnXgGIe72DwXQv+fUe1qB5MHHgTV8WssryCsSJFILgQ1MkDmoJBAhkgOdg3mCOfN1R3mTrfggQwT5hxh6PAwnBPUkCNzAu2/4yFbuBWK99nqXGHslOiRsu5bHptuKT7TyO/8ef2patR/97Vrfz23/qcXB8rW3L3f0Trr+TOJZ7iZHfxBecNLYF/W+e75E5ZFZz69+pE/z/TtzyydZYGr8cEHrPetuunkEnrOZjBy54bmga7uD7cu9q6c/cKux0+Ozl447519sz44vnbvJ3u49S+K3mMZ36F7N9y4YPS29d/fcU/vplP9LY+0vvvXdWff/OLzV64cPTrr+iX3fHL6Zdg0+4ffTpVZ5p5FX4XmnF6Dpy+YM7Ag7ve7377rsoPrWvOvbmvrXLjz8PuxU0Pzfjrwd+eh/lRq2yl0dO26mTMun7v7ivfWuu/89umZz2254f6+44uML4NfLVq/ePS3j1/+Z/OqBvDLXfH69i+Poy3CmpG+3y0nzj7zzZIVr0q7ORqfC+y9esbh+de8txF99MfcH49d5fx59KGNQ9TC7BuuHd99Gvy5e8b04dvPLv7s5qFNlYsHhl9nn5jbVtHYltsx71F34NbTTSesxYJOs3xwpN8Tusxi+RdCumK8 \ No newline at end of file diff --git a/docs/cassettes/chatbot_51.msgpack.zlib b/docs/cassettes/chatbot_51.msgpack.zlib deleted file mode 100644 index 18902845ceb27..0000000000000 --- a/docs/cassettes/chatbot_51.msgpack.zlib +++ /dev/null @@ -1 +0,0 @@ 
-eNqNVX1sHMUVdxwqKvVD/qPQSlHF1kVCJbfr3bu9L5uUXs6J7Rjbl9wR2+HjNLc7d7u+3ZnNzqzvI6Q0SSlq0lZsSxCEipbYuYPDcUIwaRoIIqKorZuUhAqQg1olqCKESi2h2EiUj86dz/W5jkROurNn5vd+7715v/dmZ3kU2kTHaMWEjii0gULZgrg7yzbc6kBCf1gyIdWwOh4biCfGHFufuVmj1CLtbW3A0gVsQQR0QcFm26jUpmiAtrH/LQPWaMZTWC2ca+7Y1mpCQkAGktZ27o5trQpmvhBli9Zh7HDAhhzgNGhYacfgACE6oQBRgYsgkoM2BwyDqwVT5eQo5qgGuRRbczjNFbDDECnd0GmB0xG3DmUMnWhCq4drtbEBq05IgVBotm73cEt8M9Obaq4zGKuLfq/CMscSJZyXW815G9EOgfYyrNyIWHTy/zCqAZQln8uGMGfZOGVA81tXx6uBUR1luLSDbv1c8gIkV8k6fwFmgUPAhFcgvovtmFiFRnUrY1HeJ/h56tgpXMUitiuxv4TaEJhskQYGgWyD3bXFRMiAVS5RCG4vaxCoTKJ/a2oZ1zCh7uRS2R0CigIZP0QKVlmi7sFMUbc8nArTBqCwwmJGsCZqt5KF0OKBoY/C0ryVexhYlqEroHreNkIwmqjnyNOCBZcfV6rZ8UzIiLpTAyyISE9brMD6A3GSIIcE8XCeZxemI4PpnTcAi6dk1c6fazywgJJlJHy999zSvPFkIwYT90AfUAbiSyiBrWjuAWCbAfmZxn3bQVQ3oVuOxpa7qx8uuvMJkiQEn15CTApIcQ/UCvGbJcaQ2gVewYzDfVwsKRhndejOvJ9MKulkylyjdxYGQz2irQ6loE8zbTWyTri9s18oBv2beru7N+cDyjDt6+kfDSu8FPQGZb837Bd5SRAFSZD4AgjGNa8lDhInCMQRX1EHxUGU3ZrUB4VBMZuVh/QNCRnfLqf1ojziBLKxeLdKM2YSDK4dQfFEdFM8I2wc6UvFurLFLXp+Q1JehzMdHIvOGdXVNT1iYL0/tp6E+rvXbk5boVxfJ/KTiPe2ZL53I7bo1j7aJeTjkY0h3BBewBfgxXqEAVEOidXP5II2DIgyVHPH/EHpCRsSi00luKvErow6ZOc40yE89YdyfeLtH+hdlPB1451Mk+6J9bbu4bxBLg4tzit6ZU4KtIvedlnkuvoSE9G6m8QVJfh0wgaIpJkM1y1IvqxoDspCtRK9othPVMXOKlkNn3UpD/MWJpCvR+VODPGb5mc939P5zHxn8djOAKQXa27dEzXV54r5nKo4qqqN5kwxXJR9ego6SnqqbsLGUtUNC4g3iTsmy+Jk/WRBdxWWK6u8yIvSb1nr6wprs2oyFrYpT6DCXhdacGc8JshXe2yNT/L7AuziO9hcVwxHhXEn1YlNpkzSwaYgNDBQj+d5Ni+goZs6K0ztt/5yEXfcz4yPLQdQnIXsjSvLtbKKLzQibFjlryaxSCOHw+HnrwxaoPIxSNgnHV+KIrAxGslrkmPLAXWKMckkE/kFOK+r7syNbJEMhUPpsJgGqUAYhFJ+WU2BgAgUMaSGgRpKBQ9F1/NRoGiQj9cE6JY7h/sjfT3Ro0N8o5L4AWv+aS8jTJCeTpfi0GaVcSuKgR2VTUsblhjXpsiwOxVSwkoq5A360gpzHgjza9kcWmD7n+7Gq6O29sbvKFXriTIvr7jnhj1fbKp9VrLvZ59Rd6B3pdTy8r8++VpUuPvcq7e+PtLWunvqrUgs1vIdffT4qkdPex4ZdvZ+P9ez5bnie0fu//BP254v/Oe1FbFA3BcrnUlNT88KQnnuU+Xf9O1/7l7le2GbiS6+9I9XnprdNXNLBdEz717304+PffSj6enHh97vMJpHTj7c/suflX7/i7Mzf+R2/NxLzz40+OqqD29a8+RLu4u7LmhHcterXzmy7yS5tun86u1K168u3L8vMt2z4cen2rv8Hbc0x3bcmXgwfdRzbs+7R8888PUPnpx764lrSi37BA/37XtvjL0TvMb8sndXi5y49r3jv/vuqUvf/Mnc4R/cZajcg5fe/jh3z4vO6fbHLmYvzn00d+aNjXd/Ycvlx2744NnZh/bA753+uxK8vpLY/+ZXX+w9d+BTUqk0793ruS/wjnL+0mWh/9l799839kl6Q/dUdvbC+eQb3ujZ1y8f2vra5K9nC4+KoanbMjejocN3Th58RUqcLB38yzfe/HOwdsUrm760+a+rR5qbmv4Lx1LS0Q== \ No newline at end of file diff --git a/docs/cassettes/chatbot_53.msgpack.zlib b/docs/cassettes/chatbot_53.msgpack.zlib deleted file mode 100644 index 71cf2b5d80476..0000000000000 --- a/docs/cassettes/chatbot_53.msgpack.zlib +++ /dev/null @@ -1 +0,0 @@ 
-eNqNVWtsHFcVXjdKCSguRrSVKLVyuy0YEs94Znf2ZTcg22vHjuPnun4hWO7O3N0d78zc8dwZe3cT0yREUFrVYaAioBbc1PYusVwncYyb5lGIqkoFCqVxRXGCUEVKxEMQqIoCfRDurNfYlv2D/bG7957vfOece7577uH8MDKIjLWSGVkzkQFFky6IfThvoCELEfNITkVmEkuTHe2R7gnLkJd2Jk1TJ9VVVVCXWawjDcqsiNWqYb5KTEKziv7XFVSgmYxhKXOl5A/73SoiBCYQcVeDL+x3i5jG0ky6cPdjC0ADAQiSSNHjlgIgITIxoWayoFYjI8gAUFFAIRmHE5gYmEkEYnQNcBxksEURMVmRzQyQNdCgJRSZJFl3JXAbWEFOEJIhJlLdo5VgXWzqWlEIncBYWo37f3iO0EIJUDNAgyr6/FoHiyBjA7y5QgUEG0amEjQDCWsVJkjCYRpZFOm5OBUVqnDIWNCER4AINYpczsixUYgEM+sCraa7WXJAhWYS6AaOKUgFkiwBmdKlNkn1i3RHxRJSnK2EbjJe1seYlhHDDlajuzz9JaaBoEoXcagQRDfoqehULhTocHFsYDSfRFCiYvqdq2wyiYlpz64XyEmnWsqPNBFLspawn01kZb0SSCiuQBNN0wo0VJCfPZ1CSGegIg+j3LKXfQrquiKL0LFXDRKszRQrZsyMjjaap53qGCo5zbTn22kStc1VHRmqZA3wrBBkuVNphh6frCm0A4wCaT45vWA/v9agQzFFSZjiLbFzy86zazGY2FOtUGyPrKOEhpi0p6Ch+oUza/cNSzNlFdn5+o6N4YrG1XBelufZwOl1xCSjifZUoRHPrXNGppFhREw57ONcTsQ4JSN76e1oVIxHY+puOZzpDTZzhtQXQ96kaki1DexD4TY2G/B1tTQ19aT9Yr/Z2tw2HBIZPuAJCD5PyMcxPMuxPMszGRiIJD0610usAOQGvVkZZnu11FBU7mV7uVRK6JP3dgv4ISEuZ4VBy5/qiDRJZkKNwt66QS3SXd8VSbCdg62xjj2p7ICc3hsVGnCiBtDsrGFZ2t3M+Rt9HY0k2NZU1xPXgyOtYc1Haj37oumWTqybQ63mHjYdqe0M4jXp+b1+hitm6OeEIOd8Zle0oSAtYSbtCSHo+6GBiE7nB/pqjh6ZaZHDk1SH6JWX88XZ9Ex7y6qE75oMU03aFxsNuRJ4AiCCdODhPALg/dWcp1rgwZ7W7pn6YpjuTSV4utuAGolTGTasSD4vJi0thaTp+k3FfnF5IDBO+vSWMiitY4KYYlb2TB/TtTyVmebwmeWbxWAjATU5WwhrXyyofiSbHpFES5KSwyMqF8oKXjmGLDE+X3ShY8EJQxNiVGJP+ATvbNGyortpWivtPMdw/PP06ssivWZOMTo2TIYgkb4DZsZeqlRh2rlju728z+unB19DJ7CoWBKKWLEwVqkySQ2dQkjBUDqXZui8QIqsyrQxhe/iG0PsSR91PrsRYOIUoq9RXii0lXthLcJADr9TxCqNEAqFLmwOWqHyUkjI4zm3HkXQ2mx4j0rObgQUKSZ4lcykV+CMLNlLD9BFFAkxDsYQRyPQX58PenhfMMYF/Tzycd6YeLK+kamHYhIxkYIA7Xy4v622tbl+oY9ZqySmXV9+hPMaJpocj+ciyKCdsadFBVsSnZYGylGurtp+ez4ohsRY0BODiPOFOH+IqaNzaIXtf7qbdEZt4TU+lHP6qSVeKvnujse2uQqfLWbnpbZrXNnFm7saX9Dl6QXls+8sDHz4yFnQs+2+Izvf+N4DX7r36D/2HTz47o2XNfnNU8dCFwI3/vSLJ49+7ODc1MAhz+N3L97e+/5/zsw/HJ3949+yX9l/6wAZHvrELz/453snvhEonx7q+tX1Hff8u6t1+48uHe97u+ZK+PXDP7UWnqp6mLmeqFjcN9/9TMtcddPQg/13Xn6p9K7FpQOne/1S6dzApRe3ud68/9UnXjv2r1zPN0c9l91nZjufvaPFtfNbLY9cfuU7Dfs7rx0S3to6OjbaXi68u/UD++D1israLR8SFuZuH2BKjzR98uvMO5/5eHCP2dEQLtv73tOj4288+gj7/Y+8Wm7s/PL2K8f0zFv3KmX3lNeNC6XnXLutvw9Olez69o3n3T3lf3n9sRtPXb129cXjY3++82s33y85OfPcb2fuH/z0+Fjs+NOe8+UlD16vO7+j/ETZidK5u7N149uP5WtqfBd0cPQm2nXfa9pPEou3pn/9qUcXxn482vDX2waiH71jsf5nB3K//8HWqxYafZwPPzn+xM9/M/a5Epfr1q0trvZqTZ64zeX6L4VnuD8= \ No newline at end of file diff --git a/docs/cassettes/chatbot_56.msgpack.zlib b/docs/cassettes/chatbot_56.msgpack.zlib deleted file mode 100644 index 02221c5cc2137..0000000000000 --- a/docs/cassettes/chatbot_56.msgpack.zlib +++ /dev/null @@ -1 +0,0 @@ 
-eNrtWctu20YUbbYBCnTRXTcMUaBAIdKk3pJhFLLlV1I/5cR2gkAYDYfiWCSHnhlaj8CLpvkB7rormjhSYbhOggRtmjZdd9EfcBf9hqJf0EtZqmU4QNcF64Wt4dw59565j2PTDwcHhAvK/Gsn1JeEIyxhIaKHA072QyLko75HpMOso/W12taTkNOzTx0pA1GemkIB1VlAfER1zLypA3MKO0hOwefAJUOYowazur9fe/+B6hEhUJMItazce6BiBr58CQt1l4UK4kRBikPcwA5dBQlBhUS+1JWKL9qEK8h1lWEwMaYimSIdojRgrTBb6bIQLBrUpbKrUF+Z95suFY6uphSVM5fETkRXSOKphynlkm+H3lCWP/EA0bJ0RRJw48WR7LEWmTweCsLVw/vwxGMWceNHzUBqGT2nyZA3WGzrw1MTfgrJCfJgIXlIYA1+A7hWsIuhDL1wOHAIsuDS/3jvgyOHCRmdXr7IZwhjAvDEx8yifjP6rtmjQUqxiO0iSY6BgE+GaYqOW4QEGnLpAemfn4qeoyBwKUbx/tSeYP7JiLAmuwG5un0ck9MgNb6MXq1BEJXlqfUuZNxXTD1b1I3nHQ2yQX0XMqi5COLpB8P9nyY3AoRbAKKNqinqnx8+nbRhInq6gvBa7RIk4tiJniLu5bMvJ5/z0JfUI9Fgbv2qu9HmhbuMbpp64cUlYNH1cfTURq4gP1w6TCTvapgBRvSN0ceMtSiJzv6q17Fdb3gztNrdLi4b3NppkIzjcasyr9+uruq9Qm7z1tLSnU4e78qV5dWDEtbMQrqQzaVLOUMzdUM3dVProkLNSQfGtggLyNjL9Cjqbfut/Trd1reNViu7Q29uZdntrE172b0w31qvLVmy6dXR9uyeX9ua26w19Y29lcb6Yqt3l3Zu1rPzrDmtQHThAbVmlo38Qm59QRRXl2bv2EGxvVL1c6KS/rzeubXBArm/Ihf1Tq2yUWQT4eUzec0YRZg3skUj/jod14ZL/KZ0oifpfOlbTkQAfUa+7MOVyVA8PII6JL/9Ohj18OO1Wxcl/OFRFWoyervAaUpJF5QaCZS0kc4qZr5spMtZU1lc2TqZG7nZikvwDDqtI6fIQfzkvF2mFZgcXBA5E0pbK77Y4sgXNtTl/LgHBtgJ/RaxjufeWf1v4+qH1MZ8oGs10gmYINoozOhkR9s8H2facvXleatpjDeRT3vDVojeDtug3eu0LRxalnPQ9oxSL5uhDRJi+9XoSMBZ7AYC0jwRPTHT+dPRzrgQj4E8lIKhGeaPwI1i6LuYTMA4kCUYBqjsRmcpD3XippvJmLlMHjIxDaMLu6FFamGjyjwoVTGtBJy4DFlvOhoMEOJSj0Kmht9Hw1lERzk4/PqqgYQRBmN8kB3m2fhl0oKTGD8mcQGTLZVKP7/baAyVAZNSLv3mshUkbQLGTHvi9VWDEcRjQ5x0xtYataKzj2FRz9iFTMY2SgXTMi1cMAq5TKaYNhso2zBMyyg8m1vQ5hB2iFYbFmQ0qO6uVlaW577f0SYrS1sLzsVr4DPhU9vu1wiHxETH2GWhBdOTkz5gbVZ2o1dFXMKNYsbMW2mzZORL2uxabShXX/TjvPnN3z96biGJyiAZ1FLLaqxtGJRNq8wV6JZx9/adlcxNI+ix6iKvVjZWlz1rf2NTTamssQcFOjqhX6ihPixhMMBQ8pIA5rg782ZqrCyXhQWqKJ2DE+f6VbchLMIDiA7g/dB1ActhFMeyCqpKfYt01LKRUgFKIrX8YKRg6j+SGnsf658KC07sUCD3HO0wpbqsCTXeEGN48AhaWoeAQSpGVvcPr1//79/MxTXUYmlOKPdUUokrW/A7X1LJ30hs1pdIcns9/kspsdybLKnUy4nN+bbTTSx3iyZW3eK3Q4nlLjBKrsJhztqJzXyb+onljhJMvY14Ykf9Z4lN+yzBKBTJ1bkES3wbicRyZ6GM32PG/xJI7B0kWOcdmtzStylx/39jmbi0//n1V48SRf7fuapCskCdYHuvurY6f//69b8BZPHYWQ== \ No newline at end of file diff --git a/docs/cassettes/chatbot_7.msgpack.zlib b/docs/cassettes/chatbot_7.msgpack.zlib index 9f9298a4ef88c..5f0946c203b32 100644 --- a/docs/cassettes/chatbot_7.msgpack.zlib +++ b/docs/cassettes/chatbot_7.msgpack.zlib @@ -1 +1 @@ 
-eNptVXtQFHUcRyjFNJMxcjSbTsrJadi93XsfCM7BiSAeIIc8fAzt7f72brnbB/vAuyP+kKQsNd0crdEpX3CniCBK5SNsgnGyx4xpOnGZ2vuhjaZNqWNGvz2OhMGducfv9/1+P9/X5/vd5mgDECWG58Z1MJwMRIKU4UFSm6MiqFeAJK+JsED28VRrWam7Yo8iMrHnfbIsSFl6PSEwKC8AjmBQkmf1Dbie9BGyHv4XAiAO0+rhqVCspTGDBZJEeIGUkaVb3phB8tAVJ8NDRhW0eE7SsSEdR7BgfkamLkPkA0ATKRIQM5pWwhuWp0BAu/IKMmJEzYisiB5e0+XgLQ5/JVkEBAsPNBGQALyQASvAbKCihoWh1qaoDxAUzPVS0tRWHy/Jaufo+LsIkgQQH3AkTzGcVz3gDTNCpo4CdICQQTsMmgPx6qjtfgAEhAgwDSAyZKUeJAQhwJCEJtfXSTzXkUgSkUMCGCtu17JDYEk4We0phUE4ivRlIVhoToejJhuKHQwikkwwXABWDgkQMJ6IEJcfHykQCNIPQZBEE9XIkHHnSB1eUttcBFnqHgVJiKRPbSNE1mI6PPJeVDiZYYEazS8b6y4hvO/OiOI4au0eBSyFOFJtizfi/VHGQBZDCMlDDHUXFiF53s8ANXaztpakaz1sDuMMVdmKMJGq9gCjjxUpxwJ0qbMEDVvN5cWFhZVBC1kju4pKGuwkglsNVpPZYDdjCI5iKI7iSIiwun0GAauSFCuB1RnDDBGu4vz1tUwVWoX5/aZqZlGFiV9qopmwqU6x+MvchZTsZWuJqrw6zl2RX+72okvqXJ6yhf7wMia4qNa0gPdm62B0SgND5RRhlgJzWYFkKynMq6QF2yqXkzNLDsPi2mDxEl6Q613yQjTodiyx8SPCsxgtCJaI0IKZbJj2dA5zIwA4r+xT9+BG414RSAIcGfBSBJZMVqTmVshD8PmpaGJ2dpcW36dweqsTclLtLRCZTJ3BqnMDQWfADCYdbsnCDFlGXLfQVdGRn3BT8UAKdleIBCfRkIYLhikfJX0K5wdUe/4Dyd6rkR12UgsfTikCggIvASQRldpRjZQPLQ2kyHl4aLIQXvQSHBOOu1V746xfFQ6uokiFonwNq1jMHjYZGQ9QSLonYSKIvOYGBoSwkrrHZjJ3JiTDvGuHucLOYwiGH4Wjz5BwzLRkBF6UEQmQcE3JITWWyRJBbcZyjLjZaIGFz9YxHBlQKOBWPE6ehcyUsnWCCAI8QR0LInBfgADDMrAx8e/ECpTUVjM0PjJWQeb9AC7LqCneVuzESA0RaPhaEvdhTHa7/YMHKw1DGaGK3Wo/NlpLAiOjwQ2sdGSsQgJiNyZ1BIe1EYZSY8/CQ62JBDRN2wy4h/Z4jHaPhyCMmNkMPBbaRnusoCu/AMknSB9A3HH+qVFnTYnDVZT/XjUykkhIqTD0iohyvMQxNB1xAxE2Rm0nA7xCwWUpggjEKnfUqD020k5CbMoDDAY7ZrEjeXANDaP9T7tWbdPG3xWrI1o7Oe/Jca8/vS41Kf6kwM/goLykm7uATb14L30CujLQX5l55K/ynoLkxisDs1PXT1O2Dmyat3reyfTkwUbH11vO7pr0yq1rOScab+WmlEUqs8uOLTjTeXgdf/eA/e8LTcqA78qtN/PvnBu8/e9c5d6f9Qd+LvrovP7szap3z/2eVewZuOx7KrXljavh5QEruNG2AvTVuyyfbCt54rHPxNlNe4Pdy6b/tDJ26Mzj56e8401Kun7HNaPJ9eH5KYdKC2adXPvovou2h3YmX1gzd/0Zp0Nfl6tu6910unvjjlnOKcRb/Wl1408nIyfSCot3dvd/Wv1MSmr//l8mTf5mwh+/pmJ9aV+M69s+8eyelhtdc2bdTp7ZtO3UC2dXCxv6v9roSH777tp9qZeXN37c9QO9dTsqpF//Z+2ODdbunIf7TuWyaS3ZW2JXJ387xzow6H7tSvLxkpK+rh83f3fsSGzfb/S0mhdPbO5rbuiz7h98tcQ8ceaT81c4Lt6ccXnx+P04dan85OxHjn4Zu/1y3vTFuYuufZ91N0UrekrSvJZzEzYnJyX9B/ymRTw= \ No newline at end of file 
+eNptVXtsE3Uc7yQKKosKIjFiqEPFwO56116v7eaCXdfSbtmDbsA2GOV692t7a++xe/QxnERmBAUlF42PGBW3roUyXopzzG2AgqgsSkBMhnEhSmKMgJH4h4MR/bXrZAtc0sfv9/1+P9/X5/u9LekokGRW4At6WV4BEkUr8CBrW9ISaFOBrLyU4oASEphkXW19Q7cqsaPLQooiyiUGAyWyqCACnmJRWuAMUdxAhyjFAP+LEZCDSfoFJjHauamIA7JMBYFcVKJft6mIFqArXoGHorXQYqms5xJ6nuLAiqJifZEkREBWpMpAKupogTecwIBI9iooKgghIBzLs1lNHt7h8FdWJEBx8BCgIjKAFwrgRJiLokpZJAy1dKRDgGJgpmO6B5MhQVa0/TOjP0DRNIDogKcFhuWD2r5gOysW6xkQiFAKyMCQeZCrjZYJAyAiVISNgtSklXaQEsUIS1NZuaFVFvjefIqIkhDB7eJMNjcEFoRXtMO1MAi7x1CXgGXm9ThKWFHsYByRFYrlI7BuSISC8aTEnPzz6QKRosMQBMm3UEtNGu+friPIWk81RdfWz4CkJDqk9VASRxKfTL+XVF5hOaClHXW3u8sLb7kzoTiOWg7NAJYTPK315Brx2QxjoEgJhBYghvYRlqIFIcwCbfSaz0cHfH6urCoWcqx12pxhT8LvWtkWsArGtURCVTwBYA9HSb8xTpvswcaEY3UYwS1GC0GSBG5FcBRDcRRHKjFSWOlt4rgKicHqXBZTVObXBGrLyXaVidtRR3nApdbGgVqBAktzY9ATcxuNbU3l5a1os6nB1yop7rYoa0Jr5PpKL1UVc1X6Gp3RWKkeRqdGWabMbaacbi4U8q22C5yMra6OhmuMpMVbFW6PCm1G8xoBQ52uhLO5MjgtPMxkQrB8hCRGWLHss3+KGxHAB5WQ1o2b8N0SkEU4MKAzBUumqPKWJOQhGPk6nZ+crtqqWxRekKyAnNSGXBJbrDda9PVA1BsxI6HHbSUEWWK26ldWN/Q68m4a7kjBQw0SxcsBSEPnFOXTdEjlw4DJOO5I9qEs2WEns+HDGUVAXBRkgOSj0nobEe/kykA8FZ9MThYiSEGKZ9tzbrWhHOtj7fEYQ6sME4rGOMzWTphYP1DpwOG8iSgJWTcwIISTtW7SSOzPS6Z4l4G5YgiOIRh+BI4+S8MxyyYjCpKCyICGS0pJaKPFHBXPzliZCTebSFj4Uj3L0xGVAfWqv0LgIDPlUr0ogYhAMQNxBO4LEGE5FjYm951fgLKWNEPj/tsVFCEM4KpME7m2YsPTNSSQxc8mcQuGsNlsg3dWmoIyQRWbxTYwU0sG06PBjZzcf7tCHqILk3vjU9oIy2ijT8KDjyCsFquNYBjMb7bBA24GVgBII7ARJspCBw44XIiDokMAqc/xT0tXNNXYqz2OvkZkOpGQWnHyBZHmBZlnA4FUPZBgY7QMHRFUBi5LCaQgltfepB220jaAYX5rIICZbBhpRsrhGppC+592yeymzb0pXkxl28kHTxacXrx9ji73zIKff/9VvKf5n7AHB28sf2Twkv/Rvr0XMgbnhS43WrZgzqtF2DOt4NATsXtRcGrOxJ93P7XirMPw7MjluYHoOFlQ17fqsbrUuW/+ufjqjQO+4bLH97xw3/nY/M3PLlqz4ya+SLzq27yHXSPenF09sDvwcof4TvE593vbSqhoS+/bf0ifLlt4/rk4fwaMosmuS1+s+/ikNFy8fXygsSGonvpx1+7CiQ90usFC4TtP4XCmcCS98ET3trGPLqzfqmtg3n6ox9n7ZLWnWkhSlW7H3ytD71ydp7Tv3PWa/SH33IJujJ39VvPrG35r3hW+Or+x9fIYORS9q+xI8cEmQ92See70ElI8uGfWzo33erdu7N+55RgiLj6OXcG/vVjS+eHjrojtl4dRdsO2B8SR7mTZdxdffGAjcXzJjg7j7Fce65iYGF57om/h+7GqITk4obS8+WvLtvWn3z36y76nlv9Mdj2SGnzYs+iHYx88rfv+S/v1zdHecmbxWfKv+73k+mJXqXrjHss1h/r0/KVX+Pj475u7uE7PuqV/vp756vm3opbOcfOlM95vVs27zoXZrXhF6Vh/4U991xu7N+zb662ueeNormOzdJ9bCub+dZdO9x9fx1Kh \ No newline at end of file diff --git a/docs/cassettes/chatbot_9.msgpack.zlib b/docs/cassettes/chatbot_9.msgpack.zlib index 01ad202d8df0f..98dd3b8f25693 100644 --- a/docs/cassettes/chatbot_9.msgpack.zlib +++ b/docs/cassettes/chatbot_9.msgpack.zlib @@ -1 +1 @@ 
-eNptVWtsFFUULpqgkkgKpoqiOKwGEujMzuzOvlqwbN8Ft912Sx8YaO7O3N2d7s7cYe5Mu7uAj9poDJA4EiUSExWWXa21PKzyEh+ARAQlxIisaOMfRRM0Skw0NgbvbLfSWibZxz2P75x7znfODOT6oIYlpMwalhQdakDQyQGbAzkNbjQg1gezMtRjSMwEW0LtewxNyi+L6bqKK+x2oEoMUqECJEZAsr2PswsxoNvJfzUBCzCZMBJT+X822WSIMYhCbKugHttkExAJpejkYGuUFlNNS2WqGoVt5ZRNQwloiQ0MNduWcmq6LUwkkGW5mGpE/ZQAFKqJAhhLWKdSyKB0JIJU1VSYCSUg7v/H6iSZLsWUnKIUIMOqmbHXE4mMRJiwRFFVp52Mi9YNLYwsW4VIOfKLdQ0CmRwiIIEhEehQVkkViaGFxTKeLbkYBCKp8VhJaSaGsG6OTK/bPiAIkOBDRUCipETNt6NpSS2nRBhJAB0OkaQVWOiKORSHUKVBQuqD2Qkvcz9Q1YQkAEtv78VIGS5ektZTKpypHrJuR5NWKLo52kKS8DfZgynSYIXiGN7LsPuTNKmYpCRIx+gEIPlk1YL+2FSFCoQ4AaGL5DGzE84jU20QNvcGgNASmgYJNCFm7gWa7ObfmSrXDEWXZGjmaoIzwxWVN8I5GY5jPAemAeOUIph7C404NM0Z6lqKFhDBMF9nswJCcQma+Ws9PUKkJyyvlGpTnd4mVhO7wtAZkzXRX8esrW1m0h5X25rGxo6kW+jWA03NfT6B5jwOD+9y+FwszTEswzEcnQKeUMyhsp3Y8AC215mWQLpTiW/skTqZTjYe57uk1e08WstHpDTfa7jjwVCjqEflHtBZ3auE2mvaQlGmtTcQDjbE0+uk5Ooevg5FKymSndEniSubWHe9K1iPvc2N1R0R1dsfqFVc2O94tCe5phWp+saA3sAkQ/5WL5qSntvpptlihm6W97LWMzLJjQRUonrM3ONwOd/QIFbJqMKns6RkuoEHMoSH8NynueLM7m5Zc4PCZZlawknzeL0mlVMODxWCKuVgHTzFuStYR4XTQTUE2odrimHab0rBA+0aUHCE0LBukvI5IWYocSgO1dyU7MctspNOWumTKaVhUkUY0sWszOEuum1iWdFNte9MTBaNtChQpHQhrHm8wPr+dLJfFAxRjPX1y6wvzTulMDSEyGjRRdWQFYYkRMvY3MP5PCNFzSTvhshdSedZmuWOkNGXBDJm1mVUpOk0hgJZj3rKzJfLIGnN2Eon53K6SeErKUkREoYIQ0a4FsmEmbiSUjWYQEA8mqTJvoAJSZZIYwrfxdWLzYyLOB+eaaCjOCRLOscX2sp+MNVCgxa+dYkbMLzP53v/5kaTUE5i4nO7jk63wnBqNpxDxodnGhQhdrN4ODlpTUuimX+YHHo4zuv18pzLFYY+zsd6WB56I9AliJwzwns93n019XQNEGKQDhX4Z+Zqu5v9gaaa97roqUSiW9SJV1NOQViRIpFsCGqkMeaQkECGSJalBrMEq83fbY56BZ8Q9kT4sJeN+Fi3j64ma2gS7T/aZaxNW3hHPZW12qlEP5nV/uDW20sKz63kc/26HvQ3n2RL379S9tL5wIuj2+ccKWPK7i99bc7iQZsnn5l9OZ5f2Dr4d794KPDZU2cXRfoi0StLwbazz+zc9nWl/933lq3/jvnhu6c3VP053pBO7XZ/uAtdXP6E3T1uf+SOOd8+vioQ/Wq0Xi57vhZn4rcNUId/PJvXWs+NbF0x/76FQf/F7tVXvrTtHZO5z691b2z7NXjx+9SZsfmnF4w/5xT5O6+mzhw8c++F74//9OTOk/OFb1+fc/s9Y2taTpS+uk3c/sxHpy7tu1QZGZy9ZOWxj3/5Qzvvv7xpnrZrw5t3Pb5g7vKrwyffrvr91rHVD/0zolErdqmn7lq3KF9z4tKmzfuv73j5sWe/aav7644lC77YuueBB5c/cMbZ0Fi9/TfbeEvItap62RMX5lWd3pFdeOfBxcLmF8ZRy2tvdeya+4rh6Lj7Le/DHxZKRwp44Odj3C0lJf8CQsxC+Q== \ No newline at end of file 
+eNptVX1sE2UYH/AHxiAQFI0m6lnBJbDr7vq5DhG7rl3LZJ3rYB9Ex9u7t+3Ru3tv9951bRGQDzFEiV4wfgTECFsLzRggBBWY8ROmSIgfUTeJCajx2zm/IvED3+s62RxN2t77fPye532e3/PcxnwKqlhA8pReQdagCjiNHLCxMa/CTh1ibXNOgloC8d2N4UjzHl0VBhckNE3B1ZWVQBGsSIEyEKwckipTbCWXAFoleVZEWITpjiI+M/jnGosEMQZxiC3V1Mo1Fg6RULJGDpagcBsVKpeoGhS1VFAWFYnQFOsYqpa1FdREWyiKyLS8jQqiLooDMhWiAMYC1qgM0ikN8SCzZDzMqBIQ9/9jtZBMyzElZSgZSHDJ5Nj3EYmEeCiaorii0Q5ES4IsmJYykbHkH2sqBBI5xICIIRFoUFJIDTVdNZEYq3ttPgEBTyr8Wdns7gTCmtE3sWoHAMdBgg5lDvGCHDf2x7OCUkHxMCYCDRZIyjIs9sQoJCFUaCAKKZgb9TIOAkURBQ6Y+srVGMm9pSvSWkaBk9UF8240aYSsGUfCJAlvqLIxQ9orU6zVUWVlDqZpUi9BFkm/aBGQfHJKUX98vEIBXJKA0CXqGLlR577xNggbPcsAF45MgAQqlzB6gCq5HIfHy1Vd1gQJGnlf4+RwJeXlcHYry1rdhyYA44zMGT3FRrw4wRlqaobmEMEwnmdyHEJJARqDP3d0cLGOqLS4vivha/F7/MlQJhqo64xVIVuLI6NroRj0JlOuqC3N2b3x1oxveZJm3Ta3w+VysFU0a2WsrJWllzIuVNfUJkm1Ks80Btz2FJZXxMI1rqzOp71WX00soIfTUK+1Qnd7azzUFbTZOttqalZb2+3NHatVLdiZEuzWBhxZ2gTquwJLO1r9qa5FFMlOTwn84qAT+INSItGx3IskzCxflko22FzupvpkNoU6bc4ViLH6Axl/+9L4uPQYu51mShm6GEcVY376xrghQjmuJYw9Nie7V4VYIYMKN+VIyTQdb+wmPITvDuRLE7s7XH+ZwnO7awknjf6AKlRQNjcVgQplY2wOivVUO1zVTg9Vt6y511cK03xFCh5qVoGMY4SG/jHK57mELichX/Bdkez9JtlJJ830yYzSMK0gDOlSVkZvK900uqroUO3h0cmikRoHspAthjX6i6zvyqa7eE7n+USqS2I8WYddiEKdix0puSgqMsOQhGgJk+K4mb6SZox3BXJXhmYZmmFfJqMvcGTMzMsoSNVoDDmyHLWMMVghgbQ5Y4vtrNPuIoVfRAkyJ+o8jOjRWiQRZuJFlKJCEQH+WJom+wKKgiSQxhR/S4sXG91O4vzSZAMNJSFZ0XlHsa3MK+MtVGjim5e4DOPweDwnrmw0BmUnJh6X89hEKwzHZ8PaJPzSZIMSxG4G96bHrGmBNwbnkUMH5N3uqJ2zR4GTYWOQhy6eAx4mVuWORnkbX3XAF6B9gEtAOlLkn5GvbWvwLgv5jrbS44lEh5XRF1NeRlgWYrFcBKqkMUaBE5HOk2WpwhzBavK2GUeqOA9kmCh08G6nh3E56RqyhsbQ/qNdt7lpi2+oDTmznXL8rSnZWx+5qqz4mUa+ly5pj3vDnzOzHxq+OHfT2WtW1weP9fcOWaZMl4PHF262uJ+Zlzt66oXyu71/Dy/M94BZa0bOr10XHj4Tn0o1009ShQYV7mxo2fHbmeHvR1xfFE6vHzn5wIlX0I+oZdbx41H6jve8n97Q+8dpvc3ovKnNWHzhnT38o/sOR+/nM7vOrJvxauO26qH2U0Jg370ren755s76vL965c5zN++7elPr65GpZT95ThzK7x+5rwJUX2yasYVaed3pX8rLzofq/AOzm1cJbfcEWg14xPfbkg/XHHyf2luWHLr/1Zv7vfPpc76Hh17blV1V+HLdnJGzw+wtO+4aqHrr9/bnfn3j5adyH2WGtly75LttLyzQeupuHDg5c8PbWzfcMufBr8D6S2fnnb/zxlPzP5gpzThxrPrk1pFvb98JywfebHosu+Kvs80fDTR989MPWy58Eh5MzXh6V/3Mhf5PpgcirneOfv1sy6ntrj7x+o9XPpE5NFCrBv+ZahZ4Wtn2S3vqVpHnfwH0sWF4 \ No newline at end of file diff --git a/docs/cassettes/qa_chat_history_1c8df9d7-6a74-471c-aaef-6c4819ee0cd0.msgpack.zlib b/docs/cassettes/qa_chat_history_1c8df9d7-6a74-471c-aaef-6c4819ee0cd0.msgpack.zlib new file mode 100644 index 0000000000000..fb6d8d5b1c38c --- /dev/null +++ b/docs/cassettes/qa_chat_history_1c8df9d7-6a74-471c-aaef-6c4819ee0cd0.msgpack.zlib @@ -0,0 +1 @@ 
+eNpteXdUU9v2NR3pRQURqSKIEAid0HuR3psIIQQSShJI6B1ERESMKCUgTTpIFaUZQEHpRTpIlQ5KlyLli17v7937vXf+SMbZa6551llrzr1HRiILfKHeaDgSQVwKR2Cg3mAIhnCDxkYWeEO9fKBozL18TygGhnTONTYyM3/p4w0f54NhMCi0nKgoGAUXQaKgCDBcBIL0FPUVE4V6OkGdneEIV3SuE9I5YNw2iBeOQPlgeOW47exkJaSlhbmlpcWlpe3thbl5PZHOUA9ChBcD9ccA/i8VAHYGA4BAcV4CBIqAIH+tObggvT3Bv2h4ncBoqLQkb0gBDAp2JpQ+TcSYC0OiMdiyf5dTDoZAoCgC7x8K7CvXQDhKmNsZ6uIBxkCLIUgEAvr7ZbHF7lAoCgD2gPtC8//KwlaAUSgPOAT8Ky7qhkYiSgl4DBSBAWACUND/Dhf7oKHeALArAYF9bUQoQlVX1DiA0DcEt5iIpKwIsMIfgMaA4QgPKBoN8AAT6slH/Y43/DOAAkPcCSSAPzPB5v+VXPZPDBKNzTMAQ4zM/kUJ9obAsHlgb09pyep/rnv7IDBwTyi2QN34vx/3J/ifx0mIiImJyFT+ixgdgIBg81zAHmjo238lQzHeAQAIksCBzQbmQ5BIdzgUO27mAHHx8YU7K5pLyaJAcCstCR8/VVVPWWlTkIEUDAyyBkuqIbVdweJShu5qFpoaYCcvVYCYjLiMpJSEuJQsUEYKABQBioiJiAGkgZKywF9X2d/N94AiXDEwbA4IWOgNRaMIQoVG5RNKwvigI3MJc4Z2txd4Eooj9DDHSO8/ErmSq0GYOfadljdcmFtchtsMiuIWB4pLcouB5CRl5aQkuLUNzEvV/zzF/H+OuNLcG4xAuxDGrPm3pAogMB+EO9S5WP1/iqnhl5gInfpVvTfSg7DogfQDIL3hrnAENvvWu/8vDPVHIdFQwJ+isaXWANO/LAjQ1Sj9S9iA357Bvvnfjqn+A0J6u4IR8MDfpWPf/VamX6C/nzPEx9kZ5uvnCQQFSkrAnaA+EJfXf1JQ3shftfwi80Rjc8Rkyv4E/pZGMaFdQIAYEAAUq0NjvOEQghN+9QOF9MYA0FAIYWPABGDHhT3B/r9soCghJiUhTRidPDccAfHwcYaa+ThpID0J4kHLc6O8oR5IsHO9P8CbMBYPuCecMNrfn382HTQ2V4qQXPvfAAzSHUrYngqkfgsDiP8nwhv6i//XO/yHRhIEAjX+b9DfVL8gIJBs/b9RaOg/qxET90TX/jfgD0UOEF3q/zcaAHfGjvMRbhxkIFCIk6QUWMYZ6AR1EhOXlYaAnaUkpMTAYiAwBCpdrq4FUAdDYFCA2W8JYws0bAxVDXTVi80I3Op/GWqCmNSBYCkHJ09FBEpVXNZSBulpo2mp42Tlr2Hgh3aAqolp6iHhUBuwpJkDXF/MA2xkqIn+y1HS0lISEgCxP3Zy0dNBmXtDjC0NJDQdbLy8DRA6QHMbpKynFULL191WC4ySkkVAXdEBbhBvSSs3tIQT0EDa0kUE5C/lpaYKk9a0FHMFeqnKWFrqII2koKhALVN3S1XCNMEYmKKoPDdBv3BCWxT/mAxAMBmAYDFxoJzY3xaT53b+rQFFkX9v1vLcOoQzxQjhESBP8CZBTFDCN9gTagbHQBUNkQjoG2vAP90JMEL9dUwVIJBoBNzFJd8M6k2QKrYY4oH0cSbs8N7QfEJ3TVVtsK9lISAoUEIWLOEiAQEBgU4ANcLe+Tfb/3k599fx8PvUisj/JXCEa9sVK664C0S/L1JzK73BYoXkF2FN80Opny1yLow3Snzn5jape6DIrEGmsfCU6OgseHQXY+W3zxf1QqqzfuN52Pl6X2aTvSXr0d5BpnLYzLDs6WlC5vhXpanzs91l5fP95b5M5fMzxXOuKlj1/Hah0qLoyUHVscLDxU3lsMPzFydcK4S0g/2TG62wgKFdmXeq5bJjpnE+g0BW26dRw+McNQX6VKkebVd1XM/VdoPHq7c7+MMR6GCmofZNlBh6m6y1zJvXIRIAaeuYm11V5xTmYW6sOxIn6eIeI05mVLqD7BjW/n5UZxyFKcA9n3Qle+rpAEdB1lV2zmoTvyxcJutCm4vPTR+Rd3tnD379KZHTm6Gga7pked3+s6SP8lQTvThakX+SUi6mOgiG3huTRwbndqk6/Dg4oJ4K8iFhM0x7VOi/rtb4yqN408hVmueVMvG7u75j5qa9ED+jtIgx6GP72gw3fjsGOnufJGYwhuF4Y/QtZ1YdRyX+1QwshDqSTDOR9rDQWvfNR3Yxom+HZG5xb+v5G3IWGHdVsz6J3U7tf3iHbAK1dQGp9AmllLqsJf35fsHSJRBb3k4veaLd8MxB4WiWjtBQflv0GqSce9o/IyBVBQyWSbr+WtrWUi25cdmxoUMZpmA1R2VcwlGxrWrwjroYoqL5ivTH2vaDEdOmb2CgqMrzyAKpSYFjerqnM59fs8V7uAZNUWECpGGS9m0ezQVqUbsDjSZvf3gdPp9xIzvQvuLUprA8p1Ax/nSTfhq8ezjOcE9mgS2iVWiMJkMgQdGklrITzW8Rg+dcOjUsdUxeoC1gUed5UatNVtT+righD+SsHyXPmyDRcjiBmw4ObmVZu7WtV8OwOVod4h0C3H6W6DmuKB7pBe8Y7K3WCRtMkbH4/iXi8jBTTZNLgvWKHxN1Oic43q97Ltnoicfu7R33LyvrPAuqeHuc0OPXQbM9fXerHIcOL1Ha+Wm4AwWJitzzzCU2YvAXRi4yJuvwnM1FAdzGvY1XkdgJ2jBM/gQ7l+Ie0dxL6xieBouh8OorSpXy8eVPFIdf3zO9bVBaVaa5S6yY7qA4AOn3nRbHL2rNb1CRN9iYNEZD4+P41OvBHAa4t1cjx3KSHl9kKim8otUL9PZ/fMJq5zLp3MWLe7hQmK5fdaLORsUQSha4cW9oSy1fdBBFOXtyIy9CwYOiSt2iySRRNudQaWhuvpcaW1JQe3IoRT1685B5T+z4WfHAuPYukVSzn53l4WiTxIS1Uvupf+7aGTdvTlgDGXCOo9g25uUn4auXgYp3mUZSA/dHTnaKw9d3GdoGV1aqN4qv5pqvkDqtuLQz2M+/9+mm3zPW/LKlmDhBfno1s3IbRILLmyyKcawekt3+AEqXNsuOyC1itjzxGlkdbFZItxd8mRTIC6vDeJJOuRVJPnmRiL8Zw5ow1nDqJLyBeyS3k9ZMKRadFT+9qnxibtr/Lt2xJDcafyX4S+QO47yj57qnjgNDbvYhO5nGyWc9WU7Uq5agGZwzW2BICteHb5G0UvUDQ0HDclOsJKoZyZsdJY9JXuhbprSD5jNaUIhIlG4au+qXn0tPTXdBIacPDKkDRfGV1JFyG41FmH6ArZMb7e22EWgl3R7Iy3Ut4IA8/jKNi+xlyLX2XgooMruU+QFa8KNtRcBqV0eVnuxSG4v7psg1lo2fLOn3WulbienmiosoIgIDOXUDx+L2E/CglCo+Hgt0tIf5wiRn586GyVc14Tfrr2rIyh4JFB83pMnIaaZ0Badv6tgsJq
Cc7oRprzvvDc1o4t/ueeJv90glwKdzXimNbtuRnDV+Dh/8qv1xaGspPOXpk3kZsRRgsGHECRM7XUnNVqFV2ZqLfzteC72cUykYnEkLzCDhfypOMy1rWEpSJ6DtFsKbYRB7a/+mT7zk4SIkZKqcWufjmVa1a1XfyI47a/NoWmxp35RMGsNNX420L/KBHlMKdojt9Uj6tL34qaSwFQ+hUNHwg+WHE6/mGfTGp26Z5048vHyeRdtAY/kj/NA2O2ZLr7qI4+DylYY4wx9pHWBhln7yjAK3Fmv8l8bU4hCi7UHaos2iu7VkfoVPPnBI4m6xvBovqIf1X9C97PEMwtj7YuvuQCAtp2pITQKPAVgwq3trTzlgKxd//z61wg5ehMkpzXSbh7abzO5tp6bBcrbh07oumrS0HqmfHTPMONWjsxX14beUhhiTyW7O4dF5M1FSo0t8D7pohE4DwQai1nT6J1b0vi7u9xocvWWBbZUtLW5Pj9nVr4Wf3QquCsm9Zdf6GQ1+yS6TkxfHKuJqYM5oez0+I/nkAYt2t+Uzi3ztw0aF9Qa+8VMyUyikiV55/JFvMlnkOrjtR9cZ7Xo9O3rMYTf1lmKZxA83O6r75rLg0dcfcVR06c34KxLj5g7u1lz1sCwtvQPrnqcZ2L14ecYDA93hJU96D0SfvEZ5AYeS3nZnvhSV5eq7tT4Vp96k+GJGAdQ87hYRXlJ9qX3ThOX0p+KYQRvIzrlHglvwXuxESE4AzQ4168fvU4kjckDprnJBbPB722BZRAOODZ8eedYZkn7HQ5UrYUBG5aD7wbbFvX67TTZJWBhgKVNhoxGb0Mp56/Q6hcWy5TjJ1ves8rakW4beXGOHnYFA/Ox8Qcj2h/0Ch3DqkvhvDRsB63c65COsqth9Zng7Hzx5XASUX6RjVw/pWGGs92uV+MS3vm66ot5gc3CTidNoo5ScSnCqXkBnzpgy+InIXFW3DKu5VY9O1RbL19PjL6o/3r3RSKkSh2QMWgBtBJjciz9mL94tf057B/51WcxvbKOtl+PZ2J1CLJ/JOyL9F7kJgkebjWv7Ujtzobos7w32lnrY6TefKE1T2RyOFibA/GKd/WtppA1NV30ix937FdPJJrezqUbvX5MfW/95LRP5/X0+zeaz/gc+uvsb2ZWawzOsssKZ92/lwdeD7ZI4Hql7NQc+u6pjLHOsufa0T1/OpHQPmy7CcaUnlFpjn3s0TmTK/tJnJz6Z6yobBsshJF+9krYqfXjSY1x91Nad9w8rcNJpg3vNH2SSl4jI+8bTzALQ2kh00AdbXhnl4xvMgbq9iW6bj4dLNzsuJAc1tf2QMF+T0q2V3/mW8dKFUYhns/Atm5hd42DzpeAQpk4l4kISfcUQsxV3TLzdmMLmMpFHyt7zkk62nOmfpgcwx9cXRi6rpFykLAE4w5aMbVUXPyVkhjTfX0R88+tnNdRJGF02cRXV1De+VkfzJmFa77yXvYZcr/amuU/mcAWZH57k4ObSkEHmPjAp/k4L/m7TvdMFSt0ZX0r3Aq6oGgv91z5pvlr5OrjZn9V9o9tB7nMVMwiSgXptGBM+NiH0RMOXBpmded5KvlQS+P4HNv2+4KQ8k+xXcSd1+kxY/ajp/mR5pM+A8lwjnTPp+NItx0/8VaNhD19QBHf9VOUvlzj+Ftf0M5zTPXFX51Jje3nTOQ0R0fm5cOrUCFrukpZoIaU5VtZ8rYn59nCQVsmz4mPdJyUz6k0h5LRVGVKZbFWycd3WDq9IxpaScXfYK07VQ1K2EuNov3HByeu7XL8/w6xpt7qzfY79CiqYOftQGGu4ny7rRAMbzRRce8Vja3EAUr+9yKB5sC4m0ITf1dbQ/5qN19CrO0x9NHhSv13UZRlUt9N0lK0keXc6+FFmfuM+uYfNJ7N3DhfR92N4VmL7zzXlgsryimkTZU2+Yi1Lz1nIGXzvBOQk6w9SgHsfM+JALy9gcHc3VxUEHjY53/B50ZB/cTzWyvrwcwTuuizJXLntHRgQGeP82IrNIe4lXQI0ovIgVtWgSoeDh6sX8zX+iZj2qL0ueozTYlU7Ll/6G9WC5RgvVatt66fI65FR5x1M6oVdn8pbUneuI9SCnYZDV8qG7e9bWDAXEzdxajW006S6/4wtxsiG29B8Y+F0LPjsEKov0IAX2gKLHmI3a3AHlt/IJLSEH+VXS5RmOhWQ7L+hKezQQ76wb9UjPlaYjy0MzeSchCqK6q28cBrsongh2brS/KZvQCSpIKLHteQwOTNFAjTiHp0aCWPBkH35IJR5WqvWFKLnEiUfQPbFlm464c5tubibgcPKt9nE62QGVB/MgMzwA+TT6ymOUQ12Hrr21z44ZHqdvLOn6X4OGWusk1BKzCVN1FGhOdXdKjQpep0h8C3lzJ/+3H68WcvygL97QK8Zzzvu+LEyX5W7nwPbzEN2iTKa+unDw7GgxY7++IhWUpeIR8VFFtVH+btXv/xgTy3eYjQ4fZnYcswCHObwgrZq9bFIZ4R8on/gwGThYEjzJuRRbS9RgAxw4N7jLUVma+3MB9+rWBm+VKFhocrPB8Rpq+gu4BgKKymuK/vziS0O7dyoCqz6SG/0pOsJi4XOftricsmpwHPU86qrdszO0xsjOSjY4M/oDKnJ7uQGth+M+VqPyChfHa+El9DEvTme312larHjP9iYOQA5yIpXBCo4hIvlkUopmWX0GeIeoJbfsP04blL+EU18ExoaNthBHfyOs+flhbm9VeBTqgy2n7R4Wn2zIMd1cmM8fU5JJ+rQd6jOi84adzt1rkxUaYg2rU+l5ohXZRSl8VpeZzSdoof75BDnlnt7ff/CCvlDyCoEEOSg4BScILLww3BXNGxUI3KqfAZC190NuetvNDd+03xNz60nv2xBXFd2iWq4hnI53u48uni88/Be0xRwpiaEMiqg6IF5U7kjRxOY5stW5RPRxPYKjc2hE5kECT/lwNI4PODEknFut6z8PqjuETnfFiJnc4Jj53JIL9tJ7X5UTsJS0MZ2KknKfbEEo4S8LtaygDrjpdEk3B5vIJo/uco7mE7JRuHbfDzD9NuWQipBXDzjUpNCjGhlsWAGq4QTq5hQllkm/0MutAVDaIXyhx5l8Ojc+4hYcm3upVvRbusRK+55CiKzy+rX+LPBYEssEduadpUdnSTwSKFvy8ROGufYrKm3fyDFsOXzUfKI+hgY9p6tRcTQ6uErRSIz6AKxv2tVePHCNoXezEcJD0Mu4p5NalrI9mOqscOIu8vK1dPujydbsgIwD+pl3rIUEnv6z00ISC3j6IVl9PlGC05fuEyGLlLpaR7yGyXLcY/sIMsF8oYyppT5vM0oR5wrrY6MroICPz/Tfte+WXbvfRRv69oU/q7hh2SVc8Z3TBejyJ90u7nW7fiFwfJqu8N2AUwWTIWulq7NO1kYnjB2//oX54GKYWss+bpXmZlqPgU35j8vlnSHzc6l5h7NP6yINuoIAe+xrhwfK6DEgWqJ4PNTUFRqqv2tT0MyA0pXLmpFdFynIv35gOxGg5ivYZLN+HRMhu9WmuiqmOisB614G
+XcpUrhCZ+K+owDjVPS7yphJxWLxKGNd2EpfT5jXZPnN7KrnR1zDfxiHvrWmw2aLQT2Lz2Kub0VW5H30skaUN4QDloYS0LSpOQ+NpsTsgRKf0m+5rONUCYlfskO4u4l0VfiV7jWYJQQnzjqj5lcBmLa0nnZfzQdrM8Nz2uW9rGa9vUe053tL342rApcj/I7SlW+Tsb5XQKXapPf7/D5qVJGAsjWVGh4FsKo9UjVa8AV6WNQEeLgNjPcFExP6XG+gz0O9ByRiCjYd3DwYo/ikjsS/aYqQ3u2ZKQlnUzMAfbucQq+eiRqd2IqDmYrE2tLPyLTo+nk0BiG8sbNPEsrp9FcHJ3ZGkFemWpgPfSDI+95KkXvPxduljVEatg22L32DWefy+AXmY9K7luqkWWqvVubulGC27WsnPLrhA4pMDPsXzVxVCOrvM5aSC4/9XNRgJu+h7Fp+wczE8d7KWsrt/dRHK/GEZdnsweATnK6OYsm2lQnN7/Qf5thVi79mAMzBj6ifDrPqtwdV53Ysq9CJ+kgBUnpe7l9LLP56Swl4CVO87Svz+SbZzqG5doMIHhcPbIzxo2U5YEpg6DGDV40LIKJa2N2Wb6oYdNo/2qNDmmL0ApT/yYsxdZYbew6/Wz8J2/+ck/aEgulOa7z017lL5OzWj9OsH2pQXo8QFFdtfPtBj8LiUOxAypFONt2Cj8PSajSth8+YPfm9wXAeWVIY2o4H/NGNp8oOROgj4uvPenys42tVgEzUser94ctK5jRh3wrBpJx5YH30vpeavFrTFfpkZgEUCyqGYB3msFpyle4hLAVHQw524d3zGuDgygrVV4OlLoX0U3JUgHlZIpG1fd1suRkrkpPx2aLJsgZLYwpXd7DQJEhvqhXkbfn7NyELlM82h25RDF9V2VjkfcNytKq2Q0J17miSj622tg3W/LQ473XUqKUw1fHyJxYyuDJJXTSdy67pOmJ1HAI6tVozh3XrGvmF+3UPDSqDV/TnARaaJnUbKhWy0aelJ8HXK9YR26wkQWyXr0+50mq0szrAhztp49l1dgn//SF/vt0XdzNlreBP4YVouKCJ5kybnNp6ShIb11sfkJR/vO6Aw05y+xwyXUp0xKu9lDq6hm85v53vhFMSh+/MGzh68UD/ZYqyWsk1SeICkVh29fnQmUipfVt2Utt9aszTnDcTHDOh+bqdOqRifcxIbpFlodG0p8aTqk/Bb1+4oURs3Nxd3ifFH1VaXdR3mG0KTR6tOAy02JJ407bfCTZREuU9oa/0LuagRzmHGnfbfWGfhi9A7voGfi+1L7uS59iLpzwAlQlsGXZZ8ew+AqnMiThY09YT/7kVqdYH4vHlSX20s4PeuC4MhlJgfcg5h0zVMTFEc496+eWe18B/UMSOWAa4rFJnOZst2fA8EMB+1qvVOpOv8/Z0K+JgjfPNVLwWqwvY28HiiofU8CwsmZ+u+tonwWRLeCdwNLNdzDrdeOZCJoW+qnD+/HJdYLP48N0PhSvN6/J6TwPvABUxF2/puRZ2sv9E993j+HuRTtl7StRM7uGApjhUceTsq26Byy9CNhe4bQAcW1mxKN7FZUUGfEJHF+CWXulQ1/y7ur1b77eGRVgK6pKo4Bc0xtrzwkcO7lPG698dr8vh2N1r1uFeNLSq78kjUUlSUTbuCZVhyMU6rHlKRAxlxYIOyNFKA7mCy1odbU64IfieF7JG7OEa1NaX2ObCf3KvDOqlqU9kjqbtelKVl2b/yAGT02vfJZdnARRL3MIryhU/HCNN6q4B7HHMnxEbeyvfwEZbNNn5NAdekAzBOIhCg2mUBps4/HY1JSq7jQhTzMq+PQ2ZH9lS7fP+HtMhNsH+8ft5g4W0DcOYdaDvC12TU7RobsxYSGb3GQL9G+C+SKt7a8IedW8N6bO3FseTS5TyN3KTAgtlL6eE9o++y5M/jBwuJfp58WsC1sDz4uG0+mkRw2etht6GhbiDj+nBYmKZGn43eNMjCG2rEnBhZ1OwO8JOvlqbXIn4FLkINYBC8CyC7CY5dXlgvX9aXaSkyNgctILvP7KJsn5SbJGH0+dpl60MGvxsIPR9uyZZld5eYmsyNmygdKePEwyKvpFm5RP6Ppy066axAeFUdqMD5aVfhlap2oiLTvMd6tfbwMmdBeFMiVORdrjHl/cEDy8AlgVu5XbE2Oyvrx+OBgajRu+trX6g625cCOVziT/viTjYs1dZ+YC4KKk5zoXc1ooPl/yO2jer7/DSlCKURJXHk2fKu6vb4LnP3khcHSn6Cj49OTy+bHruaFCxcM2L/KWLB7xQrN1AJcrnWOYgeAb1vP88olt/dmPaWPwV2Y969M25B1zR7kTNMFrYUrxjWWuGzYCKX2VqGfEpjiZkedrJYPlmGpiga/XtUhrV1pjucnq8bfGwt+KQoKKqLPyDMeXtMvDUz3HbyU4t2hQxUgXNHKUoTyqsUOXHjr2nFbTfV1A35ghpj0aY9K9J1jgZq0csgMO5YdsNfTlXDgaBhbkqWVxKARsLGR2AU93ngXl8+AVyAVwFjcuPYeyzFiHZAi9OXbsmB2Y57KOZ8m9WDl1oKJHGxnyTQ0t962MqtQv+N3iTyEu8v2hD6gF21Il+JVXjkxmi8Kdlz9qbXOyxEwxwsq5fvTJGG22J4Cv8iuM+JXgzhvKPWxywa6UpCHrUet9y4yPZyebv7+HPItDupk1+NtsVHwp3lg+r5iJj1IS+Jlx8n29YuZhPP7K08Yo0PO+yQCn4+MXpTQX9g3zzol//fgiqSlIpN3t3KFt3o3I44cL3tVc1BV83Js0scp0Y/asOMDCJdyvDFTb6C1J21t2S0b8sfCbC+K+fVsyxPEzS5S4tWXbZyBKjecBRy5WfBbkFrb1uLSCxegjq/ClRO7d4duzFsQ1d6ipK2cvU9QdCRaBsqKJbxRdvlG/lUdSHysYW09BofUEeiGvLXa16LZl/+ULt8k1j+rIb2zfOBovCmi7bUmt9SSP4vaNoxuxIJ33d7jj3Op+/YNwfk5KpG3fTjzNTUT0/wDKojHU \ No newline at end of file diff --git a/docs/cassettes/qa_chat_history_8ef6aefc-fe0e-457f-b552-303a45f47342.msgpack.zlib b/docs/cassettes/qa_chat_history_8ef6aefc-fe0e-457f-b552-303a45f47342.msgpack.zlib new file mode 100644 index 0000000000000..bf42d4896f326 --- /dev/null +++ b/docs/cassettes/qa_chat_history_8ef6aefc-fe0e-457f-b552-303a45f47342.msgpack.zlib @@ -0,0 +1 @@ 
+eNrtewk4lO3bd6hUijal1Z02YcaMsa/JLvu+M8s9ZpituWesWUpJq5RUKkQoFLJlLZUUSiKypyxJsqWy5bvvoZ6e//O8/+/7H+/7Ht97fN/fcYS5r+s6r3P5ned16v5dh1N9QDZEZTL4MqgMDsjGEznwB+j84VQ2eIALQpwjKXSQQ2GSbliYW9skcdnUZikKh8OCVGVl8SwqmskCGXgqmsiky/pgZYkUPEcW/p1FA3libhCYJP8WgYRACToIQXhPEJJQBZwDJYhMeC8GB/4gYUD1ARkAHkCWAhQqxGGy/QE8gwRwKCBAw3NgFQAuBLIBnjawUMCXQiVSADrVk8IB2CAZZIMMIgjwRPpxACqDt/J3cTIAmcmmcxFh8EYQB5aOpzEZ4D+KJOIZAAEEuAwS7BIOk0kCfKmw6VzOXySiAV0mYGZuAysK+cK6IeM/hckAXlyIp9mvTamwWmSAAYIkkMSzjQkvYPtSIRCexuGyGcgMPARQIbSEDCDBZtJAxDWQP8QB6RJBMsCfPGaPKEKFABs85A3ogoi7mRAV2Vrr99WI0/6y9q9rEEmI+iw2kwjHCGCSAQIbxHtTGZ4AienLCw0SUCoRtgSOCiIARgoTgOh4Gg1kywB0JhsE6HgGHF48gQbCDgZZEBqwAYkUBhXxCkCjeoOADgUPxwYWbwO7FImdpA7TZg/PHTZsEPxtBAJABgWPBJUD+/uXZgQYFzQa0xfRjM4kgTRYcSYyheHN2xOZwPuJiAT9WDSeXlwahwrDEXY0HmIykLWw5RCVQKXB5oOInsgeeBa8Cx7GAAWksSAERBAVMZrsj6yYQ7Qfz3iIJx6e7UMlgchMiAdEnksQP/I02w39vt+c/n8KLR5WAQEi5z+MLoJPX7w/4hYSExFD5fxNeF3hJ7wdkUeeLA5KnomiUxlUZCYDfoaFf0IcWBc6/IGMp0Eg/AAGFQvOdBh4iCQMGhOUSgHxCOg7Fqy8QWFCnKg7f87tTDyRCMLS4UxjkmBlom57BlBZMgAJJCMAT4P1Z4C8yhGV5g2CLBSeBqd1ytyqqCzYuzz8wOOyXrBXMubtRXH8WeBfh9MQ21AwnBicqFxzWAltI1kLfzgTGQAWLa+MxmT5oWDnURk02K0oGh7WJ4XFGy/+fYCFJ3rDQlDzBS4qZW7xnd/nMKGoZFM80dz6TyLxbCIlKhnPpivK5/z+nM1lcKh0MCpVx+Kv280P/rEdDo3FopWy/yQY8mcQo5J5gSj402KQw/ZHEZmwjKjrmBQik+lNBaOaR92JZC4MNA1bJYw1m0s1V9FmkTleSt4cL7Sevu1+BZq2Bc3BzoejYKZv6G2BMZHHyqOwSnJK8go4OQWMHE4BBYcXjUVjUYoYeWUM8qUGuMNS3Ql0DYa1rZWVMcaOzGDaKShh7YwVCJ5yXgY6+yx9zKhYNtcUa+WzD61jyOUS54QqKirgsCjsvESMMWefvRUaYyznCOrp6Fvi9fyMCQdsDTlEfXnbAG15thVNz86MpIj1ZptiTeTQDiagope3kxFo6memTPMyg/wYNu6mugGG+2yt9e11Akw9dfEK2nd+YoMGMjw5lKgkFTnlm2wQYsHHCRiWAruMw4UO34BxCNY8TZ0/VhLN9/8BYdEbujAmo0r12VQZQE4JsIZLghxGTh7AqqjKK6sqyAEGpjYZOvPb2PwtBLNt2HBph88WlN5PyKcSKVyGN0hK0/lbsJciYIcjiagP5ygKLkBMCETNaxWV4YCymjtQUUa6OXOZhWKyPfEMagBv26hSHup9A/x8SUQuiUTx8aVjVALkcVQCyCWSc+eXzBcTWCEUHYpKkpOXuzM/8hN3abCtGBQWg8JgC+HUpxLhNEOMYTHZHBQEEuEjnOMf1SxDx/shOaaBwyrgFHm4oDKINC4JtOYSdJl0GJmQGly7QBoTTyryQ8H1AqRR6VQ4MLzv8+0BFHVDAV58768TOExvkAFFpcrzUIcp+30GG0TkI0b8IUZeRUWl5O8n/RSFg6coY+SK/jwLAn/XBitHh+79dcK8iCR4NMPv53QUlRTVvAP+4I5XIuOIWIwSURlDUMEQ8RgckQQqY8jKWAIR3hPM1NFH6cDnA4iy5gEwKlXX0Uzb1Egn3wH1O5JQ5ixe4xOVymBCDCqZnGINsuHIRKURaUwuCa6WbDAFlmWl7RiVq0xUATE4JTKBTFZSJpNJqH1wHfop7RfubiClltdIHUpB4snwrOA7IX5yyQLelwD8b3aWY1nMeI9Z+WlcVKjRzkNtdDvjcHWXlWufxF5hwUOr7VfTz1WzmgxPPZjdasimuR9YcXLDCxb5+2U+Se+GxTbt1FtBnInIePPa+5Hmre4n3/24FxiIc/ZcvulkZu2Qs0K/qM1gVgzztauppura/fgbA0ecsnyv3e5NJxS6tNUufnheu6m6RdJfu7CgvHesv+RpavrBfM10tbsKd1sexC9f8BZSnrpa/vZYywPO6Gk/UiMq7XnP6lDj7gBdKrk6MezmswsZe5aEb5vcn7pIRmDRwshXUYfCV9ssHe8IX7e8VPAlJmZqQV2J4KYjoaU+wvULS4SL9FxVFq47Ne03ZfWw90ERc5Pwi2sj9TuLkhR3pX2vcR9+srja9bIF7qjmLiE9hazU5W+HMHVd8Y1Su0Ytja9rhHew7NRPBX+cxCl9GjVZdtyn4Pu7kLRXCZSB72SKwq7c66WqJrLYY3EnImU+VD8JX2dt4tNWehzMcbO5eTGde/2u9ddFiMMFFmRaWc0c5V+w4J80yjv+40YZpBNAEhLeuRa5OTVQgspgcZEOwNkZpyKHkwFwSvIygDJOTgbAKsmrwJ8V4W9YBYwiFvmBzJBThGcoyisrwIPyivB3LBYHz1HB4XDI7zKAEhYrpwz/UMDAE3EY1z91C0i3jPqlBwpPwqMwGDmkcfh5yrsjHSye15UQ8BCoKC/x70bhf3qjIAeiiSQTX2MjDM7PAKNiAhnqKOr7uTMw1gw/ZRMnuq6nlxXBy0IeT9P71SjACFL4p42CnKO1obId3k4eewALkoxp8gz4rxcnoraXAieA7m5GNJV39DOkavvvN/D+o1HA/GoU/CjGurpcKy9FH3ks0ZjBsLaET0jQ3RBtpEJ2IBubW7ApXg44Mz+yzf79ZnJy5nBjYIYNICk5EY3t2b52lo4QnWFmruSnZERzMrEz0fEnsPWtLf/SKGAVlf61RmH9/5VGofgfGgXe3zRwK0D1pDKirkv9K31ExvzRz8vpqPy/z+j/smYjEYf7n95rKPzX9RrIFBVl+f9Er5GI+dtWA1QhqeCVCQoEDFYeS5LDgXJ4LNJjwIkoR8KQ8P+NrYayHAFUwalgsEr/x62G2No/Wg2bWNfMuyYFybWd7gf6NJN9F+p6jV4dQfHb3boppHZNGEhMkBC33Tv4abajfJzs++34UsOFKbZEN6LNTN/9H9MTzqlg3HjI7MTYt4kPk/dniZq+ppPdQ1ohP3ijY31a9+NnOy9MSD/PbKos/u72Y2Z0pKz3fWfIj4jZoN5Zty+18fcHlFl7bvpuHrENOa3t3GblEtKyEG
t6XDrUGfAA14hbBxfE7XJozcIXVLwnmZoMLUtwPof3V9es7i+L8fU4SzSS2cwXz/juIGN+2vr8sBh1j+/GLd6jh6Y9tiXtP1kdfvtwidWKfGzJir7FHf0tpzULlldWxIo0s+sSyI5Xn+7xMAOCFiwcNholFOUdSjBvtF80vYt4pC3Gs6M1pidxgMAl2a2R2f9ZTUd44FVB9rou1LticU7o+wNtZ4ejUAL5ydIbEgde4VsvTNwVUxZf9bnw9lkPV2mK6okZIde2pgsqEyNObKOpDwmfvy1rjqvYLn75pvW9bJL2erUexxdDU3sHRixX1l7O72/oKTzz1jX1se0yN6tcBeynjn6RR32p5BXnxi3fVg684PQHT1IabGoFijFaNFyanlrD95jtxH43wzca39at6BHQN0rnr8tMumLsZaom9n33g5MBkq+6JN015O8ZP258vsd86qi2rnz++mt0EdnU6H67jw/sY/jKdw9GLCZvpWMWbcixGNDskefG3kHn1ofRaQ2pROU2osO1mRNRP/J0Z7bsVGmYNCME5KKMq3K2L2qeecYqn7y7ZVJgx1n9cd0X0rJ6NREiaMPqFbv0rxRQTugTQ6uBkzX1W3dtXKij+oBuu2iH7ccrr93dUy1O7isnls2iM7YVB8heOqPzUPFsMdXg8zq7z+Pl5BvSqLKDgjMLRow3UEYbG0ZLz7LFY680YDKqzwbH2DQfAnwEoBMZ+96/E/V96Ry2T2ZEOeZTrou4x1ZZGzm0WLbe+Egoo2bl/omRFrOhCv0FuSwVt1Pro1+RetazwEkRu2zdRCPTRIfWaD2Dza5ZMmXoIf63RbWXt592ffa+mhraPjCz4wyn0V9rXMh8p1PCmj6LkqQuH7ePWWe0t2kHZCX16231jCgavKa5uyOpP9aN88GrapjtuH4wN7GAn67fXZdoO+5xo7xuYcc96QwMI6G8zUz01KTj9ekRyLxwg5+UE6f5/IfaeE2CreqTlQUVUU/Yto5hUbulioY6lkL8X5eHW1yNrfB5I3PZ2j2kivrhRA6ZPaCvZ5sp8ePxosXPtwn3zly6+G3nFh+LofWi2WfEt3K335aiHFUnb+oduj1CXn7eruGJLqd+nN9ypVt+gNqXVa89vG9U7z1Bf+NpWr1Z4VTSj4A07LlrgY9OFftKGQzde3hWSXt/8ico8oke87VhpvCepYyZlW0b3CI+ris/2V1+1LHhaQuuwTTlvIbzTPQjHxmh21r5q6Fqz9dmF/WwFFfyJVrOCf6TQbGu/Maz/KtjiTXp7TMGlvHs8sthryU2YjSesO61Xoq6TXSafuG1YdFO1VfgdFHM+6xPy44sdnvXYmo26JGVtyywZyRxXCTDwtc9d21MJrmiEWuPCli5mm/H9EHswC3/VTodACXTx6G+8ernOo+xN8fUXxxadNBylBoSMAhoiTir2tUJr2AG2V73fCU59v1F4gEL/7CYiwC+q/YzEZu51iuhPGm3WXe2j9Spt8uHfO/vJGm0V6Fjn2w/N91twkkl2l28mSYU5xNEyHZP5QtyK1r4LGW9XOmmmJJZ/3MLac6SNxtfBJx1p22pKvrs8FaTjv7awVlMFlw0+H2lk06ozwlyoBb/lm13UAEBnkvv+hKqHnp+l/LnOy/czA0i+0U7NAdfHRfIkLEcJkTaDTos+bbMP719adIXu2H8+lu7XFblGYXGaaauV9rysm+3TVnA5TK3isGIlETNbOvvCx+3nHz3sI69Jrwy1jlj/0O+W9392eNytw8PzUQ9GhA6abUq1XqV8tJPtmyJOFW9/iMSzw0K/dzk1vfhMWIT2nzB0Mrx7xXvdSQj1h7eDjXroy1bNT66Lh5ab5x9w/RkropBsYBxvl75nfxbgucHOWty3QIe2OZ67BRSbOajJN85oNU8FD3R27c8MGjjyNlhX5WIz1VH/MuyB2cKc6WuPhZ8o1RW67Kut46VzZdU2OEg0F8v1zP+tPzDy3W14yLhz5deq5PykpHq7UilD/2IDP0sfWiNd2CQ/v3JdJG3UaZcfPn3lduODJi2na2/7mCwf0kOX4n3OQvbu/4NMrueAtGP16QE9uaWyMbYnN6QHuG/J9Ew4OVzPseqQ9bUd86PC05eXPINDPYg9YZte2F4poomdjxpRrfnJTme79kTFbwbYRNf5yvL8AtNqn6FHfFV2hp+Uktq32m/2iZTtWs1cRGQIbyjQ13wR+sQ85sCsdxJvOWZTxrfNOP8mpDSzVX4Zw1i5zZtq6lszXudGuZ4qED1akTpREZix+2TG9GZ+U8eWqJefnET479p/8306n7vmPP312yyeM7VMch3irlW0uHzUV186Z4rOwix/h/kAztpx/M+rEqUnrReUJqwArI7uXZFX6v+ovU7xToHHxsfUzZk3+heWiGX4CyvuRAveCTPd3WAq6NwU7VU2LUPbx4nxHMxepJnLw4Yd9f0Pl4e24e/ENZ/eiuus/nO4NT4Sn7LK5tw+w5SDbb6o1+K7SbRHRnybcnmAhcN1q3xbuxDOyompPjYZVt79oBapz8L73svZXFgfXh/6Pl2vqOkx7R6uzIFO9u0xEy32lri6WaiqM4NTQWZgotHxlivFp2ug/bT5D7WyZ3fvNeg72gWRfRkrtKjbYJN0Z0Oa7vXHlA470LP5r/yifHksjlNQXMv9tKzU+kxuvEV0inDZrf504MGrpdbojaZ1VfjfXzVWPpxPsI2DcI167qj60k5GbmP2ise18s7CQs2V3eNhYlmZDXhNop0Ddxs/5qu1mzcXcc2Et83+MWGsK7v2LbJ5dWfRwaPfTJo3q757snoa7zplncibFGT4hr03qIzEq56BQJDXgot2rYau00qB9UGvTW+bLM53Sfrnt//fThKqm8xdr+/9YkzNXzRPW+j1NEdBz9+Nhf5+vGrrrj3IYlgFZll4+m1A+udU3Y2B1Yy73t+JniOghfunBua3vnY1bkua0ur4EbT/n3rq6e2b1S9m77XvHAvvs/D++w46bCV7+bexRTQsq61VP5shUjO4/dln3Tyah6U5Vzyx903Nxhx1Aa1OhyuVjbdtF93tKPdPcvlCRejn8vKOzwgQC/oy1Nw9eh8UBrpcvfrRcdqsgafwNXoxCtj+jkd6R/kIiRaXwTs/uwjYgutr73qH6/YNZ1QIfG0OHGXfmtj/73B84/sM0dumnzzOBV9TMSv/+alE1dTphvUxIwI9fViO28OGo1tD+v/3FHzuelWRaSOj770+hc5Wz8pm184L5SnEnfZtedoBThF2Rd+y/jLS05Li/ITuQg198p21xuUaJ2u7En1eANysoTpBJtq9nahl9F0RWvek60Xt8pkcJ+qV/rKjNRPoT9N3X0iMFDNplQTj2n2Newkldyo+35LcNWXsQV+8rER1g6SoZrjToQ7ftLf1/qoma/paNW44n/JPLEg/W2JlHp+n9L1g8dSPA773RyqeVixM9auIk3b9a2KTHMlWep5yQnSCZmSiMGbm8PtxF8WrrEmn22ijKCLIj50mJgf5HgVbB57VKEv+kkuJqWAf8Vr1xVdfZVxwVPPdggli
/TuOS+mUmjwxTnTekvmm6b4FQM09/KRrPTjslyqyqLWstAeQLVb/b7IWG2QcPHqzBGDr2KzNeO69rYqWUDwVf4fjxUWN57QrCu2phjVisdLWK/ECh3pEq1R9z/SReu8dfDKqdAmt44H74/b2WBkFpkmlbBeHzDfIRN1zJ7dCkEKK9s2CWV0198lTOV3ma1fnX3Gz7XqSltU1NYyQYJ+8JMBL4LYNlT+5Tzq8rKSsoqlnBBT8v3EguqUYyt3FoEEklFqhFRDtOWGN7tGAiuOb+VI111oO2ZAUBwKxfm+jZu8bcAiEZ1Nv8k7dj1YcxmvWnBX/MqXkiJrU4mu3SoHXtR3L5rpn9z1Q5tZZSymsTyzknt0/eGNiUbQ/lBq0F6/5MJ70Zl5GQ+kWwoef+sMwKj03jrUhfWQmqqqJ4hjn51N2x0hpqaTo998ZPOXci0ivnE7I71Cxzm8IE7lVeF3JVlCB0h6FJwP4sZWTKp3GrioaX1jf3n1QmB6e8fxVa5nUK1TIuvvmG3P8pDOorxMvbJUS8vrqGD9bYG4M6cjywK2Z3/ZPOzudshtV3deuz6xIN4na8s68TRlrW/5AyKEw16cxgjRTdoYP6U9h/onJq5Prcysqmtrq82KqRbM8a9POhBwbFtGgZBoQXHxZXanpRz308Pj/eqjrZmdFlpxzdOT6VSFV4s1DSbDBJnNcKO6RY4Lqfe6l9H6+mbHW2XECwaipXXGQ8dzgld/CyjNbyKWrrK8UaYg9XwmK0/javlnlx0mSv0rV9SltZS/7wiSbIkGq/Xc27eV1miZuL+CVN98VTzSA+LchwL4Uq9IHXvfZfH+9ZeblpFfLYWbfXqw5mqF/bk2NNEjDtFZNpf53u03TaQovIs74iqTf3C/vUTDpLaE+6zCE+/tR6rkXauOqa2Y1AwONwzY8lXGT1K7weDkeaGKYcDhQPApH7bBYEKVdAXQ+lJ1GF/XdpQZkoxzzw3L6taULNqafe9cFWWje8qKorWP5TuuJYV+2xagfaDKNUVtSu7aZJDkI6/zspeDhHK/ts7mJr3VyrpXcqb/7DJ70UnXr6RNdwtakkcci19Gxa/JLpygHfOMXSZdQ9XvDAoRLuJGSvvpWVYrxp5sNYvxXeH0oxuV7cOtEx7ZE5hQmWR4uVx17XC7z9Gut0dWOQt+rXhX3usXOm5nfSu35c2qTuPXoY31I3vzhx5SUoVcvHLXNClm16isfFO5sGH/zow3ijfdI/L0qA9HFAttdhCj2HrJsgv9mBjPvFuvuvILvyc0Dl+aeSES0rsz//VBL3XyiZ5dG2PaJPdy9j4+KV5498mU2G5AZ9rjkAWjpa5SkK8N3eDYrRZUjSq+VW6gtSVnnVlDY+PIRGiaPYfQ15Fyraj9bdAYOu0JsVLwSCXf17ufV9bWf2RPj5630xVL72lZtpgTaJrwefsi7/SBiJT6S31LZC0r27izXdZ3R8Uprfaa1Z9s8abNOPbrrzV7sCsOSld+dBIQo8xuy3X8/NKjP95B9EXl9sIqrxe7pXM5nNFxaRXRy6fvgVpeuTFv9tq/mCCsiF2zy72MjD9nZxmSliIcYzDcLPKmgPNyIGlBVVdpQsO7vNcrzske3tcbDdiIREQO220rVvVk522S3UjYbmbxdQPqnedr9Mboyi2PoE0uH4b93CdqMhUOGvcYvYtUyLu2EdpwatfnCKXTAa0nLRX3Xh1Yva2QMmok15CkGmmu+C09Rss8SvCdaxNFa6yzux3H9xYa7R+tiKn7YCi9YeN744jtS8W9i2gHBF/0Fe3cdMuvoE1LC4oQpa1cI37NMMC5X/hex3rc0Ez0e52zIh6yWSYmR5ZpTnFFgmaaPx2ru2HXtDyfjJdTb7pyLaY79EzU1zD8iyF99SXohg1tITPHl+hbUnNCdsS5NDe+Ukw8pjir6ez7fPvbwcJ1Ht4ctd4a/5X2uOfh247rF22ODlKl1Y0tgmZ3xmV6Diq7morve7/rpfagWWOb+WCm+PUsp9IMo1slS4fzNbIHJ0aX6uea4wwZzof99RsoVrrUw5XXgj/cHeoJ9a1zm+499LjYvfWrkkg31Rd8NqU9IT9SMDwT8mGG1VhN7etfenBaLgEXmSrwMJ0vKLL8EBETQO15RG4/5Zecu6uvuiMJMyaEu86fPoYmL25/tfSSP7uDXtDg+KNnj7rDFZlLMvVrEtrsduIuQMeeBJHu23iuuQJtlU58sd5ry5bUNbv1V266qlQq9lDDV3Sf5oH2MzniupK7IvyawNds8MPW+/47/elT0cf9zt/M6QTv1bRHV1wcCCXfPyKYSiznFHRCs9FDner3tvSNRHg9CqotqXcMG1LQzLL7uqeFOb1uy2to+soQ+KOWf0XxDQFMhtXjJRqbNBdhSC89PjieTcMbCRYe78JsltM/EtZ+wkYMUzNdq+jzZXl4Kbf/bfaDAfeyjVrn0x489P+Gu1/Gv+6ZdbJT5kXTp2K9B1mkhWlSr48EjWmlFSaHjpIOgIxAo5d7g5eLn7JQvNx+y1/tUmDYQ3zmBwdCUfxotP4uwhR5ZLCy5/t4ZI7buQ/riho/VDEOmXXpJPDfmb7ZMbh1mUz8q1ivikfL/IAKtPrmy1ZlH6zl07S2H71FMeM8zEJVhuetefXZ+9qHFnZESu/WPotGUwPR6Bs93TKsO7Ft96Ky4vJ3Flyc0lVTX3x6Q+82IdXo9o15LlUflcz3a22zX7YjuqjcUJyjnSmta+OupIbHS4g0drdH8j/WyM4KeKd8qUpT1FKidesW2Yy88OSdJS/XLZVV/3SH+MU58iyrhn//OSvFMdVlsvtGn2cWmqAaHlyVJTjduyQCCqnd3DqgoVk5dLGiVum7q+w2XWUbFH3s+YbkihH9p6M/DFOhtUN7c+9G3Chu7nz+g1/91KPO4K+DQ5ITj/eovTwyrRYyplOxTTtHK2jmQErQwHu3JXGzmQ021Wb8+zK81h9f2+6qp+YmsS6jXiY56WM4Zkyuvm/aYWK02R+Q0ss8eMLPHr/qdMGrbYfSNKpSvB4KP/fHxjzcPWzpfN/ljbLH3S9+B0d771as3dxz/tz4ezfBR0W+7pS2E57k684OZyBNoYni8ZGKM1c9IhZUeGRIiZpXuEndcmNIPY3/cYx+atTmlTdKUvJi8iWaedIZ/VdtpL2PHM65zwT6bwqtfLZQnXbNtnrHIPHw9imppdC0/syEm53hhY37+oaqzdNkYzPjVr2fTEC1ujxe3/Zg0BATmz2+9dCtpwUd+5eZpFff2cO1xh079uBhS4j+Qe39y3NCHnEykkJrJyRi+yqZmq0bbimUqzVbSdW9ye0uLW3haxHTyJ5SWPY9tbkCEBrQ+Dwqwtxd/V5quDVt95NoBdR9q8byOqt962rVK+0do0KPPj9159w1Unbx6wHjq9VM1XKyYSpRpWTvM2LVp0Cbkb4xqdeUXo0a3zy34DQpqwdRGhdcZU9ee13/9qa35EPpw2FCQvcSy9w+NtUGT7CtkiTCcJfimSGo5GyZ1ZfGwJZ6F/Nk4iYBjtU5vNH6qoTiwX2bVmk0r3R+
z9QcY4YHh8uQrvt/Rm1Vm1r8CloZRT8QkDd9sPnecM+g+c5dN5/rOx55F6z44aHrRSp0GxQF35EwQ5UK9VEbA0O/9ar/GK2+2mQuaZ07820m6cDM8BqPoxfuspqqV9F37RaUSea7Ni7ldBi62OX0xbu3VVCy2aWTqTlypdZwAs8M8+6srWlQ6Ws+t0lksO1ZTrzwVY78OdOwl7fMl6gUSqQ+STd/eRjtcteyRVBctrH7cLgPX5BrzaqZ0We+M5yvawazXqBFi7aeQRWvE6l2inN7Zqa3Kb3yRAlAmTRsFnL7YmG9ocDN6ErLieuqyaK9xQ3CMfF1qjoHqlzeoTqNMX3tax4yz+O73g5Zf0zs2dCSuphTcsdUfvptYPKg4i6JTvVQ8HbyQ5xa6fHbH312cG/56X9+d+39sbWner6XeK2InRLZYvRWd02715bYMZuic/ttrJzFBOUz7c+abjEbvOF4OcKuo3Jb6wdoz8QX9UYLabY+Pk78/oBBdVVk3/ibmo3Jz68LkKrte7n3+sSHlMOd+N+fmDjUzHYSK19FOLHL5fWW/ccKIr0+jN4lDZz+kDfa1KkAN/QTzyGfyqowpZBAqOPwK0Wbs/wDDhZZPsLF7MmPmZ2LVLZ+PmldHRlvPjoQWTmWsizlbpv5ktuR/COt2kuCZybwb0COfE0+zctgoenwmNDSrSi9sG+O5KdPT0b5vYi0FA707qR+2xRS/2pop/y12K98K3LWrq4ZuCCws1ZgMzOsGm1f6jUw6KD5sn91Pu1FdRU2yntDlxE9q3si6fV4XICdQO29Gv9+1Mbbg43HhkbSgoXqPlR9P1Q2gA+3F2+RjU/bR3f8spI2vTdOeU2oe/1Z4y6cTgarPCzSmvZGFKr/+B7ld/99yAvP+vJi6rOhAKFmgymbdZzUsHZ9A79s66s+X8L2ht/vdOWbHc/UCvnR5HusPL2st9afuDc+ciZk+pvdoVnDO89CIhKni1cc8jGKmp3p61Rb+SO4ZXK8f9bX3X6ieGqqVtxopt17uCBzqntiNm626WD67GTTLN/cS+8rm2VF3gL/9KX3v8wOXaH5T9ihjkwugGeDAJ4B/KLMIWzOX2xL1BwDEyHI8dh5aMAWAnlEPDLzJ1mQRQWJII/ZiLw9pYI+IOkXXZTD/DsOJxowIgP+8N4kJmM3B/BmMH1543NTZQAI7w9/xnP+Yc7PzRE6IwQiRiD70vF+VDqXzuMNIq8Bf5OEqEGkQiDaheHC0Kd6ogEsGjD3Adk+VNAXURgPmJiYolhMeC5CHeVymAwmncmFAN5Lb2COIQqv1kEInQzkkTkDVAUsaHgG8jZruwtD+6/0TS7ExdNo/gCV4cOk+fA0ZPj/pG1qM+ZlI3RVHsOSZ70vYi2suP98OEgAi4YEBXkvCe//V14pvPUvxifnN8anGmAPUgGQA+BpaORtq9wegIKHAAKy9g96LptHsaSzOLzA/mSS8iI/xw39RQIFWCCbx1NA+KJMxp+5mgi7c56SiXAqqQyIw+YSeY5gAi5cOQyW+Ff+KO85bwaXQ6VRA8A5hivCREYhL+55W3A5vBepyCzSvNkgbAibNE8S/Z0iy3MXj08K/z7vZtgTAO9VKaI7bD7V8/eFv1irv9Fq/yCfQhTYABqVx5TlTYcDwSNvs2B8z6nFc/q85Tx7VKA5puzvhFQEdH+h3ko64pm/hQe3B4DTBGTAUEBUhl00x6zl+f9/w60FYMiACKkWsRlOKThzqWyI84fDfjGPYfvo/2D5T8zw/MUzG0YlwlpF8PoPkyAEBLyZMgARVoaHGjzsXyQPeRHnssE5LEAgwtn4xSmep3zv07cGJBHaM4lDQc0pOTdxDwAjThcZJYGsv4wh9PCfFiI8b9AHT+Py8gzhKgNEGlKzyFRYN0kfKh5+ModpnlA63ouJvB0HfJgc8GcGkf7EzJ5XjoRQgSWxexChcDWY23UOTr8lCY9j7SJhzXMXkicOjk5oFxcGFu0iIQMP8HjFSO4iLoe4BE8mnjY3EdYfromIDHiJFjJbUo63GRf6WVZREAskwpYQf6UQUr/VABANlyxYNmzIXO4i5H0ml0OjMmCbJHjifeHBuXgwmD4gTQYxXhI37zwKl84DL5xPc3i0AiEmlw3HRhVRHTBCYM1A4MhjRfAEznkfnAMFlTFHUkL85YlHaPXwXrAkOTRgwoR3tQHZdIAO0hHF5pKJDhc3eAIODRhY2KBwaAXgZ3nVRure3B5w2oCev1Jp3tlzJcWFIY8G9KnwZ9hQWG+e2ha/FSG9ORTAa+dsQF6zUxlcuGrDNZcN8ko7ojqsDc0fri7wGYIEgedSpJ6ADAiGK+9oQeI1X994oZijmBOQOxFM8vzCX0x2ntHwZvMBgn0N1zOQRkYRkQgQqT+3gmsNikXl5QQsioL3oTJ5BxHvcKX5z/nGCiTTQCIHqags/FzOUiGehrwKxEES0RPJcVgnNkiG4z2vzjyRfs5NevBR5o/USzqyCqnzyG0CiAOfoEwE2nCBZHPm+PpkGF1UJDSANpWOSJ1vFMBfdXHuRgiIaMPg0glwViGh4ZVTJAQIpnhJBPqBRC7P/YCeH+w9zs/bAnMDvFOClwQ/Mf1HZaUxPWFrILi8IDKN/sC6KrKDPYJYZCUPr78uqcAQnL+DMr9UhvdY2+i3luXXYUoCITgcBPBPdy4QST+35VUpFgIT5JiHV8Nt0c9KgOAAaQMgAIV0Gmw4PWBFVIHAwN8+AkFBMnOO+NkF8CbY8IxExkx5x6E1SJvjJvFG555pw/p6MpAE+UOI3h/ehOdZwHlCnYdqUBAaQPozOnLdZa4w/tZKIQQj3jUIXk+FhA/Pgw1ybMF2+CJHJYwKuKTzTGb82TO8PuV390AUuAmZQxiSNhB17vH80f6PEfiZKbyLQ/PImdMQxgM039/9dRXSFOJ5mpKR/GbB9URmzjwOSKP9IZF3HegnPH9N/ffdnX/f3fn33Z1/U3L/fXfnJyX3Bg6ngvmvvbyD+3/o8o4iRuX/s8s7WBWl/wShljf8d5RaopycigIZRitGmSiPUSYTsSryWBVFkgIZQyKSlP9bKbU4EkmF+K/d3vn4j7d3rEyZ9XtXlvbZn/q62Z7BOqe4/vGxt6HLbNbWxSRSCJGDOwe25ODCdW1/qDm1mlYdqt7aceXbl44aTbMFp1A1y0zcknpNB87ZM93uF08G+KK+XYwf+fieMdVlf6/UzLcw2L8zlmoz3Lu7Z/KGanUhdcstUOulP2Bz4k5fdbOcs0yvLFmjjqWNrkqzO3/q/JOAKzWvHGNuVHdPE2yaaNDqnMNDaxaEVv44rRKkrZZzmDN5RH7HYdHLrw8sebDQZxVWkqBXl3457unV0nNDSSGjC0NYIi77yhvtWWF3jxasbc164yCY8YAsck3XiUZbSn+bYjuQvsfwYlMov2SrcoKCdidrmUiv8+llYjcNklttTpkldC+41BQTbFA+2D5TLSiglCdKMfRGhx584frgRDdJsMpDaNSrWiNBNlSN2xjZuELVgG8tKP32+DKpRI+SZ5hujM1agKwcWHZ6L7h
bTPadWp/+NMsl833aipKoKK13yeN2H7nA2q8HLiS2xcZpmCwI/uL56UfhqQ2N+XYd5yLDT04cHMaldkKEnOooGzvO7oOXybPWt4Wscs3ighbjduSWqarcqLwUZL490NKrMXlc9Kjimy5m9GBRySXmkvXVJ2tMhM/fXzD3H6XpsQWv98DR/V+2unbi \ No newline at end of file diff --git a/docs/cassettes/qa_chat_history_efdd4bcd-4de8-4d9a-8f95-4dd6960efc0a.msgpack.zlib b/docs/cassettes/qa_chat_history_efdd4bcd-4de8-4d9a-8f95-4dd6960efc0a.msgpack.zlib new file mode 100644 index 0000000000000..e9b74f559bd39 --- /dev/null +++ b/docs/cassettes/qa_chat_history_efdd4bcd-4de8-4d9a-8f95-4dd6960efc0a.msgpack.zlib @@ -0,0 +1 @@ +eNrtenk8le3Xr5lCmlQS7qg02Nve2NjGxzzPc0jb3jc2e2oP2KYUlShTRRqoiEyliDIWGZonyRCSoZAQQsi5743n6fk9v3PO+37Oez7n/eP4I+7rWte61vqu71rX9elaR3MCQTqDSKVwFxApTJCOwzOhD0by0Rw6eIgFMpjR2WSQ6UclZNnaODhmsujEth1+TCaNoaGoiKMRkVQaSMERkXgqWTEQrQiSvUECgUjxZWR5UwnsNmqoLJFCYzFlNQB3d2WskrICoKymogBgMCqqCgBWTUVNHRrBqKGhf1GengqALJlKAEmQuCwTDGYi/tSHwBFwCBRKSRYSASl4Kjzm5UOlk3GwbllvHANUVZENz/EDcQTIny6utVl+VAYz6ebfbbyFw+NBGqR3WUVSoW8IkaYAEEAfEo4J5uGpFArIQSApLwAEaQgciRgIZi+tSirC0WgkIh4Hzyv6M6iUAkieCVKYCCabBv5zOo/FAOkInC8kkVRiAxmhZ6Zoy4bApABopIo6ElUUjGAwcUQKCWQwECQcZE82jTNf8fsEDYcPgJQglgOVlL20+ObvMlRG0nUrHN7G4W8qcXS8X9J1HJ2sqlL8+zidRWESyWBSjoHtP7dbnvxrO2UkGo1Uu/03xQw2BZ903QdHYoBlf1sMMulsBJ4K6Ui6isrGU6kBRDCpzcEL78MKJBK0lUAknmAZZG6GUg42QWEtGaYGqsbBXhSUAyVY3XI/2dDX397b31YFRzJCoNWU1FQwykoq6hgMBoFCopBoJBqhilJRR8E/N1fAJ4EUX6ZfUiYajb5BBxk0iL5gVDZkE5PFOJoFBRp81pRDhqyDQLxmY/EXR7ZkGUJBT6oyphMVACU1wAGkAUooJRUAjdVQUdfAoAATK8cCg+VtHP9tjG870nEUhg8UZ6MVTuXg/ViUAJCQZ/Bv2VQBswmCCjafTiVBgyRqEIJKJ/oSKUlX91b9yzQYTKMyQMSy0UkFrgj7pcREmBkWLDEbwUmapNJ/nzLFy0JUui+OQgzhmJ5UxaFmUEhwEAHPIhD8AoPIKGyIijLRG2ThfUqWl9DoVNgWWBmZkXQNjb25PLHCjTwILhQCjUKg0PcZTDoRD6UCjAeNSmciGCAeKhdMdlKbAhkXDOeBtjIao6wKxU4TIFLwJBYBdGB5G1LJEHsYmgCNDpKoOEJ5MIIOhYVEJBOh2HL+XS5FjKQsDLT43j8FmNQAkMJIysFwmIGq/l2CDsL6YR/+UqOCxWIr/73QiipYBItVLv+7FAP83Rq0Eplx758CyyquoRgFwSvSCCIhqW0H9OGl4q2O9fFRU/NWxaiBIAGrjlL1xqkRsD5KKAIeT1C5ZWCMMMDh/UCEA4fCSTmGbtZ6VmYGeQ6QboOljGrn5vWCcsrLm6yt5OZgqu6Mc1ZBH0KDBHOSCgWkB+3H6/ljmCFkL2u8lYpbsClRj21hErCUUqqqGGUoaMv5FOxnbmjIsvdXDVRB480pFAc7iHaglynSDOvj6mNuY0v383dVtg72cbSwsFZSsmEEU6zRIQS1/XhzF3qQs50bg0yxtlELVjMj7bd0tjRge9ONHeygaOKYftqKmgDEXyIEi/ZykiGgJENAKaaE0kCvpJgmQOBwQBv592qtCZhCJ40NhcTWhHITIhMI/caRQQciE9S2plLAUlfE79mJsKFxDq+kHAqVQSH6+GQ7gHSIqkl5eBKVRYBKPB3MhtC113NLKlHHY0GUshoWg1fGYVFoNYQ+VDxXtP2Zy1nw+cA5y45kwwSn+NZvkZGJE+Li/PA6hlhsuFNePhC2GOsUYJ6xfp2pq/K33QePXovx9PSMjImM8VgvlPPi8Ggt9f3gt5O8fKJmXZ9/DOguTr33OlxGPOfb2704PzVdsa2hZrExZCTp1GP4e2l6YSqj5uXi6ItKs4nGzPmNNR2HvVIyTHQWF156zR/2mq1RW5h43z1+L8pQy1NGOK24jv/OV0fbxjVDEdy0TbiDJjb+1kd4B56fklxLckzrOlpyK7slWlZrs5EB2lcntb83nP3F/0d85bniLcjPcwaC9tjDZp2u9jUfNioe2ea/Nq2ul2A7vSP/4zYTgbvGnajoioXYc6oFpHLBT8qX8JMncWndbNJ0hUBRsuW6SuFPp7uYJfntnw7RHrZQ7MO0ItQWbvCyjmbYbyzaOSuDdzaV5B05YFCrVKmWYrJn59jQGl7uEbMfu+OvrLGQzMgNbS5zOsw/ysVjgypyweuAow6+T1PWm5lWHuhusBU3lssPJSQZ5SYfO2RdGzm1Rkhn7PK3pIJQGj1iF7/EJkJ03KCwkNLbvhsXdA8IX0au4fY6YOnHlGfftcBUOftrjd7GVNy5bblZ68FYRPONGjNjqlblNb5dF5/H02b9sjHaMVEKMdmnbxd8HyyXxpxNPnWtvzeoy2PMWy5gWmjdvmv6Nlek0g5W5Po/apsuRIOr7j+w9d8bJhTGl8L6dXVh7RaWZz9egfechu2MnuEt/2qLz299yx+gVTXKdY/dVmv/8OrhW2taLruTJX39s47AY+utRZSHJVuv5SP3sUI1vRfvDM+hL2+QiJikJkxnDOCKJGIwIoEpVuRyuwe2Wiqb9zp3fMqnx9f1Z4toNfeTja2QO8UlR1NE+dGfkuY1lHs3fLgp8Ypl4y7/qfhZ5gVL1OlOqU73E3EPE9/sGNOuMWcO7M/PmOBvsi/jPxvS/oh95rPbroIcf/3Yqfbp5m1JIvJBt/D5iefyUmbxImWBlz9Vetxc+K7zlX93FDnqdYNwLfH7vW0xxR/OED1ftuVLry2t3Hz1pG/d9zR9ukDNrGCdmXCOSax8ossLSZnEzOPFWtsCmoYKHvjasKc742j8rVqS5x7cKp4+XgGIovLuni/vvjecdndBv5W8M1bMgBjJT75oXGr5OOfExNXxlA4DItes9KavKp0dFz0LI2mXpNHvtI9ZfL79RfmQJHl1yQZxNcJg9bG1VbcNQ252bcGfadchhW7P
bJfrYpeMuN2XcFeTd8s+LyAZeOQm88mXDf3NYu/unFHagclB6herPD9omePKX/eZcFBXQI0aO3MwsjhItjnQfs/OzsCUc9UnEXdITyTPl7rm9o9RgoIPxzVGpD+vEKIODBuKWZXbpLbICI2tuXMaUfMun9wgLTLpsXDVuWGu54pLjlBT+K/agdPva1+/MFiz2jr80Xtj7HHWnI5q0U+tLaUZ+PvP7EfX2GruXMUSWpzaJf2YN+zH13FB6q7tiS+/9xdp4jrXy+O1KgO1Etd71Rn3X+ihmymKBvtF2+7bEvPwhWKPyP6X07mr8B2un+ZzoxxJ8rWSd1cNnH+EDfN7LspduS8kJdSJJ6je8Q1XLVeirQt/sDI+7dPlrJC4QYZQ6Ttm7bfzUeButekt7S5pmf4dpU8r50JCm+4UzfRENRKUFQ+mc12O6SgsErlddKYnVnCSbzd3ndyeH096X6XEKJ9pvVg2YnJcb1LHm1zfAT5ARL9opBV55RoY9f2M+jjglt+80drz7vlMRJ2O2qqpre/3b0H1Cr0g2ievih1lthC6+eQ/P3GYww4JZzzWFA/7ML3hccfPZKS9xsaiUPnt77YVrsqVMRetNOlPkT7ZE22ptFHSI3TD674jF/K6b1Qob6RdxQU076N+yt1sYfBC6kYcxvn5nQyTtQ8UHrWXtR532EV1bD2dZHA5MMV+P7ZzrG5zCIoVlDbaOgI0fU0juWhdtwvUqBONL43WJYsntNUCkTPjxpPbZI6UjaklRCUU3z2Yle4lLWG05eFMzJrJ4Ixu6Q0WTsd4vnF/ehWwSpOlsyNR9PGx8kEcz1f79NW2nfG7uk6k8lm+Mx54+NySUJ4a6PbSUNtnLBRb4Yh4AnjOXxc/eKdNPKbmmwe1Jyh1DUVtMOlacqAA4/ot0Zo6SnbLi3zRT1+rd0aKVeijUgYSebb0HWwVjzG9KX6cUdNGeLDpA6/qFzDDuj4z/+HYffms6vFbzDY8lle+vDJm44wfzsqUvWUfrfb8W5Cq1VI5665IsrPClHb8kHsSS9t0chENXRGTcUdPt1j4TQyK9TRZDXBhEKSxiIrTKZ5tMTtPFQY/8GN8ySN+jxbRMZ/U0zosPYfWHhRSnRY1rt2unIfd2so/5Gnl98CssOyF16rQq543qiJqAsJ7C1obk+qGZle/nR2LDadJzojp9LmIhEgMIBQs++xG1j0fLNE8xpd8r6rHfT5PtNwuiPvSQ8Ib965Lb/esk/MckKswLBBK/bVv7uKqHx6bEteVFcl7DMfRtdZovNjkeD1Ty9YzYs+Z43yjr7PqGmMCrmx6Vi+wnpVw6AXb/pFiG/4ESg7c8HHXvXVVKSGbrQbNFP8QaAjMyBQXS3lcolP5zAsrprk3ZV9Itwh6412gPm2/hHiFWx2XP/rtGB7Vmdgp5cK1rnJt7K3Z0qGX428e2FmGq1SqtIsNDevomQ3Y8KWzX72d1hPfqFw86io+MKr/oWJHJGvRO9+uIdHRocVcfL3nz8mbqUziT1+uiHn1MIS2p7M2T+hV91enT6amt76u538vXXVbLTr+4S5aVdOHBuYN9oYTx8QjAgyGC/OFDpSOgmZeRK78DsTOiqxg9kT9COlGxcCmALUwfsfHwtkx0x4P/VMuxiWWctt3ze3K/nX0VEnFYYCvvUcDL/osNaKL75LI/BPf5rIK925fsO/X5Oq7O/p4U0qP3LpRSKt/YZywT/XrwLbk8FzvjnvfRH/UqkcSfihf4u+ozBC75VfvkJTpMjdr7haaawERRfRudd5BVED+prGy9Jm1I/YD5oobwibfH/tZIdxt07GKn+vnaqPapusNH3jajp3XXNUnUkN+OfN8J+/gWTmS+3iJwtCBs7g77jdbK6MCjKix6wxqyqUbLj+T0k4JDRc59Qs9P/D+Ym3xhSMdXNjyRZXYh8lVmPkhcTk1RFXbu1DuV6OTb3t97tbHSxhQoughWuEXwbYG08jU75VXSzdNNSh8tSmn0+PFVGoYHohr22I+0gPdh1QMBP2ehQvcO3QA/U7OYeLX2OZprFCW1/TovBBaEKybsO2+4qAul190eOp4UtqeyAdBOw0wM8bMcK8/erUGuCMYq9/pJFzNYF98HIv4KtYcPjN0p0K7FNPH0P6ZmRA0kkj3lrmv9OPwcK9Eo7LHlMSgTWLIBpvqS/MqXdh1c9nWqUGaaTR676rn8u9zW3en92ccaPz2fPxUv8IsN+oPh0R2h7j4Q7F1tfh79/H5Z0tA/aY929luiAsXgnrQYy0M1zYPQ7t9ru/mzlTTZ7582Hgxp3+TkgXKW306/I+pp9wuEzPfwdYP4paFlgcFkoZ7+VKn8XEjtyzzE1rSJFwbDVTXf1CM06zhwX7feuJTIWnLU2v7/VvS31zwFzyztbj5PfLK5SM3VFsK1Z5EbHxIEBQY2lH/1FVfYvS6jAxetLOqNOyb4Pnw0bK76aVP6eeJAmebtuQG6l+9PT/W9xC5bWpdBZIpEqKnw5ca2fXszFvhotvIj68ixAy/Txvfs8pxPbEm5Et0x7YPexP8nXlUtq5L+r6P3zLPQyKMjd3+4YlJvozfVbbduur+u16iueoZPUOLTSaO5YnWT+9Gd1t8vNZmn+/f/kYvcspR+rXjWb172iNipc1NpXqF72zWspDsoi6b7AEpxWbj1VE14z+fFH7zlat4pBthRGuwMr+XkM/7nb4/REVJICZL2WswvnFt45MXrqXJjXG01j6NwEYFpQGWdn9TvlTs5HDZhULBfR8fRV8a2tE7Nxj2fRfqBFbvU+CP2rSy0TSPkQsCMifHzT5Ph2c8ZIQFJBrurdiqNLvfQDD8m+EqwUvxojGGRUFHlcO31byMiEXmrRH/lnSwt2fsHb3cynD27J3IaZOv0xMHioW2Ue1jWL/CPc+HmP+g0aM0W75UuKeEF1X9cfax5FePdDXqE+4tXE03bWJ/pIEKDR/zi545SGdS8l2YVxiC2vdPoVLUmxUvzP2RmPBsvfvcwH4rr32BDxNeSum15PVE5BOwD0sNtzw+U3JYplwZKCb2VPl8OZiyIMX7mVj9amGqBZFVtaX0e8il6Rv0Y/km1ja3+RjB9+3CQvu7XCyQPK25tyR/PDKhPqt+vYk2gROfco3OJ0o/2UJQLKn3aXra9YGn9MQOn+gL3s8QZyaSCKZeklHtr9A/Bz+X6Vsd/rQwPWHW6Ok7zPWgPPPbJcmuPyoMeEQ8S4ucGA4/S9Wajuf/MkEJaOJFLtlF9nR+/SIttHdt7XdspOywQ16Q3bn4Wxfx+k/lvUj1Drdtp/IxBwu98mJlP58eE1JuEqauOUQoXjW4rXSXR9hsTPjax4Oyu0Jlbs4bZV/7ErJJTS/f9FRb8+Nh8dFLCkfTP8TPfH7vHJ5XNzbF45iR7nN3Bvdxb49Wh6taaQYtKZkqqCPVjVskcyckhoYe6RAbuo29+gY/Ohtx5OhEetO1N5ozPUcUuro3DzWrfG18dZcf4I1NmllQ31OhRqo3cZE3f6ITWRi2N6Hwm63B5QztUNxnkVCLycHtcb3pKgoGRaQ7gt4GITy
kt8lCAoG7bPHVwgdVlY0igwmrtC9VSRd9+zmFO0sJK8E8fYF/FU/akl4rhznZWEkkzSmferO9sUsvxrl6XmSsUCmy1N3FImho+/0HWMMhDa8I3Q9BYRNP1YpuW6xGdcWni16puXp4UlRuWvXohCzfRvDRKvlhqaa1F+yt7k46Is92g1KteMIGvzJ5lEn1kMFPexGS9xukcvGdwUZ5x5dJ/c8FJjXWdt1C60jrBI3ptJgJb5XY2ex19Fajvmc/KIfRFRKwEBkgxykRzfeekI7dIbb5RN4RJRcZ4fETr1UDdat4D0onP1cw7hKZf3Ds6dz969aJPxb61L1kQk6ZNt+aZLT/PLBuz5isf2v6faVtl14q1CUzJe9k+dQ8q79L+P5r9kfR5fgrW0M+7617Yf/5XcasdtzQGUTF+hnZdYLql9/cu6En/FGfVX2hctriXgxq9hG3tjDGPYZ1TnDsGE+t6uef0pKU+qxK3sFTg86qlejLWdvmJ8+3YQN3fpD0ibJ00vh6LvQd/j51dWT1DLfmHE0kzRGqlqEHPnd3HAvx3a1fUy51w+i02nkXrcigI7nMBN6tpVXFFX+s3y1PuhkRTDyRGBWnKH1onijddcdOhmf3nSc75mMFO8cP7dTVtRFjeU9l6sdu6ZqYj2NZfakd9T6pkZi/41pwX0VE5eRmmfR3qPpG506ns5u5eC7fKbwhJluYcSf03EM0je/RNuDHs3ilzfG3W+yC2BsbaoNf6Y2vxbjq/iEV5mQ2cc72SXiB8BQ7LQu1kDkt9H6kPTG81kV44VdBQUik84TiYm1CywObkwaHJ8Utt517ZbA+UNpimt8x9gguVlTkj7Kf4f3tHTGmbeCkwHah4C8RZ+Ryv7jdCtSNQGwcPfp8uPTA1InUBzkul/e0Szy7eOwHpqlC/+yXX3MRU3HfrNPoIemInxMyrOs+6752GZm8vj4bIhz/QrSgXgw4Q/HbTm3ucGbXfGjMmGdkt++oq6xdO8I3+85Gf+2E+S5x6dKrDk7CFRdWbeO1eNA+0RBqETTfEPkulberG1mutkX84kHqYx3MizvFlv0DLX94iuEflUcrbJDU5r7JVrrwte5+xrg7oajhzZlfMz7DKbkLJtSA0a9FIs38oo7T1UYyinsmBvyEDK/Iv4+VFqN2Haw/D8w8LVujv3ij6XplahHllsmjxfmRjarcG++ef72QX+64YSu/gPj2elfE6rL9wT0qjwO08Z2tUtqbUsp6UuWTnRanOjAvj9vfDztJCJBBsaUqTV/xYXrOVhUnlpwOb6OU7Btyiw7lds0VC/RnZe7CfuN6+9KBK/Tqs5bK1dF9RtHr/ffnXRHQDRezfJqZWHdm38feExfnBh2qeIw0GqfKu1uV7vpc0JuxlnEM/VwiMyfvtkBcuGpyjbjwzUUR3NDG7E84W/BL0Cdo6lRpgM6DronBa1qCdXX+HVdjhepd+K6Ha8kVDYQOj36+kmQV9cntvH5D+E+RwBacTaCkss1n65MvD065ECIMGeqrC0OcNYYnH7UfUnTZwVjFXaN16Ll7bQ8eJR7NkMCLObv9HExEPtCwVWy2KxlWkkp4xJcvY3zn5f2GAnNBviPb8bEIx6ze6o/YL487Tacb32IvJaQ8jpoOGxhPyPjyB56P6tOdfPmkl3CHhQn3GlOJmGTr8tcDuS0PWuL23glIzUFr9Z088Tin90bxkKfqo5uKOgb07MCacIkdpboYlIN1jiD7a57u6FxBtOKBxg3JzG76HydL1gXMXIk+U3LvethnPjErskmebgTu8MVVd719pknmJ0UHhsh2H17S67ObRz/Gpkbf9sgf0OyXcNzaWT836j7cFlh8rme/LsHMCdOvtt6aK0Rmp2z3a+WFdzJhIatcZ832b8pUlAyWdz9K7lVFvnWvGKa12Z9r7+odtHua4RhMDZGxAsW3Fl+/sbk+ZJQ6M7vLl4rtvrXXY2pD8rkf+DH/7cZrLzxN2ppLCX1LrnBavOx7MyQs9WLKmm6vOcPL8Xr4X4MdXrr1veZ7pF5MOM+fcck/FOg+IHiRFcz/MvHX/LB984acxC2u0e7j/nvVpvku1Mnl7LKsBJOK5aJrTwtuZFWEa9w+JPJxQO7J4cn2j+Xh8oUh4wm/CB4BsTXjHxcQPxVEqkMlUmpLHyHDB7TPPJeKrsYdaiWavlr8TvRb41fb0J4gYTMSnty/N74oXMYQ5Xf9oEF6KHp+OEckIYvFHVYxmap4rG96/oKXHaZc6s0gDdhWOZtxSvZxy2k36YKtzUci3D73pq/RDXvocnTw9b6qsZyMyO5uxeG5jNN7tDI3rbGpuBIvvXuPpJEOz9Tk/qkPlYOpeyb5d08nxRUSZrN3jF5d9ezpL6sMnAbXQFRpVMTUbtST4y710LWBq3fy1OMw/bjGwPb64za9g7YTplX9qTV1WFGtO8DtyV3vR9zPK6aezG4hHDtDbLutdKVTPL/86nV3pzeJlYe+bf3VI1x8kxQ5vvvDOisv56bRjkmelvl4hbkRbKfovcginJ4BxkI3K3Nr3M4i4466E+9srA8cBr8PphEIbTuHZIsaW83cQw/6zR5nawi+cI/00zrlaNQ6UV2ipQJobnFvRmYVZKtQAmZPCOahZB5cSmfKlAvaeM3tNLq3cXTHJpeayOOv3fMGtfYpNe84//bsJSv86/KyeWp1QFfe9kuxG3LCNIzqGs9ihd3G3bDavMK+F/v8SB0F8QfVN6OtDqR2vrkZ4cx2ljw3VNfboihFZ9Oenbpgmdpqk7Y1htdjbrP6y+Mu3Q2vG0/s/E7iW1dxNC1TkHujD7Jwbo9FakJZwtXgoTiv8C0VQ4yjr8IxG36sG6y1s7iDyXgaaKoVfAFtHl4lyIPEqOT6XFaaP6+eSCrl/3QtQbBhU7bog0etmffNM8QXJuUlwxQfx6lUogsXAHtekrbVI/KlEInrXTuLDDXqG4avOM2zeTRmpoZttZp0ay7WOSni5nc9y7xv9063+mH7w0cNNlSeGrGNzZOaoThPldaDEm8VD0zFoHUVT696MX5XT1O62YmB1uKZQlZsT5m5jab65HDny+yqdD7XdWrEgPuYZO5rlXUIqfdaXvbST4RHmuXEsidvDkfyHbVMwE2PyDu9JRl1mkg02FZcbwa/cUUsvH77mb3GxEpa+Dz/K7c5X48mjzbE9goPQeaQ1zW9H4fUm6LwMa4y6Qlnd5dufKxbjZExbxMreZvslR6vfSJl+yBV1JFvvBZPHXn/TGzXINAaLF/yY7Ql4riv/n2qob1l3zlrzcCNV856eF/aqoL5NekifbIxaXxsQLv0x+GHWSbUkCKMb8/T53sEXnU3zLzKIqsXl+Xq96g2xyha4mb7A2Iz8s5rdUdLxIWu9p+R0hHwnLb4OXziQdmkxMmSH4ulujWzXxw35IfWfF2cGbkmbRkiWq46T7N5fyIg4vhIR0fBrkxnjRu0l32pXuyA9CuLP0eiC3+d3xjV9LG7xoF7cepWza+fXjUbBKzmmrqHH0aiMxoXZ0YzeRaTtJIXP7J7XqL5lHYY/ZrWXdQ5MjZdRr
04OlPTWPpCt657cX7nItZ/bLo1/vHc4v35W1MJv4a9FgW5uBYXebkMXOdvdAFcXP+Lp/O9//Onc7wfjqkI/U0jgZxHDM6jQ7uwV6js8tMlA35ED5VdfvWEX73dqCwARwcBHAXAMRhEBhNHYQI+VDrA2R1SgsBRGEEg/GoBMHGMAAYScGKAANMPhKTgV0d4gkYE8SADoPoA8NstEQwECQBnj2AmwKQCSxo4a1a0IgEzH4AN7U2gUuSZQACFGsSZXxJVABg4NvSNY/6LzMrmdBAEGCDsBLwvGRdMJLPI0GICAL+R/qYJNgNPZIBID4oHxZjoiwTQSMAmEKQHEsEg2GAcYGlphaBRIVnIaByLSaVQyVQWA+A8uQMMNoMJkqHVBhCqVAo8ZEMBNQBbEo4CP/XJeVD0AA7i8IMupAHGCGAxWDgSiQ0QKYFUUiDHQgobgDTRIPj0KMu6KSBIYMD4cLwPgr2FDGcvh4MA0EhwUOBHW2h/R1ivIQhvRWUQYQihrQ38cEQK7AXEEJavHxPYbUB11ARcQCIAMgEcCQk/RSvtAfxwDMAbXgspBuAQE3B0SD8dUsbkBBbE+1GIUGw4kQcpfjgINGic8yYM0EA6p0sCwhqgUpa8BYNX2OAIYb0kR2RADjOYdBaeAwQV8GApodB4ph+REsBxHvBeAoEzzpFgMYkkYgisAPKZCT9xwm0DnC1YTM4rMyxFWHYbhByhL0EMbwXNMMgQzlCUYbgYRNgu+grMEBIA5x0Zth1yn+j7+0Iyi8QkQuJwZKBo4LyhP5emOar8IAdIRAjRZXEoEJzOFhrE7yWzOKAve87xBwuFEvaUkw9Lb94c0jnCXIWkHZdCxAB2u+Gov4VHeQ8ApQlIgagAmwxBBAbTSFROwv1pJR3EMagUjmoqlKXeEGpMIsQriDIgDu/H8RlKKShziXQG8y/AGBwjIXMg/8j/4vkKZzh4cdyGWAnC79CMfwgxYBJwJBUAPGQMhzU4CF84DzkRZ9HBJS4wQLhjZAUCAA9B5w0C+sYOwG5vaCWB6YdYMnJJcA8AMc4QniWAtH/MBRGZfiseQoYBYCCOxOLkGYQTDsCT4JrlQ4Rs2x1IxEEjS5zmKCXj/Klw6wAQSGWCKxlE+D2DVoyD6gsI7EbvgZVC1WBp1yU6/ZYkJGIACHjIOnDggvPE1W0/0sODgkZ6yCpAEy5wAsO5C0POYHn7UnGkJUHIfqgmwjqgJbqw9G4lzmYsxkpZRTBoIB7yBP9nCsH1WxMAkVDJgnRDjizlLpXOBqgsJolIgXyS5agPgiaX4kGhBoIkBdj53crL4PmxyBzyQvm0xEd7kEFl0aHYaMCmA2YwrSkwHTktIxyFS+iDS6QgUpZapGC8fHGQbzAzIU1KSMCSCu3qCNLJABkkw4YtJRMZKm6QgDISMLF1RCgjMcBKedWD697SHlDagL5/ptIy2EslxYOiggSMidA35ChkN8ds29+KkNESC6C1Sz7AL+5ECguq2lDNpYOc0g6bDllDYkPVBTpD4CBwIIXrCUhhQHTlHC1wvJbrGycUVE70vKFKBJu1tHAl25achjZbDhCENVTPQJIPAg9HAE9c2QqqNQgakZMTkCo/XCCRyjmIOIcrib2EjT3oQwLxTLii0nBLOUtkcCzkVCAmnIi+cI5DNtFBHyjey+bQIEpCjFqCyQg6ythwvSTDq+A6D2UFlcGETlAqTG2oQNKZHI2gD8QuIhwaQI9IhrUuXxTAP+six3cSCFtDYZG9oayCQ8Mpp3AIYE5xkggMBvEsDvyAUTCEHnOpDDKWJzinBCcJVjj9V2UlUX0hbxhQeYF1mv3FdQ14BxeYsfBKDl85C5a+fCD6wJFfXqrAGdYz++3K8udhSgAZUDi8wZXKx2E1rGllW06VosE0gY95aDV0LVqpBDAP4GsAA0DANw06lB6QIRpAaOhvn0B4uMISECu3AI6AI8dJeM6Kcxw6gKSlxi3O7NKYHmSvLwVOkL+UGP2FJiRnC+UJcZmq4eFIAL6fkVmMlcr+21UK7r6SZwDL90Q4fDgObeBjC/IjCD4qIVZAJZ3jMuXvyHDuKb/Dw/CDLiFLDIPTBsKVM7x8tP9rBFYyBTZihTlLFkJ8YCzf7/65Cr4U4jiW+sD5Dff3KCy5xwRJpL80wn/8Sc8/RZFw5yadSgLhi+vSzUwWQvFv91lOFYZs/+d9Sff31fAusuF/axb1pTERKlQEVAeIsCQFGkNDvyFIQRwZ+uA0KirATaVkGnxMQtkNjaKQqP/fMfrfvWPUSQ3lQGcRbbB6NB+mv1oA0x9pZOxkgSHp2ZJcnQOZGGtj0wBblKUKWuXPjlEMdDX7D3SMZilh/7Mto5v+Ny2j6P8rLaP/mZ7Q/7J2z0xVZeX/7v2eKv91/Z7KcLcnSun/oN8zC40i/9uOTyzBW8kbq4zGo5Rwat54NSVlbyU8BkShvXE+ympK/+mOT4qDk729OcrZh0J1xqihnc0x3r5K/iYG+naB1kQ0nWWFtg/URxqYslj4vzo+0X92fKLMmfou9kiUuZIbaGRgbIczCjb3PuRkysQbqziF6KnQ7UlGztYEVXQA3QptqYR0tQRV/QP2m4FWwdbqJH9rRjDF0cvKMMRU38nB2MUgxMrXEIfR+492fKL/X3d84jDqWFDdx4fwH+745JH9q+OTC/6/Hqa9VcIGtEjVtIvrfS5796mAjjeSZ8W2q19xwnLpO/k0KI+rbjihXle6f6omM6uzdNu9Z6YRES9H6yOGuHfnNa92JDIV3KmC1BaqbtjsLq+o3IaMr9OvIz4EXpDuOlytOz9z4+fJh/W8KeNpGIX7U5vkbms+BP+QQufGbz5TT9KpuRiv/Ip+TPX1VhlBZemSPj+UhJVP1Gtv1IH0tuO5hnb7Ztp4uYKfdPe9dbrnsm/LVMY+ROwXgXdDb/iu8HQ42u2P3uG/2WlxdcpX2XJSeCYoOIpK873aJ20KbOWTuWe47uPW2MgQk5tHYmWk/Jrsci3HgV3c9CS9/WTEcZ2SbWv3jzCjyouE1r9+ulbVdatJjebGohIDV0kr5GOqSNDpk2P05xddrnjyX4wMjhK6otx+Tuh1osclccFXLd/x9BQZJ4Duf6hCSxnTdzj5UVVZm4uvHDtura1wxaMpBX1+t9jox74XrlaJFVjwChRuBgT19zDDTU+9vTmNrSSwZWbEnM5rHLM/P79jUKq4eDNdc7TvcHBFciRLXuPyseEC7fcJqVLulLCP+i9mrh84I4fuOBPYnmP4Y29mo/SDW2EWnbqfr4Z9bGkk90mV7wguO58+2TwVfetLWISAnAmZOvT2/SKevO/UhYIPHlnlWTV7TyFVGdY7BU+2UgLoogIyX5hX7na3PK3gmQq7fSm36/b05uX/9Jvg8b8JseJ/AFM1m9w= \ No newline at end of file diff --git a/docs/docs/how_to/chatbots_memory.ipynb b/docs/docs/how_to/chatbots_memory.ipynb index 
027594d71d2c7..aa6e7002ca706 100644 --- a/docs/docs/how_to/chatbots_memory.ipynb +++ b/docs/docs/how_to/chatbots_memory.ipynb @@ -23,6 +23,17 @@ "\n", "We'll go into more detail on a few techniques below!\n", "\n", + ":::note\n", + "\n", + "This how-to guide previously built a chatbot using [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html). You can access this version of the guide in the [v0.2 docs](https://python.langchain.com/v0.2/docs/how_to/chatbots_memory/).\n", + "\n", + "As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into new LangChain applications.\n", + "\n", + "If your code is already relying on `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do **not** need to make any changes. We do not plan on deprecating this functionality in the near future as it works for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected.\n", + "\n", + "Please see [How to migrate to LangGraph Memory](/docs/versions/migrating_memory/) for more details.\n", + ":::\n", + "\n", "## Setup\n", "\n", "You'll need to install a few packages, and have your OpenAI API key set as an environment variable named `OPENAI_API_KEY`:" @@ -34,32 +45,21 @@ "metadata": {}, "outputs": [ { - "name": "stdout", + "name": "stdin", "output_type": "stream", "text": [ - "\u001b[33mWARNING: You are using pip version 22.0.4; however, version 23.3.2 is available.\n", - "You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n", - "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + "OpenAI API Key: ········\n" ] - }, - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" } ], "source": [ - "%pip install --upgrade --quiet langchain langchain-openai\n", + "%pip install --upgrade --quiet langchain langchain-openai langgraph\n", "\n", - "# Set env var OPENAI_API_KEY or load from a .env file:\n", - "import dotenv\n", + "import getpass\n", + "import os\n", "\n", - "dotenv.load_dotenv()" + "if not os.environ.get(\"OPENAI_API_KEY\"):\n", + " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] }, { @@ -71,13 +71,13 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "from langchain_openai import ChatOpenAI\n", "\n", - "chat = ChatOpenAI(model=\"gpt-4o-mini\")" + "model = ChatOpenAI(model=\"gpt-4o-mini\")" ] }, { @@ -98,34 +98,33 @@ "name": "stdout", "output_type": "stream", "text": [ - "I said \"J'adore la programmation,\" which means \"I love programming\" in French.\n" + "I said, \"I love programming\" in French: \"J'adore la programmation.\"\n" ] } ], "source": [ - "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_core.messages import AIMessage, HumanMessage, SystemMessage\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", - " (\n", - " \"system\",\n", - " \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n", + " SystemMessage(\n", + " content=\"You are a helpful assistant. 
Answer all questions to the best of your ability.\"\n", " ),\n", - " (\"placeholder\", \"{messages}\"),\n", + " MessagesPlaceholder(variable_name=\"messages\"),\n", " ]\n", ")\n", "\n", - "chain = prompt | chat\n", + "chain = prompt | model\n", "\n", "ai_msg = chain.invoke(\n", " {\n", " \"messages\": [\n", - " (\n", - " \"human\",\n", - " \"Translate this sentence from English to French: I love programming.\",\n", + " HumanMessage(\n", + " content=\"Translate from English to French: I love programming.\"\n", " ),\n", - " (\"ai\", \"J'adore la programmation.\"),\n", - " (\"human\", \"What did you just say?\"),\n", + " AIMessage(content=\"J'adore la programmation.\"),\n", + " HumanMessage(content=\"What did you just say?\"),\n", " ],\n", " }\n", ")\n", @@ -136,93 +135,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can see that by passing the previous conversation into a chain, it can use it as context to answer questions. This is the basic concept underpinning chatbot memory - the rest of the guide will demonstrate convenient techniques for passing or reformatting messages.\n", - "\n", - "## Chat history\n", - "\n", - "It's perfectly fine to store and pass messages directly as an array, but we can use LangChain's built-in [message history class](https://python.langchain.com/api_reference/langchain/index.html#module-langchain.memory) to store and load messages as well. Instances of this class are responsible for storing and loading chat messages from persistent storage. LangChain integrates with many providers - you can see a [list of integrations here](/docs/integrations/memory) - but for this demo we will use an ephemeral demo class.\n", - "\n", - "Here's an example of the API:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[HumanMessage(content='Translate this sentence from English to French: I love programming.'),\n", - " AIMessage(content=\"J'adore la programmation.\")]" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langchain_community.chat_message_histories import ChatMessageHistory\n", - "\n", - "demo_ephemeral_chat_history = ChatMessageHistory()\n", - "\n", - "demo_ephemeral_chat_history.add_user_message(\n", - " \"Translate this sentence from English to French: I love programming.\"\n", - ")\n", - "\n", - "demo_ephemeral_chat_history.add_ai_message(\"J'adore la programmation.\")\n", - "\n", - "demo_ephemeral_chat_history.messages" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can use it directly to store conversation turns for our chain:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "demo_ephemeral_chat_history = ChatMessageHistory()\n", - "\n", - "input1 = \"Translate this sentence from English to French: I love programming.\"\n", - "\n", - 
"demo_ephemeral_chat_history.add_user_message(input1)\n", - "\n", - "response = chain.invoke(\n", - " {\n", - " \"messages\": demo_ephemeral_chat_history.messages,\n", - " }\n", - ")\n", - "\n", - "demo_ephemeral_chat_history.add_ai_message(response)\n", - "\n", - "input2 = \"What did I just ask you?\"\n", - "\n", - "demo_ephemeral_chat_history.add_user_message(input2)\n", - "\n", - "chain.invoke(\n", - " {\n", - " \"messages\": demo_ephemeral_chat_history.messages,\n", - " }\n", - ")" + "We can see that by passing the previous conversation into a chain, it can use it as context to answer questions. This is the basic concept underpinning chatbot memory - the rest of the guide will demonstrate convenient techniques for passing or reformatting messages." ] }, { @@ -231,128 +144,97 @@ "source": [ "## Automatic history management\n", "\n", - "The previous examples pass messages to the chain explicitly. This is a completely acceptable approach, but it does require external management of new messages. LangChain also includes an wrapper for LCEL chains that can handle this process automatically called `RunnableWithMessageHistory`.\n", - "\n", - "To show how it works, let's slightly modify the above prompt to take a final `input` variable that populates a `HumanMessage` template after the chat history. This means that we will expect a `chat_history` parameter that contains all messages BEFORE the current messages instead of all messages:" + "The previous examples pass messages to the chain (and model) explicitly. This is a completely acceptable approach, but it does require external management of new messages. LangChain also provides a way to build applications that have memory using LangGraph's [persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/). You can [enable persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/) in LangGraph applications by providing a `checkpointer` when compiling the graph." ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ - "prompt = ChatPromptTemplate.from_messages(\n", - " [\n", - " (\n", - " \"system\",\n", - " \"You are a helpful assistant. 
Answer all questions to the best of your ability.\",\n", - " ),\n", - " (\"placeholder\", \"{chat_history}\"),\n", - " (\"human\", \"{input}\"),\n", - " ]\n", - ")\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.graph import START, MessagesState, StateGraph\n", "\n", - "chain = prompt | chat" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - " We'll pass the latest input to the conversation here and let the `RunnableWithMessageHistory` class wrap our chain and do the work of appending that `input` variable to the chat history.\n", - " \n", - " Next, let's declare our wrapped chain:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", + "workflow = StateGraph(state_schema=MessagesState)\n", "\n", - "demo_ephemeral_chat_history_for_chain = ChatMessageHistory()\n", "\n", - "chain_with_message_history = RunnableWithMessageHistory(\n", - " chain,\n", - " lambda session_id: demo_ephemeral_chat_history_for_chain,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"chat_history\",\n", - ")" + "# Define the function that calls the model\n", + "def call_model(state: MessagesState):\n", + " system_prompt = (\n", + " \"You are a helpful assistant. \"\n", + " \"Answer all questions to the best of your ability.\"\n", + " )\n", + " messages = [SystemMessage(content=system_prompt)] + state[\"messages\"]\n", + " response = model.invoke(messages)\n", + " return {\"messages\": response}\n", + "\n", + "\n", + "# Define the node and edge\n", + "workflow.add_node(\"model\", call_model)\n", + "workflow.add_edge(START, \"model\")\n", + "\n", + "# Add simple in-memory checkpointer\n", + "# highlight-start\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)\n", + "# highlight-end" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "This class takes a few parameters in addition to the chain that we want to wrap:\n", - "\n", - "- A factory function that returns a message history for a given session id. This allows your chain to handle multiple users at once by loading different messages for different conversations.\n", - "- An `input_messages_key` that specifies which part of the input should be tracked and stored in the chat history. In this example, we want to track the string passed in as `input`.\n", - "- A `history_messages_key` that specifies what the previous messages should be injected into the prompt as. Our prompt has a `MessagesPlaceholder` named `chat_history`, so we specify this property to match.\n", - "- (For chains with multiple outputs) an `output_messages_key` which specifies which output to store as history. This is the inverse of `input_messages_key`.\n", - "\n", - "We can invoke this new chain as normal, with an additional `configurable` field that specifies the particular `session_id` to pass to the factory function. 
This is unused for the demo, but in real-world chains, you'll want to return a chat history corresponding to the passed session:" +    " We'll pass the latest input to the conversation here and let LangGraph keep track of the conversation history using the checkpointer:"   ]  },  {   "cell_type": "code", -   "execution_count": 8, +   "execution_count": 5,   "metadata": {},   "outputs": [ -    { -     "name": "stderr", -     "output_type": "stream", -     "text": [ -      "Parent run dc4e2f79-4bcd-4a36-9506-55ace9040588 not found for run 34b5773e-3ced-46a6-8daf-4d464c15c940. Treating as a root run.\n" -     ] -    },     {      "data": {       "text/plain": [ -       "AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})" +       "{'messages': [HumanMessage(content='Translate to French: I love programming.', additional_kwargs={}, response_metadata={}, id='be5e7099-3149-4293-af49-6b36c8ccd71b'),\n", +       " AIMessage(content=\"J'aime programmer.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 4, 'prompt_tokens': 35, 'total_tokens': 39, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_e9627b5346', 'finish_reason': 'stop', 'logprobs': None}, id='run-8a753d7a-b97b-4d01-a661-626be6f41b38-0', usage_metadata={'input_tokens': 35, 'output_tokens': 4, 'total_tokens': 39})]}"       ]      }, -     "execution_count": 8, +     "execution_count": 5,      "metadata": {},      "output_type": "execute_result"     }    ],    "source": [ -    "chain_with_message_history.invoke(\n", -    "    {\"input\": \"Translate this sentence from English to French: I love programming.\"},\n", -    "    {\"configurable\": {\"session_id\": \"unused\"}},\n", +    "app.invoke(\n", +    "    {\"messages\": [HumanMessage(content=\"Translate to French: I love programming.\")]},\n", +    "    config={\"configurable\": {\"thread_id\": \"1\"}},\n",     ")"   ]  },  {   "cell_type": "code", -   "execution_count": 9, +   "execution_count": 6,   "metadata": {},   "outputs": [ -    { -     "name": "stderr", -     "output_type": "stream", -     "text": [ -      "Parent run cc14b9d8-c59e-40db-a523-d6ab3fc2fa4f not found for run 5b75e25c-131e-46ee-9982-68569db04330. 
Treating as a root run.\n" -     ] -    },     {      "data": {       "text/plain": [ -       "AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})" +       "{'messages': [HumanMessage(content='Translate to French: I love programming.', additional_kwargs={}, response_metadata={}, id='be5e7099-3149-4293-af49-6b36c8ccd71b'),\n", +       " AIMessage(content=\"J'aime programmer.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 4, 'prompt_tokens': 35, 'total_tokens': 39, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_e9627b5346', 'finish_reason': 'stop', 'logprobs': None}, id='run-8a753d7a-b97b-4d01-a661-626be6f41b38-0', usage_metadata={'input_tokens': 35, 'output_tokens': 4, 'total_tokens': 39}),\n", +       " HumanMessage(content='What did I just ask you?', additional_kwargs={}, response_metadata={}, id='c667529b-7c41-4cc0-9326-0af47328b816'),\n", +       " AIMessage(content='You asked me to translate \"I love programming\" into French.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 54, 'total_tokens': 67, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-134a7ea0-d3a4-4923-bd58-25e5a43f6a1f-0', usage_metadata={'input_tokens': 54, 'output_tokens': 13, 'total_tokens': 67})]}"       ]      }, -     "execution_count": 9, +     "execution_count": 6,      "metadata": {},      "output_type": "execute_result"     }    ],    "source": [ -    "chain_with_message_history.invoke(\n", -    "    {\"input\": \"What did I just ask you?\"}, {\"configurable\": {\"session_id\": \"unused\"}}\n", +    "app.invoke(\n", +    "    {\"messages\": [HumanMessage(content=\"What did I just ask you?\")]},\n", +    "    config={\"configurable\": {\"thread_id\": \"1\"}},\n",     ")"   ]  }, @@ -366,80 +248,44 @@    "\n",    "### Trimming messages\n",    "\n", -    "LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is trim the historic messages before passing them to the model. Let's use an example history with some preloaded messages:" +    "LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is to trim the history messages before passing them to the model. Let's use an example history with the `app` we declared above:"   ]  },  {   "cell_type": "code", -   "execution_count": 21, -   "metadata": {}, -   "outputs": [ -    { -     "data": { -      "text/plain": [ -       "[HumanMessage(content=\"Hey there! I'm Nemo.\"),\n", -       " AIMessage(content='Hello!'),\n", -       " HumanMessage(content='How are you today?'),\n", -       " AIMessage(content='Fine thanks!')]" -      ] -     }, -     "execution_count": 21, -     "metadata": {}, -     "output_type": "execute_result" -    } -   ], -   "source": [ -    "demo_ephemeral_chat_history = ChatMessageHistory()\n", -    "\n", -    "demo_ephemeral_chat_history.add_user_message(\"Hey there! 
I'm Nemo.\")\n", - "demo_ephemeral_chat_history.add_ai_message(\"Hello!\")\n", - "demo_ephemeral_chat_history.add_user_message(\"How are you today?\")\n", - "demo_ephemeral_chat_history.add_ai_message(\"Fine thanks!\")\n", - "\n", - "demo_ephemeral_chat_history.messages" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's use this message history with the `RunnableWithMessageHistory` chain we declared above:" - ] - }, - { - "cell_type": "code", - "execution_count": 22, + "execution_count": 7, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Parent run 7ff2d8ec-65e2-4f67-8961-e498e2c4a591 not found for run 3881e990-6596-4326-84f6-2b76949e0657. Treating as a root run.\n" - ] - }, { "data": { "text/plain": [ - "AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})" + "{'messages': [HumanMessage(content=\"Hey there! I'm Nemo.\", additional_kwargs={}, response_metadata={}, id='6b4cab70-ce18-49b0-bb06-267bde44e037'),\n", + " AIMessage(content='Hello!', additional_kwargs={}, response_metadata={}, id='ba3714f4-8876-440b-a651-efdcab2fcb4c'),\n", + " HumanMessage(content='How are you today?', additional_kwargs={}, response_metadata={}, id='08d032c0-1577-4862-a3f2-5c1b90687e21'),\n", + " AIMessage(content='Fine thanks!', additional_kwargs={}, response_metadata={}, id='21790e16-db05-4537-9a6b-ecad0fcec436'),\n", + " HumanMessage(content=\"What's my name?\", additional_kwargs={}, response_metadata={}, id='c933eca3-5fd8-4651-af16-20fe2d49c216'),\n", + " AIMessage(content='Your name is Nemo.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 5, 'prompt_tokens': 63, 'total_tokens': 68, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-a0b21acc-9dbb-4fb6-a953-392020f37d88-0', usage_metadata={'input_tokens': 63, 'output_tokens': 5, 'total_tokens': 68})]}" ] }, - "execution_count": 22, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "chain_with_message_history = RunnableWithMessageHistory(\n", - " chain,\n", - " lambda session_id: demo_ephemeral_chat_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"chat_history\",\n", - ")\n", + "demo_ephemeral_chat_history = [\n", + " HumanMessage(content=\"Hey there! 
I'm Nemo.\"),\n", + " AIMessage(content=\"Hello!\"),\n", + " HumanMessage(content=\"How are you today?\"),\n", + " AIMessage(content=\"Fine thanks!\"),\n", + "]\n", "\n", - "chain_with_message_history.invoke(\n", - " {\"input\": \"What's my name?\"},\n", - " {\"configurable\": {\"session_id\": \"unused\"}},\n", + "app.invoke(\n", + " {\n", + " \"messages\": demo_ephemeral_chat_history\n", + " + [HumanMessage(content=\"What's my name?\")]\n", + " },\n", + " config={\"configurable\": {\"thread_id\": \"2\"}},\n", ")" ] }, @@ -447,137 +293,96 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can see the chain remembers the preloaded name.\n", + "We can see the app remembers the preloaded name.\n", "\n", - "But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the built in [trim_messages](/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. In this case we'll count each message as 1 \"token\" and keep only the last two messages:" + "But let's say we have a very small context window, and we want to trim the number of messages passed to the model to only the 2 most recent ones. We can use the built in [trim_messages](/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. In this case we'll count each message as 1 \"token\" and keep only the last two messages:" ] }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ - "from operator import itemgetter\n", - "\n", "from langchain_core.messages import trim_messages\n", - "from langchain_core.runnables import RunnablePassthrough\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.graph import START, MessagesState, StateGraph\n", "\n", + "# Define trimmer\n", + "# highlight-start\n", + "# count each message as 1 \"token\" (token_counter=len) and keep only the last two messages\n", "trimmer = trim_messages(strategy=\"last\", max_tokens=2, token_counter=len)\n", + "# highlight-end\n", "\n", - "chain_with_trimming = (\n", - " RunnablePassthrough.assign(chat_history=itemgetter(\"chat_history\") | trimmer)\n", - " | prompt\n", - " | chat\n", - ")\n", + "workflow = StateGraph(state_schema=MessagesState)\n", "\n", - "chain_with_trimmed_history = RunnableWithMessageHistory(\n", - " chain_with_trimming,\n", - " lambda session_id: demo_ephemeral_chat_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"chat_history\",\n", - ")" + "\n", + "# Define the function that calls the model\n", + "def call_model(state: MessagesState):\n", + " # highlight-start\n", + " trimmed_messages = trimmer.invoke(state[\"messages\"])\n", + " system_prompt = (\n", + " \"You are a helpful assistant. 
\"\n", + " \"Answer all questions to the best of your ability.\"\n", + " )\n", + " messages = [SystemMessage(content=system_prompt)] + trimmed_messages\n", + " # highlight-end\n", + " response = model.invoke(messages)\n", + " return {\"messages\": response}\n", + "\n", + "\n", + "# Define the node and edge\n", + "workflow.add_node(\"model\", call_model)\n", + "workflow.add_edge(START, \"model\")\n", + "\n", + "# Add simple in-memory checkpointer\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let's call this new chain and check the messages afterwards:" + "Let's call this new app and check the response" ] }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 9, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Parent run 775cde65-8d22-4c44-80bb-f0b9811c32ca not found for run 5cf71d0e-4663-41cd-8dbe-e9752689cfac. Treating as a root run.\n" - ] - }, { "data": { "text/plain": [ - "AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})" + "{'messages': [HumanMessage(content=\"Hey there! I'm Nemo.\", additional_kwargs={}, response_metadata={}, id='6b4cab70-ce18-49b0-bb06-267bde44e037'),\n", + " AIMessage(content='Hello!', additional_kwargs={}, response_metadata={}, id='ba3714f4-8876-440b-a651-efdcab2fcb4c'),\n", + " HumanMessage(content='How are you today?', additional_kwargs={}, response_metadata={}, id='08d032c0-1577-4862-a3f2-5c1b90687e21'),\n", + " AIMessage(content='Fine thanks!', additional_kwargs={}, response_metadata={}, id='21790e16-db05-4537-9a6b-ecad0fcec436'),\n", + " HumanMessage(content='What is my name?', additional_kwargs={}, response_metadata={}, id='a22ab7c5-8617-4821-b3e9-a9e7dca1ff78'),\n", + " AIMessage(content=\"I'm sorry, but I don't have access to personal information about you unless you share it with me. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 39, 'total_tokens': 66, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-f7b32d72-9f57-4705-be7e-43bf1c3d293b-0', usage_metadata={'input_tokens': 39, 'output_tokens': 27, 'total_tokens': 66})]}" ] }, - "execution_count": 24, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "chain_with_trimmed_history.invoke(\n", - " {\"input\": \"Where does P. Sherman live?\"},\n", - " {\"configurable\": {\"session_id\": \"unused\"}},\n", + "app.invoke(\n", + " {\n", + " \"messages\": demo_ephemeral_chat_history\n", + " + [HumanMessage(content=\"What is my name?\")]\n", + " },\n", + " config={\"configurable\": {\"thread_id\": \"3\"}},\n", ")" ] }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[HumanMessage(content=\"Hey there! 
I'm Nemo.\"),\n", - " AIMessage(content='Hello!'),\n", - " HumanMessage(content='How are you today?'),\n", - " AIMessage(content='Fine thanks!'),\n", - " HumanMessage(content=\"What's my name?\"),\n", - " AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n", - " HumanMessage(content='Where does P. Sherman live?'),\n", - " AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "demo_ephemeral_chat_history.messages" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ - "And we can see that our history has removed the two oldest messages while still adding the most recent conversation at the end. The next time the chain is called, `trim_messages` will be called again, and only the two most recent messages will be passed to the model. In this case, this means that the model will forget the name we gave it the next time we invoke it:" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Parent run fde7123f-6fd3-421a-a3fc-2fb37dead119 not found for run 061a4563-2394-470d-a3ed-9bf1388ca431. Treating as a root run.\n" - ] - }, - { - "data": { - "text/plain": [ - "AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})" - ] - }, - "execution_count": 27, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "chain_with_trimmed_history.invoke(\n", - " {\"input\": \"What is my name?\"},\n", - " {\"configurable\": {\"session_id\": \"unused\"}},\n", - ")" + "We can see that `trim_messages` was called and only the two most recent messages will be passed to the model. In this case, this means that the model forgot the name we gave it." ] }, { @@ -593,114 +398,84 @@ "source": [ "### Summary memory\n", "\n", - "We can use this same pattern in other ways too. For example, we could use an additional LLM call to generate a summary of the conversation before calling our chain. Let's recreate our chat history and chatbot chain:" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[HumanMessage(content=\"Hey there! 
I'm Nemo.\"),\n", - " AIMessage(content='Hello!'),\n", - " HumanMessage(content='How are you today?'),\n", - " AIMessage(content='Fine thanks!')]" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "demo_ephemeral_chat_history = ChatMessageHistory()\n", - "\n", - "demo_ephemeral_chat_history.add_user_message(\"Hey there! I'm Nemo.\")\n", - "demo_ephemeral_chat_history.add_ai_message(\"Hello!\")\n", - "demo_ephemeral_chat_history.add_user_message(\"How are you today?\")\n", - "demo_ephemeral_chat_history.add_ai_message(\"Fine thanks!\")\n", - "\n", - "demo_ephemeral_chat_history.messages" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We'll slightly modify the prompt to make the LLM aware that will receive a condensed summary instead of a chat history:" + "We can use this same pattern in other ways too. For example, we could use an additional LLM call to generate a summary of the conversation before calling our app. Let's recreate our chat history:" ] }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ - "prompt = ChatPromptTemplate.from_messages(\n", - " [\n", - " (\n", - " \"system\",\n", - " \"You are a helpful assistant. Answer all questions to the best of your ability. The provided chat history includes facts about the user you are speaking with.\",\n", - " ),\n", - " (\"placeholder\", \"{chat_history}\"),\n", - " (\"user\", \"{input}\"),\n", - " ]\n", - ")\n", - "\n", - "chain = prompt | chat\n", - "\n", - "chain_with_message_history = RunnableWithMessageHistory(\n", - " chain,\n", - " lambda session_id: demo_ephemeral_chat_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"chat_history\",\n", - ")" + "demo_ephemeral_chat_history = [\n", + " HumanMessage(content=\"Hey there! I'm Nemo.\"),\n", + " AIMessage(content=\"Hello!\"),\n", + " HumanMessage(content=\"How are you today?\"),\n", + " AIMessage(content=\"Fine thanks!\"),\n", + "]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "And now, let's create a function that will distill previous interactions into a summary. We can add this one to the front of the chain too:" + "And now, let's update the model-calling function to distill previous interactions into a summary:" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ - "def summarize_messages(chain_input):\n", - " stored_messages = demo_ephemeral_chat_history.messages\n", - " if len(stored_messages) == 0:\n", - " return False\n", - " summarization_prompt = ChatPromptTemplate.from_messages(\n", - " [\n", - " (\"placeholder\", \"{chat_history}\"),\n", - " (\n", - " \"user\",\n", - " \"Distill the above chat messages into a single summary message. 
Include as many specific details as you can.\",\n", - " ),\n", - " ]\n", - " )\n", - " summarization_chain = summarization_prompt | chat\n", - "\n", - " summary_message = summarization_chain.invoke({\"chat_history\": stored_messages})\n", - "\n", - " demo_ephemeral_chat_history.clear()\n", + "from langchain_core.messages import HumanMessage, RemoveMessage\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.graph import START, MessagesState, StateGraph\n", "\n", - " demo_ephemeral_chat_history.add_message(summary_message)\n", + "workflow = StateGraph(state_schema=MessagesState)\n", "\n", - " return True\n", "\n", - "\n", - "chain_with_summarization = (\n", - " RunnablePassthrough.assign(messages_summarized=summarize_messages)\n", - " | chain_with_message_history\n", - ")" + "# Define the function that calls the model\n", + "def call_model(state: MessagesState):\n", + " system_prompt = (\n", + " \"You are a helpful assistant. \"\n", + " \"Answer all questions to the best of your ability. \"\n", + " \"The provided chat history includes a summary of the earlier conversation.\"\n", + " )\n", + " system_message = SystemMessage(content=system_prompt)\n", + " message_history = state[\"messages\"][:-1] # exclude the most recent user input\n", + " # Summarize the messages if the chat history reaches a certain size\n", + " if len(message_history) >= 4:\n", + " last_human_message = state[\"messages\"][-1]\n", + " # Invoke the model to generate conversation summary\n", + " summary_prompt = (\n", + " \"Distill the above chat messages into a single summary message. \"\n", + " \"Include as many specific details as you can.\"\n", + " )\n", + " summary_message = model.invoke(\n", + " message_history + [HumanMessage(content=summary_prompt)]\n", + " )\n", + "\n", + " # Delete messages that we no longer want to show up\n", + " delete_messages = [RemoveMessage(id=m.id) for m in state[\"messages\"]]\n", + " # Re-add user message\n", + " human_message = HumanMessage(content=last_human_message.content)\n", + " # Call the model with summary & response\n", + " response = model.invoke([system_message, summary_message, human_message])\n", + " message_updates = [summary_message, human_message, response] + delete_messages\n", + " else:\n", + " message_updates = model.invoke([system_message] + state[\"messages\"])\n", + "\n", + " return {\"messages\": message_updates}\n", + "\n", + "\n", + "# Define the node and edge\n", + "workflow.add_node(\"model\", call_model)\n", + "workflow.add_edge(START, \"model\")\n", + "\n", + "# Add simple in-memory checkpointer\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { @@ -712,54 +487,37 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='You introduced yourself as Nemo. 
How can I assist you today, Nemo?')" + "{'messages': [AIMessage(content=\"Nemo greeted me, and I responded positively, indicating that I'm doing well.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 16, 'prompt_tokens': 60, 'total_tokens': 76, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-ee42f98d-907d-4bad-8f16-af2db789701d-0', usage_metadata={'input_tokens': 60, 'output_tokens': 16, 'total_tokens': 76}),\n", + " HumanMessage(content='What did I say my name was?', additional_kwargs={}, response_metadata={}, id='788555ea-5b1f-4c29-a2f2-a92f15d147be'),\n", + " AIMessage(content='You mentioned that your name is Nemo.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 67, 'total_tokens': 75, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-099a43bd-a284-4969-bb6f-0be486614cd8-0', usage_metadata={'input_tokens': 67, 'output_tokens': 8, 'total_tokens': 75})]}" ] }, - "execution_count": 20, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "chain_with_summarization.invoke(\n", - " {\"input\": \"What did I say my name was?\"},\n", - " {\"configurable\": {\"session_id\": \"unused\"}},\n", + "app.invoke(\n", + " {\n", + " \"messages\": demo_ephemeral_chat_history\n", + " + [HumanMessage(\"What did I say my name was?\")]\n", + " },\n", + " config={\"configurable\": {\"thread_id\": \"4\"}},\n", ")" ] }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[AIMessage(content='The conversation is between Nemo and an AI. Nemo introduces himself and the AI responds with a greeting. Nemo then asks the AI how it is doing, and the AI responds that it is fine.'),\n", - " HumanMessage(content='What did I say my name was?'),\n", - " AIMessage(content='You introduced yourself as Nemo. How can I assist you today, Nemo?')]" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "demo_ephemeral_chat_history.messages" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Note that invoking the chain again will generate another summary generated from the initial summary plus new messages and so on. You could also design a hybrid approach where a certain number of messages are retained in chat history while others are summarized." + "Note that invoking the app again will keep accumulating the history until it reaches the specified number of messages (four in our case). At that point we will generate another summary generated from the initial summary plus new messages and so on." 
] } ], @@ -779,7 +537,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/how_to/chatbots_tools.ipynb b/docs/docs/how_to/chatbots_tools.ipynb index 95125b15bb001..4bbd425579609 100644 --- a/docs/docs/how_to/chatbots_tools.ipynb +++ b/docs/docs/how_to/chatbots_tools.ipynb @@ -18,25 +18,49 @@ "\n", "This section will cover how to create conversational agents: chatbots that can interact with other systems and APIs using tools.\n", "\n", + ":::note\n", + "\n", + "This how-to guide previously built a chatbot using [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html). You can access this version of the guide in the [v0.2 docs](https://python.langchain.com/v0.2/docs/how_to/chatbots_tools/).\n", + "\n", + "As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into new LangChain applications.\n", + "\n", + "If your code is already relying on `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do **not** need to make any changes. We do not plan on deprecating this functionality in the near future as it works for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected.\n", + "\n", + "Please see [How to migrate to LangGraph Memory](/docs/versions/migrating_memory/) for more details.\n", + ":::\n", + "\n", "## Setup\n", "\n", - "For this guide, we'll be using a [tool calling agent](/docs/how_to/agent_executor) with a single tool for searching the web. The default will be powered by [Tavily](/docs/integrations/tools/tavily_search), but you can switch it out for any similar tool. The rest of this section will assume you're using Tavily.\n", + "For this guide, we'll be using a [tool calling agent](https://langchain-ai.github.io/langgraph/concepts/agentic_concepts/#tool-calling-agent) with a single tool for searching the web. The default will be powered by [Tavily](/docs/integrations/tools/tavily_search), but you can switch it out for any similar tool. 
The rest of this section will assume you're using Tavily.\n", "\n", "You'll need to [sign up for an account](https://tavily.com/) on the Tavily website, and install the following packages:" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "OpenAI API Key: ········\n", + "Tavily API Key: ········\n" + ] + } + ], "source": [ - "%pip install --upgrade --quiet langchain-community langchain-openai tavily-python\n", + "%pip install --upgrade --quiet langchain-community langchain-openai tavily-python langgraph\n", + "\n", + "import getpass\n", + "import os\n", "\n", - "# Set env var OPENAI_API_KEY or load from a .env file:\n", - "import dotenv\n", + "if not os.environ.get(\"OPENAI_API_KEY\"):\n", + " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", - "dotenv.load_dotenv()" + "if not os.environ.get(\"TAVILY_API_KEY\"):\n", + " os.environ[\"TAVILY_API_KEY\"] = getpass.getpass(\"Tavily API Key:\")" ] }, { @@ -70,14 +94,14 @@ "\n", "# Choose the LLM that will drive the agent\n", "# Only certain models support this\n", - "chat = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)" + "model = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To make our agent conversational, we must also choose a prompt with a placeholder for our chat history. Here's an example:" + "To make our agent conversational, we can also specify a prompt. Here's an example:" ] }, { @@ -86,18 +110,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.prompts import ChatPromptTemplate\n", - "\n", - "# Adapted from https://smith.langchain.com/hub/jacob/tool-calling-agent\n", - "prompt = ChatPromptTemplate.from_messages(\n", - " [\n", - " (\n", - " \"system\",\n", - " \"You are a helpful assistant. You may not need to use tools for every query - the user may just want to chat!\",\n", - " ),\n", - " (\"placeholder\", \"{messages}\"),\n", - " (\"placeholder\", \"{agent_scratchpad}\"),\n", - " ]\n", + "prompt = (\n", + " \"You are a helpful assistant. \"\n", + " \"You may not need to use tools for every query - the user may just want to chat!\"\n", ")" ] }, @@ -105,7 +120,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Great! Now let's assemble our agent:" + "Great! 
Now let's assemble our agent using LangGraph's prebuilt [create_react_agent](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent), which allows you to create a [tool-calling agent](https://langchain-ai.github.io/langgraph/concepts/agentic_concepts/#tool-calling-agent):" ] }, { @@ -114,11 +129,12 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentExecutor, create_tool_calling_agent\n", - "\n", - "agent = create_tool_calling_agent(chat, tools, prompt)\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", - "agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)" + "# state_modifier allows you to preprocess the inputs to the model inside ReAct agent\n", + "# in this case, since we're passing a prompt string, we'll just always add a SystemMessage\n", + "# with this prompt string before any other messages sent to the model\n", + "agent = create_react_agent(model, tools, state_modifier=prompt)" ] }, { @@ -135,23 +151,11 @@ "execution_count": 5, "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", - "\u001b[32;1m\u001b[1;3mHello Nemo! It's great to meet you. How can I assist you today?\u001b[0m\n", - "\n", - "\u001b[1m> Finished chain.\u001b[0m\n" - ] - }, { "data": { "text/plain": [ - "{'messages': [HumanMessage(content=\"I'm Nemo!\")],\n", - " 'output': \"Hello Nemo! It's great to meet you. How can I assist you today?\"}" + "{'messages': [HumanMessage(content=\"I'm Nemo!\", additional_kwargs={}, response_metadata={}, id='39e715c7-bd1c-426f-8e14-c05586b3d221'),\n", + " AIMessage(content='Hi Nemo! How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 107, 'total_tokens': 118, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-6937c944-d702-40bb-9a9f-4141ddde9f78-0', usage_metadata={'input_tokens': 107, 'output_tokens': 11, 'total_tokens': 118})]}" ] }, "execution_count": 5, @@ -162,7 +166,7 @@ "source": [ "from langchain_core.messages import HumanMessage\n", "\n", - "agent_executor.invoke({\"messages\": [HumanMessage(content=\"I'm Nemo!\")]})" + "agent.invoke({\"messages\": [HumanMessage(content=\"I'm Nemo!\")]})" ] }, { @@ -177,29 +181,13 @@ "execution_count": 6, "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", - "\u001b[32;1m\u001b[1;3m\n", - "Invoking: `tavily_search_results_json` with `{'query': 'current conservation status of the Great Barrier Reef'}`\n", - "\n", - "\n", - "\u001b[0m\u001b[36;1m\u001b[1;3m[{'url': 'https://www.abc.net.au/news/2022-08-04/great-barrier-reef-report-says-coral-recovering-after-bleaching/101296186', 'content': 'Great Barrier Reef hit with widespread and severe bleaching event\\n\\'Devastating\\': Over 90pc of reefs on Great Barrier Reef suffered bleaching over summer, report reveals\\nTop Stories\\nJailed Russian opposition leader Alexei Navalny is dead, says prison service\\nTaylor Swift puts an Aussie twist on a classic as she packs the MCG for the biggest show of her career — as it happened\\nMelbourne comes alive with Swifties, as even those without tickets turn up to soak in the atmosphere\\nAustralian Border Force 
investigates after arrival of more than 20 men by boat north of Broome\\nOpenAI launches video model that can instantly create short clips from text prompts\\nAntoinette Lattouf loses bid to force ABC to produce emails calling for her dismissal\\nCategory one cyclone makes landfall in Gulf of Carpentaria off NT-Queensland border\\nWhy the RBA may be forced to cut before the Fed\\nBrisbane records \\'wettest day since 2022\\', as woman dies in floodwaters near Mount Isa\\n$45m Sydney beachside home once owned by late radio star is demolished less than a year after sale\\nAnnabel Sutherland\\'s historic double century puts Australia within reach of Test victory over South Africa\\nAlmighty defensive effort delivers Indigenous victory in NRL All Stars clash\\nLisa Wilkinson feared she would have to sell home to pay legal costs of Bruce Lehrmann\\'s defamation case, court documents reveal\\nSupermarkets as you know them are disappearing from our cities\\nNRL issues Broncos\\' Reynolds, Carrigan with breach notices after public scrap\\nPopular Now\\nJailed Russian opposition leader Alexei Navalny is dead, says prison service\\nTaylor Swift puts an Aussie twist on a classic as she packs the MCG for the biggest show of her career — as it happened\\n$45m Sydney beachside home once owned by late radio star is demolished less than a year after sale\\nAustralian Border Force investigates after arrival of more than 20 men by boat north of Broome\\nDealer sentenced for injecting children as young as 12 with methylamphetamine\\nMelbourne comes alive with Swifties, as even those without tickets turn up to soak in the atmosphere\\nTop Stories\\nJailed Russian opposition leader Alexei Navalny is dead, says prison service\\nTaylor Swift puts an Aussie twist on a classic as she packs the MCG for the biggest show of her career — as it happened\\nMelbourne comes alive with Swifties, as even those without tickets turn up to soak in the atmosphere\\nAustralian Border Force investigates after arrival of more than 20 men by boat north of Broome\\nOpenAI launches video model that can instantly create short clips from text prompts\\nJust In\\nJailed Russian opposition leader Alexei Navalny is dead, says prison service\\nMelbourne comes alive with Swifties, as even those without tickets turn up to soak in the atmosphere\\nTraveller alert after one-year-old in Adelaide reported with measles\\nAntoinette Lattouf loses bid to force ABC to produce emails calling for her dismissal\\nFooter\\nWe acknowledge Aboriginal and Torres Strait Islander peoples as the First Australians and Traditional Custodians of the lands where we live, learn, and work.\\n Increased coral cover could come at a cost\\nThe rapid growth in coral cover appears to have come at the expense of the diversity of coral on the reef, with most of the increases accounted for by fast-growing branching coral called Acropora.\\n Documents obtained by the ABC under Freedom of Information laws revealed the Morrison government had forced AIMS to rush the report\\'s release and orchestrated a \"leak\" of the material to select media outlets ahead of the reef being considered for inclusion on the World Heritage In Danger list.\\n The reef\\'s status and potential inclusion on the In Danger list were due to be discussed at the 45th session of the World Heritage Committee in Russia in June this year, but the meeting was indefinitely postponed due to the war in Ukraine.\\n More from ABC\\nEditorial Policies\\nGreat Barrier Reef coral cover at record levels after 
mass-bleaching events, report shows\\nGreat Barrier Reef coral cover at record levels after mass-bleaching events, report shows\\nRecord coral cover is being seen across much of the Great Barrier Reef as it recovers from past storms and mass-bleaching events.'}]\u001b[0m\u001b[32;1m\u001b[1;3mThe Great Barrier Reef is currently showing signs of recovery, with record coral cover being seen across much of the reef. This recovery comes after past storms and mass-bleaching events. However, the rapid growth in coral cover appears to have come at the expense of the diversity of coral on the reef, with most of the increases accounted for by fast-growing branching coral called Acropora. There were discussions about the reef's potential inclusion on the World Heritage In Danger list, but the meeting to consider this was indefinitely postponed due to the war in Ukraine.\n", - "\n", - "You can read more about it in this article: [Great Barrier Reef hit with widespread and severe bleaching event](https://www.abc.net.au/news/2022-08-04/great-barrier-reef-report-says-coral-recovering-after-bleaching/101296186)\u001b[0m\n", - "\n", - "\u001b[1m> Finished chain.\u001b[0m\n" - ] - }, { "data": { "text/plain": [ - "{'messages': [HumanMessage(content='What is the current conservation status of the Great Barrier Reef?')],\n", - " 'output': \"The Great Barrier Reef is currently showing signs of recovery, with record coral cover being seen across much of the reef. This recovery comes after past storms and mass-bleaching events. However, the rapid growth in coral cover appears to have come at the expense of the diversity of coral on the reef, with most of the increases accounted for by fast-growing branching coral called Acropora. There were discussions about the reef's potential inclusion on the World Heritage In Danger list, but the meeting to consider this was indefinitely postponed due to the war in Ukraine.\\n\\nYou can read more about it in this article: [Great Barrier Reef hit with widespread and severe bleaching event](https://www.abc.net.au/news/2022-08-04/great-barrier-reef-report-says-coral-recovering-after-bleaching/101296186)\"}" + "{'messages': [HumanMessage(content='What is the current conservation status of the Great Barrier Reef?', additional_kwargs={}, response_metadata={}, id='a74cc581-8ad5-4401-b3a5-f028d69e4b21'),\n", + " AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_aKOItwvAb4DHQCwaasKphGHq', 'function': {'arguments': '{\"query\":\"current conservation status of the Great Barrier Reef 2023\"}', 'name': 'tavily_search_results_json'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 116, 'total_tokens': 144, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-267ff8a8-d866-4ae5-9534-ad87ebbdc954-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'current conservation status of the Great Barrier Reef 2023'}, 'id': 'call_aKOItwvAb4DHQCwaasKphGHq', 'type': 'tool_call'}], usage_metadata={'input_tokens': 116, 'output_tokens': 28, 'total_tokens': 144}),\n", + " ToolMessage(content='[{\"url\": \"https://www.aims.gov.au/monitoring-great-barrier-reef/gbr-condition-summary-2023-24\", \"content\": \"This report summarises the condition of coral reefs in the Northern, Central and Southern\\xa0Great Barrier Reef (GBR) from the Long-Term Monitoring Program 
(LTMP) surveys of 94 reefs conducted between August\\xa02023 and June 2024 (reported as ‘2024’). Over the past 38 years of monitoring by the Australian Institute of Marine Science (AIMS), hard coral cover on reefs of the GBR has decreased and increased in response to cycles of disturbance and recovery. It is relatively rare for GBR reefs to have 75% to 100% hard coral cover and AIMS defines >30% – 50% hard coral cover as a high value, based on historical surveys across the GBR.\"}]', name='tavily_search_results_json', id='05b3fab7-9ac8-42bb-9612-ff2a896dbb67', tool_call_id='call_aKOItwvAb4DHQCwaasKphGHq', artifact={'query': 'current conservation status of the Great Barrier Reef 2023', 'follow_up_questions': None, 'answer': None, 'images': [], 'results': [{'title': 'Annual Summary Report of Coral Reef Condition 2023/24', 'url': 'https://www.aims.gov.au/monitoring-great-barrier-reef/gbr-condition-summary-2023-24', 'content': 'This report summarises the condition of coral reefs in the Northern, Central and Southern\\xa0Great Barrier Reef (GBR) from the Long-Term Monitoring Program (LTMP) surveys of 94 reefs conducted between August\\xa02023 and June 2024 (reported as ‘2024’). Over the past 38 years of monitoring by the Australian Institute of Marine Science (AIMS), hard coral cover on reefs of the GBR has decreased and increased in response to cycles of disturbance and recovery. It is relatively rare for GBR reefs to have 75% to 100% hard coral cover and AIMS defines >30% – 50% hard coral cover as a high value, based on historical surveys across the GBR.', 'score': 0.95991266, 'raw_content': None}], 'response_time': 4.22}),\n", + " AIMessage(content='The current conservation status of the Great Barrier Reef (GBR) indicates ongoing challenges and fluctuations in coral health. According to a report from the Australian Institute of Marine Science (AIMS), the condition of coral reefs in the GBR has been monitored over the years, showing cycles of disturbance and recovery. \\n\\nAs of the latest surveys conducted between August 2023 and June 2024, hard coral cover on the GBR has experienced both decreases and increases. AIMS defines a hard coral cover of over 30% to 50% as high value, but it is relatively rare for GBR reefs to achieve 75% to 100% hard coral cover.\\n\\nFor more detailed information, you can refer to the [AIMS report](https://www.aims.gov.au/monitoring-great-barrier-reef/gbr-condition-summary-2023-24).', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 174, 'prompt_tokens': 337, 'total_tokens': 511, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-bec32925-0dba-445d-8b55-87358ef482bb-0', usage_metadata={'input_tokens': 337, 'output_tokens': 174, 'total_tokens': 511})]}" ] }, "execution_count": 6, @@ -208,7 +196,7 @@ } ], "source": [ - "agent_executor.invoke(\n", + "agent.invoke(\n", " {\n", " \"messages\": [\n", " HumanMessage(\n", @@ -233,25 +221,13 @@ "execution_count": 7, "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", - "\u001b[32;1m\u001b[1;3mYour name is Nemo!\u001b[0m\n", - "\n", - "\u001b[1m> Finished chain.\u001b[0m\n" - ] - }, { "data": { "text/plain": [ - "{'messages': [HumanMessage(content=\"I'm Nemo!\"),\n", - " AIMessage(content='Hello Nemo! 
How can I assist you today?'),\n", - " HumanMessage(content='What is my name?')],\n", - " 'output': 'Your name is Nemo!'}" + "{'messages': [HumanMessage(content=\"I'm Nemo!\", additional_kwargs={}, response_metadata={}, id='2c8e58bf-ad20-45a4-940b-84393c6b3a03'),\n", + " AIMessage(content='Hello Nemo! How can I assist you today?', additional_kwargs={}, response_metadata={}, id='5e014114-7e9d-42c3-b63e-a662b3a49bef'),\n", + " HumanMessage(content='What is my name?', additional_kwargs={}, response_metadata={}, id='d92be4e1-6497-4037-9a9a-83d3e7b760d5'),\n", + " AIMessage(content='Your name is Nemo!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 130, 'total_tokens': 136, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-17db96f8-8dbd-4f25-a80d-e4e872967641-0', usage_metadata={'input_tokens': 130, 'output_tokens': 6, 'total_tokens': 136})]}" ] }, "execution_count": 7, @@ -262,7 +238,7 @@ "source": [ "from langchain_core.messages import AIMessage, HumanMessage\n", "\n", - "agent_executor.invoke(\n", + "agent.invoke(\n", " {\n", " \"messages\": [\n", " HumanMessage(content=\"I'm Nemo!\"),\n", @@ -277,7 +253,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If preferred, you can also wrap the agent executor in a [`RunnableWithMessageHistory`](/docs/how_to/message_history/) class to internally manage history messages. Let's redeclare it this way:" + "If preferred, you can also add memory to the LangGraph agent to manage the history of messages. Let's redeclare it this way:" ] }, { @@ -286,63 +262,35 @@ "metadata": {}, "outputs": [], "source": [ - "agent = create_tool_calling_agent(chat, tools, prompt)\n", + "from langgraph.checkpoint.memory import MemorySaver\n", "\n", - "agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then, because our agent executor has multiple outputs, we also have to set the `output_messages_key` property when initializing the wrapper:" + "# highlight-start\n", + "memory = MemorySaver()\n", + "agent = create_react_agent(model, tools, state_modifier=prompt, checkpointer=memory)\n", + "# highlight-end" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 9, "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", - "\u001b[32;1m\u001b[1;3mHi Nemo! It's great to meet you. How can I assist you today?\u001b[0m\n", - "\n", - "\u001b[1m> Finished chain.\u001b[0m\n" - ] - }, { "data": { "text/plain": [ - "{'messages': [HumanMessage(content=\"I'm Nemo!\")],\n", - " 'output': \"Hi Nemo! It's great to meet you. How can I assist you today?\"}" + "{'messages': [HumanMessage(content=\"I'm Nemo!\", additional_kwargs={}, response_metadata={}, id='117b2cfc-c6cc-449c-bba9-26fc545d0afa'),\n", + " AIMessage(content='Hi Nemo! 
How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 107, 'total_tokens': 118, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-ba16cc0b-fba1-4ec5-9d99-e010c3b702d0-0', usage_metadata={'input_tokens': 107, 'output_tokens': 11, 'total_tokens': 118})]}" ] }, - "execution_count": 11, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from langchain_community.chat_message_histories import ChatMessageHistory\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", - "\n", - "demo_ephemeral_chat_history_for_chain = ChatMessageHistory()\n", - "\n", - "conversational_agent_executor = RunnableWithMessageHistory(\n", - " agent_executor,\n", - " lambda session_id: demo_ephemeral_chat_history_for_chain,\n", - " input_messages_key=\"messages\",\n", - " output_messages_key=\"output\",\n", - ")\n", - "\n", - "conversational_agent_executor.invoke(\n", + "agent.invoke(\n", " {\"messages\": [HumanMessage(\"I'm Nemo!\")]},\n", - " {\"configurable\": {\"session_id\": \"unused\"}},\n", + " config={\"configurable\": {\"thread_id\": \"1\"}},\n", ")" ] }, @@ -355,39 +303,27 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", - "\u001b[32;1m\u001b[1;3mYour name is Nemo! How can I assist you today, Nemo?\u001b[0m\n", - "\n", - "\u001b[1m> Finished chain.\u001b[0m\n" - ] - }, { "data": { "text/plain": [ - "{'messages': [HumanMessage(content=\"I'm Nemo!\"),\n", - " AIMessage(content=\"Hi Nemo! It's great to meet you. How can I assist you today?\"),\n", - " HumanMessage(content='What is my name?')],\n", - " 'output': 'Your name is Nemo! How can I assist you today, Nemo?'}" + "{'messages': [HumanMessage(content=\"I'm Nemo!\", additional_kwargs={}, response_metadata={}, id='117b2cfc-c6cc-449c-bba9-26fc545d0afa'),\n", + " AIMessage(content='Hi Nemo! 
How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 107, 'total_tokens': 118, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-ba16cc0b-fba1-4ec5-9d99-e010c3b702d0-0', usage_metadata={'input_tokens': 107, 'output_tokens': 11, 'total_tokens': 118}),\n", + " HumanMessage(content='What is my name?', additional_kwargs={}, response_metadata={}, id='53ac8d34-99bb-43a7-9103-80e26b7ee6cc'),\n", + " AIMessage(content='Your name is Nemo!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 130, 'total_tokens': 136, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-b3f224a5-902a-4973-84ff-9b683615b0e2-0', usage_metadata={'input_tokens': 130, 'output_tokens': 6, 'total_tokens': 136})]}" ] }, - "execution_count": 11, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "conversational_agent_executor.invoke(\n", + "agent.invoke(\n", " {\"messages\": [HumanMessage(\"What is my name?\")]},\n", - " {\"configurable\": {\"session_id\": \"unused\"}},\n", + " config={\"configurable\": {\"thread_id\": \"1\"}},\n", ")" ] }, @@ -395,11 +331,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This [LangSmith trace](https://smith.langchain.com/public/1a9f712a-7918-4661-b3ff-d979bcc2af42/r) shows what's going on under the hood.\n", + "This [LangSmith trace](https://smith.langchain.com/public/9e6b000d-08aa-4c5a-ac83-2fdf549523cb/r) shows what's going on under the hood.\n", "\n", "## Further reading\n", "\n", - "Other types agents can also support conversational responses too - for more, check out the [agents section](/docs/tutorials/agents).\n", + "For more on how to build agents, check these [LangGraph](https://langchain-ai.github.io/langgraph/) guides:\n", + "\n", + "* [agents conceptual guide](https://langchain-ai.github.io/langgraph/concepts/agentic_concepts/)\n", + "* [agents tutorials](https://langchain-ai.github.io/langgraph/tutorials/multi_agent/multi-agent-collaboration/)\n", + "* [create_react_agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/)\n", "\n", "For more on tool usage, you can also check out [this use case section](/docs/how_to#tools)." 
] @@ -421,9 +361,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.5" + "version": "3.11.4" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/docs/docs/how_to/message_history.ipynb b/docs/docs/how_to/message_history.ipynb index ec843eab8bf3a..9d0d8f44b60d7 100644 --- a/docs/docs/how_to/message_history.ipynb +++ b/docs/docs/how_to/message_history.ipynb @@ -24,136 +24,31 @@ ":::info Prerequisites\n", "\n", "This guide assumes familiarity with the following concepts:\n", - "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n", "- [Chaining runnables](/docs/how_to/sequence/)\n", - "- [Configuring chain parameters at runtime](/docs/how_to/configure)\n", "- [Prompt templates](/docs/concepts/#prompt-templates)\n", "- [Chat Messages](/docs/concepts/#message-types)\n", + "- [LangGraph persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/)\n", "\n", ":::\n", "\n", - "Passing conversation state into and out a chain is vital when building a chatbot. The [`RunnableWithMessageHistory`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) class lets us add message history to certain types of chains. It wraps another Runnable and manages the chat message history for it. Specifically, it loads previous messages in the conversation BEFORE passing it to the Runnable, and it saves the generated response as a message AFTER calling the runnable. This class also enables multiple conversations by saving each conversation with a `session_id` - it then expects a `session_id` to be passed in the config when calling the runnable, and uses that to look up the relevant conversation history.\n", + ":::note\n", "\n", - "![index_diagram](../../static/img/message_history.png)\n", + "This guide previously covered the [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) abstraction. You can access this version of the guide in the [v0.2 docs](https://python.langchain.com/v0.2/docs/how_to/message_history/).\n", "\n", - "In practice this looks something like:\n", + "As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into new LangChain applications.\n", "\n", - "```python\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", + "If your code is already relying on `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do **not** need to make any changes. We do not plan on deprecating this functionality in the near future as it works for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected.\n", "\n", + "Please see [How to migrate to LangGraph Memory](/docs/versions/migrating_memory/) for more details.\n", + ":::\n", "\n", - "with_message_history = RunnableWithMessageHistory(\n", - " # The underlying runnable\n", - " runnable, \n", - " # A function that takes in a session id and returns a memory object\n", - " get_session_history, \n", - " # Other parameters that may be needed to align the inputs/outputs\n", - " # of the Runnable with the memory object\n", - " ... 
\n", - ")\n", - "\n", - "with_message_history.invoke(\n", - " # The same input as before\n", - " {\"ability\": \"math\", \"input\": \"What does cosine mean?\"},\n", - " # Configuration specifying the `session_id`,\n", - " # which controls which conversation to load\n", - " config={\"configurable\": {\"session_id\": \"abc123\"}},\n", - ")\n", - "```\n", - "\n", - "\n", - "In order to properly set this up there are two main things to consider:\n", - "\n", - "1. How to store and load messages? (this is `get_session_history` in the example above)\n", - "2. What is the underlying Runnable you are wrapping and what are its inputs/outputs? (this is `runnable` in the example above, as well any additional parameters you pass to `RunnableWithMessageHistory` to align the inputs/outputs)\n", - "\n", - "Let's walk through these pieces (and more) below." - ] - }, - { - "cell_type": "markdown", - "id": "734123cb", - "metadata": {}, - "source": [ - "## How to store and load messages\n", - "\n", - "A key part of this is storing and loading messages.\n", - "When constructing `RunnableWithMessageHistory` you need to pass in a `get_session_history` function.\n", - "This function should take in a `session_id` and return a `BaseChatMessageHistory` object.\n", - "\n", - "**What is `session_id`?** \n", - "\n", - "`session_id` is an identifier for the session (conversation) thread that these input messages correspond to. This allows you to maintain several conversations/threads with the same chain at the same time.\n", - "\n", - "**What is `BaseChatMessageHistory`?** \n", - "\n", - "`BaseChatMessageHistory` is a class that can load and save message objects. It will be called by `RunnableWithMessageHistory` to do exactly that. These classes are usually initialized with a session id.\n", - "\n", - "Let's create a `get_session_history` object to use for this example. To keep things simple, we will use a simple SQLiteMessage" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "e8210560", - "metadata": {}, - "outputs": [], - "source": [ - "! rm memory.db" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "27f36241", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_community.chat_message_histories import SQLChatMessageHistory\n", - "\n", - "\n", - "def get_session_history(session_id):\n", - " return SQLChatMessageHistory(session_id, \"sqlite:///memory.db\")" - ] - }, - { - "cell_type": "markdown", - "id": "c200cb3a", - "metadata": {}, - "source": [ - "Check out the [memory integrations](https://integrations.langchain.com/memory) page for implementations of chat message histories using other providers (Redis, Postgres, etc)." - ] - }, - { - "cell_type": "markdown", - "id": "a531da5e", - "metadata": {}, - "source": [ - "## What is the runnable you are trying to wrap?\n", - "\n", - "`RunnableWithMessageHistory` can only wrap certain types of Runnables. Specifically, it can be used for any Runnable that takes as input one of:\n", + "Passing conversation state into and out a chain is vital when building a chatbot. LangGraph implements a built-in persistence layer, allowing chain states to be automatically persisted in memory, or external backends such as SQLite, Postgres or Redis. 
Details can be found in the LangGraph [persistence documentation](https://langchain-ai.github.io/langgraph/how-tos/persistence/).\n", "\n", - "* a sequence of [`BaseMessages`](/docs/concepts/#message-types)\n", - "* a dict with a key that takes a sequence of `BaseMessages`\n", - "* a dict with a key that takes the latest message(s) as a string or sequence of `BaseMessages`, and a separate key that takes historical messages\n", + "In this guide we demonstrate how to add persistence to arbitrary LangChain runnables by wrapping them in a minimal LangGraph application. This lets us persist the message history and other elements of the chain's state, simplifying the development of multi-turn applications. It also supports multiple threads, enabling a single application to interact separately with multiple users.\n", "\n", - "And returns as output one of\n", + "## Setup\n", "\n", - "* a string that can be treated as the contents of an `AIMessage`\n", - "* a sequence of `BaseMessage`\n", - "* a dict with a key that contains a sequence of `BaseMessage`\n", - "\n", - "Let's take a look at some examples to see how it works. " - ] - }, - { - "cell_type": "markdown", - "id": "6a4becbd-238e-4c1d-a02d-08e61fbc3763", - "metadata": {}, - "source": [ - "### Setup\n", - "\n", - "First we construct a runnable (which here accepts a dict as input and returns a message as output):\n", + "Let's initialize a chat model:\n", "\n", "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", @@ -164,8 +59,8 @@ }, { "cell_type": "code", - "execution_count": 3, - "id": "6489f585", + "execution_count": 1, + "id": "ca50d084-ae4b-4aea-9eb7-2ebc699df9bc", "metadata": {}, "outputs": [], "source": [ @@ -180,704 +75,382 @@ "# os.environ[\"ANTHROPIC_API_KEY\"] = getpass()\n", "from langchain_anthropic import ChatAnthropic\n", "\n", - "model = ChatAnthropic(model=\"claude-3-haiku-20240307\", temperature=0)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2ed413b4-33a1-48ee-89b0-2d4917ec101a", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_core.messages import HumanMessage\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory" + "llm = ChatAnthropic(model=\"claude-3-haiku-20240307\", temperature=0)" ] }, { "cell_type": "markdown", - "id": "e8816b01", + "id": "1f6121bc-2080-4ccc-acf0-f77de4bc951d", "metadata": {}, "source": [ - "### Messages input, message(s) output\n", + "## Example: message inputs\n", + "\n", + "Adding memory to a [chat model](/docs/concepts/#chat-models) provides a simple example. Chat models accept a list of messages as input and output a message. LangGraph includes a built-in `MessagesState` that we can use for this purpose.\n", "\n", - "The simplest form is just adding memory to a ChatModel.\n", - "ChatModels accept a list of messages as input and output a message.\n", - "This makes it very easy to use `RunnableWithMessageHistory` - no additional configuration is needed!" + "Below, we:\n", + "1. Define the graph state to be a list of messages;\n", + "2. Add a single node to the graph that calls a chat model;\n", + "3. Compile the graph with an in-memory checkpointer to store messages between runs.\n", + "\n", + ":::info\n", + "\n", + "The output of a LangGraph application is its [state](https://langchain-ai.github.io/langgraph/concepts/low_level/). 
This can be any Python type, but in this context it will typically be a `TypedDict` that matches the schema of your runnable.\n", + "\n", + ":::" ] }, { "cell_type": "code", - "execution_count": 5, - "id": "0521d551", + "execution_count": 2, + "id": "f691a73a-a866-4354-9fff-8315605e2b8f", "metadata": {}, "outputs": [], "source": [ - "runnable_with_history = RunnableWithMessageHistory(\n", - " model,\n", - " get_session_history,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "d5142e1a", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage(content=\"It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. How can I help you today?\", response_metadata={'id': 'msg_01UHCCMiZz9yNYjt41xUJrtk', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-55f6a451-606b-4e04-9e39-e03b81035c1f-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "runnable_with_history.invoke(\n", - " [HumanMessage(content=\"hi - im bob!\")],\n", - " config={\"configurable\": {\"session_id\": \"1\"}},\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "768e0c12", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage(content='I\\'m afraid I don\\'t actually know your name - you introduced yourself as Bob, but I don\\'t have any other information about your identity. As an AI assistant, I don\\'t have a way to independently verify people\\'s names or identities. I\\'m happy to continue our conversation, but I\\'ll just refer to you as \"Bob\" since that\\'s the name you provided.', response_metadata={'id': 'msg_018L96tAxiexMKsHBQz22CcE', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-7399ddb5-bb06-444b-bfb2-2f65674105dd-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "runnable_with_history.invoke(\n", - " [HumanMessage(content=\"whats my name?\")],\n", - " config={\"configurable\": {\"session_id\": \"1\"}},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "9d942227", - "metadata": {}, - "source": [ - ":::info\n", + "from langchain_core.messages import HumanMessage\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.graph import START, MessagesState, StateGraph\n", "\n", - "Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n", + "# Define a new graph\n", + "workflow = StateGraph(state_schema=MessagesState)\n", "\n", - ":::\n", "\n", - "We can now try this with a new session id and see that it does not remember." - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "addddd03", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage(content=\"I'm afraid I don't actually know your name. 
As an AI assistant, I don't have personal information about you unless you provide it to me directly.\", response_metadata={'id': 'msg_01LhbWu7mSKTvKAx7iQpMPzd', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-cf86cad2-21f2-4525-afc8-09bfd1e8af70-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "runnable_with_history.invoke(\n", - " [HumanMessage(content=\"whats my name?\")],\n", - " config={\"configurable\": {\"session_id\": \"1a\"}},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "8b26a0c0", - "metadata": {}, - "source": [ - ":::info \n", + "# Define the function that calls the model\n", + "def call_model(state: MessagesState):\n", + " response = llm.invoke(state[\"messages\"])\n", + " # Update message history with response:\n", + " return {\"messages\": response}\n", "\n", - "When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. \n", "\n", - ":::" + "# Define the (single) node in the graph\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "\n", + "# Add memory\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { "cell_type": "markdown", - "id": "e5bb5c7c", + "id": "c0b396a8-f81e-4139-b4b2-75adf61d8179", "metadata": {}, "source": [ - "### Dictionary input, message(s) output\n", - "\n", - "Besides just wrapping a raw model, the next step up is wrapping a prompt + LLM. This now changes the input to be a **dictionary** (because the input to a prompt is a dictionary). This adds two bits of complication.\n", - "\n", - "First: a dictionary can have multiple keys, but we only want to save ONE as input. In order to do this, we now now need to specify a key to save as the input.\n", - "\n", - "Second: once we load the messages, we need to know how to save them to the dictionary. That equates to know which key in the dictionary to save them in. Therefore, we need to specify a key to save the loaded messages in.\n", - "\n", - "Putting it all together, that ends up looking something like:" + "When we run the application, we pass in a configuration `dict` that specifies a `thread_id`. This ID is used to distinguish conversational threads (e.g., between different users)." ] }, { "cell_type": "code", - "execution_count": 15, - "id": "34edd990", + "execution_count": 3, + "id": "e4309511-2140-4d91-8f5f-ea3661e6d179", "metadata": {}, "outputs": [], "source": [ - "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "\n", - "prompt = ChatPromptTemplate.from_messages(\n", - " [\n", - " (\n", - " \"system\",\n", - " \"You're an assistant who speaks in {language}. 
Respond in 20 words or fewer\",\n", - " ),\n", - " MessagesPlaceholder(variable_name=\"history\"),\n", - " (\"human\", \"{input}\"),\n", - " ]\n", - ")\n", - "\n", - "runnable = prompt | model\n", - "\n", - "runnable_with_history = RunnableWithMessageHistory(\n", - " runnable,\n", - " get_session_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"history\",\n", - ")" + "config = {\"configurable\": {\"thread_id\": \"abc123\"}}" ] }, { "cell_type": "markdown", - "id": "c0baa075", + "id": "108c45a2-4971-4120-ba64-9a4305a414bb", "metadata": {}, "source": [ - ":::info\n", - "\n", - "Note that we've specified `input_messages_key` (the key to be treated as the latest input message) and `history_messages_key` (the key to add historical messages to).\n", - "\n", - ":::" + "We can then invoke the application:" ] }, { "cell_type": "code", - "execution_count": 16, - "id": "5877544f", + "execution_count": 4, + "id": "72a5ff6c-501f-4151-8dd9-f600f70554be", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage(content='Ciao Bob! È un piacere conoscerti. Come stai oggi?', response_metadata={'id': 'msg_0121ADUEe4G1hMC6zbqFWofr', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 23}}, id='run-246a70df-aad6-43d6-a7e8-166d96e0d67e-0', usage_metadata={'input_tokens': 29, 'output_tokens': 23, 'total_tokens': 52})" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. How can I help you today?\n" + ] } ], "source": [ - "runnable_with_history.invoke(\n", - " {\"language\": \"italian\", \"input\": \"hi im bob!\"},\n", - " config={\"configurable\": {\"session_id\": \"2\"}},\n", - ")" + "query = \"Hi! 
I'm Bob.\"\n", + "\n", + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke({\"messages\": input_messages}, config)\n", + "output[\"messages\"][-1].pretty_print() # output contains all messages in state" ] }, { "cell_type": "code", - "execution_count": 17, - "id": "8605c2b1", + "execution_count": 5, + "id": "5931fb35-0fac-40e7-8ac6-b14cb4e926cd", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage(content='Bob, il tuo nome è Bob.', response_metadata={'id': 'msg_01EDUZG6nRLGeti9KhFN5cek', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 60, 'output_tokens': 12}}, id='run-294b4a72-81bc-4c43-b199-3aafdff87cb3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 12, 'total_tokens': 72})" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Your name is Bob, as you introduced yourself at the beginning of our conversation.\n" + ] } ], "source": [ - "runnable_with_history.invoke(\n", - " {\"language\": \"italian\", \"input\": \"whats my name?\"},\n", - " config={\"configurable\": {\"session_id\": \"2\"}},\n", - ")" + "query = \"What's my name?\"\n", + "\n", + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke({\"messages\": input_messages}, config)\n", + "output[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "markdown", - "id": "3ab7c09f", + "id": "91de6d12-881d-4d23-a421-f2e3bf829b79", "metadata": {}, "source": [ - ":::info\n", - "\n", - "Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n", - "\n", - ":::\n", - "\n", - "We can now try this with a new session id and see that it does not remember." + "Note that states are separated for different threads. If we issue the same query to a thread with a new `thread_id`, the model indicates that it does not know the answer:" ] }, { "cell_type": "code", - "execution_count": 19, - "id": "c7ddad6b", + "execution_count": 6, + "id": "6f12c26f-8913-4484-b2c5-b49eda2e6d7d", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage(content='Mi dispiace, non so il tuo nome. Come posso aiutarti?', response_metadata={'id': 'msg_01Lyd9FAGQJTxxAZoFi3sQpQ', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 30, 'output_tokens': 23}}, id='run-19a82197-3b1c-4b5f-a68d-f91f4a2ba523-0', usage_metadata={'input_tokens': 30, 'output_tokens': 23, 'total_tokens': 53})" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "I'm afraid I don't actually know your name. 
As an AI assistant, I don't have personal information about you unless you provide it to me directly.\n" + ] } ], "source": [ - "runnable_with_history.invoke(\n", - " {\"language\": \"italian\", \"input\": \"whats my name?\"},\n", - " config={\"configurable\": {\"session_id\": \"2a\"}},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "a05e6c12", - "metadata": {}, - "source": [ - ":::info \n", + "query = \"What's my name?\"\n", + "config = {\"configurable\": {\"thread_id\": \"abc234\"}}\n", "\n", - "When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. \n", - "\n", - ":::" + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke({\"messages\": input_messages}, config)\n", + "output[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "markdown", - "id": "717440a9", + "id": "6749ea95-3382-4843-bb96-cfececb9e4e5", "metadata": {}, "source": [ - "### Messages input, dict output\n", + "## Example: dictionary inputs\n", + "\n", + "LangChain runnables often accept multiple inputs via separate keys in a single `dict` argument. A common example is a prompt template with multiple parameters.\n", "\n", - "This format is useful when you are using a model to generate one key in a dictionary." + "Whereas before our runnable was a chat model, here we chain together a prompt template and chat model." ] }, { "cell_type": "code", - "execution_count": 20, - "id": "80b8efb0", + "execution_count": 7, + "id": "6e7a402a-0994-4fc5-a607-fb990a248aa4", "metadata": {}, "outputs": [], "source": [ - "from langchain_core.messages import HumanMessage\n", - "from langchain_core.runnables import RunnableParallel\n", - "\n", - "chain = RunnableParallel({\"output_message\": model})\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "\n", + "prompt = ChatPromptTemplate.from_messages(\n", + " [\n", + " (\"system\", \"Answer in {language}.\"),\n", + " MessagesPlaceholder(variable_name=\"messages\"),\n", + " ]\n", + ")\n", "\n", - "runnable_with_history = RunnableWithMessageHistory(\n", - " chain,\n", - " get_session_history,\n", - " output_messages_key=\"output_message\",\n", - ")" + "runnable = prompt | llm" ] }, { "cell_type": "markdown", - "id": "9040c535", + "id": "f83107bd-ae61-45e1-a57e-94ab043aad4b", "metadata": {}, "source": [ - ":::info\n", + "For this scenario, we define the graph state to include these parameters (in addition to the message history). We then define a single-node graph in the same way as before.\n", "\n", - "Note that we've specified `output_messages_key` (the key to be treated as the output to save).\n", - "\n", - ":::" + "Note that in the below state:\n", + "- Updates to the `messages` list will append messages;\n", + "- Updates to the `language` string will overwrite the string." ] }, { "cell_type": "code", - "execution_count": 21, - "id": "8b26a209", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'output_message': AIMessage(content=\"It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. 
How can I help you today?\", response_metadata={'id': 'msg_01WWJSyUyGGKuBqTs3h18ZMM', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-0f50cb43-a734-447c-b535-07c615a0984c-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})}" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "runnable_with_history.invoke(\n", - " [HumanMessage(content=\"hi - im bob!\")],\n", - " config={\"configurable\": {\"session_id\": \"3\"}},\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "743edcf8", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'output_message': AIMessage(content='I\\'m afraid I don\\'t actually know your name - you introduced yourself as Bob, but I don\\'t have any other information about your identity. As an AI assistant, I don\\'t have a way to independently verify people\\'s names or identities. I\\'m happy to continue our conversation, but I\\'ll just refer to you as \"Bob\" since that\\'s the name you provided.', response_metadata={'id': 'msg_01TEGrhfLXTwo36rC7svdTy4', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-178e8f3f-da21-430d-9edc-ef07797a5e2d-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})}" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "runnable_with_history.invoke(\n", - " [HumanMessage(content=\"whats my name?\")],\n", - " config={\"configurable\": {\"session_id\": \"3\"}},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "81efb7f1", + "execution_count": 8, + "id": "267429ea-be0f-4f80-8daf-c63d881a1436", "metadata": {}, + "outputs": [], "source": [ - ":::info\n", + "from typing import Sequence\n", "\n", - "Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n", + "from langchain_core.messages import BaseMessage\n", + "from langgraph.graph.message import add_messages\n", + "from typing_extensions import Annotated, TypedDict\n", "\n", - ":::\n", "\n", - "We can now try this with a new session id and see that it does not remember." - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "b8b04907", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'output_message': AIMessage(content=\"I'm afraid I don't actually know your name. 
As an AI assistant, I don't have personal information about you unless you provide it to me directly.\", response_metadata={'id': 'msg_0118ZBudDXAC9P6smf91NhCX', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-deb14a3a-0336-42b4-8ace-ad1e52ca5910-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})}" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "runnable_with_history.invoke(\n", - " [HumanMessage(content=\"whats my name?\")],\n", - " config={\"configurable\": {\"session_id\": \"3a\"}},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "6716a068", - "metadata": {}, - "source": [ - ":::info \n", + "# highlight-next-line\n", + "class State(TypedDict):\n", + " # highlight-next-line\n", + " messages: Annotated[Sequence[BaseMessage], add_messages]\n", + " # highlight-next-line\n", + " language: str\n", "\n", - "When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. \n", "\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "id": "ec4187d0", - "metadata": {}, - "source": [ - "### Dict with single key for all messages input, messages output\n", + "workflow = StateGraph(state_schema=State)\n", "\n", - "This is a specific case of \"Dictionary input, message(s) output\". In this situation, because there is only a single key we don't need to specify as much - we only need to specify the `input_messages_key`." - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "7530c4ed", - "metadata": {}, - "outputs": [], - "source": [ - "from operator import itemgetter\n", "\n", - "runnable_with_history = RunnableWithMessageHistory(\n", - " itemgetter(\"input_messages\") | model,\n", - " get_session_history,\n", - " input_messages_key=\"input_messages\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "def75152", - "metadata": {}, - "source": [ - ":::info\n", + "def call_model(state: State):\n", + " response = runnable.invoke(state)\n", + " # Update message history with response:\n", + " return {\"messages\": [response]}\n", "\n", - "Note that we've specified `input_messages_key` (the key to be treated as the latest input message).\n", "\n", - ":::" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "659bc1bf", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage(content=\"It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. 
How can I help you today?\", response_metadata={'id': 'msg_01UdD5wz1J5xwoz5D94onaQC', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-91bee6eb-0814-4557-ad71-fef9b0270358-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "runnable_with_history.invoke(\n", - " {\"input_messages\": [HumanMessage(content=\"hi - im bob!\")]},\n", - " config={\"configurable\": {\"session_id\": \"4\"}},\n", - ")" + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { "cell_type": "code", - "execution_count": 26, - "id": "6da2835e", + "execution_count": 9, + "id": "f3844fb4-58d7-43c8-b427-6d9f64d7411b", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage(content='I\\'m afraid I don\\'t actually know your name - you introduced yourself as Bob, but I don\\'t have any other information about your identity. As an AI assistant, I don\\'t have a way to independently verify people\\'s names or identities. I\\'m happy to continue our conversation, but I\\'ll just refer to you as \"Bob\" since that\\'s the name you provided.', response_metadata={'id': 'msg_012WUygxBKXcVJPeTW14LNrc', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-fcbaaa1a-8c33-4eec-b0b0-5b800a47bddd-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "¡Hola, Bob! Es un placer conocerte.\n" + ] } ], "source": [ - "runnable_with_history.invoke(\n", - " {\"input_messages\": [HumanMessage(content=\"whats my name?\")]},\n", - " config={\"configurable\": {\"session_id\": \"4\"}},\n", - ")" + "config = {\"configurable\": {\"thread_id\": \"abc345\"}}\n", + "\n", + "input_dict = {\n", + " \"messages\": [HumanMessage(\"Hi, I'm Bob.\")],\n", + " \"language\": \"Spanish\",\n", + "}\n", + "output = app.invoke(input_dict, config)\n", + "output[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "markdown", - "id": "d4c7a6f2", + "id": "7df47824-ef18-4a6e-a416-345ec9203f88", "metadata": {}, "source": [ - ":::info\n", - "\n", - "Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n", + "## Managing message history\n", "\n", - ":::\n", - "\n", - "We can now try this with a new session id and see that it does not remember." + "The message history (and other elements of the application state) can be accessed via `.get_state`:" ] }, { "cell_type": "code", - "execution_count": 27, - "id": "6cf6abd6", + "execution_count": 10, + "id": "1cbd6d82-43c1-4d11-98af-5c3ad9cd9b3b", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage(content=\"I'm afraid I don't actually know your name. 
As an AI assistant, I don't have personal information about you unless you provide it to me directly.\", response_metadata={'id': 'msg_017xW3Ki5y4UBYzCU9Mf1pgM', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-d2f372f7-3679-4a5c-9331-a55b820ec03e-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})" - ] - }, - "execution_count": 27, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Language: Spanish\n", + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "Hi, I'm Bob.\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "¡Hola, Bob! Es un placer conocerte.\n" + ] } ], "source": [ - "runnable_with_history.invoke(\n", - " {\"input_messages\": [HumanMessage(content=\"whats my name?\")]},\n", - " config={\"configurable\": {\"session_id\": \"4a\"}},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "9839a6d1", - "metadata": {}, - "source": [ - ":::info \n", - "\n", - "When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. \n", + "state = app.get_state(config).values\n", "\n", - ":::" + "print(f'Language: {state[\"language\"]}')\n", + "for message in state[\"messages\"]:\n", + " message.pretty_print()" ] }, { "cell_type": "markdown", - "id": "a6710e65", + "id": "acfbccda-0bd6-4c4d-ae6e-8118520314e1", "metadata": {}, "source": [ - "## Customization" - ] - }, - { - "cell_type": "markdown", - "id": "d29497be-3366-408d-bbb9-d4a8bf4ef37c", - "metadata": {}, - "source": [ - "The configuration parameters by which we track message histories can be customized by passing in a list of ``ConfigurableFieldSpec`` objects to the ``history_factory_config`` parameter. Below, we use two parameters: a `user_id` and `conversation_id`." + "We can also update the state via `.update_state`. For example, we can manually append a new message:" ] }, { "cell_type": "code", - "execution_count": 30, - "id": "1c89daee-deff-4fdf-86a3-178f7d8ef536", + "execution_count": 11, + "id": "e98310d7-8ab1-461d-94a7-dd419494ab8d", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage(content='Ciao Bob! È un piacere conoscerti. 
Come stai oggi?', response_metadata={'id': 'msg_016RJebCoiAgWaNcbv9wrMNW', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 23}}, id='run-40425414-8f72-47d4-bf1d-a84175d8b3f8-0', usage_metadata={'input_tokens': 29, 'output_tokens': 23, 'total_tokens': 52})" - ] - }, - "execution_count": 30, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "from langchain_core.runnables import ConfigurableFieldSpec\n", - "\n", - "\n", - "def get_session_history(user_id: str, conversation_id: str):\n", - " return SQLChatMessageHistory(f\"{user_id}--{conversation_id}\", \"sqlite:///memory.db\")\n", - "\n", - "\n", - "with_message_history = RunnableWithMessageHistory(\n", - " runnable,\n", - " get_session_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"history\",\n", - " history_factory_config=[\n", - " ConfigurableFieldSpec(\n", - " id=\"user_id\",\n", - " annotation=str,\n", - " name=\"User ID\",\n", - " description=\"Unique identifier for the user.\",\n", - " default=\"\",\n", - " is_shared=True,\n", - " ),\n", - " ConfigurableFieldSpec(\n", - " id=\"conversation_id\",\n", - " annotation=str,\n", - " name=\"Conversation ID\",\n", - " description=\"Unique identifier for the conversation.\",\n", - " default=\"\",\n", - " is_shared=True,\n", - " ),\n", - " ],\n", - ")\n", + "from langchain_core.messages import HumanMessage\n", "\n", - "with_message_history.invoke(\n", - " {\"language\": \"italian\", \"input\": \"hi im bob!\"},\n", - " config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},\n", - ")" + "_ = app.update_state(config, {\"messages\": [HumanMessage(\"Test\")]})" ] }, { "cell_type": "code", - "execution_count": 32, - "id": "4f282883", + "execution_count": 12, + "id": "74ab3691-6f3b-49c5-aad0-2a90fc2a1e6a", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage(content='Bob, il tuo nome è Bob.', response_metadata={'id': 'msg_01Kktiy3auFDKESY54KtTWPX', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 60, 'output_tokens': 12}}, id='run-c7768420-3f30-43f5-8834-74b1979630dd-0', usage_metadata={'input_tokens': 60, 'output_tokens': 12, 'total_tokens': 72})" - ] - }, - "execution_count": 32, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Language: Spanish\n", + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "Hi, I'm Bob.\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "¡Hola, Bob! 
Es un placer conocerte.\n", + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "Test\n" + ] } ], "source": [ - "# remembers\n", - "with_message_history.invoke(\n", - " {\"language\": \"italian\", \"input\": \"whats my name?\"},\n", - " config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},\n", - ")" + "state = app.get_state(config).values\n", + "\n", + "print(f'Language: {state[\"language\"]}')\n", + "for message in state[\"messages\"]:\n", + " message.pretty_print()" ] }, { - "cell_type": "code", - "execution_count": 33, - "id": "fc122c18", + "cell_type": "markdown", + "id": "e4a1ea00-d7ff-4f18-b9ec-9aec5909d027", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage(content='Mi dispiace, non so il tuo nome. Come posso aiutarti?', response_metadata={'id': 'msg_0178FpbpPNioB7kqvyHk7rjD', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 30, 'output_tokens': 23}}, id='run-df1f1768-aab6-4aec-8bba-e33fc9e90b8d-0', usage_metadata={'input_tokens': 30, 'output_tokens': 23, 'total_tokens': 53})" - ] - }, - "execution_count": 33, - "metadata": {}, - "output_type": "execute_result" - } - ], "source": [ - "# New user_id --> does not remember\n", - "with_message_history.invoke(\n", - " {\"language\": \"italian\", \"input\": \"whats my name?\"},\n", - " config={\"configurable\": {\"user_id\": \"456\", \"conversation_id\": \"1\"}},\n", - ")" + "For details on managing state, including deleting messages, see the LangGraph documentation:\n", + "- [How to delete messages](https://langchain-ai.github.io/langgraph/how-tos/memory/delete-messages/)\n", + "- [How to view and update past graph state](https://langchain-ai.github.io/langgraph/how-tos/human_in_the_loop/time-travel/)" ] }, { - "cell_type": "markdown", - "id": "3ce37565", + "cell_type": "code", + "execution_count": null, + "id": "870c9c5b-c859-4c0e-9cbd-3555e6ed11e4", "metadata": {}, - "source": [ - "Note that in this case the context was preserved for the same `user_id`, but once we changed it, the new chat history was started, even though the `conversation_id` was the same." - ] + "outputs": [], + "source": [] } ], "metadata": { @@ -896,7 +469,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/how_to/qa_chat_history_how_to.ipynb b/docs/docs/how_to/qa_chat_history_how_to.ipynb index abe688a6b90e1..743f60c6349d6 100644 --- a/docs/docs/how_to/qa_chat_history_how_to.ipynb +++ b/docs/docs/how_to/qa_chat_history_how_to.ipynb @@ -7,6 +7,18 @@ "source": [ "# How to add chat history\n", "\n", + ":::note\n", + "\n", + "This guide previously used the [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) abstraction. You can access this version of the documentation in the [v0.2 docs](https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/).\n", + "\n", + "As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into new LangChain applications.\n", + "\n", + "If your code is already relying on `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do **not** need to make any changes. 
We do not plan on deprecating this functionality in the near future as it works for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected.\n", + "\n", + "Please see [How to migrate to LangGraph Memory](/docs/versions/migrating_memory/) for more details.\n", + ":::\n", + "\n", + "\n", "In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n", "\n", "In this guide we focus on **adding logic for incorporating historical messages.**\n", @@ -29,7 +41,7 @@ "\n", "### Dependencies\n", "\n", - "We'll use OpenAI embeddings and a Chroma vector store in this walkthrough, but everything shown here works with any [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers). \n", + "We'll use OpenAI embeddings and an InMemory vector store in this walkthrough, but everything shown here works with any [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers). \n", "\n", "We'll use the following packages:" ] }, @@ -42,7 +54,7 @@ "outputs": [], "source": [ "%%capture --no-stderr\n", - "%pip install --upgrade --quiet langchain langchain-community langchain-chroma beautifulsoup4" + "%pip install --upgrade --quiet langchain langchain-community beautifulsoup4" ] }, { "cell_type": "code", "execution_count": 2, - "id": "143787ca-d8e6-4dc9-8281-4374f4d71720", + "id": "3b156b76-22a1-43af-a509-137acdccc5d0", "metadata": {}, "outputs": [], "source": [ "import getpass\n", "import os\n", "\n", "if not os.environ.get(\"OPENAI_API_KEY\"):\n", - " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", - "\n", - "# import dotenv\n", - "\n", - "# dotenv.load_dotenv()" + " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()" ] }, { @@ -153,7 +161,7 @@ "id": "15f8ad59-19de-42e3-85a8-3ba95ee0bd43", "metadata": {}, "source": [ - "For the retriever, we will use [WebBaseLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) to load the content of a web page. Here we instantiate a `Chroma` vectorstore and then use its [.as_retriever](https://python.langchain.com/api_reference/core/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.as_retriever) method to build a retriever that can be incorporated into [LCEL](/docs/concepts/#langchain-expression-language) chains." + "For the retriever, we will use [WebBaseLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) to load the content of a web page. Here we instantiate an `InMemoryVectorStore` vectorstore and then use its [.as_retriever](https://python.langchain.com/api_reference/core/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.as_retriever) method to build a retriever that can be incorporated into [LCEL](/docs/concepts/#langchain-expression-language) chains."
] }, { @@ -161,16 +169,24 @@ "execution_count": 5, "id": "820244ae-74b4-4593-b392-822979dd91b8", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "USER_AGENT environment variable not set, consider setting it to identify your requests.\n" + ] + } + ], "source": [ "import bs4\n", "from langchain.chains import create_retrieval_chain\n", "from langchain.chains.combine_documents import create_stuff_documents_chain\n", - "from langchain_chroma import Chroma\n", "from langchain_community.document_loaders import WebBaseLoader\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_core.vectorstores import InMemoryVectorStore\n", "from langchain_openai import OpenAIEmbeddings\n", "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", "\n", @@ -186,7 +202,8 @@ "\n", "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n", "splits = text_splitter.split_documents(docs)\n", - "vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n", + "vectorstore = InMemoryVectorStore(embedding=OpenAIEmbeddings())\n", + "vectorstore.add_documents(splits)\n", "retriever = vectorstore.as_retriever()" ] }, @@ -286,8 +303,8 @@ " (\"human\", \"{input}\"),\n", " ]\n", ")\n", - "question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)\n", "\n", + "question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)\n", "rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)" ] }, @@ -296,20 +313,17 @@ "id": "53a662c2-f38b-45f9-95c4-66de15637614", "metadata": {}, "source": [ - "### Adding chat history\n", + "### Stateful Management of chat history\n", "\n", - "To manage the chat history, we will need:\n", + "We have added application logic for incorporating chat history, but we are still manually plumbing it through our application. In production, the Q&A application we usually persist the chat history into a database, and be able to read and update it appropriately.\n", "\n", - "1. An object for storing the chat history;\n", - "2. An object that wraps our chain and manages updates to the chat history.\n", + "[LangGraph](https://langchain-ai.github.io/langgraph/) implements a built-in [persistence layer](https://langchain-ai.github.io/langgraph/concepts/persistence/), making it ideal for chat applications that support multiple conversational turns.\n", "\n", - "For these we will use [BaseChatMessageHistory](https://python.langchain.com/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html) and [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html). 
The latter is a wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n", + "Wrapping our chain in a minimal LangGraph application allows us to automatically persist the message history, simplifying the development of multi-turn applications.\n", "\n", - "For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history/) LCEL how-to guide.\n", + "LangGraph comes with a simple [in-memory checkpointer](https://langchain-ai.github.io/langgraph/reference/checkpoints/#memorysaver), which we use below. See its documentation for more detail, including how to use different persistence backends (e.g., SQLite or Postgres).\n", "\n", - "Below, we implement a simple example of the second option, in which chat histories are stored in a simple dict. LangChain manages memory integrations with [Redis](/docs/integrations/memory/redis_chat_message_history/) and other technologies to provide for more robust persistence.\n", - "\n", - "Instances of `RunnableWithMessageHistory` manage the chat history for you. They accept a config with a key (`\"session_id\"` by default) that specifies what conversation history to fetch and prepend to the input, and append the output to the same conversation history. Below is an example:" + "For a detailed walkthrough of how to manage message history, head to the [How to add message history (memory)](/docs/how_to/message_history/) guide." ] }, @@ -319,26 +333,48 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_message_histories import ChatMessageHistory\n", - "from langchain_core.chat_history import BaseChatMessageHistory\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", - "\n", - "store = {}\n", - "\n", - "\n", - "def get_session_history(session_id: str) -> BaseChatMessageHistory:\n", - " if session_id not in store:\n", - " store[session_id] = ChatMessageHistory()\n", - " return store[session_id]\n", - "\n", "\n", - "conversational_rag_chain = RunnableWithMessageHistory(\n", - " rag_chain,\n", - " get_session_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"chat_history\",\n", - " output_messages_key=\"answer\",\n", - ")" + "from typing import Sequence\n", "\n", + "from langchain_core.messages import AIMessage, BaseMessage, HumanMessage\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.graph import START, StateGraph\n", + "from langgraph.graph.message import add_messages\n", + "from typing_extensions import Annotated, TypedDict\n", + "\n", + "\n", + "# We define a dict representing the state of the application.\n", + "# This state has the same input and output keys as `rag_chain`.\n", + "class State(TypedDict):\n", + " input: str\n", + " chat_history: Annotated[Sequence[BaseMessage], add_messages]\n", + " context: str\n", + " answer: str\n", + "\n", + "\n", + "# We then define a simple node that runs the `rag_chain`.\n", + "# The `return` values of the node update the graph state, so here we just\n", + "# update the chat history with the input message and response.\n", + "def call_model(state: State):\n", + " response = rag_chain.invoke(state)\n", + " return {\n", + " \"chat_history\": [\n", + " HumanMessage(state[\"input\"]),\n", + " AIMessage(response[\"answer\"]),\n", + " ],\n", + " \"context\": response[\"context\"],\n", + " \"answer\": response[\"answer\"],\n", + " }\n", + 
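"\n", + "\n", + "# Note: `chat_history` is annotated with the `add_messages` reducer, so the\n", + "# messages returned above are appended to the existing history, while plain\n", + "# keys such as `context` and `answer` are overwritten on each turn.\n", + 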
"\n", + "\n", + "# Our graph consists only of one node:\n", + "workflow = StateGraph(state_schema=State)\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "\n", + "# Finally, we compile the graph with a checkpointer object.\n", + "# This persists the state, in this case in memory.\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { @@ -348,23 +384,21 @@ "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.'" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This process helps agents or models tackle difficult tasks by dividing them into more manageable subtasks. Task decomposition can be achieved through methods like Chain of Thought (CoT) or Tree of Thoughts, which guide the agent in thinking step by step or exploring multiple reasoning possibilities at each step.\n" + ] } ], "source": [ - "conversational_rag_chain.invoke(\n", + "config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n", + "\n", + "result = app.invoke(\n", " {\"input\": \"What is Task Decomposition?\"},\n", - " config={\n", - " \"configurable\": {\"session_id\": \"abc123\"}\n", - " }, # constructs a key \"abc123\" in `store`.\n", - ")[\"answer\"]" + " config=config,\n", + ")\n", + "print(result[\"answer\"])" ] }, { @@ -374,21 +408,19 @@ "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions, or human inputs to break down complex tasks into smaller and more manageable steps. Additionally, task decomposition can involve utilizing resources like internet access for information gathering, long-term memory management, and GPT-3.5 powered agents for delegation of simple tasks.'" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "One way of task decomposition is by using Large Language Models (LLMs) with simple prompting, such as providing instructions like \"Steps for XYZ\" or asking about subgoals for achieving a specific task. This method leverages the power of LLMs to break down tasks into smaller components for easier handling. 
Additionally, task decomposition can also be done using task-specific instructions tailored to the nature of the task, like requesting a story outline for writing a novel.\n" + ] } ], "source": [ - "conversational_rag_chain.invoke(\n", - " {\"input\": \"What are common ways of doing it?\"},\n", - " config={\"configurable\": {\"session_id\": \"abc123\"}},\n", - ")[\"answer\"]" + "result = app.invoke(\n", + " {\"input\": \"What is one way of doing it?\"},\n", + " config=config,\n", + ")\n", + "print(result[\"answer\"])" ] }, { @@ -396,7 +428,7 @@ "id": "3ab59258-84bc-4904-880e-2ebfebbca563", "metadata": {}, "source": [ - "The conversation history can be inspected in the `store` dict:" + "The conversation history can be inspected via the state of the application:" ] }, { @@ -409,27 +441,25 @@ "name": "stdout", "output_type": "stream", "text": [ - "User: What is Task Decomposition?\n", + "================================\u001b[1m Human Message \u001b[0m=================================\n", "\n", - "AI: Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.\n", + "What is Task Decomposition?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", - "User: What are common ways of doing it?\n", + "Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This process helps agents or models tackle difficult tasks by dividing them into more manageable subtasks. Task decomposition can be achieved through methods like Chain of Thought (CoT) or Tree of Thoughts, which guide the agent in thinking step by step or exploring multiple reasoning possibilities at each step.\n", + "================================\u001b[1m Human Message \u001b[0m=================================\n", "\n", - "AI: Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions, or human inputs to break down complex tasks into smaller and more manageable steps. Additionally, task decomposition can involve utilizing resources like internet access for information gathering, long-term memory management, and GPT-3.5 powered agents for delegation of simple tasks.\n", - "\n" + "What is one way of doing it?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "One way of task decomposition is by using Large Language Models (LLMs) with simple prompting, such as providing instructions like \"Steps for XYZ\" or asking about subgoals for achieving a specific task. This method leverages the power of LLMs to break down tasks into smaller components for easier handling. 
Additionally, task decomposition can also be done using task-specific instructions tailored to the nature of the task, like requesting a story outline for writing a novel.\n" ] } ], "source": [ - "from langchain_core.messages import AIMessage\n", - "\n", - "for message in store[\"abc123\"].messages:\n", - " if isinstance(message, AIMessage):\n", - " prefix = \"AI\"\n", - " else:\n", - " prefix = \"User\"\n", - "\n", - " print(f\"{prefix}: {message.content}\\n\")" + "chat_history = app.get_state(config).values[\"chat_history\"]\n", + "for message in chat_history:\n", + " message.pretty_print()" ] }, { @@ -457,17 +487,22 @@ "metadata": {}, "outputs": [], "source": [ + "from typing import Sequence\n", + "\n", "import bs4\n", "from langchain.chains import create_history_aware_retriever, create_retrieval_chain\n", "from langchain.chains.combine_documents import create_stuff_documents_chain\n", - "from langchain_chroma import Chroma\n", - "from langchain_community.chat_message_histories import ChatMessageHistory\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_core.chat_history import BaseChatMessageHistory\n", + "from langchain_core.messages import AIMessage, BaseMessage, HumanMessage\n", "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_core.runnables.history import RunnableWithMessageHistory\n", + "from langchain_core.vectorstores import InMemoryVectorStore\n", "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.graph import START, StateGraph\n", + "from langgraph.graph.message import add_messages\n", + "from typing_extensions import Annotated, TypedDict\n", "\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", "\n", @@ -485,7 +520,9 @@ "\n", "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n", "splits = text_splitter.split_documents(docs)\n", - "vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n", + "\n", + "vectorstore = InMemoryVectorStore(embedding=OpenAIEmbeddings())\n", + "vectorstore.add_documents(documents=splits)\n", "retriever = vectorstore.as_retriever()\n", "\n", "\n", @@ -532,22 +569,41 @@ "\n", "\n", "### Statefully manage chat history ###\n", - "store = {}\n", "\n", "\n", - "def get_session_history(session_id: str) -> BaseChatMessageHistory:\n", - " if session_id not in store:\n", - " store[session_id] = ChatMessageHistory()\n", - " return store[session_id]\n", - "\n", - "\n", - "conversational_rag_chain = RunnableWithMessageHistory(\n", - " rag_chain,\n", - " get_session_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"chat_history\",\n", - " output_messages_key=\"answer\",\n", - ")" + "# We define a dict representing the state of the application.\n", + "# This state has the same input and output keys as `rag_chain`.\n", + "class State(TypedDict):\n", + " input: str\n", + " chat_history: Annotated[Sequence[BaseMessage], add_messages]\n", + " context: str\n", + " answer: str\n", + "\n", + "\n", + "# We then define a simple node that runs the `rag_chain`.\n", + "# The `return` values of the node update the graph state, so here we just\n", + "# update the chat history with the input message and response.\n", + "def call_model(state: State):\n", + " response = rag_chain.invoke(state)\n", + " return {\n", + " \"chat_history\": 
[\n", + " HumanMessage(state[\"input\"]),\n", + " AIMessage(response[\"answer\"]),\n", + " ],\n", + " \"context\": response[\"context\"],\n", + " \"answer\": response[\"answer\"],\n", + " }\n", + "\n", + "\n", + "# Our graph consists only of one node:\n", + "workflow = StateGraph(state_schema=State)\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "\n", + "# Finally, we compile the graph with a checkpointer object.\n", + "# This persists the state, in this case in memory.\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { @@ -557,23 +613,21 @@ "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help in decomposing hard tasks into multiple manageable tasks by instructing models to think step by step and explore multiple reasoning possibilities at each step. Task decomposition can be achieved through various methods such as using prompting techniques, task-specific instructions, or human inputs.'" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This process helps agents or models handle difficult tasks by dividing them into more manageable subtasks. Different methods like Chain of Thought and Tree of Thoughts are used to decompose tasks into multiple steps, enhancing performance and aiding in the interpretation of the thinking process.\n" + ] } ], "source": [ - "conversational_rag_chain.invoke(\n", + "config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n", + "\n", + "result = app.invoke(\n", " {\"input\": \"What is Task Decomposition?\"},\n", - " config={\n", - " \"configurable\": {\"session_id\": \"abc123\"}\n", - " }, # constructs a key \"abc123\" in `store`.\n", - ")[\"answer\"]" + " config=config,\n", + ")\n", + "print(result[\"answer\"])" ] }, { @@ -583,21 +637,19 @@ "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Task decomposition can be done in common ways such as using prompting techniques like Chain of Thought (CoT) or Tree of Thoughts, which instruct models to think step by step and explore multiple reasoning possibilities at each step. Another way is to provide task-specific instructions, such as asking to \"Write a story outline\" for writing a novel, to guide the decomposition process. Additionally, task decomposition can also involve human inputs to break down complex tasks into smaller and simpler steps.'" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "One way of task decomposition is by using Large Language Models (LLMs) with simple prompting, such as providing instructions like \"Steps for XYZ\" or asking about subgoals for achieving a specific task. 
This method leverages the power of LLMs to break down tasks into smaller components for easier handling and processing.\n" + ] } ], "source": [ - "conversational_rag_chain.invoke(\n", - " {\"input\": \"What are common ways of doing it?\"},\n", - " config={\"configurable\": {\"session_id\": \"abc123\"}},\n", - ")[\"answer\"]" + "result = app.invoke(\n", + " {\"input\": \"What is one way of doing it?\"},\n", + " config=config,\n", + ")\n", + "print(result[\"answer\"])" ] }, { @@ -670,22 +722,11 @@ "id": "52ae46d9-43f7-481b-96d5-df750be3ad65", "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 5cd28d13-88dd-4eac-a465-3770ac27eff6, but expected {'tool'} run.\")\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_TbhPPPN05GKi36HLeaN4QM90', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2e60d910-879a-4a2a-b1e9-6a6c5c7d7ebc-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_TbhPPPN05GKi36HLeaN4QM90'}])]}}\n", - "----\n", - "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 
2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_TbhPPPN05GKi36HLeaN4QM90')]}}\n", - "----\n", - "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in transforming big tasks into multiple manageable tasks, making it easier for autonomous agents to handle and interpret the thinking process. One common method for task decomposition is the Chain of Thought (CoT) technique, where models are instructed to \"think step by step\" to decompose hard tasks. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of multiple thoughts per step. Task decomposition can be facilitated through various methods such as using simple prompts, task-specific instructions, or human inputs.', response_metadata={'token_usage': {'completion_tokens': 130, 'prompt_tokens': 636, 'total_tokens': 766}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-3ef17638-65df-4030-a7fe-795e6da91c69-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='Task decomposition is a problem-solving strategy that involves breaking down a complex task or problem into smaller, more manageable subtasks. By decomposing a task into smaller components, it becomes easier to understand, analyze, and solve the overall problem. This approach allows individuals to focus on one specific aspect of the task at a time, leading to a more systematic and organized problem-solving process. Task decomposition is commonly used in various fields such as project management, software development, and engineering to simplify complex tasks and improve efficiency.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 102, 'prompt_tokens': 68, 'total_tokens': 170, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-a0925ffd-f500-4677-a108-c7015987e9ae-0', usage_metadata={'input_tokens': 68, 'output_tokens': 102, 'total_tokens': 170})]}}\n", "----\n" ] } @@ -746,7 +787,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-1cd17562-18aa-4839-b41b-403b17a0fc20-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='Hello Bob! 
How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d9011a17-9dbb-4348-9a58-ff89419a4bca-0', usage_metadata={'input_tokens': 67, 'output_tokens': 11, 'total_tokens': 78})]}}\n", "----\n" ] } @@ -775,22 +816,15 @@ "id": "e2c570ae-dd91-402c-8693-ae746de63b16", "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID c54381c0-c5d9-495a-91a0-aca4ae755663, but expected {'tool'} run.\")\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_rg7zKTE5e0ICxVSslJ1u9LMg', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-122bf097-7ff1-49aa-b430-e362b51354ad-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_rg7zKTE5e0ICxVSslJ1u9LMg'}])]}}\n", + "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_qVHvDTfYmWqcbgVhTwsH03aJ', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-bf9df2a6-ad56-43af-8d57-16f850accfd1-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_qVHvDTfYmWqcbgVhTwsH03aJ', 'type': 'tool_call'}], usage_metadata={'input_tokens': 91, 'output_tokens': 19, 'total_tokens': 110})]}}\n", "----\n", - "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. 
CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_rg7zKTE5e0ICxVSslJ1u9LMg')]}}\n", + "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. 
Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:', name='blog_post_retriever', id='742ab53d-6f34-4607-bde7-13f2d75e0055', tool_call_id='call_qVHvDTfYmWqcbgVhTwsH03aJ')]}}\n", "----\n", - "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in managing and solving intricate problems by dividing them into more manageable components. By decomposing tasks, agents or models can better understand the steps involved and plan their actions accordingly. Techniques like Chain of Thought (CoT) and Tree of Thoughts are examples of methods that enhance model performance on complex tasks by breaking them down into smaller steps.', response_metadata={'token_usage': {'completion_tokens': 87, 'prompt_tokens': 659, 'total_tokens': 746}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-b9166386-83e5-4b82-9a4b-590e5fa76671-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used in autonomous agent systems to break down complex tasks into smaller and simpler steps. This approach helps the agent to manage and execute tasks more effectively by dividing them into manageable subtasks. One common method for task decomposition is the Chain of Thought (CoT) technique, which prompts the model to think step by step and decompose hard tasks into smaller steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of thought steps.\\n\\nTask decomposition can be achieved through various methods, such as using language models with simple prompting, task-specific instructions, or human inputs. 
By breaking down tasks into smaller components, autonomous agents can plan and execute tasks more efficiently.\\n\\nIf you would like more detailed information or examples related to task decomposition, feel free to ask!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 168, 'prompt_tokens': 611, 'total_tokens': 779, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0f51a1cf-ff0a-474a-93f5-acf54e0d8cd6-0', usage_metadata={'input_tokens': 611, 'output_tokens': 168, 'total_tokens': 779})]}}\n", "----\n" ] } @@ -825,24 +859,11 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_6kbxTU5CDWLmF9mrvR7bWSkI', 'function': {'arguments': '{\"query\":\"Common ways of task decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 769, 'total_tokens': 790}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2d2c8327-35cd-484a-b8fd-52436657c2d8-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Common ways of task decomposition'}, 'id': 'call_6kbxTU5CDWLmF9mrvR7bWSkI'}])]}}\n", - "----\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 29553415-e0f4-41a9-8921-ba489e377f68, but expected {'tool'} run.\")\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. 
The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_6kbxTU5CDWLmF9mrvR7bWSkI')]}}\n", + "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_n7vUrFacrvl5wUGmz5EGpmCS', 'function': {'arguments': '{\"query\":\"Common ways of task decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 802, 'total_tokens': 823, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-4d949be3-00e5-49e5-af26-6a217efc8858-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Common ways of task decomposition'}, 'id': 'call_n7vUrFacrvl5wUGmz5EGpmCS', 'type': 'tool_call'}], usage_metadata={'input_tokens': 802, 'output_tokens': 21, 'total_tokens': 823})]}}\n", "----\n", - "{'agent': {'messages': [AIMessage(content='Common ways of task decomposition include:\\n1. Using LLM with simple prompting like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\"\\n2. Using task-specific instructions, for example, \"Write a story outline\" for writing a novel.\\n3. Involving human inputs in the task decomposition process.', response_metadata={'token_usage': {'completion_tokens': 67, 'prompt_tokens': 1339, 'total_tokens': 1406}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ad14cde-ca75-4238-a868-f865e0fc50dd-0')]}}\n", + "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. 
The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.', name='blog_post_retriever', id='90fcbc1e-0736-47bc-9a96-347ad837e0e3', tool_call_id='call_n7vUrFacrvl5wUGmz5EGpmCS')]}}\n", + "----\n", + "{'agent': {'messages': [AIMessage(content='According to the blog post, common ways of task decomposition include:\\n\\n1. Using Language Models (LLM) with Simple Prompting: Language models can be utilized with simple prompts like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\" to break down tasks into smaller steps.\\n\\n2. Task-Specific Instructions: Providing task-specific instructions to guide the decomposition process. For example, using instructions like \"Write a story outline\" for writing a novel can help in breaking down the task effectively.\\n\\n3. Human Inputs: Involving human inputs in the task decomposition process. 
Human insights and expertise can contribute to breaking down complex tasks into manageable subtasks.\\n\\nThese methods of task decomposition help autonomous agents in planning and executing tasks more efficiently by breaking them down into smaller and simpler components.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 160, 'prompt_tokens': 1347, 'total_tokens': 1507, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-087ce1b5-f897-40d0-8ef4-eb1c6852a835-0', usage_metadata={'input_tokens': 1347, 'output_tokens': 160, 'total_tokens': 1507})]}}\n", "----\n" ] } @@ -877,18 +898,27 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 1, "id": "b1d2b4d4-e604-497d-873d-d345b808578e", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "USER_AGENT environment variable not set, consider setting it to identify your requests.\n" + ] + } + ], "source": [ "import bs4\n", "from langchain.tools.retriever import create_retriever_tool\n", - "from langchain_chroma import Chroma\n", "from langchain_community.document_loaders import WebBaseLoader\n", + "from langchain_core.vectorstores import InMemoryVectorStore\n", "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "memory = MemorySaver()\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", @@ -907,7 +937,8 @@ "\n", "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n", "splits = text_splitter.split_documents(docs)\n", - "vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n", + "vectorstore = InMemoryVectorStore(embedding=OpenAIEmbeddings())\n", + "vectorstore.add_documents(documents=splits)\n", "retriever = vectorstore.as_retriever()\n", "\n", "\n", @@ -959,7 +990,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.2" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/how_to/trim_messages.ipynb b/docs/docs/how_to/trim_messages.ipynb index eb8a44a7f4dbb..6a882345e19f2 100644 --- a/docs/docs/how_to/trim_messages.ipynb +++ b/docs/docs/how_to/trim_messages.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "b5ee5b75-6876-4d62-9ade-5a7a808ae5a2", + "id": "eaad9a82-0592-4315-9931-0621054bdd0e", "metadata": {}, "source": [ "# How to trim messages\n", @@ -22,37 +22,83 @@ "\n", "All models have finite context windows, meaning there's a limit to how many tokens they can take as input. 
If you have very long messages or a chain/agent that accumulates a long message history, you'll need to manage the length of the messages you're passing in to the model.\n", "\n", - "The `trim_messages` util provides some basic strategies for trimming a list of messages to be of a certain token length.\n", + "[trim_messages](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.utils.trim_messages.html) can be used to reduce the size of a chat history to a specified token count or specified message count.\n", "\n", - "## Getting the last `max_tokens` tokens\n", "\n", - "To get the last `max_tokens` in the list of Messages we can set `strategy=\"last\"`. Notice that for our `token_counter` we can pass in a function (more on that below) or a language model (since language models have a message token counting method). It makes sense to pass in a model when you're trimming your messages to fit into the context window of that specific model:" + "If passing the trimmed chat history back into a chat model directly, the trimmed chat history should satisfy the following properties:\n", + "\n", + "1. The resulting chat history should be **valid**. Usually this means that the following properties should be satisfied:\n", + " - The chat history **starts** with either (1) a `HumanMessage` or (2) a [SystemMessage](/docs/concepts/#systemmessage) followed by a `HumanMessage`.\n", + " - The chat history **ends** with either a `HumanMessage` or a `ToolMessage`.\n", + " - A `ToolMessage` can only appear after an `AIMessage` that involved a tool call. \n", + " This can be achieved by setting `start_on=\"human\"` and `end_on=(\"human\", \"tool\")`.\n", + "2. It includes recent messages and drops old messages in the chat history.\n", + " This can be achieved by setting `strategy=\"last\"`.\n", + "3. Usually, the new chat history should include the `SystemMessage` if it\n", + " was present in the original chat history since the `SystemMessage` includes\n", + " special instructions to the chat model. The `SystemMessage` is almost always\n", + " the first message in the history if present. This can be achieved by setting\n", + " `include_system=True`." ] }, { "cell_type": "markdown", "id": "e4bffc37-78c0-46c3-ad0c-b44de0ed3e90", "metadata": {}, "source": [ "## Trimming based on token count\n", "\n", "Here, we'll trim the chat history based on token count. The trimmed chat history will produce a **valid** chat history that includes the `SystemMessage`.\n", "\n", "To keep the most recent messages, we set `strategy=\"last\"`. We'll also set `include_system=True` to include the `SystemMessage`, and `start_on=\"human\"` to make sure the resulting chat history is valid. \n", "\n", "This is a good default configuration when using `trim_messages` based on token count. Remember to adjust `token_counter` and `max_tokens` for your use case.\n", "\n", "Notice that for our `token_counter` we can pass in a function (more on that below) or a language model (since language models have a message token counting method). 
It makes sense to pass in a model when you're trimming your messages to fit into the context window of that specific model:" ] }, { "cell_type": "code", "execution_count": 1, - "id": "c974633b-3bd0-4844-8a8f-85e3e25f13fe", + "id": "c91edeb2-9978-4665-9fdb-fc96cdb51caa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "pip install -qU langchain-openai" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "40ea972c-d424-4bc4-9f2e-82f01c3d7598", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[AIMessage(content=\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"),\n", - " HumanMessage(content='what do you call a speechless parrot')]" + "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]" ] }, - "execution_count": 1, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# pip install -U langchain-openai\n", "from langchain_core.messages import (\n", " AIMessage,\n", " HumanMessage,\n", " SystemMessage,\n", + " ToolMessage,\n", " trim_messages,\n", ")\n", "from langchain_openai import ChatOpenAI\n", @@ -70,36 +116,69 @@ " HumanMessage(\"what do you call a speechless parrot\"),\n", "]\n", "\n", + "\n", "trim_messages(\n", " messages,\n", - " max_tokens=45,\n", + " # Keep the last <= n_count tokens of the messages.\n", " strategy=\"last\",\n", + " # highlight-start\n", + " # Remember to adjust based on your model\n", + " # or else pass a custom token_encoder\n", " token_counter=ChatOpenAI(model=\"gpt-4o\"),\n", + " # highlight-end\n", + " # highlight-start\n", + " # Remember to adjust based on the desired conversation\n", + " # length\n", + " max_tokens=45,\n", + " # highlight-end\n", + " # Most chat models expect that chat history starts with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a SystemMessage followed by a HumanMessage\n", + " start_on=\"human\",\n", + " # Most chat models expect that chat history ends with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a ToolMessage\n", + " end_on=(\"human\", \"tool\"),\n", + " # Usually, we want to keep the SystemMessage\n", + " # if it's present in the original history.\n", + " # The SystemMessage has special instructions for the model.\n", + " include_system=True,\n", + " allow_partial=False,\n", ")" ] }, { "cell_type": "markdown", - "id": "d3f46654-c4b2-4136-b995-91c3febe5bf9", + "id": "28fcfc94-0d4a-415c-9506-8ae7634253a2", "metadata": {}, "source": [ - "If we want to always keep the initial system message we can specify `include_system=True`:" + "## Trimming based on message count\n", + "\n", + "Alternatively, we can trim the chat history based on **message count**, by setting `token_counter=len`. In this case, each message will count as a single token, and `max_tokens` will control\n", + "the maximum number of messages.\n", + "\n", + "This is a good default configuration when using `trim_messages` based on message count. Remember to adjust `max_tokens` for your use case."
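As a quick, self-contained illustration of the message-count configuration just described, here is a minimal sketch. The `messages` list abridges the chat history used throughout this guide, and the reply to the first question is a hypothetical placeholder (the original joke is elided in this diff); the final selection matches the output shown in the next cell:

```python
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    trim_messages,
)

# Abridged chat history from this guide; the first AI reply is a
# hypothetical placeholder standing in for the original joke.
messages = [
    SystemMessage("you're a good assistant, you always respond with a joke."),
    HumanMessage("i wonder why it's called langchain"),
    AIMessage("(a joke about the name)"),  # hypothetical placeholder
    HumanMessage("and who is harrison chasing anyways"),
    AIMessage(
        "Hmmm let me think.\n\n"
        "Why, he's probably chasing after the last cup of coffee in the office!"
    ),
    HumanMessage("what do you call a speechless parrot"),
]

# With token_counter=len, every message counts as one "token", so
# max_tokens=5 keeps at most five messages, subject to the validity
# constraints (start_on / end_on / include_system) explained above.
trimmed = trim_messages(
    messages,
    strategy="last",
    token_counter=len,
    max_tokens=5,
    start_on="human",
    end_on=("human", "tool"),
    include_system=True,
)

for message in trimmed:
    # Expect: the SystemMessage, then the last human/AI/human exchange.
    print(type(message).__name__, "->", message.content)
```

Note that the placeholder `AIMessage` is dropped even though the budget would admit it: `start_on="human"` forces the kept window to begin on a `HumanMessage`, which is what keeps the result a valid chat history.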
] }, { "cell_type": "code", - "execution_count": 2, - "id": "589b0223-3a73-44ec-8315-2dba3ee6117d", + "execution_count": 3, + "id": "c8fdedae-0e6b-4901-a222-81fc95e265c2", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n", - " HumanMessage(content='what do you call a speechless parrot')]" + "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='and who is harrison chasing anyways', additional_kwargs={}, response_metadata={}),\n", + " AIMessage(content=\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\", additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]" ] }, - "execution_count": 2, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -107,36 +186,59 @@ "source": [ "trim_messages(\n", " messages,\n", - " max_tokens=45,\n", + " # Keep the last <= n_count tokens of the messages.\n", " strategy=\"last\",\n", - " token_counter=ChatOpenAI(model=\"gpt-4o\"),\n", + " # highlight-next-line\n", + " token_counter=len,\n", + " # When token_counter=len, each message\n", + " # will be counted as a single token.\n", + " # highlight-start\n", + " # Remember to adjust for your use case\n", + " max_tokens=5,\n", + " # highlight-end\n", + " # Most chat models expect that chat history starts with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a SystemMessage followed by a HumanMessage\n", + " start_on=\"human\",\n", + " # Most chat models expect that chat history ends with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a ToolMessage\n", + " end_on=(\"human\", \"tool\"),\n", + " # Usually, we want to keep the SystemMessage\n", + " # if it's present in the original history.\n", + " # The SystemMessage has special instructions for the model.\n", " include_system=True,\n", ")" ] }, { + "attachments": {}, "cell_type": "markdown", - "id": "8a8b542c-04d1-4515-8d82-b999ea4fac4f", + "id": "9367857f-7f9a-4d17-9f9c-6ffc5aae909c", "metadata": {}, "source": [ + "## Advanced usage\n", + "\n", + "You can use `trim_messages` as a building block to create more complex processing logic.\n", + "\n", "If we want to allow splitting up the contents of a message we can specify `allow_partial=True`:" ] }, { "cell_type": "code", - "execution_count": 3, - "id": "8c46a209-dddd-4d01-81f6-f6ae55d3225c", + "execution_count": 4, + "id": "8bcca1fe-674c-4713-bacc-8e8e6d6f56c3", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n", - " AIMessage(content=\"\\nWhy, he's probably chasing after the last cup of coffee in the office!\"),\n", - " HumanMessage(content='what do you call a speechless parrot')]" + "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n", + " AIMessage(content=\"\\nWhy, he's probably chasing after the last cup of coffee in the office!\", additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]" ] }, - "execution_count": 3, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -154,26 +256,26 @@ }, { "cell_type": "markdown", - "id": 
"306adf9c-41cd-495c-b4dc-e4f43dd7f8f8", + "id": "245bee9b-e515-4e89-8f2a-84bda9a25de8", "metadata": {}, "source": [ - "If we need to make sure that our first message (excluding the system message) is always of a specific type, we can specify `start_on`:" + "By default, the `SystemMessage` will not be included, so you can drop it by either setting `include_system=False` or by dropping the `include_system` argument." ] }, { "cell_type": "code", - "execution_count": 4, - "id": "878a730b-fe44-4e9d-ab65-7b8f7b069de8", + "execution_count": 5, + "id": "94351736-28a1-44a3-aac7-82356c81d171", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n", - " HumanMessage(content='what do you call a speechless parrot')]" + "[AIMessage(content=\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\", additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]" ] }, - "execution_count": 4, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -181,11 +283,9 @@ "source": [ "trim_messages(\n", " messages,\n", - " max_tokens=60,\n", + " max_tokens=45,\n", " strategy=\"last\",\n", " token_counter=ChatOpenAI(model=\"gpt-4o\"),\n", - " include_system=True,\n", - " start_on=\"human\",\n", ")" ] }, @@ -194,25 +294,23 @@ "id": "7f5d391d-235b-4091-b2de-c22866b478f3", "metadata": {}, "source": [ - "## Getting the first `max_tokens` tokens\n", - "\n", "We can perform the flipped operation of getting the *first* `max_tokens` by specifying `strategy=\"first\"`:" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "5f56ae54-1a39-4019-9351-3b494c003d5b", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n", - " HumanMessage(content=\"i wonder why it's called langchain\")]" + "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content=\"i wonder why it's called langchain\", additional_kwargs={}, response_metadata={})]" ] }, - "execution_count": 5, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -238,18 +336,36 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, + "id": "d930c089-e8e6-4980-9d39-11d41e794772", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "pip install -qU tiktoken" + ] + }, + { + "cell_type": "code", + "execution_count": 8, "id": "1c1c3b1e-2ece-49e7-a3b6-e69877c1633b", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[AIMessage(content=\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"),\n", - " HumanMessage(content='what do you call a speechless parrot')]" + "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]" ] }, - "execution_count": 6, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -257,7 +373,6 @@ "source": [ "from typing import List\n", "\n", - "# pip 
install tiktoken\n", "import tiktoken\n", "from langchain_core.messages import BaseMessage, ToolMessage\n", "\n", @@ -298,9 +413,28 @@ "\n", "trim_messages(\n", " messages,\n", - " max_tokens=45,\n", - " strategy=\"last\",\n", + " # highlight-next-line\n", " token_counter=tiktoken_counter,\n", + " # Keep the last <= n_count tokens of the messages.\n", + " strategy=\"last\",\n", + " # highlight-start\n", + " # Remember to adjust for your use case\n", + " max_tokens=45,\n", + " # highlight-end\n", + " # Most chat models expect that chat history starts with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a SystemMessage followed by a HumanMessage\n", + " start_on=\"human\",\n", + " # Most chat models expect that chat history ends with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a ToolMessage\n", + " end_on=(\"human\", \"tool\"),\n", + " # Usually, we want to keep the SystemMessage\n", + " # if it's present in the original history.\n", + " # The SystemMessage has special instructions for the model.\n", + " include_system=True,\n", ")" ] }, @@ -311,22 +445,22 @@ "source": [ "## Chaining\n", "\n", - "`trim_messages` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain" + "`trim_messages` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, "id": "96aa29b2-01e0-437c-a1ab-02fb0141cb57", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='A: A \"Polly-gone\"!', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 32, 'total_tokens': 41}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_66b29dffce', 'finish_reason': 'stop', 'logprobs': None}, id='run-83e96ddf-bcaa-4f63-824c-98b0f8a0d474-0', usage_metadata={'input_tokens': 32, 'output_tokens': 9, 'total_tokens': 41})" + "AIMessage(content='A polygon! Because it\\'s a \"poly-gone\" quiet!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 32, 'total_tokens': 45, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_057232b607', 'finish_reason': 'stop', 'logprobs': None}, id='run-4fa026e7-9137-4fef-b596-54243615e3b3-0', usage_metadata={'input_tokens': 32, 'output_tokens': 13, 'total_tokens': 45})" ] }, - "execution_count": 7, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Notice we don't pass in messages. 
This creates\n", "# a RunnableLambda that takes messages as input\n", "trimmer = trim_messages(\n", - " max_tokens=45,\n", - " strategy=\"last\",\n", " token_counter=llm,\n", + " # Keep the last <= n_count tokens of the messages.\n", + " strategy=\"last\",\n", + " # When token_counter=len, each message\n", + " # will be counted as a single token.\n", + " # Remember to adjust for your use case\n", + " max_tokens=45,\n", + " # Most chat models expect that chat history starts with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a SystemMessage followed by a HumanMessage\n", + " start_on=\"human\",\n", + " # Most chat models expect that chat history ends with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a ToolMessage\n", + " end_on=(\"human\", \"tool\"),\n", + " # Usually, we want to keep the SystemMessage\n", + " # if it's present in the original history.\n", + " # The SystemMessage has special instructions for the model.\n", " include_system=True,\n", ")\n", "\n", @@ -359,18 +508,18 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, "id": "1ff02d0a-353d-4fac-a77c-7c2c5262abd9", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n", - " HumanMessage(content='what do you call a speechless parrot')]" + "[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]" ] }, - "execution_count": 8, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -391,17 +540,17 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 11, "id": "a9517858-fc2f-4dc3-898d-bf98a0e905a0", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='A \"polly-no-wanna-cracker\"!', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 32, 'total_tokens': 42}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_5bf7397cd3', 'finish_reason': 'stop', 'logprobs': None}, id='run-054dd309-3497-4e7b-b22a-c1859f11d32e-0', usage_metadata={'input_tokens': 32, 'output_tokens': 10, 'total_tokens': 42})" + "AIMessage(content='A \"polygon\"!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 4, 'prompt_tokens': 32, 'total_tokens': 36, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_c17d3befe7', 'finish_reason': 'stop', 'logprobs': None}, id='run-71d9fce6-bb0c-4bb3-acc8-d5eaee6ae7bc-0', usage_metadata={'input_tokens': 32, 'output_tokens': 4, 'total_tokens': 36})" ] }, - "execution_count": 9, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -425,7 +574,15 @@ " max_tokens=45,\n", " strategy=\"last\",\n", " token_counter=llm,\n", + " # Usually, we want to keep the SystemMessage\n", + " # if it's present in the original history.\n", + " # The SystemMessage has special instructions for the model.\n", " include_system=True,\n", + " # Most chat models expect that chat history starts with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a SystemMessage followed by a HumanMessage\n", + " # start_on=\"human\" makes sure we produce a valid chat history\n", + " start_on=\"human\",\n", ")\n", "\n", "chain = trimmer | llm\n", @@ -471,7 +628,7 @@ "name": "python", "nbconvert_exporter": "python", 
"pygments_lexer": "ipython3", - "version": "3.10.4" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/tutorials/chatbot.ipynb b/docs/docs/tutorials/chatbot.ipynb index 0740f6d3a826f..a1f983cc486ab 100644 --- a/docs/docs/tutorials/chatbot.ipynb +++ b/docs/docs/tutorials/chatbot.ipynb @@ -33,6 +33,18 @@ "- [Prompt Templates](/docs/concepts/#prompt-templates)\n", "- [Chat History](/docs/concepts/#chat-history)\n", "\n", + "This guide requires `langgraph >= 0.2.28`.\n", + ":::\n", + "\n", + ":::note\n", + "\n", + "This tutorial previously used the [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) abstraction. You can access that version of the documentation in the [v0.2 docs](https://python.langchain.com/v0.2/docs/tutorials/chatbot/).\n", + "\n", + "As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into new LangChain applications.\n", + "\n", + "If your code is already relying on `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do **not** need to make any changes. We do not plan on deprecating this functionality in the near future as it works for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected.\n", + "\n", + "Please see [How to migrate to LangGraph Memory](/docs/versions/migrating_memory/) for more details.\n", ":::\n", "\n", "## Overview\n", @@ -59,7 +71,7 @@ "\n", "### Installation\n", "\n", - "To install LangChain run:\n", + "For this tutorial we will need `langchain-core` and `langgraph`:\n", "\n", "import Tabs from '@theme/Tabs';\n", "import TabItem from '@theme/TabItem';\n", @@ -67,10 +79,10 @@ "\n", "\n", " \n", - " pip install langchain\n", + " pip install langchain-core langgraph>0.2.27\n", " \n", " \n", - " conda install langchain -c conda-forge\n", + " conda install langchain-core langgraph>0.2.27 -c conda-forge\n", " \n", "\n", "\n", @@ -112,7 +124,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -121,7 +133,7 @@ "\n", "from langchain_openai import ChatOpenAI\n", "\n", - "model = ChatOpenAI(model=\"gpt-3.5-turbo\")" + "model = ChatOpenAI(model=\"gpt-4o-mini\")" ] }, { @@ -133,16 +145,16 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 12, 'total_tokens': 22}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d939617f-0c3b-45e9-a93f-13dafecbd4b5-0', usage_metadata={'input_tokens': 12, 'output_tokens': 10, 'total_tokens': 22})" + "AIMessage(content='Hi Bob! 
How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 11, 'total_tokens': 21, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-149994c0-d958-49bb-9a9d-df911baea29f-0', usage_metadata={'input_tokens': 11, 'output_tokens': 10, 'total_tokens': 21})" ] }, - "execution_count": 2, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -162,16 +174,16 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content=\"I'm sorry, I don't have access to personal information unless you provide it to me. How may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 26, 'prompt_tokens': 12, 'total_tokens': 38}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-47bc8c20-af7b-4fd2-9345-f0e9fdf18ce3-0', usage_metadata={'input_tokens': 12, 'output_tokens': 26, 'total_tokens': 38})" + "AIMessage(content=\"I'm sorry, but I don't have access to personal information about individuals unless you've shared it with me in this conversation. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 30, 'prompt_tokens': 11, 'total_tokens': 41, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-0ecab57c-728d-4fd1-845c-394a62df8e13-0', usage_metadata={'input_tokens': 11, 'output_tokens': 30, 'total_tokens': 41})" ] }, - "execution_count": 3, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -194,16 +206,16 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='Your name is Bob. How can I help you, Bob?', response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 35, 'total_tokens': 48}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9f90291b-4df9-41dc-9ecf-1ee1081f4490-0', usage_metadata={'input_tokens': 35, 'output_tokens': 13, 'total_tokens': 48})" + "AIMessage(content='Your name is Bob! 
How can I help you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 12, 'prompt_tokens': 33, 'total_tokens': 45, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-c164c5a1-d85f-46ee-ba8a-bb511cfb0e51-0', usage_metadata={'input_tokens': 33, 'output_tokens': 12, 'total_tokens': 45})" ] }, - "execution_count": 4, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -234,30 +246,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Message History\n", + "## Message persistence\n", "\n", - "We can use a Message History class to wrap our model and make it stateful.\n", - "This will keep track of inputs and outputs of the model, and store them in some datastore.\n", - "Future interactions will then load those messages and pass them into the chain as part of the input.\n", - "Let's see how to use this!\n", + "[LangGraph](https://langchain-ai.github.io/langgraph/) implements a built-in persistence layer, making it ideal for chat applications that support multiple conversational turns.\n", "\n", - "First, let's make sure to install `langchain-community`, as we will be using an integration in there to store message history." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install langchain_community" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "After that, we can import the relevant classes and set up our chain which wraps the model and adds in this message history. A key part here is the function we pass into as the `get_session_history`. This function is expected to take in a `session_id` and return a Message History object. This `session_id` is used to distinguish between separate conversations, and should be passed in as part of the config when calling the new chain (we'll show how to do that)." + "Wrapping our chat model in a minimal LangGraph application allows us to automatically persist the message history, simplifying the development of multi-turn applications.\n", + "\n", + "LangGraph comes with a simple in-memory checkpointer, which we use below. See its [documentation](https://langchain-ai.github.io/langgraph/concepts/persistence/) for more detail, including how to use different persistence backends (e.g., SQLite or Postgres)." 
] }, { @@ -266,29 +261,33 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.chat_history import (\n", - " BaseChatMessageHistory,\n", - " InMemoryChatMessageHistory,\n", - ")\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.graph import START, MessagesState, StateGraph\n", "\n", - "store = {}\n", + "# Define a new graph\n", + "workflow = StateGraph(state_schema=MessagesState)\n", "\n", "\n", - "def get_session_history(session_id: str) -> BaseChatMessageHistory:\n", - " if session_id not in store:\n", - " store[session_id] = InMemoryChatMessageHistory()\n", - " return store[session_id]\n", + "# Define the function that calls the model\n", + "def call_model(state: MessagesState):\n", + " response = model.invoke(state[\"messages\"])\n", + " return {\"messages\": response}\n", "\n", "\n", - "with_message_history = RunnableWithMessageHistory(model, get_session_history)" + "# Define the (single) node in the graph\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "\n", + "# Add memory\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We now need to create a `config` that we pass into the runnable every time. This config contains information that is not part of the input directly, but is still useful. In this case, we want to include a `session_id`. This should look like:" + "We now need to create a `config` that we pass into the runnable every time. This config contains information that is not part of the input directly, but is still useful. In this case, we want to include a `thread_id`. This should look like:" ] }, { @@ -297,7 +296,16 @@ "metadata": {}, "outputs": [], "source": [ - "config = {\"configurable\": {\"session_id\": \"abc2\"}}" + "config = {\"configurable\": {\"thread_id\": \"abc123\"}}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This enables us to support multiple conversation threads with a single application, a common requirement when your application has multiple users.\n", + "\n", + "We can then invoke the application:" ] }, { @@ -306,23 +314,21 @@ "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Hi Bob! How can I assist you today?'" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Hi Bob! How can I assist you today?\n" + ] } ], "source": [ - "response = with_message_history.invoke(\n", - " [HumanMessage(content=\"Hi! I'm Bob\")],\n", - " config=config,\n", - ")\n", + "query = \"Hi! I'm Bob.\"\n", "\n", - "response.content" + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke({\"messages\": input_messages}, config)\n", + "output[\"messages\"][-1].pretty_print() # output contains all messages in state" ] }, { @@ -331,30 +337,28 @@ "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Your name is Bob. How can I help you today, Bob?'" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Your name is Bob! 
How can I help you today?\n" + ] } ], "source": [ - "response = with_message_history.invoke(\n", - " [HumanMessage(content=\"What's my name?\")],\n", - " config=config,\n", - ")\n", + "query = \"What's my name?\"\n", "\n", - "response.content" + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke({\"messages\": input_messages}, config)\n", + "output[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Great! Our chatbot now remembers things about us. If we change the config to reference a different `session_id`, we can see that it starts the conversation fresh." + "Great! Our chatbot now remembers things about us. If we change the config to reference a different `thread_id`, we can see that it starts the conversation fresh." ] }, { @@ -363,25 +367,21 @@ "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "\"I'm sorry, I cannot determine your name as I am an AI assistant and do not have access to that information.\"" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "I'm sorry, but I don't have access to personal information about you unless you provide it. How can I assist you today?\n" + ] } ], "source": [ - "config = {\"configurable\": {\"session_id\": \"abc3\"}}\n", - "\n", - "response = with_message_history.invoke(\n", - " [HumanMessage(content=\"What's my name?\")],\n", - " config=config,\n", - ")\n", + "config = {\"configurable\": {\"thread_id\": \"abc234\"}}\n", "\n", - "response.content" + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke({\"messages\": input_messages}, config)\n", + "output[\"messages\"][-1].pretty_print()" ] }, { @@ -397,25 +397,21 @@ "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Your name is Bob. How can I assist you today, Bob?'" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Your name is Bob! If there's anything else you'd like to discuss or ask, feel free!\n" + ] } ], "source": [ - "config = {\"configurable\": {\"session_id\": \"abc2\"}}\n", - "\n", - "response = with_message_history.invoke(\n", - " [HumanMessage(content=\"What's my name?\")],\n", - " config=config,\n", - ")\n", + "config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n", "\n", - "response.content" + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke({\"messages\": input_messages}, config)\n", + "output[\"messages\"][-1].pretty_print()" ] }, { @@ -424,18 +420,42 @@ "source": [ "This is how we can support a chatbot having conversations with many users!\n", "\n", - "Right now, all we've done is add a simple persistence layer around the model. We can start to make the more complicated and personalized by adding in a prompt template." 
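The in-memory `MemorySaver` used above keeps checkpoints in process memory, so they are lost on restart. For durable storage the checkpointer can be swapped for a database-backed one. The following is a minimal sketch, not part of this tutorial's code: it assumes the separate `langgraph-checkpoint-sqlite` package is installed, and the `checkpoints.sqlite` filename is a hypothetical choice.

```python
import sqlite3

from langgraph.checkpoint.sqlite import SqliteSaver

# Persist checkpoints to a local SQLite file instead of process memory.
conn = sqlite3.connect("checkpoints.sqlite", check_same_thread=False)
memory = SqliteSaver(conn)  # intended as a drop-in replacement for MemorySaver()
app = workflow.compile(checkpointer=memory)
```

Because checkpoints are keyed by `thread_id`, conversations would then survive process restarts with no other changes to the graph.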
+ ":::tip\n", + "\n", + "For async support, update the `call_model` node to be an async function and use `.ainvoke` when invoking the application:\n", + "\n", + "```python\n", + "# Async function for node:\n", + "async def call_model(state: MessagesState):\n", + " response = await model.ainvoke(state[\"messages\"])\n", + " return {\"messages\": response}\n", + "\n", + "\n", + "# Define graph as before:\n", + "workflow = StateGraph(state_schema=MessagesState)\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "app = workflow.compile(checkpointer=MemorySaver())\n", + "\n", + "# Async invocation:\n", + "output = await app.ainvoke({\"messages\": input_messages}, config):\n", + "output[\"messages\"][-1].pretty_print()\n", + "```\n", + "\n", + ":::" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "Right now, all we've done is add a simple persistence layer around the model. We can start to make the more complicated and personalized by adding in a prompt template.\n", + "\n", "## Prompt templates\n", "\n", "Prompt Templates help to turn raw user information into a format that the LLM can work with. In this case, the raw user input is just a message, which we are passing to the LLM. Let's now make that a bit more complicated. First, let's add in a system message with some custom instructions (but still taking messages as input). Next, we'll add in more input besides just the messages.\n", "\n", - "First, let's add in a system message. To do this, we will create a ChatPromptTemplate. We will utilize `MessagesPlaceholder` to pass all the messages in." + "To add in a system message, we will create a `ChatPromptTemplate`. We will utilize `MessagesPlaceholder` to pass all the messages in." ] }, { @@ -450,117 +470,96 @@ " [\n", " (\n", " \"system\",\n", - " \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n", + " \"You talk like a pirate. Answer all questions to the best of your ability.\",\n", " ),\n", " MessagesPlaceholder(variable_name=\"messages\"),\n", " ]\n", - ")\n", - "\n", - "chain = prompt | model" + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Note that this slightly changes the input type - rather than pass in a list of messages, we are now passing in a dictionary with a `messages` key where that contains a list of messages." + "We can now update our application to incorporate this template:" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Hello Bob! How can I assist you today?'" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "response = chain.invoke({\"messages\": [HumanMessage(content=\"hi! 
I'm bob\")]})\n", + "workflow = StateGraph(state_schema=MessagesState)\n", + "\n", + "\n", + "def call_model(state: MessagesState):\n", + " # highlight-start\n", + " chain = prompt | model\n", + " response = chain.invoke(state)\n", + " # highlight-end\n", + " return {\"messages\": response}\n", + "\n", + "\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", "\n", - "response.content" + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We can now wrap this in the same Messages History object as before" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "with_message_history = RunnableWithMessageHistory(chain, get_session_history)" + "We invoke the application in the same way:" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, - "outputs": [], - "source": [ - "config = {\"configurable\": {\"session_id\": \"abc5\"}}" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Hello, Jim! How can I assist you today?'" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Ahoy there, Jim! What brings ye to these treacherous waters today? Be ye seekin’ treasure, tales, or perhaps a bit o’ knowledge? Speak up, matey!\n" + ] } ], "source": [ - "response = with_message_history.invoke(\n", - " [HumanMessage(content=\"Hi! I'm Jim\")],\n", - " config=config,\n", - ")\n", + "config = {\"configurable\": {\"thread_id\": \"abc345\"}}\n", + "query = \"Hi! I'm Jim.\"\n", "\n", - "response.content" + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke({\"messages\": input_messages}, config)\n", + "output[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 16, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Your name is Jim.'" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Ye be callin' yerself Jim, if I be hearin' ye correctly! A fine name for a scallywag such as yerself! What else can I do fer ye, me hearty?\n" + ] } ], "source": [ - "response = with_message_history.invoke(\n", - " [HumanMessage(content=\"What's my name?\")],\n", - " config=config,\n", - ")\n", + "query = \"What is my name?\"\n", "\n", - "response.content" + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke({\"messages\": input_messages}, config)\n", + "output[\"messages\"][-1].pretty_print()" ] }, { @@ -572,7 +571,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -584,16 +583,51 @@ " ),\n", " MessagesPlaceholder(variable_name=\"messages\"),\n", " ]\n", - ")\n", - "\n", - "chain = prompt | model" + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Note that we have added a new `language` input to the prompt. We can now invoke the chain and pass in a language of our choice." + "Note that we have added a new `language` input to the prompt. 
Our application now has two parameters-- the input `messages` and `language`. We should update our application's state to reflect this:" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Sequence\n", + "\n", + "from langchain_core.messages import BaseMessage\n", + "from langgraph.graph.message import add_messages\n", + "from typing_extensions import Annotated, TypedDict\n", + "\n", + "\n", + "# highlight-next-line\n", + "class State(TypedDict):\n", + " # highlight-next-line\n", + " messages: Annotated[Sequence[BaseMessage], add_messages]\n", + " # highlight-next-line\n", + " language: str\n", + "\n", + "\n", + "workflow = StateGraph(state_schema=State)\n", + "\n", + "\n", + "def call_model(state: State):\n", + " chain = prompt | model\n", + " response = chain.invoke(state)\n", + " return {\"messages\": [response]}\n", + "\n", + "\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { @@ -602,108 +636,67 @@ "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'¡Hola, Bob! ¿En qué puedo ayudarte hoy?'" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "¡Hola, Bob! ¿Cómo puedo ayudarte hoy?\n" + ] } ], "source": [ - "response = chain.invoke(\n", - " {\"messages\": [HumanMessage(content=\"hi! I'm bob\")], \"language\": \"Spanish\"}\n", - ")\n", + "config = {\"configurable\": {\"thread_id\": \"abc456\"}}\n", + "query = \"Hi! I'm Bob.\"\n", + "language = \"Spanish\"\n", "\n", - "response.content" + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke(\n", + " # highlight-next-line\n", + " {\"messages\": input_messages, \"language\": language},\n", + " config,\n", + ")\n", + "output[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let's now wrap this more complicated chain in a Message History class. This time, because there are multiple keys in the input, we need to specify the correct key to use to save the chat history." + "Note that the entire state is persisted, so we can omit parameters like `language` if no changes are desired:" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, - "outputs": [], - "source": [ - "with_message_history = RunnableWithMessageHistory(\n", - " chain,\n", - " get_session_history,\n", - " input_messages_key=\"messages\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [], - "source": [ - "config = {\"configurable\": {\"session_id\": \"abc11\"}}" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'¡Hola Todd! ¿En qué puedo ayudarte hoy?'" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Tu nombre es Bob.\n" + ] } ], "source": [ - "response = with_message_history.invoke(\n", - " {\"messages\": [HumanMessage(content=\"hi! 
I'm todd\")], \"language\": \"Spanish\"},\n", - " config=config,\n", - ")\n", + "query = \"What is my name?\"\n", "\n", - "response.content" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Tu nombre es Todd.'" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "response = with_message_history.invoke(\n", - " {\"messages\": [HumanMessage(content=\"whats my name?\")], \"language\": \"Spanish\"},\n", - " config=config,\n", + "input_messages = [HumanMessage(query)]\n", + "output = app.invoke(\n", + " {\"messages\": input_messages, \"language\": language},\n", + " config,\n", ")\n", - "\n", - "response.content" + "output[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To help you understand what's happening internally, check out [this LangSmith trace](https://smith.langchain.com/public/f48fabb6-6502-43ec-8242-afc352b769ed/r)" + "To help you understand what's happening internally, check out [this LangSmith trace](https://smith.langchain.com/public/15bd8589-005c-4812-b9b9-23e74ba4c3c6/r)." ] }, { @@ -723,22 +716,22 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 21, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[SystemMessage(content=\"you're a good assistant\"),\n", - " HumanMessage(content='whats 2 + 2'),\n", - " AIMessage(content='4'),\n", - " HumanMessage(content='thanks'),\n", - " AIMessage(content='no problem!'),\n", - " HumanMessage(content='having fun?'),\n", - " AIMessage(content='yes!')]" + "[SystemMessage(content=\"you're a good assistant\", additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='whats 2 + 2', additional_kwargs={}, response_metadata={}),\n", + " AIMessage(content='4', additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='thanks', additional_kwargs={}, response_metadata={}),\n", + " AIMessage(content='no problem!', additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='having fun?', additional_kwargs={}, response_metadata={}),\n", + " AIMessage(content='yes!', additional_kwargs={}, response_metadata={})]" ] }, - "execution_count": 24, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -776,170 +769,112 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "To use it in our chain, we just need to run the trimmer before we pass the `messages` input to our prompt. \n", - "\n", - "Now if we try asking the model our name, it won't know it since we trimmed that part of the chat history:" + "To use it in our chain, we just need to run the trimmer before we pass the `messages` input to our prompt. " ] }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 22, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\"I'm sorry, but I don't have access to your personal information. 
How can I assist you today?\"" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "from operator import itemgetter\n", + "workflow = StateGraph(state_schema=State)\n", "\n", - "from langchain_core.runnables import RunnablePassthrough\n", "\n", - "chain = (\n", - " RunnablePassthrough.assign(messages=itemgetter(\"messages\") | trimmer)\n", - " | prompt\n", - " | model\n", - ")\n", + "def call_model(state: State):\n", + " chain = prompt | model\n", + " # highlight-start\n", + " trimmed_messages = trimmer.invoke(state[\"messages\"])\n", + " response = chain.invoke(\n", + " {\"messages\": trimmed_messages, \"language\": state[\"language\"]}\n", + " )\n", + " # highlight-end\n", + " return {\"messages\": [response]}\n", "\n", - "response = chain.invoke(\n", - " {\n", - " \"messages\": messages + [HumanMessage(content=\"what's my name?\")],\n", - " \"language\": \"English\",\n", - " }\n", - ")\n", - "response.content" + "\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "But if we ask about information that is within the last few messages, it remembers:" + "Now if we try asking the model our name, it won't know it since we trimmed that part of the chat history:" ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 23, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'You asked \"what\\'s 2 + 2?\"'" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "I don't know your name. If you'd like to share it, feel free!\n" + ] } ], "source": [ - "response = chain.invoke(\n", - " {\n", - " \"messages\": messages + [HumanMessage(content=\"what math problem did i ask\")],\n", - " \"language\": \"English\",\n", - " }\n", - ")\n", - "response.content" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's now wrap this in the Message History" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [], - "source": [ - "with_message_history = RunnableWithMessageHistory(\n", - " chain,\n", - " get_session_history,\n", - " input_messages_key=\"messages\",\n", - ")\n", + "config = {\"configurable\": {\"thread_id\": \"abc567\"}}\n", + "query = \"What is my name?\"\n", + "language = \"English\"\n", "\n", - "config = {\"configurable\": {\"session_id\": \"abc20\"}}" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\"I'm sorry, I don't have access to that information. 
How can I assist you today?\"" - ] - }, - "execution_count": 28, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "response = with_message_history.invoke(\n", - " {\n", - " \"messages\": messages + [HumanMessage(content=\"whats my name?\")],\n", - " \"language\": \"English\",\n", - " },\n", - " config=config,\n", + "# highlight-next-line\n", + "input_messages = messages + [HumanMessage(query)]\n", + "output = app.invoke(\n", + " {\"messages\": input_messages, \"language\": language},\n", + " config,\n", ")\n", - "\n", - "response.content" + "output[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "As expected, the first message where we stated our name has been trimmed. Plus there's now two new messages in the chat history (our latest question and the latest response). This means that even more information that used to be accessible in our conversation history is no longer available! In this case our initial math question has been trimmed from the history as well, so the model no longer knows about it:" + "But if we ask about information that is within the last few messages, it remembers:" ] }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 24, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "\"You haven't asked a math problem yet. Feel free to ask any math-related question you have, and I'll be happy to help you with it.\"" - ] - }, - "execution_count": 29, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "You asked what 2 + 2 equals.\n" + ] } ], "source": [ - "response = with_message_history.invoke(\n", - " {\n", - " \"messages\": [HumanMessage(content=\"what math problem did i ask?\")],\n", - " \"language\": \"English\",\n", - " },\n", - " config=config,\n", - ")\n", + "config = {\"configurable\": {\"thread_id\": \"abc678\"}}\n", + "query = \"What math problem did I ask?\"\n", + "language = \"English\"\n", "\n", - "response.content" + "input_messages = messages + [HumanMessage(query)]\n", + "output = app.invoke(\n", + " {\"messages\": input_messages, \"language\": language},\n", + " config,\n", + ")\n", + "output[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "If you take a look at LangSmith, you can see exactly what is happening under the hood in the [LangSmith trace](https://smith.langchain.com/public/a64b8b7c-1fd6-4dbb-b11a-47cd09a5e4f1/r)." + "If you take a look at LangSmith, you can see exactly what is happening under the hood in the [LangSmith trace](https://smith.langchain.com/public/04402eaa-29e6-4bb1-aa91-885b730b6c21/r)." ] }, { @@ -952,32 +887,41 @@ "\n", "It's actually super easy to do this!\n", "\n", - "All chains expose a `.stream` method, and ones that use message history are no different. We can simply use that method to get back a streaming response." + "By default, `.stream` in our LangGraph application streams application steps-- in this case, the single step of the model response. 
Setting `stream_mode=\"messages\"` allows us to stream output tokens instead:" ] }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "|Hi| Todd|!| Sure|,| here|'s| a| joke| for| you|:| Why| couldn|'t| the| bicycle| find| its| way| home|?| Because| it| lost| its| bearings|!| 😄||" + "|Hi| Todd|!| Here|’s| a| joke| for| you|:\n", + "\n", + "|Why| did| the| scare|crow| win| an| award|?\n", + "\n", + "|Because| he| was| outstanding| in| his| field|!||" ] } ], "source": [ - "config = {\"configurable\": {\"session_id\": \"abc15\"}}\n", - "for r in with_message_history.stream(\n", - " {\n", - " \"messages\": [HumanMessage(content=\"hi! I'm todd. tell me a joke\")],\n", - " \"language\": \"English\",\n", - " },\n", - " config=config,\n", + "config = {\"configurable\": {\"thread_id\": \"abc789\"}}\n", + "query = \"Hi I'm Todd, please tell me a joke.\"\n", + "language = \"English\"\n", + "\n", + "input_messages = [HumanMessage(query)]\n", + "# highlight-next-line\n", + "for chunk, metadata in app.stream(\n", + " {\"messages\": input_messages, \"language\": language},\n", + " config,\n", + " # highlight-next-line\n", + " stream_mode=\"messages\",\n", "):\n", - " print(r.content, end=\"|\")" + " if isinstance(chunk, AIMessage): # Filter to just model responses\n", + " print(chunk.content, end=\"|\")" ] }, { @@ -995,7 +939,8 @@ "\n", "- [Streaming](/docs/how_to/streaming): streaming is *crucial* for chat applications\n", "- [How to add message history](/docs/how_to/message_history): for a deeper dive into all things related to message history\n", - "- [How to manage large message history](/docs/how_to/trim_messages/): more techniques for managing a large chat history" + "- [How to manage large message history](/docs/how_to/trim_messages/): more techniques for managing a large chat history\n", + "- [LangGraph main docs](https://langchain-ai.github.io/langgraph/): for more detail on building with LangGraph" ] } ], @@ -1015,7 +960,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.4" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/tutorials/qa_chat_history.ipynb b/docs/docs/tutorials/qa_chat_history.ipynb index d74638ad0404c..72cde13832a64 100644 --- a/docs/docs/tutorials/qa_chat_history.ipynb +++ b/docs/docs/tutorials/qa_chat_history.ipynb @@ -37,8 +37,8 @@ "\n", "We will cover two approaches:\n", "\n", - "1. Chains, in which we always execute a retrieval step;\n", - "2. Agents, in which we give an LLM discretion over whether and how to execute a retrieval step (or multiple steps).\n", + "1. [Chains](/docs/tutorials/qa_chat_history/#chains), in which we always execute a retrieval step;\n", + "2. [Agents](/docs/tutorials/qa_chat_history/#agents), in which we give an LLM discretion over whether and how to execute a retrieval step (or multiple steps).\n", "\n", "For the external knowledge source, we will use the same [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/docs/tutorials/rag)." 
] @@ -87,16 +87,13 @@ "import os\n", "\n", "if not os.environ.get(\"OPENAI_API_KEY\"):\n", - " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", - "\n", - "# import dotenv\n", - "\n", - "# dotenv.load_dotenv()" + " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()" ] }, { + "attachments": {}, "cell_type": "markdown", - "id": "1665e740-ce01-4f09-b9ed-516db0bd326f", + "id": "e207ac1d-4a8e-4172-a9ee-3294519a9a40", "metadata": {}, "source": [ "### LangSmith\n", @@ -107,8 +104,8 @@ "\n", "```python\n", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", - "if not os.environ.get(\"LANGSMITH_API_KEY\"):\n", - " os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()\n", + "if not os.environ.get(\"LANGCHAIN_API_KEY\"):\n", + " os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n", "```" ] }, @@ -134,7 +131,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 1, "id": "cb58f273-2111-4a9b-8932-9b64c95030c8", "metadata": {}, "outputs": [], @@ -144,12 +141,12 @@ "\n", "from langchain_openai import ChatOpenAI\n", "\n", - "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)" + "llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 3, "id": "820244ae-74b4-4593-b392-822979dd91b8", "metadata": {}, "outputs": [], @@ -206,17 +203,17 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 4, "id": "bf55faaf-0d17-4b74-925d-c478b555f7b2", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "\"Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model. This process helps in guiding the agent through the various subgoals required to achieve the overall task efficiently. Different techniques like Chain of Thought and Tree of Thoughts can be used to decompose tasks into step-by-step processes, enhancing performance and understanding of the model's thinking process.\"" + "\"Task decomposition is the process of breaking down a complicated task into smaller, more manageable steps. Techniques like Chain of Thought (CoT) and Tree of Thoughts enhance this process by guiding models to think step by step and explore multiple reasoning possibilities. This approach helps in simplifying complex tasks and provides insight into the model's reasoning.\"" ] }, - "execution_count": 7, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -278,7 +275,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "id": "2b685428-8b82-4af1-be4f-7232c5d55b73", "metadata": {}, "outputs": [], @@ -322,7 +319,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "id": "66f275f3-ddef-4678-b90d-ee64576878f9", "metadata": {}, "outputs": [], @@ -354,7 +351,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 7, "id": "0005810b-1b95-4666-a795-08d80e478b83", "metadata": {}, "outputs": [ @@ -362,7 +359,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Task decomposition can be achieved through various methods such as using techniques like Chain of Thought (CoT) or Tree of Thoughts to break down complex tasks into smaller steps. 
Common ways include prompting the model with simple instructions like \"Steps for XYZ\" or task-specific instructions like \"Write a story outline.\" Human inputs can also be used to guide the task decomposition process effectively.\n" + "Common ways of task decomposition include using simple prompting techniques, such as asking for \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\" Additionally, task-specific instructions can be employed, like \"Write a story outline\" for writing tasks, or human inputs can guide the decomposition process.\n" ] } ], @@ -393,7 +390,7 @@ "source": [ ":::tip\n", "\n", - "Check out the [LangSmith trace](https://smith.langchain.com/public/243301e4-4cc5-4e52-a6e7-8cfe9208398d/r) \n", + "Check out the [LangSmith trace](https://smith.langchain.com/public/243301e4-4cc5-4e52-a6e7-8cfe9208398d/r).\n", "\n", ":::" ] @@ -405,97 +402,131 @@ "source": [ "#### Stateful management of chat history\n", "\n", - "Here we've gone over how to add application logic for incorporating historical outputs, but we're still manually updating the chat history and inserting it into each input. In a real Q&A application we'll want some way of persisting chat history and some way of automatically inserting and updating it.\n", + ":::note\n", + "\n", + "This section of the tutorial previously used the [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) abstraction. You can access that version of the documentation in the [v0.2 docs](https://python.langchain.com/v0.2/docs/tutorials/chatbot/).\n", + "\n", + "As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into new LangChain applications.\n", + "\n", + "If your code is already relying on `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do **not** need to make any changes. We do not plan on deprecating this functionality in the near future as it works for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected.\n", + "\n", + "Please see [How to migrate to LangGraph Memory](/docs/versions/migrating_memory/) for more details.\n", + ":::\n", "\n", - "For this we can use:\n", + "We have added application logic for incorporating chat history, but we are still manually plumbing it through our application. 
In production, the Q&A application will usually persist the chat history into a database, and be able to read and update it appropriately.\n", "\n", - "- [BaseChatMessageHistory](https://python.langchain.com/api_reference/langchain/index.html#module-langchain.memory): Store chat history.\n", - "- [RunnableWithMessageHistory](/docs/how_to/message_history): Wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n", + "[LangGraph](https://langchain-ai.github.io/langgraph/) implements a built-in [persistence layer](https://langchain-ai.github.io/langgraph/concepts/persistence/), making it ideal for chat applications that support multiple conversational turns.\n", "\n", - "For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history) LCEL page.\n", + "Wrapping our chat model in a minimal LangGraph application allows us to automatically persist the message history, simplifying the development of multi-turn applications.\n", "\n", - "Below, we implement a simple example of the second option, in which chat histories are stored in a simple dict. LangChain manages memory integrations with [Redis](/docs/integrations/memory/redis_chat_message_history/) and other technologies to provide for more robust persistence.\n", + "LangGraph comes with a simple in-memory checkpointer, which we use below. See its [documentation](https://langchain-ai.github.io/langgraph/concepts/persistence/) for more detail, including how to use different persistence backends (e.g., SQLite or Postgres).\n", "\n", - "Instances of `RunnableWithMessageHistory` manage the chat history for you. They accept a config with a key (`\"session_id\"` by default) that specifies what conversation history to fetch and prepend to the input, and append the output to the same conversation history. Below is an example:" + "For a detailed walkthrough of how to manage message history, head to the [How to add message history (memory)](/docs/how_to/message_history) guide." 
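The same pattern should extend to Postgres for production deployments. A rough sketch under stated assumptions: it requires the separate `langgraph-checkpoint-postgres` package, a reachable database at the hypothetical `DB_URI` below, and the `workflow` graph compiled in the next cell.

```python
from langgraph.checkpoint.postgres import PostgresSaver

DB_URI = "postgresql://user:pass@localhost:5432/langgraph"  # hypothetical

# `from_conn_string` is used here as a context manager that yields a
# checkpointer bound to an open database connection.
with PostgresSaver.from_conn_string(DB_URI) as checkpointer:
    checkpointer.setup()  # create the checkpoint tables on first use
    app = workflow.compile(checkpointer=checkpointer)
    # ... invoke `app` here, while the connection is open
```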
] }, { "cell_type": "code", - "execution_count": 11, - "id": "9c3fb176-8d6a-4dc7-8408-6a22c5f7cc72", + "execution_count": 8, + "id": "817f8528-ead4-47cd-a4b8-7a1cb8a6641f", "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_message_histories import ChatMessageHistory\n", - "from langchain_core.chat_history import BaseChatMessageHistory\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", - "\n", - "store = {}\n", - "\n", - "\n", - "def get_session_history(session_id: str) -> BaseChatMessageHistory:\n", - " if session_id not in store:\n", - " store[session_id] = ChatMessageHistory()\n", - " return store[session_id]\n", + "from typing import Sequence\n", "\n", - "\n", - "conversational_rag_chain = RunnableWithMessageHistory(\n", - " rag_chain,\n", - " get_session_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"chat_history\",\n", - " output_messages_key=\"answer\",\n", - ")" + "from langchain_core.messages import BaseMessage\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.graph import START, StateGraph\n", + "from langgraph.graph.message import add_messages\n", + "from typing_extensions import Annotated, TypedDict\n", + "\n", + "\n", + "# We define a dict representing the state of the application.\n", + "# This state has the same input and output keys as `rag_chain`.\n", + "class State(TypedDict):\n", + " input: str\n", + " chat_history: Annotated[Sequence[BaseMessage], add_messages]\n", + " context: str\n", + " answer: str\n", + "\n", + "\n", + "# We then define a simple node that runs the `rag_chain`.\n", + "# The `return` values of the node update the graph state, so here we just\n", + "# update the chat history with the input message and response.\n", + "def call_model(state: State):\n", + " response = rag_chain.invoke(state)\n", + " return {\n", + " \"chat_history\": [\n", + " HumanMessage(state[\"input\"]),\n", + " AIMessage(response[\"answer\"]),\n", + " ],\n", + " \"context\": response[\"context\"],\n", + " \"answer\": response[\"answer\"],\n", + " }\n", + "\n", + "\n", + "# Our graph consists only of one node:\n", + "workflow = StateGraph(state_schema=State)\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "\n", + "# Finally, we compile the graph with a checkpointer object.\n", + "# This persists the state, in this case in memory.\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" + ] + }, + { + "cell_type": "markdown", + "id": "6bda388e-c794-4ca5-b96f-0b12f1daaca3", + "metadata": {}, + "source": [ + "This application out-of-the-box supports multiple conversation threads. We pass in a configuration `dict` specifying a unique identifier for a thread to control what thread is run. This enables the application to support interactions with multiple users." ] }, { "cell_type": "code", - "execution_count": 12, - "id": "1046c92f-21b3-4214-907d-92878d8cba23", + "execution_count": 9, + "id": "efdd4bcd-4de8-4d9a-8f95-4dd6960efc0a", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help models decompose hard tasks into multiple manageable subtasks. 
This process allows agents to plan ahead and tackle intricate tasks effectively.'" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Task decomposition is the process of breaking down a complicated task into smaller, more manageable steps. Techniques like Chain of Thought (CoT) and Tree of Thoughts enhance this process by guiding models to think step by step and explore multiple reasoning possibilities. This approach helps in simplifying complex tasks and provides insight into the model's reasoning.\n" + ] } ], "source": [ - "conversational_rag_chain.invoke(\n", + "config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n", + "\n", + "result = app.invoke(\n", " {\"input\": \"What is Task Decomposition?\"},\n", - " config={\n", - " \"configurable\": {\"session_id\": \"abc123\"}\n", - " }, # constructs a key \"abc123\" in `store`.\n", - ")[\"answer\"]" + " config=config,\n", + ")\n", + "print(result[\"answer\"])" ] }, { "cell_type": "code", - "execution_count": 13, - "id": "0e89c75f-7ad7-4331-a2fe-57579eb8f840", + "execution_count": 10, + "id": "8ef6aefc-fe0e-457f-b552-303a45f47342", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Task decomposition can be achieved through various methods such as using Language Model (LLM) with simple prompting, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to break down the task into smaller components. These approaches help in guiding agents to think step by step and decompose complex tasks into more manageable subgoals.'" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "One way of doing task decomposition is by using simple prompting, such as asking the model, \"What are the subgoals for achieving XYZ?\" This method encourages the model to identify and outline the smaller tasks needed to accomplish the larger goal.\n" + ] } ], "source": [ - "conversational_rag_chain.invoke(\n", - " {\"input\": \"What are common ways of doing it?\"},\n", - " config={\"configurable\": {\"session_id\": \"abc123\"}},\n", - ")[\"answer\"]" + "result = app.invoke(\n", + " {\"input\": \"What is one way of doing it?\"},\n", + " config=config,\n", + ")\n", + "print(result[\"answer\"])" ] }, { @@ -503,38 +534,38 @@ "id": "3ab59258-84bc-4904-880e-2ebfebbca563", "metadata": {}, "source": [ - "The conversation history can be inspected in the `store` dict:" + "The conversation history can be inspected via the state of the application:" ] }, { "cell_type": "code", - "execution_count": 14, - "id": "7686b874-3a85-499f-82b5-28a85c4c768c", + "execution_count": 11, + "id": "eddfde25-6fac-4ba2-b52f-0682c73b9c15", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "User: What is Task Decomposition?\n", + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "What is Task Decomposition?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", - "AI: Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help models decompose hard tasks into multiple manageable subtasks. 
This process allows agents to plan ahead and tackle intricate tasks effectively.\n", + "Task decomposition is the process of breaking down a complicated task into smaller, more manageable steps. Techniques like Chain of Thought (CoT) and Tree of Thoughts enhance this process by guiding models to think step by step and explore multiple reasoning possibilities. This approach helps in simplifying complex tasks and provides insight into the model's reasoning.\n", + "================================\u001b[1m Human Message \u001b[0m=================================\n", "\n", - "User: What are common ways of doing it?\n", + "What is one way of doing it?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", - "AI: Task decomposition can be achieved through various methods such as using Language Model (LLM) with simple prompting, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to break down the task into smaller components. These approaches help in guiding agents to think step by step and decompose complex tasks into more manageable subgoals.\n", - "\n" + "One way of doing task decomposition is by using simple prompting, such as asking the model, \"What are the subgoals for achieving XYZ?\" This method encourages the model to identify and outline the smaller tasks needed to accomplish the larger goal.\n" ] } ], "source": [ - "for message in store[\"abc123\"].messages:\n", - " if isinstance(message, AIMessage):\n", - " prefix = \"AI\"\n", - " else:\n", - " prefix = \"User\"\n", - "\n", - " print(f\"{prefix}: {message.content}\\n\")" + "chat_history = app.get_state(config).values[\"chat_history\"]\n", + "for message in chat_history:\n", + " message.pretty_print()" ] }, { @@ -557,24 +588,28 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 12, "id": "71c32048-1a41-465f-a9e2-c4affc332fd9", "metadata": {}, "outputs": [], "source": [ + "from typing import Sequence\n", + "\n", "import bs4\n", "from langchain.chains import create_history_aware_retriever, create_retrieval_chain\n", "from langchain.chains.combine_documents import create_stuff_documents_chain\n", - "from langchain_community.chat_message_histories import ChatMessageHistory\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_core.chat_history import BaseChatMessageHistory\n", + "from langchain_core.messages import AIMessage, BaseMessage, HumanMessage\n", "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", "from langchain_core.vectorstores import InMemoryVectorStore\n", "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.graph import START, StateGraph\n", + "from langgraph.graph.message import add_messages\n", + "from typing_extensions import Annotated, TypedDict\n", "\n", - "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", + "llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n", "\n", "\n", "### Construct retriever ###\n", @@ -639,72 +674,77 @@ "\n", "\n", "### Statefully manage chat history ###\n", - "store = {}\n", + "class State(TypedDict):\n", + " input: str\n", + " chat_history: Annotated[Sequence[BaseMessage], add_messages]\n", + " context: str\n", + " answer: str\n", "\n", "\n", - "def 
get_session_history(session_id: str) -> BaseChatMessageHistory:\n", - " if session_id not in store:\n", - " store[session_id] = ChatMessageHistory()\n", - " return store[session_id]\n", + "def call_model(state: State):\n", + " response = rag_chain.invoke(state)\n", + " return {\n", + " \"chat_history\": [\n", + " HumanMessage(state[\"input\"]),\n", + " AIMessage(response[\"answer\"]),\n", + " ],\n", + " \"context\": response[\"context\"],\n", + " \"answer\": response[\"answer\"],\n", + " }\n", "\n", "\n", - "conversational_rag_chain = RunnableWithMessageHistory(\n", - " rag_chain,\n", - " get_session_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"chat_history\",\n", - " output_messages_key=\"answer\",\n", - ")" + "workflow = StateGraph(state_schema=State)\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)" ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 13, "id": "6d0a7a73-d151-47d9-9e99-b4f3291c0322", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. It involves transforming big tasks into multiple manageable tasks to facilitate problem-solving. Different methods like Chain of Thought and Tree of Thoughts can be employed to decompose tasks effectively.'" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Task decomposition is the process of breaking down a complicated task into smaller, more manageable steps. Techniques like Chain of Thought (CoT) and Tree of Thoughts enhance this process by guiding models to think step by step and explore multiple reasoning possibilities. This approach helps in simplifying complex tasks and improving the model's performance.\n" + ] } ], "source": [ - "conversational_rag_chain.invoke(\n", + "config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n", + "\n", + "result = app.invoke(\n", " {\"input\": \"What is Task Decomposition?\"},\n", - " config={\n", - " \"configurable\": {\"session_id\": \"abc123\"}\n", - " }, # constructs a key \"abc123\" in `store`.\n", - ")[\"answer\"]" + " config=config,\n", + ")\n", + "print(result[\"answer\"])" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 14, "id": "17021822-896a-4513-a17d-1d20b1c5381c", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Task decomposition can be achieved through various methods such as using prompting techniques like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\", providing task-specific instructions like \"Write a story outline,\" or incorporating human inputs to break down complex tasks into smaller components. 
These approaches help in organizing thoughts and planning ahead for successful task completion.'" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "One way of doing task decomposition is by using simple prompting, such as asking the model, \"What are the subgoals for achieving XYZ?\" This method encourages the model to identify and outline the smaller steps needed to complete the larger task.\n" + ] } ], "source": [ - "conversational_rag_chain.invoke(\n", - " {\"input\": \"What are common ways of doing it?\"},\n", - " config={\"configurable\": {\"session_id\": \"abc123\"}},\n", - ")[\"answer\"]" + "result = app.invoke(\n", + " {\"input\": \"What is one way of doing it?\"},\n", + " config=config,\n", + ")\n", + "print(result[\"answer\"])" ] }, { @@ -726,7 +766,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 15, "id": "809cc747-2135-40a2-8e73-e4556343ee64", "metadata": {}, "outputs": [], @@ -751,17 +791,17 @@ }, { "cell_type": "code", - "execution_count": 19, - "id": "931c4fe3-c603-4efb-9b37-5f7cbbb1cbbd", + "execution_count": 16, + "id": "1c8df9d7-6a74-471c-aaef-6c4819ee0cd0", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 
2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.'" + "'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. 
They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:'" ] }, - "execution_count": 19, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -783,7 +823,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 17, "id": "1726d151-4653-4c72-a187-a14840add526", "metadata": {}, "outputs": [], @@ -803,38 +843,70 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 18, "id": "170403a2-c914-41db-85d8-a2c381da112d", "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 1a50f4da-34a7-44af-8cbb-c67c90c9619e, but expected {'tool'} run.\")\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_1ZkTWsLYIlKZ1uMyIQGUuyJx', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-dddbe2d2-2355-4ca5-9961-1ceb39d78cf9-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_1ZkTWsLYIlKZ1uMyIQGUuyJx'}])]}}\n", - "----\n", - "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. 
It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_1ZkTWsLYIlKZ1uMyIQGUuyJx')]}}\n", - "----\n", - "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in managing and solving difficult tasks by dividing them into more manageable components. One common method of task decomposition is the Chain of Thought (CoT) technique, where models are instructed to think step by step to decompose hard tasks into smaller steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step and generates multiple thoughts per step, creating a tree structure. Task decomposition can be facilitated by using simple prompts, task-specific instructions, or human inputs.', response_metadata={'token_usage': {'completion_tokens': 119, 'prompt_tokens': 636, 'total_tokens': 755}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-4a701854-97f2-4ec2-b6e1-73410911fa72-0')]}}\n", - "----\n" + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "What is Task Decomposition?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "Tool Calls:\n", + " blog_post_retriever (call_WKHdiejvg4In982Hr3EympuI)\n", + " Call ID: call_WKHdiejvg4In982Hr3EympuI\n", + " Args:\n", + " query: Task Decomposition\n", + "=================================\u001b[1m Tool Message \u001b[0m=================================\n", + "Name: blog_post_retriever\n", + "\n", + "Fig. 1. Overview of a LLM-powered autonomous agent system.\n", + "Component One: Planning#\n", + "A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\n", + "Task Decomposition#\n", + "Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\n", + "\n", + "Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\n", + "Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. 
\"Write a story outline.\" for writing a novel, or (3) with human inputs.\n", + "\n", + "(3) Task execution: Expert models execute on the specific tasks and log results.\n", + "Instruction:\n", + "\n", + "With the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\n", + "\n", + "Fig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\n", + "The system comprises of 4 stages:\n", + "(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\n", + "Instruction:\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Task Decomposition is a process used in complex problem-solving where a larger task is broken down into smaller, more manageable sub-tasks. This approach enhances the ability of models, particularly large language models (LLMs), to handle intricate tasks by allowing them to think step by step.\n", + "\n", + "There are several methods for task decomposition:\n", + "\n", + "1. **Chain of Thought (CoT)**: This technique encourages the model to articulate its reasoning process by thinking through the task in a sequential manner. It transforms a big task into smaller, manageable steps, which also provides insight into the model's thought process.\n", + "\n", + "2. **Tree of Thoughts**: An extension of CoT, this method explores multiple reasoning possibilities at each step. It decomposes the problem into various thought steps and generates multiple thoughts for each step, creating a tree structure. The evaluation of each state can be done using breadth-first search (BFS) or depth-first search (DFS).\n", + "\n", + "3. **Prompting Techniques**: Task decomposition can be achieved through simple prompts like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\" Additionally, task-specific instructions can guide the model, such as asking it to \"Write a story outline\" for creative tasks.\n", + "\n", + "4. **Human Inputs**: In some cases, human guidance can be used to assist in breaking down tasks.\n", + "\n", + "Overall, task decomposition is a crucial component in planning and executing complex tasks, allowing for better organization and clarity in the problem-solving process.\n" ] } ], "source": [ "query = \"What is Task Decomposition?\"\n", "\n", - "for s in agent_executor.stream(\n", + "for event in agent_executor.stream(\n", " {\"messages\": [HumanMessage(content=query)]},\n", + " stream_mode=\"values\",\n", "):\n", - " print(s)\n", - " print(\"----\")" + " event[\"messages\"][-1].pretty_print()" ] }, { @@ -842,12 +914,12 @@ "id": "1df703b1-aad6-48fb-b6fa-703e32ea88b9", "metadata": {}, "source": [ - "LangGraph comes with built in persistence, so we don't need to use ChatMessageHistory! 
Rather, we can pass in a checkpointer to our LangGraph agent directly" + "We can again take advantage of LangGraph's built-in persistence to save stateful updates to memory:" ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 19, "id": "04a3a664-3c3f-4cd1-9995-26662a52da7c", "metadata": {}, "outputs": [], @@ -871,7 +943,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 20, "id": "d6d70833-b958-4cd7-9e27-29c1c08bb1b8", "metadata": {}, "outputs": [ @@ -879,19 +951,24 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-022806f0-eb26-4c87-9132-ed2fcc6c21ea-0')]}}\n", - "----\n" + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "Hi! I'm bob\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Hello Bob! How can I assist you today?\n" ] } ], "source": [ "config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n", "\n", - "for s in agent_executor.stream(\n", - " {\"messages\": [HumanMessage(content=\"Hi! I'm bob\")]}, config=config\n", + "for event in agent_executor.stream(\n", + " {\"messages\": [HumanMessage(content=\"Hi! I'm bob\")]},\n", + " config=config,\n", + " stream_mode=\"values\",\n", "):\n", - " print(s)\n", - " print(\"----\")" + " event[\"messages\"][-1].pretty_print()" ] }, { @@ -904,7 +981,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 21, "id": "e2c570ae-dd91-402c-8693-ae746de63b16", "metadata": {}, "outputs": [ @@ -912,34 +989,64 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_DdAAJJgGIQOZQgKVE4duDyML', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-acc3c903-4f6f-48dd-8b36-f6f3b80d0856-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_DdAAJJgGIQOZQgKVE4duDyML'}])]}}\n", - "----\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 9a7ba580-ec91-412d-9649-1b5cbf5ae7bc, but expected {'tool'} run.\")\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. 
CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_DdAAJJgGIQOZQgKVE4duDyML')]}}\n", - "----\n" + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "What is Task Decomposition?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "Tool Calls:\n", + " blog_post_retriever (call_0rhrUJiHkoOQxwqCpKTkSkiu)\n", + " Call ID: call_0rhrUJiHkoOQxwqCpKTkSkiu\n", + " Args:\n", + " query: Task Decomposition\n", + "=================================\u001b[1m Tool Message \u001b[0m=================================\n", + "Name: blog_post_retriever\n", + "\n", + "Fig. 1. Overview of a LLM-powered autonomous agent system.\n", + "Component One: Planning#\n", + "A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\n", + "Task Decomposition#\n", + "Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\n", + "\n", + "Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. 
It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\n", + "Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\n", + "\n", + "(3) Task execution: Expert models execute on the specific tasks and log results.\n", + "Instruction:\n", + "\n", + "With the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\n", + "\n", + "Fig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\n", + "The system comprises of 4 stages:\n", + "(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\n", + "Instruction:\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Task Decomposition is a technique used to break down complex tasks into smaller, more manageable steps. This approach is particularly useful in the context of autonomous agents and large language models (LLMs). Here are some key points about Task Decomposition:\n", + "\n", + "1. **Chain of Thought (CoT)**: This is a prompting technique that encourages the model to \"think step by step.\" By doing so, it can utilize more computational resources to decompose difficult tasks into simpler ones, making them easier to handle.\n", + "\n", + "2. **Tree of Thoughts**: An extension of CoT, this method explores multiple reasoning possibilities at each step. It decomposes a problem into various thought steps and generates multiple thoughts for each step, creating a tree structure. This can be evaluated using search methods like breadth-first search (BFS) or depth-first search (DFS).\n", + "\n", + "3. **Methods of Decomposition**: Task decomposition can be achieved through:\n", + " - Simple prompting (e.g., asking for steps to achieve a goal).\n", + " - Task-specific instructions (e.g., requesting a story outline for writing).\n", + " - Human inputs to guide the decomposition process.\n", + "\n", + "4. 
**Execution**: After decomposition, expert models execute the specific tasks and log the results, allowing for a structured approach to complex problem-solving.\n", + "\n", + "Overall, Task Decomposition enhances the model's ability to tackle intricate tasks by breaking them down into simpler, actionable components.\n" ] } ], "source": [ "query = \"What is Task Decomposition?\"\n", "\n", - "for s in agent_executor.stream(\n", - " {\"messages\": [HumanMessage(content=query)]}, config=config\n", + "for event in agent_executor.stream(\n", + " {\"messages\": [HumanMessage(content=query)]},\n", + " config=config,\n", + " stream_mode=\"values\",\n", "):\n", - " print(s)\n", - " print(\"----\")" + " event[\"messages\"][-1].pretty_print()" ] }, { @@ -954,7 +1061,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, "id": "570d8c68-136e-4ba5-969a-03ba195f6118", "metadata": {}, "outputs": [ @@ -962,23 +1069,66 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_KvoiamnLfGEzMeEMlV3u0TJ7', 'function': {'arguments': '{\"query\":\"common ways of task decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 930, 'total_tokens': 951}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-dd842071-6dbd-4b68-8657-892eaca58638-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'common ways of task decomposition'}, 'id': 'call_KvoiamnLfGEzMeEMlV3u0TJ7'}])]}}\n", - "----\n", - "{'action': {'messages': [ToolMessage(content='Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. 
Aim to complete tasks in the least number of steps.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.', name='blog_post_retriever', id='c749bb8e-c8e0-4fa3-bc11-3e2e0651880b', tool_call_id='call_KvoiamnLfGEzMeEMlV3u0TJ7')]}}\n", - "----\n", - "{'agent': {'messages': [AIMessage(content='According to the blog post, common ways of task decomposition include:\\n\\n1. Using language models with simple prompting like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\"\\n2. Utilizing task-specific instructions, for example, using \"Write a story outline\" for writing a novel.\\n3. Involving human inputs in the task decomposition process.\\n\\nThese methods help in breaking down complex tasks into smaller and more manageable steps, facilitating better planning and execution of the overall task.', response_metadata={'token_usage': {'completion_tokens': 100, 'prompt_tokens': 1475, 'total_tokens': 1575}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-98b765b3-f1a6-4c9a-ad0f-2db7950b900f-0')]}}\n", - "----\n" + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "What according to the blog post are common ways of doing it? redo the search\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "Tool Calls:\n", + " blog_post_retriever (call_bZRDF6Xr0QdurM9LItM8cN7a)\n", + " Call ID: call_bZRDF6Xr0QdurM9LItM8cN7a\n", + " Args:\n", + " query: common ways of Task Decomposition\n", + "=================================\u001b[1m Tool Message \u001b[0m=================================\n", + "Name: blog_post_retriever\n", + "\n", + "Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\n", + "Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\n", + "\n", + "Fig. 1. Overview of a LLM-powered autonomous agent system.\n", + "Component One: Planning#\n", + "A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\n", + "Task Decomposition#\n", + "Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. 
CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\n", + "\n", + "Resources:\n", + "1. Internet access for searches and information gathering.\n", + "2. Long Term memory management.\n", + "3. GPT-3.5 powered Agents for delegation of simple tasks.\n", + "4. File output.\n", + "\n", + "Performance Evaluation:\n", + "1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n", + "2. Constructively self-criticize your big-picture behavior constantly.\n", + "3. Reflect on past decisions and strategies to refine your approach.\n", + "4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n", + "\n", + "(3) Task execution: Expert models execute on the specific tasks and log results.\n", + "Instruction:\n", + "\n", + "With the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "According to the blog post, common ways to perform Task Decomposition include:\n", + "\n", + "1. **Simple Prompting**: Using straightforward prompts such as \"Steps for XYZ.\\n1.\" or \"What are the subgoals for achieving XYZ?\" to guide the model in breaking down the task.\n", + "\n", + "2. **Task-Specific Instructions**: Providing specific instructions tailored to the task at hand, such as asking for a \"story outline\" when writing a novel.\n", + "\n", + "3. **Human Inputs**: Involving human guidance or input to assist in the decomposition process, allowing for a more nuanced understanding of the task requirements.\n", + "\n", + "These methods help in transforming complex tasks into smaller, manageable components, facilitating better planning and execution.\n" ] } ], "source": [ "query = \"What according to the blog post are common ways of doing it? 
redo the search\"\n", "\n", - "for s in agent_executor.stream(\n", - " {\"messages\": [HumanMessage(content=query)]}, config=config\n", + "for event in agent_executor.stream(\n", + " {\"messages\": [HumanMessage(content=query)]},\n", + " config=config,\n", + " stream_mode=\"values\",\n", "):\n", - " print(s)\n", - " print(\"----\")" + " event[\"messages\"][-1].pretty_print()" ] }, { @@ -1001,7 +1151,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "b1d2b4d4-e604-497d-873d-d345b808578e", "metadata": {}, "outputs": [], @@ -1016,7 +1166,7 @@ "from langgraph.prebuilt import create_react_agent\n", "\n", "memory = MemorySaver()\n", - "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", + "llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n", "\n", "\n", "### Construct retriever ###\n", @@ -1064,7 +1214,7 @@ "\n", "To explore different types of retrievers and retrieval strategies, visit the [retrievers](/docs/how_to/#retrievers) section of the how-to guides.\n", "\n", - "For a detailed walkthrough of LangChain's conversation memory abstractions, visit the [How to add message history (memory)](/docs/how_to/message_history) LCEL page.\n", + "For a detailed walkthrough of LangChain's conversation memory abstractions, visit the [How to add message history (memory)](/docs/how_to/message_history) guide.\n", "\n", "To learn more about agents, head to the [Agents Modules](/docs/tutorials/agents)." ] @@ -1094,7 +1244,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.4" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/versions/migrating_chains/conversation_chain.ipynb b/docs/docs/versions/migrating_chains/conversation_chain.ipynb index 9f5eb1ba175aa..87af17a655871 100644 --- a/docs/docs/versions/migrating_chains/conversation_chain.ipynb +++ b/docs/docs/versions/migrating_chains/conversation_chain.ipynb @@ -9,13 +9,13 @@ "\n", "[`ConversationChain`](https://python.langchain.com/api_reference/langchain/chains/langchain.chains.conversation.base.ConversationChain.html) incorporated a memory of previous messages to sustain a stateful conversation.\n", "\n", - "Some advantages of switching to the LCEL implementation are:\n", + "Some advantages of switching to the Langgraph implementation are:\n", "\n", "- Innate support for threads/separate sessions. To make this work with `ConversationChain`, you'd need to instantiate a separate memory class outside the chain.\n", "- More explicit parameters. `ConversationChain` contains a hidden default prompt, which can cause confusion.\n", "- Streaming support. `ConversationChain` only supports streaming via callbacks.\n", "\n", - "`RunnableWithMessageHistory` implements sessions via configuration parameters. It should be instantiated with a callable that returns a [chat message history](https://python.langchain.com/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html). By default, it expects this function to take a single argument `session_id`." + "Langgraph's [checkpointing](https://langchain-ai.github.io/langgraph/how-tos/persistence/) system supports multiple threads or sessions, which can be specified via the `\"thread_id\"` key in its configuration parameters." ] }, { @@ -61,9 +61,9 @@ { "data": { "text/plain": [ - "{'input': 'how are you?',\n", + "{'input': \"I'm Bob, how are you?\",\n", " 'history': '',\n", - " 'response': \"Arr matey, I be doin' well on the high seas, plunderin' and pillagin' as usual. 
How be ye?\"}" + " 'response': \"Arrr matey, I be a pirate sailin' the high seas. What be yer business with me?\"}" ] }, "execution_count": 2, @@ -93,31 +93,21 @@ " prompt=prompt,\n", ")\n", "\n", - "chain({\"input\": \"how are you?\"})" - ] - }, - { - "cell_type": "markdown", - "id": "f8e36b0e-c7dc-4130-a51b-189d4b756c7f", - "metadata": {}, - "source": [ - "\n", - "\n", - "## LCEL\n", - "\n", - "
" + "chain({\"input\": \"I'm Bob, how are you?\"})" ] }, { "cell_type": "code", "execution_count": 3, - "id": "666c92a0-b555-4418-a465-6490c1b92570", + "id": "53f2c723-178f-470a-8147-54e7cb982211", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "\"Arr, me matey! I be doin' well, sailin' the high seas and searchin' for treasure. How be ye?\"" + "{'input': 'What is my name?',\n", + " 'history': \"Human: I'm Bob, how are you?\\nAI: Arrr matey, I be a pirate sailin' the high seas. What be yer business with me?\",\n", + " 'response': 'Your name be Bob, matey.'}" ] }, "execution_count": 3, @@ -126,88 +116,120 @@ } ], "source": [ - "from langchain_core.chat_history import InMemoryChatMessageHistory\n", - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", + "chain({\"input\": \"What is my name?\"})" + ] + }, + { + "cell_type": "markdown", + "id": "f8e36b0e-c7dc-4130-a51b-189d4b756c7f", + "metadata": {}, + "source": [ + "
\n", + "\n", + "## Langgraph\n", + "\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a59b910c-0d02-41aa-bc99-441f11989cf8", + "metadata": {}, + "outputs": [], + "source": [ + "import uuid\n", + "\n", "from langchain_openai import ChatOpenAI\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "from langgraph.graph import START, MessagesState, StateGraph\n", "\n", - "prompt = ChatPromptTemplate.from_messages(\n", - " [\n", - " (\"system\", \"You are a pirate. Answer the following questions as best you can.\"),\n", - " (\"placeholder\", \"{chat_history}\"),\n", - " (\"human\", \"{input}\"),\n", - " ]\n", - ")\n", + "model = ChatOpenAI(model=\"gpt-4o-mini\")\n", "\n", - "history = InMemoryChatMessageHistory()\n", + "# Define a new graph\n", + "workflow = StateGraph(state_schema=MessagesState)\n", "\n", "\n", - "def get_history():\n", - " return history\n", + "# Define the function that calls the model\n", + "def call_model(state: MessagesState):\n", + " response = model.invoke(state[\"messages\"])\n", + " return {\"messages\": response}\n", "\n", "\n", - "chain = prompt | ChatOpenAI() | StrOutputParser()\n", + "# Define the two nodes we will cycle between\n", + "workflow.add_edge(START, \"model\")\n", + "workflow.add_node(\"model\", call_model)\n", + "\n", + "# Add memory\n", + "memory = MemorySaver()\n", + "app = workflow.compile(checkpointer=memory)\n", "\n", - "wrapped_chain = RunnableWithMessageHistory(\n", - " chain,\n", - " get_history,\n", - " history_messages_key=\"chat_history\",\n", - ")\n", "\n", - "wrapped_chain.invoke({\"input\": \"how are you?\"})" + "# The thread id is a unique key that identifies\n", + "# this particular conversation.\n", + "# We'll just generate a random uuid here.\n", + "thread_id = uuid.uuid4()\n", + "config = {\"configurable\": {\"thread_id\": thread_id}}" ] }, { - "cell_type": "markdown", - "id": "6b386ce6-895e-442c-88f3-7bec0ab9f401", + "cell_type": "code", + "execution_count": 5, + "id": "3a9df4bb-e804-4373-9a15-a29dc0371595", "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "I'm Bob, how are you?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Ahoy, Bob! I be feelin' as lively as a ship in full sail! How be ye on this fine day?\n" + ] + } + ], "source": [ - "The above example uses the same `history` for all sessions. The example below shows how to use a different chat history for each session." + "query = \"I'm Bob, how are you?\"\n", + "\n", + "input_messages = [\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"You are a pirate. Answer the following questions as best you can.\",\n", + " },\n", + " {\"role\": \"user\", \"content\": query},\n", + "]\n", + "for event in app.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):\n", + " event[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "code", - "execution_count": 4, - "id": "96152263-98d7-4e06-8c73-d0c0abf3e8e9", + "execution_count": 6, + "id": "d3f77e69-fa3d-496c-968c-86371e1e8cf1", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'Ahoy there, me hearty! 
What can this old pirate do for ye today?'" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "What is my name?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Ye be callin' yerself Bob, I reckon! A fine name for a swashbuckler like yerself!\n" + ] } ], "source": [ - "from langchain_core.chat_history import BaseChatMessageHistory\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", - "\n", - "store = {}\n", - "\n", - "\n", - "def get_session_history(session_id: str) -> BaseChatMessageHistory:\n", - " if session_id not in store:\n", - " store[session_id] = InMemoryChatMessageHistory()\n", - " return store[session_id]\n", - "\n", - "\n", - "chain = prompt | ChatOpenAI() | StrOutputParser()\n", - "\n", - "wrapped_chain = RunnableWithMessageHistory(\n", - " chain,\n", - " get_session_history,\n", - " history_messages_key=\"chat_history\",\n", - ")\n", + "query = \"What is my name?\"\n", "\n", - "wrapped_chain.invoke(\n", - " {\"input\": \"Hello!\"},\n", - " config={\"configurable\": {\"session_id\": \"abc123\"}},\n", - ")" + "input_messages = [{\"role\": \"user\", \"content\": query}]\n", + "for event in app.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):\n", + " event[\"messages\"][-1].pretty_print()" ] }, { diff --git a/docs/docs/versions/migrating_memory/chat_history.ipynb b/docs/docs/versions/migrating_memory/chat_history.ipynb new file mode 100644 index 0000000000000..fc164ee13580e --- /dev/null +++ b/docs/docs/versions/migrating_memory/chat_history.ipynb @@ -0,0 +1,300 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "c298a5c9-b9af-481d-9eba-cbd65f987a8a", + "metadata": {}, + "source": [ + "# How to use BaseChatMessageHistory with LangGraph\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "* [Chat History](/docs/concepts/#chat-history)\n", + "* [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html)\n", + "* [LangGraph](https://langchain-ai.github.io/langgraph/concepts/high_level/)\n", + "* [Memory](https://langchain-ai.github.io/langgraph/concepts/agentic_concepts/#memory)\n", + ":::\n", + "\n", + "We recommend that new LangChain applications take advantage of the [built-in LangGraph peristence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to implement memory.\n", + "\n", + "In some situations, users may need to keep using an existing persistence solution for chat message history.\n", + "\n", + "Here, we will show how to use [LangChain chat message histories](https://python.langchain.com/docs/integrations/memory/) (implementations of [BaseChatMessageHistory](https://python.langchain.com/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html)) with LangGraph." 
+ ] + }, + { + "cell_type": "markdown", + "id": "548bc988-167b-43f1-860a-d247e28b2b42", + "metadata": {}, + "source": [ + "## Set up" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "6cbfd2ab-7537-4269-8249-646fa89bf016", + "metadata": {}, + "outputs": [], + "source": [ + "%%capture --no-stderr\n", + "%pip install --upgrade --quiet langchain-anthropic langgraph" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0694febf-dfa8-46ef-babc-f8b16b5a2926", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from getpass import getpass\n", + "\n", + "if \"ANTHROPIC_API_KEY\" not in os.environ:\n", + " os.environ[\"ANTHROPIC_API_KEY\"] = getpass()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "c5e08659-b68c-48f2-8b33-e79b0c6999e1", + "metadata": {}, + "source": [ + "## ChatMessageHistory\n", + "\n", + "A message history needs to be parameterized by a conversation ID, or perhaps by the 2-tuple of (user ID, conversation ID).\n", + "\n", + "Many of the [LangChain chat message histories](https://python.langchain.com/docs/integrations/memory/) will have either a `session_id` or some `namespace` to allow keeping track of different conversations. Please refer to the specific implementations to check how they are parameterized.\n", + "\n", + "The built-in `InMemoryChatMessageHistory` does not contain such a parameterization, so we'll create a dictionary to keep track of the message histories." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28049308-2543-48e6-90d0-37a88951a637", + "metadata": {}, + "outputs": [], + "source": [ + "import uuid\n", + "\n", + "from langchain_core.chat_history import InMemoryChatMessageHistory\n", + "\n", + "chats_by_session_id = {}\n", + "\n", + "\n", + "def get_chat_history(session_id: str) -> InMemoryChatMessageHistory:\n", + " chat_history = chats_by_session_id.get(session_id)\n", + " if chat_history is None:\n", + " chat_history = InMemoryChatMessageHistory()\n", + " chats_by_session_id[session_id] = chat_history\n", + " return chat_history" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "94c53ce3-4212-41e6-8ad3-f0ab5df6130f", + "metadata": {}, + "source": [ + "## Use with LangGraph\n", + "\n", + "Next, we'll set up a basic chat bot using LangGraph. If you're not familiar with LangGraph, you should look at the following [Quick Start Tutorial](https://langchain-ai.github.io/langgraph/tutorials/introduction/).\n", + "\n", + "We'll create a [LangGraph node](https://langchain-ai.github.io/langgraph/concepts/low_level/#nodes) for the chat model, and manually manage the conversation history, taking into account the conversation ID passed as part of the RunnableConfig.\n", + "\n", + "The conversation ID can be passed as either part of the RunnableConfig (as we'll do here), or as part of the [graph state](https://langchain-ai.github.io/langgraph/concepts/low_level/#state)." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "a6633dd2-2d6a-4121-b087-4907c9f588ca", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "hi! I'm bob\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Hello Bob! It's nice to meet you. I'm Claude, an AI assistant created by Anthropic. 
How are you doing today?\n", + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "what was my name?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "You introduced yourself as Bob when you said \"hi! I'm bob\".\n" + ] + } + ], + "source": [ + "import uuid\n", + "\n", + "from langchain_anthropic import ChatAnthropic\n", + "from langchain_core.messages import BaseMessage, HumanMessage\n", + "from langchain_core.runnables import RunnableConfig\n", + "from langgraph.graph import START, MessagesState, StateGraph\n", + "\n", + "# Define a new graph\n", + "builder = StateGraph(state_schema=MessagesState)\n", + "\n", + "# Define a chat model\n", + "model = ChatAnthropic(model=\"claude-3-haiku-20240307\")\n", + "\n", + "\n", + "# Define the function that calls the model\n", + "def call_model(state: MessagesState, config: RunnableConfig) -> list[BaseMessage]:\n", + " # Make sure that config is populated with the session id\n", + " if \"configurable\" not in config or \"session_id\" not in config[\"configurable\"]:\n", + " raise ValueError(\n", + " \"Make sure that the config includes the following information: {'configurable': {'session_id': 'some_value'}}\"\n", + " )\n", + " # Fetch the history of messages and append to it any new messages.\n", + " # highlight-start\n", + " chat_history = get_chat_history(config[\"configurable\"][\"session_id\"])\n", + " messages = list(chat_history.messages) + state[\"messages\"]\n", + " # highlight-end\n", + " ai_message = model.invoke(messages)\n", + " # Finally, update the chat message history to include\n", + " # the new input message from the user together with the\n", + " # repsonse from the model.\n", + " # highlight-next-line\n", + " chat_history.add_messages(state[\"messages\"] + [ai_message])\n", + " return {\"messages\": ai_message}\n", + "\n", + "\n", + "# Define the two nodes we will cycle between\n", + "builder.add_edge(START, \"model\")\n", + "builder.add_node(\"model\", call_model)\n", + "\n", + "graph = builder.compile()\n", + "\n", + "# Here, we'll create a unique session ID to identify the conversation\n", + "session_id = uuid.uuid4()\n", + "config = {\"configurable\": {\"session_id\": session_id}}\n", + "\n", + "input_message = HumanMessage(content=\"hi! 
I'm bob\")\n", + "for event in graph.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n", + " event[\"messages\"][-1].pretty_print()\n", + "\n", + "# Here, let's confirm that the AI remembers our name!\n", + "input_message = HumanMessage(content=\"what was my name?\")\n", + "for event in graph.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n", + " event[\"messages\"][-1].pretty_print()" + ] + }, + { + "cell_type": "markdown", + "id": "4c0766af-a3b3-4293-b253-3a10f365ab5d", + "metadata": {}, + "source": [ + ":::hint\n", + "\n", + "This also supports streaming LLM content token by token if using langgraph >= 0.2.28.\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "044b63dd-fb15-4a03-89c5-aaaf7346ea76", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You| sai|d your| name was Bob.|" + ] + } + ], + "source": [ + "from langchain_core.messages import AIMessageChunk\n", + "\n", + "first = True\n", + "\n", + "for msg, metadata in graph.stream(\n", + " {\"messages\": input_message}, config, stream_mode=\"messages\"\n", + "):\n", + " if msg.content and not isinstance(msg, HumanMessage):\n", + " print(msg.content, end=\"|\", flush=True)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "da0536dd-9a0b-49e3-b0b6-e8c7abf3b1f9", + "metadata": {}, + "source": [ + "## Using With RunnableWithMessageHistory\n", + "\n", + "This how-to guide used the `messages` and `add_messages` interface of `BaseChatMessageHistory` directly. \n", + "\n", + "Alternatively, you can use [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html), as [LCEL](/docs/concepts/#langchain-expression-language-lcel/) can be used inside any [LangGraph node](https://langchain-ai.github.io/langgraph/concepts/low_level/#nodes).\n", + "\n", + "To do that replace the following code:\n", + "\n", + "```python\n", + "def call_model(state: MessagesState, config: RunnableConfig) -> list[BaseMessage]:\n", + " # highlight-start\n", + " # Make sure that config is populated with the session id\n", + " if \"configurable\" not in config or \"session_id\" not in config[\"configurable\"]:\n", + " raise ValueError(\n", + " \"You make sure that the config includes the following information: {'configurable': {'session_id': 'some_value'}}\"\n", + " )\n", + " # Fetch the history of messages and append to it any new messages.\n", + " chat_history = get_chat_history(config[\"configurable\"][\"session_id\"])\n", + " messages = list(chat_history.messages) + state[\"messages\"]\n", + " ai_message = model.invoke(messages)\n", + " # Finally, update the chat message history to include\n", + " # the new input message from the user together with the\n", + " # repsonse from the model.\n", + " chat_history.add_messages(state[\"messages\"] + [ai_message])\n", + " # hilight-end\n", + " return {\"messages\": ai_message}\n", + "```\n", + "\n", + "With the corresponding instance of `RunnableWithMessageHistory` defined in your current application.\n", + "\n", + "```python\n", + "runnable = RunnableWithMessageHistory(...) 
+ "\n", + "def call_model(state: MessagesState, config: RunnableConfig) -> dict:\n", + " # RunnableWithMessageHistory takes care of reading the message history\n", + " # and updating it with the new human message and AI response.\n", + " ai_message = runnable.invoke(state['messages'], config)\n", + " return {\n", + " \"messages\": ai_message\n", + " }\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/docs/versions/migrating_memory/index.mdx b/docs/docs/versions/migrating_memory/index.mdx index 7d3d27e7203eb..800de108629cd 100644 --- a/docs/docs/versions/migrating_memory/index.mdx +++ b/docs/docs/versions/migrating_memory/index.mdx @@ -2,10 +2,31 @@ sidebar_position: 1 --- -# How to migrate from v0.0 memory +# How to migrate to LangGraph memory + +As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into their LangChain application. + +* Users that rely on `RunnableWithMessageHistory` or `BaseChatMessageHistory` do **not** need to make any changes, but are encouraged to consider using LangGraph for more complex use cases. +* Users that rely on deprecated memory abstractions from LangChain 0.0.x should follow this guide to upgrade to the new LangGraph persistence feature in LangChain 0.3.x. + +## Why use LangGraph for memory? + +The main advantages of persistence in LangGraph are: + +- Built-in support for multiple users and conversations, which is a typical requirement for real-world conversational AI applications. +- Ability to save and resume complex conversations at any point. This helps with: + - Error recovery + - Allowing human intervention in AI workflows + - Exploring different conversation paths ("time travel") +- Full compatibility with both traditional [language models](/docs/concepts/#llms) and modern [chat models](/docs/concepts/#chat-models). Early memory implementations in LangChain weren't designed for newer chat model APIs, causing issues with features like tool-calling. LangGraph memory can persist any custom state. +- Highly customizable, allowing you to fully control how memory works and use different storage backends. + +## Evolution of memory in LangChain The concept of memory has evolved significantly in LangChain since its initial release. +### LangChain 0.0.x memory + Broadly speaking, LangChain 0.0.x memory was used to handle three main use cases: | Use Case | Example | @@ -16,16 +37,27 @@ Broadly speaking, LangChain 0.0.x memory was used to handle three main use cases While the LangChain 0.0.x memory abstractions were useful, they were limited in their capabilities and not well suited for real-world conversational AI applications. These memory abstractions lacked built-in support for multi-user, multi-conversation scenarios, which are essential for practical conversational AI systems. -This guide will help you migrate your usage of memory implementations from LangChain v0.0.x to the persistence implementations of LangGraph.
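+To make the contrast concrete, here is a minimal sketch of the LangChain 0.0.x pattern discussed above (assuming the legacy `langchain.memory` imports are still available in your environment; the conversation snippets are made up):
+
+```python
+from langchain.memory import ConversationBufferMemory
+
+memory = ConversationBufferMemory()
+
+# Each turn is written into the buffer as an input/output pair.
+memory.save_context({"input": "hi! i'm bob"}, {"output": "hello bob!"})
+
+# The whole transcript is read back as a single prompt variable.
+memory.load_memory_variables({})
+# -> {'history': "Human: hi! i'm bob\nAI: hello bob!"}
+```
+
+Note that the buffer is not keyed by user or conversation: one memory object holds one history, so multi-user and multi-conversation support had to be built around it by the application.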
+Most of these implementations have been officially deprecated in LangChain 0.3.x in favor of LangGraph persistence. -## Why use LangGraph for memory? +### RunnableWithMessageHistory and BaseChatMessageHistory + +:::note +Please see [How to use BaseChatMessageHistory with LangGraph](./chat_history) if you would like to use `BaseChatMessageHistory` (with or without `RunnableWithMessageHistory`) in LangGraph. +::: + +As of LangChain v0.1, we started recommending that users rely primarily on [BaseChatMessageHistory](https://python.langchain.com/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html). `BaseChatMessageHistory` serves +as a simple persistence layer for storing and retrieving messages in a conversation. -The main advantages of persistence implementation in LangGraph are: +At that time, the only option for orchestrating LangChain chains was via [LCEL](https://python.langchain.com/docs/how_to/#langchain-expression-language-lcel). To incorporate memory with `LCEL`, users had to use the [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) interface. While it was sufficient for basic chat applications, many users found the API unintuitive and challenging to use. -- Built-in support for multi-user, multi-conversation scenarios which is often a requirement for real-world conversational AI applications. -- Ability to save and resume complex state at any time for error recovery, human-in-the-loop workflows, time travel interactions, and more. -- Full support for both [LLM](/docs/concepts/#llms) and [chat models](/docs/concepts/#chat-models). In contrast, the v0.0.x memory abstractions were created prior to the existence and widespread adoption of chat model APIs, and so it does not work well with chat models (e.g., fails with tool calling chat models). -- Offers a high degree of customization and control over the memory implementation, including the ability to use different backends. +As of LangChain v0.3, we recommend that **new** code take advantage of LangGraph for both orchestration and persistence: + +- Orchestration: In LangGraph, users define [graphs](https://langchain-ai.github.io/langgraph/concepts/low_level/) that specify the flow of the application. This allows users to keep using `LCEL` within individual nodes when `LCEL` is needed, while making it easy to define complex orchestration logic that is more readable and maintainable. +- Persistence: Users can rely on LangGraph's [persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to store and retrieve data. LangGraph persistence is extremely flexible and can support a much wider range of use cases than the `RunnableWithMessageHistory` interface. + +:::important +If you have been using `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do not need to make any changes. We do not plan on deprecating either functionality in the near future. This functionality is sufficient for simple chat applications, and any code that uses `RunnableWithMessageHistory` will continue to work as expected.
+::: ## Migrations @@ -45,19 +77,21 @@ Often this involves trimming and / or summarizing the conversation history to ke Memory classes that fall into this category include: -| Memory Type | How to Migrate | Description | -|-----------------------------------|:-------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `ConversationBufferMemory` | [Link to Migration Guide](conversation_buffer_memory) | A basic memory implementation that simply stores the conversation history. | -| `ConversationStringBufferMemory` | [Link to Migration Guide](conversation_buffer_memory) | A special case of `ConversationBufferMemory` designed for LLMs and no longer relevant. | -| `ConversationBufferWindowMemory` | [Link to Migration Guide](conversation_buffer_window_memory) | Keeps the last `n` turns of the conversation. Drops the oldest turn when the buffer is full. | -| `ConversationTokenBufferMemory` | [Link to Migration Guide](conversation_buffer_window_memory) | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. | -| `ConversationSummaryMemory` | [Link to Migration Guide](conversation_summary_memory) | Continually summarizes the conversation history. The summary is updated after each conversation turn. The abstraction returns the summary of the conversation history. | -| `ConversationSummaryBufferMemory` | [Link to Migration Guide](conversation_summary_memory) | Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. | -| `VectorStoreRetrieverMemory` | No migration guide yet | Stores the conversation history in a vector store and retrieves the most relevant parts of past conversation based on the input. | +| Memory Type | How to Migrate | Description | +|-----------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `ConversationBufferMemory` | [Link to Migration Guide](conversation_buffer_memory) | A basic memory implementation that simply stores the conversation history. | +| `ConversationStringBufferMemory` | [Link to Migration Guide](conversation_buffer_memory) | A special case of `ConversationBufferMemory` designed for LLMs and no longer relevant. | +| `ConversationBufferWindowMemory` | [Link to Migration Guide](conversation_buffer_window_memory) | Keeps the last `n` turns of the conversation. Drops the oldest turn when the buffer is full. | +| `ConversationTokenBufferMemory` | [Link to Migration Guide](conversation_buffer_window_memory) | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. | +| `ConversationSummaryMemory` | [Link to Migration Guide](conversation_summary_memory) | Continually summarizes the conversation history. 
The summary is updated after each conversation turn. The abstraction returns the summary of the conversation history. | +| `ConversationSummaryBufferMemory` | [Link to Migration Guide](conversation_summary_memory) | Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. | +| `VectorStoreRetrieverMemory` | See related [long-term memory agent tutorial](https://langchain-ai.github.io/langgraph/tutorials/memory/long_term_memory_agent/) | Stores the conversation history in a vector store and retrieves the most relevant parts of past conversation based on the input. | ### 2. Extraction of structured information from the conversation history +Please see the [long-term memory agent tutorial](https://langchain-ai.github.io/langgraph/tutorials/memory/long_term_memory_agent/), which implements an agent that can extract structured information from the conversation history. + Memory classes that fall into this category include: | Memory Type | Description | @@ -78,9 +112,10 @@ These abstractions have not received much development since their initial releas is that for these abstractions to be useful they typically require a lot of specialization for a particular application, so these abstractions are not as widely used as the conversation history management abstractions. -For this reason, there are no migration guides for these abstractions. If you're struggling to migrate an applications -that relies on these abstractions, please open an issue on the LangChain GitHub repository and we'll try to prioritize providing -more guidance on how to migrate these abstractions. +For this reason, there are no migration guides for these abstractions. If you're struggling to migrate an application +that relies on these abstractions, please: +1) Review the [long-term memory agent tutorial](https://langchain-ai.github.io/langgraph/tutorials/memory/long_term_memory_agent/), which should provide a good starting point for how to extract structured information from the conversation history. +2) If you're still struggling, please open an issue on the LangChain GitHub repository, explain your use case, and we'll try to provide more guidance on how to migrate these abstractions. The general strategy for extracting structured information from the conversation history is to use a chat model with tool calling capabilities to extract structured information from the conversation history. The extracted information can then be saved into an appropriate data structure (e.g., a dictionary), and information from it can be retrieved and added into the prompt as needed.
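+As a minimal sketch of that strategy (the `UserProfile` schema and the plain-dict store here are hypothetical illustrations, not LangChain APIs):
+
+```python
+from typing import Optional
+
+from langchain_anthropic import ChatAnthropic
+from langchain_core.messages import AIMessage, HumanMessage
+from pydantic import BaseModel, Field
+
+
+class UserProfile(BaseModel):
+    """Facts worth remembering about the user."""
+
+    name: Optional[str] = Field(None, description="The user's name, if mentioned")
+    interests: list[str] = Field(default_factory=list)
+
+
+model = ChatAnthropic(model="claude-3-haiku-20240307")
+# with_structured_output uses tool calling under the hood, so the model's
+# reply is coerced into the UserProfile schema.
+extractor = model.with_structured_output(UserProfile)
+
+history = [
+    HumanMessage("hi! i'm bob and i like hiking"),
+    AIMessage("hello bob! nice to meet a fellow hiker"),
+]
+profile = extractor.invoke(history)
+
+# Save the extracted facts per session; later, retrieve them and add
+# them to the prompt as needed.
+store = {"session-1": profile.model_dump()}
+```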
diff --git a/docs/sidebars.js b/docs/sidebars.js index ab4ea6196af7f..af0dae43d9e18 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -104,7 +104,7 @@ module.exports = { }, { type: "category", - label: "Migrating from v0.0 memory", + label: "Upgrading to LangGraph memory", link: {type: 'doc', id: 'versions/migrating_memory/index'}, collapsible: false, collapsed: false, diff --git a/docs/static/img/message_history.png b/docs/static/img/message_history.png deleted file mode 100644 index 31f7664d286bf..0000000000000 Binary files a/docs/static/img/message_history.png and /dev/null differ diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py index b3db8b2104f42..05795e6c4387c 100644 --- a/libs/core/langchain_core/messages/utils.py +++ b/libs/core/langchain_core/messages/utils.py @@ -581,12 +581,38 @@ def trim_messages( ) -> list[BaseMessage]: """Trim messages to be below a token count. + trim_messages can be used to reduce the size of a chat history to a specified token + count or a specified message count. + + In either case, if passing the trimmed chat history back into a chat model + directly, the resulting chat history should usually satisfy the following + properties: + + 1. The resulting chat history should be valid. Most chat models expect that chat + history starts with either (1) a `HumanMessage` or (2) a `SystemMessage` followed + by a `HumanMessage`. To achieve this, set `start_on="human"`. + In addition, generally a `ToolMessage` can only appear after an `AIMessage` + that involved a tool call. + Please see the following link for more information about messages: + https://python.langchain.com/docs/concepts/#messages + 2. It includes recent messages and drops old messages in the chat history. + To achieve this, set `strategy="last"`. + 3. Usually, the new chat history should include the `SystemMessage` if it + was present in the original chat history since the `SystemMessage` includes + special instructions to the chat model. The `SystemMessage` is almost always + the first message in the history if present. To achieve this, set + `include_system=True`. + + **Note:** The examples below show how to configure `trim_messages` to achieve + a behavior consistent with the above properties. + Args: messages: Sequence of Message-like objects to trim. max_tokens: Max token count of trimmed messages. token_counter: Function or llm for counting tokens in a BaseMessage or a list of BaseMessage. If a BaseLanguageModel is passed in then BaseLanguageModel.get_num_tokens_from_messages() will be used. + Set to `len` to count the number of **messages** in the chat history. strategy: Strategy for trimming. - "first": Keep the first <= n_count tokens of the messages. - "last": Keep the last <= n_count tokens of the messages. @@ -633,11 +659,97 @@ def trim_messages( ``strategy`` is specified. Example: + Trim chat history based on token count, keeping the SystemMessage if + present, and ensuring that the chat history starts with a HumanMessage ( + or a SystemMessage followed by a HumanMessage). + + .. code-block:: python
from typing import List - from langchain_core.messages import trim_messages, AIMessage, BaseMessage, HumanMessage, SystemMessage + from langchain_core.messages import ( + AIMessage, + HumanMessage, + BaseMessage, + SystemMessage, + trim_messages, + ) + from langchain_openai import ChatOpenAI + + messages = [ + SystemMessage("you're a good assistant, you always respond with a joke."), + HumanMessage("i wonder why it's called langchain"), + AIMessage( + 'Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!' + ), + HumanMessage("and who is harrison chasing anyways"), + AIMessage( + "Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!" + ), + HumanMessage("what do you call a speechless parrot"), + ] + + + trim_messages( + messages, + max_tokens=45, + strategy="last", + token_counter=ChatOpenAI(model="gpt-4o"), + # Most chat models expect that chat history starts with either: + # (1) a HumanMessage or + # (2) a SystemMessage followed by a HumanMessage + start_on="human", + # Usually, we want to keep the SystemMessage + # if it's present in the original history. + # The SystemMessage has special instructions for the model. + include_system=True, + allow_partial=False, + ) + + .. code-block:: python + + [ + SystemMessage(content="you're a good assistant, you always respond with a joke."), + HumanMessage(content='what do you call a speechless parrot'), + ] + + Trim chat history based on the message count, keeping the SystemMessage if + present, and ensuring that the chat history starts with a HumanMessage ( + or a SystemMessage followed by a HumanMessage). + + .. code-block:: python + + trim_messages( + messages, + # When `len` is passed in as the token counter function, + # max_tokens will count the number of messages in the chat history. + max_tokens=4, + strategy="last", + token_counter=len, + # Most chat models expect that chat history starts with either: + # (1) a HumanMessage or + # (2) a SystemMessage followed by a HumanMessage + start_on="human", + # Usually, we want to keep the SystemMessage + # if it's present in the original history. + # The SystemMessage has special instructions for the model. + include_system=True, + allow_partial=False, + ) + + .. code-block:: python + + [ + SystemMessage(content="you're a good assistant, you always respond with a joke."), + HumanMessage(content='and who is harrison chasing anyways'), + AIMessage(content="Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"), + HumanMessage(content='what do you call a speechless parrot'), + ] + + + Trim chat history using a custom token counter function that counts the + number of tokens in each message. + + .. code-block:: python messages = [ SystemMessage("This is a 4 token text. The full message is 10 tokens."), @@ -670,18 +782,6 @@ def dummy_token_counter(messages: List[BaseMessage]) -> int: count += default_msg_prefix_len + len(msg.content) * default_content_len + default_msg_suffix_len count return count - First 30 tokens, not allowing partial messages: - .. code-block:: python - - trim_messages(messages, max_tokens=30, token_counter=dummy_token_counter, strategy="first") - - .. code-block:: python - - [ - SystemMessage("This is a 4 token text. The full message is 10 tokens."), - HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"), - ] - First 30 tokens, allowing partial messages: ..
code-block:: python @@ -700,108 +800,6 @@ def dummy_token_counter(messages: List[BaseMessage]) -> int: HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"), AIMessage( [{"type": "text", "text": "This is the FIRST 4 token block."}], id="second"), ] - - First 30 tokens, allowing partial messages, have to end on HumanMessage: - .. code-block:: python - - trim_messages( - messages, - max_tokens=30, - token_counter=dummy_token_counter, - strategy="first" - allow_partial=True, - end_on="human", - ) - - .. code-block:: python - - [ - SystemMessage("This is a 4 token text. The full message is 10 tokens."), - HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"), - ] - - - Last 30 tokens, including system message, not allowing partial messages: - .. code-block:: python - - trim_messages(messages, max_tokens=30, include_system=True, token_counter=dummy_token_counter, strategy="last") - - .. code-block:: python - - [ - SystemMessage("This is a 4 token text. The full message is 10 tokens."), - HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"), - AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"), - ] - - Last 40 tokens, including system message, allowing partial messages: - .. code-block:: python - - trim_messages( - messages, - max_tokens=40, - token_counter=dummy_token_counter, - strategy="last", - allow_partial=True, - include_system=True - ) - - .. code-block:: python - - [ - SystemMessage("This is a 4 token text. The full message is 10 tokens."), - AIMessage( - [{"type": "text", "text": "This is the FIRST 4 token block."},], - id="second", - ), - HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"), - AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"), - ] - - Last 30 tokens, including system message, allowing partial messages, end on HumanMessage: - .. code-block:: python - - trim_messages( - messages, - max_tokens=30, - token_counter=dummy_token_counter, - strategy="last", - end_on="human", - include_system=True, - allow_partial=True, - ) - - .. code-block:: python - - [ - SystemMessage("This is a 4 token text. The full message is 10 tokens."), - AIMessage( - [{"type": "text", "text": "This is the FIRST 4 token block."},], - id="second", - ), - HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"), - ] - - Last 40 tokens, including system message, allowing partial messages, start on HumanMessage: - .. code-block:: python - - trim_messages( - messages, - max_tokens=40, - token_counter=dummy_token_counter, - strategy="last", - include_system=True, - allow_partial=True, - start_on="human" - ) - - .. code-block:: python - - [ - SystemMessage("This is a 4 token text. The full message is 10 tokens."), - HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"), - AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"), - ] """ # noqa: E501 if start_on and strategy == "first":
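Beyond the list-in/list-out calls shown in the docstring examples above, `trim_messages` can also be used declaratively: calling it without a `messages` argument returns a Runnable that applies the same trimming when invoked, which makes it easy to compose with a chat model. A minimal sketch (the model choice is illustrative, and `langchain-openai` is assumed to be installed):

```python
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, trim_messages
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o")

# Without a `messages` argument, trim_messages returns a Runnable that
# trims whatever history it is invoked with, using this configuration.
trimmer = trim_messages(
    max_tokens=45,
    strategy="last",
    token_counter=llm,
    include_system=True,
    start_on="human",
)

# The trimmer emits a list of messages, which the chat model accepts directly.
chain = trimmer | llm
chain.invoke(
    [
        SystemMessage("you're a good assistant, you always respond with a joke."),
        HumanMessage("i wonder why it's called langchain"),
        AIMessage("Well, 'WordRope' just didn't have the same ring to it!"),
        HumanMessage("what do you call a speechless parrot"),
    ]
)
```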