Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft: Action server docs plugins #12333

Closed
wants to merge 9 commits into from
1 change: 1 addition & 0 deletions changelog/12480.improvement.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Skip executing the pipeline when the user message is of the form /intent or /intent + entities.
1 change: 1 addition & 0 deletions changelog/12514.improvement.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Remove tensorflow-addons from dependencies as it is now deprecated.
57 changes: 57 additions & 0 deletions docs/docs/action-server/add-sanic-extensions.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
---
id: add-sanic-extensions
sidebar_label: Adding Sanic Extensions
title: Add Sanic Extensions
---

# Add Sanic extensions
You can create additional Sanic extensions by accessing the app object created by the action server. The hook gives you access
to the Sanic app object that `rasa-sdk` creates when starting the action server. Using this app object, you can extend Sanic features such as middleware, listeners, background tasks,
and additional routes.

### Step-by-step guide on creating your own Sanic extension in `rasa_sdk`
This example shows how to create a Sanic listener using plugins.

#### Create package rasa_sdk_plugins
Create a package in your action server project which you must name `rasa_sdk_plugins`. Rasa SDK will try to import this package from your project in order to start the plugins.
If no plugins are found, it will log an info message saying that there are no plugins in your project.

#### Register modules containing the hooks
Create the package `rasa_sdk_plugins` and initialize the hooks by creating an `__init__.py` file. This is where the plugin manager looks for the module in which your hooks are implemented:

```python
import logging

import pluggy

logger = logging.getLogger(__name__)


def init_hooks(manager: pluggy.PluginManager) -> None:
    """Initialise hooks into rasa sdk."""
    import sys

    import rasa_sdk_plugins.your_module

    logger.info("Finding hooks")
    manager.register(sys.modules["rasa_sdk_plugins.your_module"])
```
#### Implement your hook
Implement the hook `attach_sanic_app_extensions`. This hook forwards the app object created by Sanic in the `rasa_sdk` and allows you to create additional routes, middlewares, listeners and background tasks. Here's an example of this implementation that creates a listener.

In `rasa_sdk_plugins/your_module.py`:

```python
from __future__ import annotations

import logging
from functools import partial

import pluggy

logger = logging.getLogger(__name__)
hookimpl = pluggy.HookimplMarker("rasa_sdk")


@hookimpl  # type: ignore[misc]
def attach_sanic_app_extensions(app):
    logger.info("hook called")
    app.register_listener(
        partial(log_before_server_start),
        "before_server_start",
    )
    return app


async def log_before_server_start(app, loop):
    logger.info("BEFORE SERVER START")
```
1 change: 1 addition & 0 deletions docs/sidebars.js
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,7 @@ module.exports = {
},
],
},
'action-server/add-sanic-extensions'
],
},
],
Expand Down
2 changes: 1 addition & 1 deletion rasa/cli/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -382,7 +382,7 @@ async def payload_from_button_question(button_question: "Question") -> Text:
response = await button_question.ask_async()
if response != FREE_TEXT_INPUT_PROMPT:
# Extract intent slash command if it's a button
response = response[response.find("(") + 1 : response.find(")")]
response = response[response.rfind("(") + 1 : response.rfind(")")]
return response


Expand Down
26 changes: 22 additions & 4 deletions rasa/core/processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,13 +62,17 @@
import rasa.core.actions.action
import rasa.shared.core.trackers
from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity
from rasa.shared.core.training_data.story_reader.yaml_story_reader import (
YAMLStoryReader,
)
from rasa.shared.nlu.constants import (
ENTITIES,
INTENT,
INTENT_NAME_KEY,
PREDICTED_CONFIDENCE_KEY,
TEXT,
)
from rasa.shared.nlu.training_data.message import Message
from rasa.utils.endpoints import EndpointConfig

logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -715,11 +719,25 @@ async def parse_message(
if self.http_interpreter:
parse_data = await self.http_interpreter.parse(message)
else:
if tracker is None:
tracker = DialogueStateTracker.from_events(message.sender_id, [])
parse_data = self._parse_message_with_graph(
message, tracker, only_output_properties
msg = YAMLStoryReader.unpack_regex_message(
message=Message({TEXT: message.text})
)
# Intent is not explicitly present. Pass message to graph.
if msg.data.get(INTENT) is None:
if tracker is None:
tracker = DialogueStateTracker.from_events(message.sender_id, [])
parse_data = self._parse_message_with_graph(
message, tracker, only_output_properties
)
else:
parse_data = {
TEXT: "",
INTENT: {INTENT_NAME_KEY: None, PREDICTED_CONFIDENCE_KEY: 0.0},
ENTITIES: [],
}
parse_data.update(
msg.as_dict(only_output_properties=only_output_properties)
)

structlogger.debug(
"processor.message.parse",
Expand Down
17 changes: 17 additions & 0 deletions tests/cli/test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
import rasa.shared.utils.io
from rasa.utils.common import TempDirectoryPath, get_temp_dir_name
from tests.cli.conftest import RASA_EXE
from tests.conftest import AsyncMock


@contextlib.contextmanager
Expand Down Expand Up @@ -633,3 +634,19 @@ def test_validate_assistant_id_in_config_preserves_comment() -> None:

# reset input files to original state
rasa.shared.utils.io.write_yaml(original_config_data, config_file, True)


@pytest.mark.parametrize(
    "text_input, button",
    [
        ("hi this is test text\n", "hi this is test text"),
        ("hi this is test text (/button_one)", "/button_one"),
        ("hi this is test text (and something) (/button_one)", "/button_one"),
    ],
)
async def test_payload_from_button_question(text_input: str, button: str) -> None:
    """The extracted payload is the content of the last parenthesised group."""
    # Stub out the interactive prompt so no real questionary session is opened.
    question = AsyncMock()
    question.ask_async.return_value = text_input
    assert await rasa.cli.utils.payload_from_button_question(question) == button
12 changes: 10 additions & 2 deletions tests/core/test_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,11 +98,19 @@ async def test_agent_train(default_agent: Agent):
"start": 6,
"end": 21,
"value": "Rasa",
"extractor": "RegexMessageHandler",
}
],
},
)
),
(
"hi hello",
{
"text": "hi hello",
"intent": {"name": "greet", "confidence": 1.0},
"text_tokens": [(0, 2), (3, 8)],
"entities": [],
},
),
],
)
async def test_agent_parse_message(
Expand Down
44 changes: 33 additions & 11 deletions tests/core/test_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
from _pytest.logging import LogCaptureFixture
from aioresponses import aioresponses
from typing import Optional, Text, List, Callable, Type, Any
from unittest import mock

from rasa.core.lock_store import InMemoryLockStore
from rasa.core.policies.ensemble import DefaultPolicyPredictionEnsemble
Expand Down Expand Up @@ -113,10 +114,26 @@ async def test_message_id_logging(default_processor: MessageProcessor):


async def test_parsing(default_processor: MessageProcessor):
    """Slash-command messages bypass the graph; plain text goes through it."""
    with mock.patch(
        "rasa.core.processor.MessageProcessor._parse_message_with_graph"
    ) as graph_parse:
        # A message of the form /intent{entities} is unpacked directly,
        # so the NLU graph must not be invoked.
        parsed = await default_processor.parse_message(
            UserMessage('/greet{"name": "boy"}')
        )
        assert parsed["intent"][INTENT_NAME_KEY] == "greet"
        assert parsed["entities"][0]["entity"] == "name"
        graph_parse.assert_not_called()

        # An ordinary user message has no explicit intent, so parsing is
        # delegated to the (mocked) graph.
        graph_parse.return_value = {
            "text": "mocked",
            "intent": {"name": None, "confidence": 0.0},
            "entities": [],
        }
        await default_processor.parse_message(UserMessage("hi hello how are you?"))
        graph_parse.assert_called()


async def test_check_for_unseen_feature(default_processor: MessageProcessor):
Expand Down Expand Up @@ -874,7 +891,7 @@ async def test_handle_message_with_session_start(
# make sure the sequence of events is as expected
with_model_ids_expected = with_model_ids(
[
ActionExecuted(ACTION_SESSION_START_NAME),
ActionExecuted(ACTION_SESSION_START_NAME, confidence=1.0),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
Expand All @@ -886,15 +903,18 @@ async def test_handle_message_with_session_start(
"start": 6,
"end": 22,
"value": "Core",
"extractor": "RegexMessageHandler",
}
],
),
SlotSet(entity, slot_1[entity]),
DefinePrevUserUtteredFeaturization(False),
ActionExecuted("utter_greet"),
BotUttered("hey there Core!", metadata={"utter_action": "utter_greet"}),
ActionExecuted(ACTION_LISTEN_NAME),
ActionExecuted(
"utter_greet", policy="AugmentedMemoizationPolicy", confidence=1.0
),
BotUttered(
"hey there Core!", data={}, metadata={"utter_action": "utter_greet"}
),
ActionExecuted(ACTION_LISTEN_NAME, confidence=1.0),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
# the initial SlotSet is reapplied after the SessionStarted sequence
Expand All @@ -909,15 +929,17 @@ async def test_handle_message_with_session_start(
"start": 6,
"end": 42,
"value": "post-session start hello",
"extractor": "RegexMessageHandler",
}
],
),
SlotSet(entity, slot_2[entity]),
DefinePrevUserUtteredFeaturization(False),
ActionExecuted("utter_greet"),
ActionExecuted(
"utter_greet", policy="AugmentedMemoizationPolicy", confidence=1.0
),
BotUttered(
"hey there post-session start hello!",
data={},
metadata={"utter_action": "utter_greet"},
),
ActionExecuted(ACTION_LISTEN_NAME),
Expand Down
10 changes: 10 additions & 0 deletions tests/nlu/featurizers/test_lm_featurizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -361,6 +361,7 @@ def evaluate_message_shapes(
assert intent_sentence_vec is None

@pytest.mark.timeout(120, func_only=True)
@pytest.mark.skip_on_windows
def test_lm_featurizer_shapes_in_process_training_data(
self,
model_name: Text,
Expand All @@ -386,6 +387,7 @@ def test_lm_featurizer_shapes_in_process_training_data(
)

@pytest.mark.timeout(120, func_only=True)
@pytest.mark.skip_on_windows
def test_lm_featurizer_shapes_in_process_messages(
self,
model_name: Text,
Expand Down Expand Up @@ -581,6 +583,7 @@ def check_subtokens(
)

@pytest.mark.timeout(120, func_only=True)
@pytest.mark.skip_on_windows
def test_lm_featurizer_num_sub_tokens_process_training_data(
self,
model_name: Text,
Expand All @@ -606,6 +609,7 @@ def test_lm_featurizer_num_sub_tokens_process_training_data(
)

@pytest.mark.timeout(120, func_only=True)
@pytest.mark.skip_on_windows
def test_lm_featurizer_num_sub_tokens_process_messages(
self,
model_name: Text,
Expand Down Expand Up @@ -635,6 +639,7 @@ def test_lm_featurizer_num_sub_tokens_process_messages(
"input_sequence_length, model_name, should_overflow",
[(20, "bert", False), (1000, "bert", True), (1000, "xlnet", False)],
)
@pytest.mark.skip_on_windows
def test_sequence_length_overflow_train(
input_sequence_length: int,
model_name: Text,
Expand Down Expand Up @@ -666,6 +671,7 @@ def test_sequence_length_overflow_train(
(np.ones((1, 256, 5)), [256], "bert", False),
],
)
@pytest.mark.skip_on_windows
def test_long_sequences_extra_padding(
sequence_embeddings: np.ndarray,
actual_sequence_lengths: List[int],
Expand Down Expand Up @@ -703,6 +709,7 @@ def test_long_sequences_extra_padding(
([[1] * 200], 200, 200, False),
],
)
@pytest.mark.skip_on_windows
def test_input_padding(
token_ids: List[List[int]],
max_sequence_length_model: int,
Expand Down Expand Up @@ -730,6 +737,7 @@ def test_input_padding(
(256, "bert", "bert-base-uncased", False),
],
)
@pytest.mark.skip_on_windows
def test_log_longer_sequence(
sequence_length: int,
model_name: Text,
Expand Down Expand Up @@ -760,6 +768,7 @@ def test_log_longer_sequence(
"actual_sequence_length, max_input_sequence_length, zero_start_index",
[(256, 512, 256), (700, 700, 700), (700, 512, 512)],
)
@pytest.mark.skip_on_windows
def test_attention_mask(
actual_sequence_length: int,
max_input_sequence_length: int,
Expand Down Expand Up @@ -792,6 +801,7 @@ def test_attention_mask(
)
],
)
@pytest.mark.skip_on_windows
def test_lm_featurizer_correctly_handle_whitespace_token(
text: Text,
tokens: List[Tuple[Text, int]],
Expand Down
1 change: 1 addition & 0 deletions tests/nlu/test_train.py
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,7 @@ def test_all_components_are_in_at_least_one_test_pipeline():

@pytest.mark.timeout(600, func_only=True)
@pytest.mark.parametrize("language, pipeline", pipelines_for_tests())
@pytest.mark.skip_on_windows
async def test_train_persist_load_parse(
language: Optional[Text],
pipeline: List[Dict],
Expand Down