From 8cd3e7e18e4c3176f88be0a68f8406f08f69694a Mon Sep 17 00:00:00 2001
From: Santos Gallegos
Date: Tue, 11 Jun 2019 12:27:45 -0500
Subject: [PATCH 01/11] Index path with original path name

We already have this information from the model
---
 readthedocs/projects/models.py   | 14 +++++++-------
 readthedocs/search/parse_json.py | 19 +++++++------------
 2 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/readthedocs/projects/models.py b/readthedocs/projects/models.py
index 19f58f7fab0..eae88fd9cf6 100644
--- a/readthedocs/projects/models.py
+++ b/readthedocs/projects/models.py
@@ -1230,21 +1230,21 @@ def get_processed_json(self):
         Both lead to `foo/index.html`
         https://github.com/rtfd/readthedocs.org/issues/5368
         """
-        paths = []
+        fjson_paths = []
         basename = os.path.splitext(self.path)[0]
-        paths.append(basename + '.fjson')
+        fjson_paths.append(basename + '.fjson')
         if basename.endswith('/index'):
             new_basename = re.sub(r'\/index$', '', basename)
-            paths.append(new_basename + '.fjson')
+            fjson_paths.append(new_basename + '.fjson')
 
         full_json_path = self.project.get_production_media_path(
             type_='json', version_slug=self.version.slug, include_file=False
         )
         try:
-            for path in paths:
-                file_path = os.path.join(full_json_path, path)
+            for fjson_path in fjson_paths:
+                file_path = os.path.join(full_json_path, fjson_path)
                 if os.path.exists(file_path):
-                    return process_file(file_path)
+                    return process_file(file_path, self.path)
         except Exception:
             log.warning(
                 'Unhandled exception during search processing file: %s',
@@ -1253,7 +1253,7 @@ def get_processed_json(self):
         return {
             'headers': [],
             'content': '',
-            'path': file_path,
+            'path': self.path,
             'title': '',
             'sections': [],
         }
diff --git a/readthedocs/search/parse_json.py b/readthedocs/search/parse_json.py
index a37be7dfc63..896cadb473b 100644
--- a/readthedocs/search/parse_json.py
+++ b/readthedocs/search/parse_json.py
@@ -59,40 +59,35 @@ def generate_sections_from_pyquery(body):
     }
 
 
-def process_file(filename):
+def process_file(fjson_filename, filename):
     """Read a file from disk and parse it into a structured dict."""
     try:
-        with codecs.open(filename, encoding='utf-8', mode='r') as f:
+        with codecs.open(fjson_filename, encoding='utf-8', mode='r') as f:
             file_contents = f.read()
     except IOError:
-        log.info('Unable to read file: %s', filename)
+        log.info('Unable to read file: %s', fjson_filename)
         return None
     data = json.loads(file_contents)
     sections = []
     title = ''
     body_content = ''
-    if 'current_page_name' in data:
-        path = data['current_page_name']
-    else:
-        log.info('Unable to index file due to no name %s', filename)
-        return None
     if 'body' in data and data['body']:
         body = PyQuery(data['body'])
         body_content = body.text().replace('¶', '')
         sections.extend(generate_sections_from_pyquery(body))
     else:
-        log.info('Unable to index content for: %s', filename)
+        log.info('Unable to index content for: %s', fjson_filename)
     if 'title' in data:
         title = data['title']
        if title.startswith('<'):
            title = PyQuery(data['title']).text()
     else:
-        log.info('Unable to index title for: %s', filename)
+        log.info('Unable to index title for: %s', fjson_filename)
 
     return {
-        'headers': process_headers(data, filename),
+        'headers': process_headers(data, fjson_filename),
         'content': body_content,
-        'path': path,
+        'path': filename,
         'title': title,
         'sections': sections,
     }
From abf4a75ff2d1fb6c547db09724b86eeb06a637fa Mon Sep 17 00:00:00 2001
From: Santos Gallegos
Date: Tue, 11 Jun 2019 22:54:45 -0500
Subject: [PATCH 02/11] Don't depend on DOCUMENTATION_OPTIONS

---
 readthedocs/core/static-src/core/js/doc-embed/search.js | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/readthedocs/core/static-src/core/js/doc-embed/search.js b/readthedocs/core/static-src/core/js/doc-embed/search.js
index 79ef3c01c77..4a341902ba8 100644
--- a/readthedocs/core/static-src/core/js/doc-embed/search.js
+++ b/readthedocs/core/static-src/core/js/doc-embed/search.js
@@ -38,8 +38,7 @@ function attach_elastic_search_query(data) {
                 var list_item = $('<li style="display: none;"></li>');
 
                 // Creating the result from elements
-                var link = doc.link + DOCUMENTATION_OPTIONS.FILE_SUFFIX +
-                    '?highlight=' + $.urlencode(query);
+                var link = doc.link + '?highlight=' + $.urlencode(query);
 
                 var item = $('<a>', {'href': link});
                 item.html(doc.title);
From 0874fba8778c164fd2ec4484a739469298cd295f Mon Sep 17 00:00:00 2001
From: Santos Gallegos
Date: Tue, 11 Jun 2019 22:55:04 -0500
Subject: [PATCH 03/11] Don't return None

---
 readthedocs/search/parse_json.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/readthedocs/search/parse_json.py b/readthedocs/search/parse_json.py
index 896cadb473b..cf2f262a618 100644
--- a/readthedocs/search/parse_json.py
+++ b/readthedocs/search/parse_json.py
@@ -60,13 +60,13 @@ def generate_sections_from_pyquery(body):
 
 
 def process_file(fjson_filename, filename):
-    """Read a file from disk and parse it into a structured dict."""
+    """Read the fjson file from disk and parse it into a structured dict."""
     try:
         with codecs.open(fjson_filename, encoding='utf-8', mode='r') as f:
             file_contents = f.read()
     except IOError:
         log.info('Unable to read file: %s', fjson_filename)
-        return None
+        raise
     data = json.loads(file_contents)
     sections = []
     title = ''
From 2fcd502ff91e0df54bfb0ced2d61b3d5d989e501 Mon Sep 17 00:00:00 2001
From: Santos Gallegos
Date: Tue, 11 Jun 2019 23:19:09 -0500
Subject: [PATCH 04/11] Fix test

---
 readthedocs/rtd_tests/tests/test_search_json_parsing.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/readthedocs/rtd_tests/tests/test_search_json_parsing.py b/readthedocs/rtd_tests/tests/test_search_json_parsing.py
index 42b0839c4e9..72939f19496 100644
--- a/readthedocs/rtd_tests/tests/test_search_json_parsing.py
+++ b/readthedocs/rtd_tests/tests/test_search_json_parsing.py
@@ -16,6 +16,7 @@ def test_h2_parsing(self):
                 base_dir,
                 'files/api.fjson',
             ),
+            'files/api.html',
         )
         self.assertEqual(data['sections'][1]['id'], 'a-basic-api-client-using-slumber')
         # Only capture h2's after the first section
From 28fb13f2308ca95e3a749ce5de79c6a6c27eb282 Mon Sep 17 00:00:00 2001
From: Santos Gallegos
Date: Mon, 17 Jun 2019 14:55:28 -0500
Subject: [PATCH 05/11] Add new link field

---
 readthedocs/search/api.py        | 24 ++++++++++++++++++++++--
 readthedocs/search/parse_json.py |  4 +++-
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/readthedocs/search/api.py b/readthedocs/search/api.py
index 8084d793f74..d3277333efe 100644
--- a/readthedocs/search/api.py
+++ b/readthedocs/search/api.py
@@ -1,14 +1,15 @@
 import logging
+import os
 from pprint import pformat
 
-from rest_framework import generics
-from rest_framework import serializers
+from rest_framework import generics, serializers
 from rest_framework.exceptions import ValidationError
 from rest_framework.pagination import PageNumberPagination
 
 from readthedocs.search.faceted_search import PageSearch
 from readthedocs.search.utils import get_project_list_or_404
 
+
 log = logging.getLogger(__name__)
@@ -23,10 +24,29 @@ class PageSearchSerializer(serializers.Serializer):
     version = serializers.CharField()
     title = serializers.CharField()
     path = serializers.CharField()
+    # Doc url without extension
     link = serializers.SerializerMethodField()
+    # Doc url with extension
+    url = serializers.SerializerMethodField()
     highlight = serializers.SerializerMethodField()
 
     def get_link(self, obj):
+        """
+        Gets the url without extension.
+
+        .. warning::
+            This is only used to keep compatibility with
+            the previous search implementation.
+            Use `url` instead.
+ """ + projects_url = self.context.get('projects_url') + if projects_url: + docs_url = projects_url[obj.project] + path = os.path.splitext(obj.path)[0] + return docs_url + path + + def get_url(self, obj): + """Gets the full url.""" projects_url = self.context.get('projects_url') if projects_url: docs_url = projects_url[obj.project] diff --git a/readthedocs/search/parse_json.py b/readthedocs/search/parse_json.py index cf2f262a618..a7ec86a8b9b 100644 --- a/readthedocs/search/parse_json.py +++ b/readthedocs/search/parse_json.py @@ -71,12 +71,14 @@ def process_file(fjson_filename, filename): sections = [] title = '' body_content = '' - if 'body' in data and data['body']: + + if data.get('body'): body = PyQuery(data['body']) body_content = body.text().replace('¶', '') sections.extend(generate_sections_from_pyquery(body)) else: log.info('Unable to index content for: %s', fjson_filename) + if 'title' in data: title = data['title'] if title.startswith('<'): From ec839cd13bb97508b10e18b3f0da0b531bee8128 Mon Sep 17 00:00:00 2001 From: Santos Gallegos Date: Mon, 17 Jun 2019 14:56:16 -0500 Subject: [PATCH 06/11] Use doc.url in api response --- readthedocs/core/static-src/core/js/doc-embed/search.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/readthedocs/core/static-src/core/js/doc-embed/search.js b/readthedocs/core/static-src/core/js/doc-embed/search.js index 4a341902ba8..16e9d4141b0 100644 --- a/readthedocs/core/static-src/core/js/doc-embed/search.js +++ b/readthedocs/core/static-src/core/js/doc-embed/search.js @@ -38,9 +38,9 @@ function attach_elastic_search_query(data) { var list_item = $('
  • '); // Creating the result from elements - var link = doc.link + '?highlight=' + $.urlencode(query); + var url = doc.url + '?highlight=' + $.urlencode(query); - var item = $('
    ', {'href': link}); + var item = $('', {'href': url}); item.html(doc.title); list_item.append(item); From c9714104759dbfd6c4d211f36f1a5d77f52caa70 Mon Sep 17 00:00:00 2001 From: Santos Gallegos Date: Mon, 17 Jun 2019 18:11:16 -0500 Subject: [PATCH 07/11] Keep path around, add full_path --- readthedocs/projects/models.py | 3 ++- readthedocs/search/api.py | 5 ++--- readthedocs/search/documents.py | 3 ++- readthedocs/search/parse_json.py | 9 ++++++++- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/readthedocs/projects/models.py b/readthedocs/projects/models.py index f0d1ea57192..77f1acb2068 100644 --- a/readthedocs/projects/models.py +++ b/readthedocs/projects/models.py @@ -1254,7 +1254,8 @@ def get_processed_json(self): return { 'headers': [], 'content': '', - 'path': self.path, + 'path': file_path, + 'full_path': self.path, 'title': '', 'sections': [], } diff --git a/readthedocs/search/api.py b/readthedocs/search/api.py index d3277333efe..a24e20f9957 100644 --- a/readthedocs/search/api.py +++ b/readthedocs/search/api.py @@ -42,15 +42,14 @@ def get_link(self, obj): projects_url = self.context.get('projects_url') if projects_url: docs_url = projects_url[obj.project] - path = os.path.splitext(obj.path)[0] - return docs_url + path + return docs_url + obj.path def get_url(self, obj): """Gets the full url.""" projects_url = self.context.get('projects_url') if projects_url: docs_url = projects_url[obj.project] - return docs_url + obj.path + return docs_url + obj.full_path def get_highlight(self, obj): highlight = getattr(obj.meta, 'highlight', None) diff --git a/readthedocs/search/documents.py b/readthedocs/search/documents.py index 28a3a61f477..1c36c009046 100644 --- a/readthedocs/search/documents.py +++ b/readthedocs/search/documents.py @@ -116,6 +116,7 @@ class PageDocument(RTDDocTypeMixin, DocType): project = fields.KeywordField(attr='project.slug') version = fields.KeywordField(attr='version.slug') path = fields.KeywordField(attr='processed_json.path') + full_path = fields.KeywordField(attr='processed_json.full_path') # Searchable content title = fields.TextField(attr='processed_json.title') @@ -153,7 +154,7 @@ def faceted_search( def get_queryset(self): """Overwrite default queryset to filter certain files to index.""" - queryset = super(PageDocument, self).get_queryset() + queryset = super().get_queryset() # Do not index files that belong to non sphinx project # Also do not index certain files diff --git a/readthedocs/search/parse_json.py b/readthedocs/search/parse_json.py index a7ec86a8b9b..17135f73ec6 100644 --- a/readthedocs/search/parse_json.py +++ b/readthedocs/search/parse_json.py @@ -69,9 +69,15 @@ def process_file(fjson_filename, filename): raise data = json.loads(file_contents) sections = [] + path = '' title = '' body_content = '' + if 'current_page_name' in data: + path = data['current_page_name'] + else: + log.info('Unable to index file due to no name %s', filename) + if data.get('body'): body = PyQuery(data['body']) body_content = body.text().replace('¶', '') @@ -89,7 +95,8 @@ def process_file(fjson_filename, filename): return { 'headers': process_headers(data, fjson_filename), 'content': body_content, - 'path': filename, + 'path': path, + 'full_path': filename, 'title': title, 'sections': sections, } From a2e0e3f5e442b0072a4b7cbac9efadbe2a41c224 Mon Sep 17 00:00:00 2001 From: Santos Gallegos Date: Mon, 17 Jun 2019 20:11:56 -0500 Subject: [PATCH 08/11] Revert changes in the search api --- .../static-src/core/js/doc-embed/search.js | 5 +++-- 
 readthedocs/search/api.py                  | 19 -------------------
 .../templates/search/elastic_search.html   |  2 +-
 3 files changed, 4 insertions(+), 22 deletions(-)

diff --git a/readthedocs/core/static-src/core/js/doc-embed/search.js b/readthedocs/core/static-src/core/js/doc-embed/search.js
index 16e9d4141b0..79ef3c01c77 100644
--- a/readthedocs/core/static-src/core/js/doc-embed/search.js
+++ b/readthedocs/core/static-src/core/js/doc-embed/search.js
@@ -38,9 +38,10 @@ function attach_elastic_search_query(data) {
                 var list_item = $('<li style="display: none;"></li>');
 
                 // Creating the result from elements
-                var url = doc.url + '?highlight=' + $.urlencode(query);
+                var link = doc.link + DOCUMENTATION_OPTIONS.FILE_SUFFIX +
+                    '?highlight=' + $.urlencode(query);
 
-                var item = $('<a>', {'href': url});
+                var item = $('<a>', {'href': link});
                 item.html(doc.title);
diff --git a/readthedocs/search/api.py b/readthedocs/search/api.py
index a24e20f9957..57c8efb9186 100644
--- a/readthedocs/search/api.py
+++ b/readthedocs/search/api.py
@@ -1,5 +1,4 @@
 import logging
-import os
 from pprint import pformat
 
 from rest_framework import generics, serializers
@@ -24,33 +23,15 @@ class PageSearchSerializer(serializers.Serializer):
     version = serializers.CharField()
     title = serializers.CharField()
     path = serializers.CharField()
-    # Doc url without extension
     link = serializers.SerializerMethodField()
-    # Doc url with extension
-    url = serializers.SerializerMethodField()
     highlight = serializers.SerializerMethodField()
 
     def get_link(self, obj):
-        """
-        Gets the url without extension.
-
-        .. warning::
-            This is only used to keep compatibility with
-            the previous search implementation.
-            Use `url` instead.
-        """
         projects_url = self.context.get('projects_url')
         if projects_url:
             docs_url = projects_url[obj.project]
             return docs_url + obj.path
 
-    def get_url(self, obj):
-        """Gets the full url."""
-        projects_url = self.context.get('projects_url')
-        if projects_url:
-            docs_url = projects_url[obj.project]
-            return docs_url + obj.full_path
-
     def get_highlight(self, obj):
         highlight = getattr(obj.meta, 'highlight', None)
         if highlight:
diff --git a/readthedocs/templates/search/elastic_search.html b/readthedocs/templates/search/elastic_search.html
index 149e0addbf6..a7e33862a30 100644
--- a/readthedocs/templates/search/elastic_search.html
+++ b/readthedocs/templates/search/elastic_search.html
@@ -210,7 +210,7 @@
 
           {% elif 'page' in result.meta.index %}
-            <a href="…">
+            <a href="…">
               {{ result.project }} - {{ result.title }}
             </a>
             {% for fragment in result.meta.highlight.content %}
From 0e55e48bf88e98aba220a0db5fdeb273458b0ed4 Mon Sep 17 00:00:00 2001
From: Santos Gallegos
Date: Mon, 17 Jun 2019 20:43:28 -0500
Subject: [PATCH 09/11] Fix tests

---
 readthedocs/search/tests/data/docs/story.json            | 3 ++-
 readthedocs/search/tests/data/docs/wiping.json           | 3 ++-
 readthedocs/search/tests/data/kuma/docker.json           | 3 ++-
 readthedocs/search/tests/data/kuma/documentation.json    | 3 ++-
 readthedocs/search/tests/data/pipeline/installation.json | 3 ++-
 readthedocs/search/tests/data/pipeline/signals.json      | 3 ++-
 6 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/readthedocs/search/tests/data/docs/story.json b/readthedocs/search/tests/data/docs/story.json
index 84ef2cf52a7..f193ba10644 100644
--- a/readthedocs/search/tests/data/docs/story.json
+++ b/readthedocs/search/tests/data/docs/story.json
@@ -27,5 +27,6 @@
             "title": "Rationale"
         }
     ],
-    "path": "open-source-philosophy"
+    "path": "open-source-philosophy",
+    "full_path": "open-source-philosophy.html"
 }
diff --git a/readthedocs/search/tests/data/docs/wiping.json b/readthedocs/search/tests/data/docs/wiping.json
index 50b3ee1658e..dafd4f43239 100644
--- a/readthedocs/search/tests/data/docs/wiping.json
+++ b/readthedocs/search/tests/data/docs/wiping.json
@@ -11,5 +11,6 @@
             "title": "Wiping a Build Environment"
         }
     ],
-    "path": "guides/wipe-environment"
+    "path": "guides/wipe-environment",
+    "full_path": "guides/wipe-environment.html"
 }
diff --git a/readthedocs/search/tests/data/kuma/docker.json b/readthedocs/search/tests/data/kuma/docker.json
index 16f4f1e7434..e208b3d6fb9 100644
--- a/readthedocs/search/tests/data/kuma/docker.json
+++ b/readthedocs/search/tests/data/kuma/docker.json
@@ -21,5 +21,6 @@
             "title": "Docker Images"
         }
     ],
-    "path": "docker"
+    "path": "docker",
+    "full_path": "docker.html"
 }
diff --git a/readthedocs/search/tests/data/kuma/documentation.json b/readthedocs/search/tests/data/kuma/documentation.json
index 1dcefa8c2c3..c3237c484c5 100644
--- a/readthedocs/search/tests/data/kuma/documentation.json
+++ b/readthedocs/search/tests/data/kuma/documentation.json
@@ -17,5 +17,6 @@
             "title": "Generating documentation"
         }
     ],
-    "path": "documentation"
+    "path": "documentation",
+    "full_path": "documentation.html"
 }
diff --git a/readthedocs/search/tests/data/pipeline/installation.json b/readthedocs/search/tests/data/pipeline/installation.json
index 40d21fed5d4..568d72248f1 100644
--- a/readthedocs/search/tests/data/pipeline/installation.json
+++ b/readthedocs/search/tests/data/pipeline/installation.json
@@ -29,5 +29,6 @@
             "title": "Recommendations"
         }
     ],
-    "path": "installation"
+    "path": "installation",
+    "full_path": "installation.html"
 }
diff --git a/readthedocs/search/tests/data/pipeline/signals.json b/readthedocs/search/tests/data/pipeline/signals.json
index 78349e5e3aa..bc602110b69 100644
--- a/readthedocs/search/tests/data/pipeline/signals.json
+++ b/readthedocs/search/tests/data/pipeline/signals.json
@@ -23,5 +23,6 @@
             "title": "js_compressed"
         }
     ],
-    "path": "signals"
+    "path": "signals",
+    "full_path": "signals.html"
 }
From f55b971863987740cf553c3af478414e6ed8fca0 Mon Sep 17 00:00:00 2001
From: Santos Gallegos
Date: Tue, 18 Jun 2019 10:52:52 -0500
Subject: [PATCH 10/11] Better way to add a new field

---
 readthedocs/projects/models.py                           | 3 +--
 readthedocs/search/documents.py                          | 2 +-
 readthedocs/search/parse_json.py                         | 5 ++---
 readthedocs/search/tests/data/docs/story.json            | 3 +--
 readthedocs/search/tests/data/docs/wiping.json           | 3 +--
 readthedocs/search/tests/data/kuma/docker.json           | 3 +--
 readthedocs/search/tests/data/kuma/documentation.json    | 3 +--
 readthedocs/search/tests/data/pipeline/installation.json | 3 +--
 readthedocs/search/tests/data/pipeline/signals.json      | 3 +--
 9 files changed, 10 insertions(+), 18 deletions(-)

diff --git a/readthedocs/projects/models.py b/readthedocs/projects/models.py
index 77f1acb2068..e4c90e1e72d 100644
--- a/readthedocs/projects/models.py
+++ b/readthedocs/projects/models.py
@@ -1245,7 +1245,7 @@ def get_processed_json(self):
             for fjson_path in fjson_paths:
                 file_path = os.path.join(full_json_path, fjson_path)
                 if os.path.exists(file_path):
-                    return process_file(file_path, self.path)
+                    return process_file(file_path)
         except Exception:
             log.warning(
                 'Unhandled exception during search processing file: %s',
@@ -1255,7 +1255,6 @@ def get_processed_json(self):
         return {
             'headers': [],
             'content': '',
             'path': file_path,
-            'full_path': self.path,
             'title': '',
             'sections': [],
         }
diff --git a/readthedocs/search/documents.py b/readthedocs/search/documents.py
index 1c36c009046..5e9d950f272 100644
--- a/readthedocs/search/documents.py
+++ b/readthedocs/search/documents.py
@@ -116,7 +116,7 @@ class PageDocument(RTDDocTypeMixin, DocType):
     project = fields.KeywordField(attr='project.slug')
     version = fields.KeywordField(attr='version.slug')
     path = fields.KeywordField(attr='processed_json.path')
-    full_path = fields.KeywordField(attr='processed_json.full_path')
+    full_path = fields.KeywordField(attr='path')
 
     # Searchable content
     title = fields.TextField(attr='processed_json.title')
diff --git a/readthedocs/search/parse_json.py b/readthedocs/search/parse_json.py
index 17135f73ec6..a3593056204 100644
--- a/readthedocs/search/parse_json.py
+++ b/readthedocs/search/parse_json.py
@@ -59,7 +59,7 @@ def generate_sections_from_pyquery(body):
     }
 
 
-def process_file(fjson_filename, filename):
+def process_file(fjson_filename):
     """Read the fjson file from disk and parse it into a structured dict."""
     try:
         with codecs.open(fjson_filename, encoding='utf-8', mode='r') as f:
@@ -76,7 +76,7 @@ def process_file(fjson_filename, filename):
     if 'current_page_name' in data:
         path = data['current_page_name']
     else:
-        log.info('Unable to index file due to no name %s', filename)
+        log.info('Unable to index file due to no name %s', fjson_filename)
 
     if data.get('body'):
         body = PyQuery(data['body'])
@@ -96,7 +96,6 @@ def process_file(fjson_filename, filename):
     return {
         'headers': process_headers(data, fjson_filename),
         'content': body_content,
         'path': path,
-        'full_path': filename,
         'title': title,
         'sections': sections,
     }
diff --git a/readthedocs/search/tests/data/docs/story.json b/readthedocs/search/tests/data/docs/story.json
index f193ba10644..84ef2cf52a7 100644
--- a/readthedocs/search/tests/data/docs/story.json
+++ b/readthedocs/search/tests/data/docs/story.json
@@ -27,6 +27,5 @@
             "title": "Rationale"
         }
     ],
-    "path": "open-source-philosophy",
-    "full_path": "open-source-philosophy.html"
+    "path": "open-source-philosophy"
 }
diff --git a/readthedocs/search/tests/data/docs/wiping.json b/readthedocs/search/tests/data/docs/wiping.json
index dafd4f43239..50b3ee1658e 100644
--- a/readthedocs/search/tests/data/docs/wiping.json
+++ b/readthedocs/search/tests/data/docs/wiping.json
@@ -11,6 +11,5 @@
             "title": "Wiping a Build Environment"
         }
     ],
-    "path": "guides/wipe-environment",
-    "full_path": "guides/wipe-environment.html"
+    "path": "guides/wipe-environment"
 }
diff --git a/readthedocs/search/tests/data/kuma/docker.json b/readthedocs/search/tests/data/kuma/docker.json
index e208b3d6fb9..16f4f1e7434 100644
--- a/readthedocs/search/tests/data/kuma/docker.json
+++ b/readthedocs/search/tests/data/kuma/docker.json
@@ -21,6 +21,5 @@
             "title": "Docker Images"
         }
     ],
-    "path": "docker",
-    "full_path": "docker.html"
+    "path": "docker"
 }
diff --git a/readthedocs/search/tests/data/kuma/documentation.json b/readthedocs/search/tests/data/kuma/documentation.json
index c3237c484c5..1dcefa8c2c3 100644
--- a/readthedocs/search/tests/data/kuma/documentation.json
+++ b/readthedocs/search/tests/data/kuma/documentation.json
@@ -17,6 +17,5 @@
             "title": "Generating documentation"
         }
     ],
-    "path": "documentation",
-    "full_path": "documentation.html"
+    "path": "documentation"
 }
diff --git a/readthedocs/search/tests/data/pipeline/installation.json b/readthedocs/search/tests/data/pipeline/installation.json
index 568d72248f1..40d21fed5d4 100644
--- a/readthedocs/search/tests/data/pipeline/installation.json
+++ b/readthedocs/search/tests/data/pipeline/installation.json
@@ -29,6 +29,5 @@
             "title": "Recommendations"
         }
     ],
-    "path": "installation",
-    "full_path": "installation.html"
+    "path": "installation"
 }
diff --git a/readthedocs/search/tests/data/pipeline/signals.json b/readthedocs/search/tests/data/pipeline/signals.json
index bc602110b69..78349e5e3aa 100644
--- a/readthedocs/search/tests/data/pipeline/signals.json
+++ b/readthedocs/search/tests/data/pipeline/signals.json
@@ -23,6 +23,5 @@
             "title": "js_compressed"
         }
     ],
-    "path": "signals",
-    "full_path": "signals.html"
+    "path": "signals"
 }
From 96a85fa8af3cac8b139bdf99598d119eae0e0163 Mon Sep 17 00:00:00 2001
From: Santos Gallegos
Date: Tue, 18 Jun 2019 10:54:38 -0500
Subject: [PATCH 11/11] Fix test

---
 readthedocs/rtd_tests/tests/test_search_json_parsing.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/readthedocs/rtd_tests/tests/test_search_json_parsing.py b/readthedocs/rtd_tests/tests/test_search_json_parsing.py
index 72939f19496..42b0839c4e9 100644
--- a/readthedocs/rtd_tests/tests/test_search_json_parsing.py
+++ b/readthedocs/rtd_tests/tests/test_search_json_parsing.py
@@ -16,7 +16,6 @@ def test_h2_parsing(self):
                 base_dir,
                 'files/api.fjson',
             ),
-            'files/api.html',
         )
         self.assertEqual(data['sections'][1]['id'], 'a-basic-api-client-using-slumber')
         # Only capture h2's after the first section