Skip to content

Commit

Permalink
fix
Browse files Browse the repository at this point in the history
  • Loading branch information
7h3Rabbit committed Oct 15, 2023
1 parent 2d02e4b commit f2b1f0f
Show file tree
Hide file tree
Showing 3 changed files with 136 additions and 91 deletions.
57 changes: 3 additions & 54 deletions tests/css_validator_w3c.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from models import Rating

from tests.utils import *
from tests.w3c_base import get_errors
from tests.w3c_base import get_errors, identify_files
from tests.sitespeed_base import get_result

_local = gettext.gettext
Expand Down Expand Up @@ -64,7 +64,7 @@ def run_test(_, langCode, url):
url, sitespeed_use_docker, sitespeed_arg)

# 1. Visit page like a normal user
data = identify_styles(filename)
data = identify_files(filename)
# 2. FIND ALL INLINE CSS (AND CALCULATE)
# 2.1 FIND ALL <STYLE>
has_style_elements = False
Expand All @@ -73,10 +73,9 @@ def run_test(_, langCode, url):
has_css_contenttypes = False
all_link_resources = list()

request_index = 1
for entry in data['htmls']:
req_url = entry['url']
name = get_friendly_url_name(_, req_url, request_index)
name = get_friendly_url_name(_, req_url, entry['index'])
html = entry['content']
(elements, errors) = get_errors_for_style_tags(req_url, html, _local)
if len(elements) > 0:
Expand All @@ -99,8 +98,6 @@ def run_test(_, langCode, url):
rating += create_review_and_rating(errors,
_, _local, '- `<link rel=\"stylesheet\">` in: {0}'.format(name))

request_index += 1


# 4 Check if website included css files in other ways
for link_resource in all_link_resources:
Expand Down Expand Up @@ -191,54 +188,6 @@ def run_test(_, langCode, url):

return (rating, errors)

def identify_styles(filename):
    """Parse a sitespeed HAR file and collect HTML and CSS responses.

    Reads the HAR (JSON) file at *filename*, walks every request/response
    entry in the log and groups usable responses by mime type:
      - 'htmls'     : responses whose mimeType contains 'html'
                      (dicts with 'url' and 'content')
      - 'resources' : responses whose mimeType contains 'css'
                      (dicts with 'url', 'content' and 'index', the
                      1-based position of the request in the HAR log)
    The 'elements' and 'attributes' lists are returned empty; callers
    may fill them in later.

    Entries without content, mime type, positive size or response text
    are skipped. Each collected response is also written to the local
    cache (set_cache_file) unless a fresh cache file already exists.
    """
    data = {
        'htmls': [],
        'elements': [],
        'attributes': [],
        'resources': []
    }

    with open(filename) as json_input_file:
        har_data = json.load(json_input_file)

    # Some HAR producers wrap everything in a top-level 'log' object.
    if 'log' in har_data:
        har_data = har_data['log']

    req_index = 1
    for entry in har_data["entries"]:
        req = entry['request']
        res = entry['response']
        req_url = req['url']

        if 'content' not in res:
            continue
        if 'mimeType' not in res['content']:
            continue
        if 'size' not in res['content']:
            continue
        if res['content']['size'] <= 0:
            continue
        # 'text' is optional per the HAR spec; without it there is
        # nothing to analyse (and indexing it would raise KeyError).
        if 'text' not in res['content']:
            continue

        if 'html' in res['content']['mimeType']:
            if not has_cache_file(req_url, True, cache_time_delta):
                set_cache_file(req_url, res['content']['text'], True)
            data['htmls'].append({
                'url': req_url,
                'content': res['content']['text']
            })
        elif 'css' in res['content']['mimeType']:
            if not has_cache_file(req_url, True, cache_time_delta):
                set_cache_file(req_url, res['content']['text'], True)
            data['resources'].append({
                'url': req_url,
                'content': res['content']['text'],
                'index': req_index
            })
        req_index += 1

    return data

def get_errors_for_link_tags(html, url, _):
results = list()

Expand Down
108 changes: 72 additions & 36 deletions tests/html_validator_w3c.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,24 @@
import datetime
import re
import config
from tests.w3c_base import get_errors
from tests.utils import *
from tests.w3c_base import get_errors, identify_files
from tests.sitespeed_base import get_result
import gettext
_local = gettext.gettext

# DEFAULTS
request_timeout = config.http_request_timeout
useragent = config.useragent
review_show_improvements_only = config.review_show_improvements_only
sitespeed_use_docker = config.sitespeed_use_docker
# Cache settings are optional in config.py; fall back to sane defaults.
try:
    use_cache = config.cache_when_possible
    cache_time_delta = config.cache_time_delta
except AttributeError:
    # If cache_when_possible is not set in config.py these are the defaults.
    # Catch only AttributeError (missing config attribute) — a bare except
    # would also hide genuine errors such as NameError or KeyboardInterrupt.
    use_cache = False
    cache_time_delta = timedelta(hours=1)


def run_test(_, langCode, url):
Expand All @@ -33,51 +42,78 @@ def run_test(_, langCode, url):
print(_('TEXT_TEST_START').format(
datetime.now().strftime('%Y-%m-%d %H:%M:%S')))

params = {'doc': url,
'out': 'json',
'level': 'error'}
errors = get_errors('html', params)
number_of_errors = len(errors)
errors = list()
error_message_dict = {}

error_message_grouped_dict = {}
if number_of_errors > 0:
regex = r"(“[^”]+”)"
for item in errors:
error_message = item['message']
error_message = re.sub(
regex, "X", error_message, 0, re.MULTILINE)
# We don't need extra iterations for what we are using it for
sitespeed_iterations = 1
sitespeed_arg = '--shm-size=1g -b chrome --plugins.remove screenshot --plugins.remove html --plugins.remove metrics --browsertime.screenshot false --screenshot false --screenshotLCP false --browsertime.screenshotLCP false --chrome.cdp.performance false --browsertime.chrome.timeline false --videoParams.createFilmstrip false --visualMetrics false --visualMetricsPerceptual false --visualMetricsContentful false --browsertime.headless true --browsertime.chrome.includeResponseBodies all --utc true --browsertime.chrome.args ignore-certificate-errors -n {0}'.format(
sitespeed_iterations)
if 'nt' not in os.name:
sitespeed_arg += ' --xvfb'

if error_message_grouped_dict.get(error_message, False):
error_message_grouped_dict[error_message] = error_message_grouped_dict[error_message] + 1
else:
error_message_grouped_dict[error_message] = 1
(result_folder_name, filename) = get_result(
url, sitespeed_use_docker, sitespeed_arg)

if len(error_message_grouped_dict) > 0:
error_message_grouped_sorted = sorted(
error_message_grouped_dict.items(), key=lambda x: x[1], reverse=True)
# 1. Visit page like a normal user
data = identify_files(filename)

for item in error_message_grouped_sorted:

item_value = item[1]
item_text = item[0]
for entry in data['htmls']:
req_url = entry['url']
name = get_friendly_url_name(_, req_url, entry['index'])
review_header = '- {0} '.format(name)
html = entry['content']
set_cache_file(req_url, html, True)

review += _local('TEXT_REVIEW_ERRORS_ITEM').format(item_text, item_value)
params = {'doc': req_url,
'out': 'json',
'level': 'error'}
errors = get_errors('html', params)
number_of_errors = len(errors)

number_of_error_types = len(error_message_grouped_dict)

result = calculate_rating(number_of_error_types, number_of_errors)
error_message_grouped_dict = {}
error_message_grouped_for_rating_dict = {}
if number_of_errors > 0:
regex = r"(“[^”]+”)"
for item in errors:
error_message = item['message']
error_message = re.sub(
regex, "X", error_message, 0, re.MULTILINE)

# if number_of_error_types > 0:
error_types_rating = Rating(_, review_show_improvements_only)
error_types_rating.set_overall(result[0], _local('TEXT_REVIEW_RATING_GROUPED').format(
number_of_error_types, 0.0))
rating += error_types_rating
if error_message_grouped_dict.get(error_message, False):
error_message_grouped_dict[error_message] = error_message_grouped_dict[error_message] + 1
else:
error_message_grouped_dict[error_message] = 1

if len(error_message_grouped_dict) > 0:
error_message_grouped_sorted = sorted(
error_message_grouped_dict.items(), key=lambda x: x[1], reverse=True)

for item in error_message_grouped_sorted:

item_value = item[1]
item_text = item[0]

review += _local('TEXT_REVIEW_ERRORS_ITEM').format(item_text, item_value)

number_of_error_types = len(error_message_grouped_dict)

result = calculate_rating(number_of_error_types, number_of_errors)

# if number_of_error_types > 0:
error_types_rating = Rating(_, review_show_improvements_only)
error_types_rating.set_overall(result[0], review_header + _local('TEXT_REVIEW_RATING_GROUPED').format(
number_of_error_types, 0.0))
rating += error_types_rating

# if number_of_errors > 0:
error_rating = Rating(_, review_show_improvements_only)
error_rating.set_overall(result[1], review_header + _local(
'TEXT_REVIEW_RATING_ITEMS').format(number_of_errors, 0.0))
rating += error_rating

# if number_of_errors > 0:
error_rating = Rating(_, review_show_improvements_only)
error_rating.set_overall(result[1], _local(
'TEXT_REVIEW_RATING_ITEMS').format(number_of_errors, 0.0))
rating += error_rating

points = rating.get_overall()
rating.set_standards(points)
Expand Down
62 changes: 61 additions & 1 deletion tests/w3c_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,13 @@
useragent = config.useragent
css_review_group_errors = config.css_review_group_errors
review_show_improvements_only = config.review_show_improvements_only
# Cache settings are optional in config.py; fall back to sane defaults.
try:
    use_cache = config.cache_when_possible
    cache_time_delta = config.cache_time_delta
except AttributeError:
    # If cache_when_possible is not set in config.py these are the defaults.
    # Catch only AttributeError (missing config attribute) — a bare except
    # would also hide genuine errors such as NameError or KeyboardInterrupt.
    use_cache = False
    cache_time_delta = timedelta(hours=1)


def get_errors(test_type, params):
Expand All @@ -32,6 +39,10 @@ def get_errors(test_type, params):
'Tested url must start with \'https://\' or \'http://\': {0}'.format(url))

file_path = get_cache_path(url, True)
html_file_ending_fix = file_path.replace('.cache', '.cache.html')
if has_cache_file(url, True, cache_time_delta) and not os.path.exists(file_path):
os.rename(file_path, html_file_ending_fix)
file_path = html_file_ending_fix

arg = '--exit-zero-always{1} --stdout --format json --errors-only {0}'.format(
file_path, test_arg)
Expand All @@ -44,4 +55,53 @@ def get_errors(test_type, params):
if 'messages' in json_result:
errors = json_result['messages']

return errors
return errors

def identify_files(filename):
    """Parse a sitespeed HAR file and collect HTML and CSS responses.

    Reads the HAR (JSON) file at *filename*, walks every request/response
    entry in the log and groups usable responses by mime type:
      - 'htmls'     : responses whose mimeType contains 'html'
      - 'resources' : responses whose mimeType contains 'css'
    Each collected item is a dict with 'url', 'content' and 'index'
    (the 1-based position of the request in the HAR log).
    The 'elements' and 'attributes' lists are returned empty; callers
    may fill them in later.

    Entries without content, mime type, positive size or response text
    are skipped. Each collected response is also written to the local
    cache (set_cache_file) unless a fresh cache file already exists.
    """
    data = {
        'htmls': [],
        'elements': [],
        'attributes': [],
        'resources': []
    }

    with open(filename) as json_input_file:
        har_data = json.load(json_input_file)

    # Some HAR producers wrap everything in a top-level 'log' object.
    if 'log' in har_data:
        har_data = har_data['log']

    req_index = 1
    for entry in har_data["entries"]:
        req = entry['request']
        res = entry['response']
        req_url = req['url']

        if 'content' not in res:
            continue
        if 'mimeType' not in res['content']:
            continue
        if 'size' not in res['content']:
            continue
        if res['content']['size'] <= 0:
            continue
        # 'text' is optional per the HAR spec; without it there is
        # nothing to analyse (and indexing it would raise KeyError).
        if 'text' not in res['content']:
            continue

        if 'html' in res['content']['mimeType']:
            if not has_cache_file(req_url, True, cache_time_delta):
                set_cache_file(req_url, res['content']['text'], True)
            data['htmls'].append({
                'url': req_url,
                'content': res['content']['text'],
                'index': req_index
            })
        elif 'css' in res['content']['mimeType']:
            if not has_cache_file(req_url, True, cache_time_delta):
                set_cache_file(req_url, res['content']['text'], True)
            data['resources'].append({
                'url': req_url,
                'content': res['content']['text'],
                'index': req_index
            })
        req_index += 1

    return data

0 comments on commit f2b1f0f

Please sign in to comment.