From 52c79b62acbad7b3863bda6d6896fbe97c86a252 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 24 Jul 2024 10:54:59 -0700 Subject: [PATCH 01/38] fix snowflake agent bug Signed-off-by: Future-Outlier --- .../flytekitplugins/snowflake/agent.py | 26 ++++++++++--------- .../flytekitplugins/snowflake/task.py | 4 --- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index 71eba91186..1d4dafc783 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -11,8 +11,7 @@ from flytekit.models.literals import LiteralMap from flytekit.models.task import TaskTemplate from flytekit.models.types import LiteralType, StructuredDatasetType - -snowflake_connector = lazy_module("snowflake.connector") +from snowflake import connector as sc TASK_TYPE = "snowflake" SNOWFLAKE_PRIVATE_KEY = "snowflake_private_key" @@ -25,7 +24,6 @@ class SnowflakeJobMetadata(ResourceMeta): database: str schema: str warehouse: str - table: str query_id: str @@ -47,8 +45,8 @@ def get_private_key(): return pkb -def get_connection(metadata: SnowflakeJobMetadata) -> snowflake_connector: - return snowflake_connector.connect( +def get_connection(metadata: SnowflakeJobMetadata) -> sc: + return sc.connect( user=metadata.user, account=metadata.account, private_key=get_private_key(), @@ -69,10 +67,11 @@ async def create( ) -> SnowflakeJobMetadata: ctx = FlyteContextManager.current_context() literal_types = task_template.interface.inputs - params = TypeEngine.literal_map_to_kwargs(ctx, inputs, literal_types=literal_types) if inputs else None + + params = TypeEngine.literal_map_to_kwargs(ctx, inputs, literal_types=literal_types) if inputs.literals else None config = task_template.config - conn = snowflake_connector.connect( + conn = sc.connect( user=config["user"], 
account=config["account"], private_key=get_private_key(), @@ -90,7 +89,7 @@ async def create( database=config["database"], schema=config["schema"], warehouse=config["warehouse"], - table=config["table"], + # table=config["table"], query_id=str(cs.sfqid), ) @@ -98,25 +97,28 @@ async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: conn = get_connection(resource_meta) try: query_status = conn.get_query_status_throw_if_error(resource_meta.query_id) - except snowflake_connector.ProgrammingError as err: + except sc.ProgrammingError as err: logger.error("Failed to get snowflake job status with error:", err.msg) return Resource(phase=TaskExecution.FAILED) + + # The snowflake job's state is determined by query status. + # https://github.com/snowflakedb/snowflake-connector-python/blob/main/src/snowflake/connector/constants.py#L373 cur_phase = convert_to_flyte_phase(str(query_status.name)) res = None if cur_phase == TaskExecution.SUCCEEDED: ctx = FlyteContextManager.current_context() - output_metadata = f"snowflake://{resource_meta.user}:{resource_meta.account}/{resource_meta.warehouse}/{resource_meta.database}/{resource_meta.schema}/{resource_meta.table}" + uri = f"snowflake://{resource_meta.user}:{resource_meta.account}/{resource_meta.warehouse}/{resource_meta.database}/{resource_meta.schema}" res = literals.LiteralMap( { "results": TypeEngine.to_literal( ctx, - StructuredDataset(uri=output_metadata), + StructuredDataset(uri=uri), StructuredDataset, LiteralType(structured_dataset_type=StructuredDatasetType(format="")), ) } - ).to_flyte_idl() + ) return Resource(phase=cur_phase, outputs=res) diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py index 9ac9980a88..222ce58be8 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py @@ -12,7 +12,6 @@ _DATABASE_FIELD = "database" 
_SCHEMA_FIELD = "schema" _WAREHOUSE_FIELD = "warehouse" -_TABLE_FIELD = "table" @dataclass @@ -31,8 +30,6 @@ class SnowflakeConfig(object): schema: Optional[str] = None # The optional warehouse to set for the given Snowflake query warehouse: Optional[str] = None - # The optional table to set for the given Snowflake query - table: Optional[str] = None class SnowflakeTask(AsyncAgentExecutorMixin, SQLTask[SnowflakeConfig]): @@ -88,7 +85,6 @@ def get_config(self, settings: SerializationSettings) -> Dict[str, str]: _DATABASE_FIELD: self.task_config.database, _SCHEMA_FIELD: self.task_config.schema, _WAREHOUSE_FIELD: self.task_config.warehouse, - _TABLE_FIELD: self.task_config.table, } def get_sql(self, settings: SerializationSettings) -> Optional[_task_model.Sql]: From 2d5c1a67fbbc99a163002f40188d88ff5fc1ac22 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 24 Jul 2024 16:23:36 -0700 Subject: [PATCH 02/38] a work version Signed-off-by: Future-Outlier --- flytekit/core/type_engine.py | 7 ++ flytekit/types/structured/__init__.py | 13 +++ flytekit/types/structured/snowflake.py | 99 +++++++++++++++++++ .../types/structured/structured_dataset.py | 38 ++++++- .../flytekitplugins/snowflake/agent.py | 5 +- .../flytekitplugins/snowflake/task.py | 19 ++-- 6 files changed, 164 insertions(+), 17 deletions(-) create mode 100644 flytekit/types/structured/snowflake.py diff --git a/flytekit/core/type_engine.py b/flytekit/core/type_engine.py index 3165b4cdf5..65601cfce4 100644 --- a/flytekit/core/type_engine.py +++ b/flytekit/core/type_engine.py @@ -983,6 +983,7 @@ def lazy_import_transformers(cls): register_arrow_handlers, register_bigquery_handlers, register_pandas_handlers, + register_snowflake_handlers, ) from flytekit.types.structured.structured_dataset import DuplicateHandlerError @@ -1015,6 +1016,12 @@ def lazy_import_transformers(cls): from flytekit.types import numpy # noqa: F401 if is_imported("PIL"): from flytekit.types.file import image # noqa: F401 + if 
is_imported("snowflake.connector"): + try: + register_snowflake_handlers() + except DuplicateHandlerError: + logger.debug("Transformer for snowflake is already registered.") + @classmethod def to_literal_type(cls, python_type: Type) -> LiteralType: diff --git a/flytekit/types/structured/__init__.py b/flytekit/types/structured/__init__.py index 7dffa49eec..33ef982cfb 100644 --- a/flytekit/types/structured/__init__.py +++ b/flytekit/types/structured/__init__.py @@ -68,3 +68,16 @@ def register_bigquery_handlers(): "We won't register bigquery handler for structured dataset because " "we can't find the packages google-cloud-bigquery-storage and google-cloud-bigquery" ) + +def register_snowflake_handlers(): + try: + from .snowflake import PandasToSnowflakeEncodingHandlers, SnowflakeToPandasDecodingHandler + + StructuredDatasetTransformerEngine.register(SnowflakeToPandasDecodingHandler()) + StructuredDatasetTransformerEngine.register(PandasToSnowflakeEncodingHandlers()) + + except ImportError: + logger.info( + "We won't register snowflake handler for structured dataset because " + "we can't find package snowflake-connector-python" + ) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py new file mode 100644 index 0000000000..7dd04ad9f9 --- /dev/null +++ b/flytekit/types/structured/snowflake.py @@ -0,0 +1,99 @@ +import re +import typing + +import pandas as pd +import snowflake.connector +from snowflake.connector.pandas_tools import write_pandas + +from flytekit import FlyteContext +from flytekit.models import literals +from flytekit.models.types import StructuredDatasetType +from flytekit.types.structured.structured_dataset import ( + StructuredDataset, + StructuredDatasetDecoder, + StructuredDatasetEncoder, + StructuredDatasetMetadata, +) + +SNOWFLAKE = "snowflake" + + +def get_private_key(): + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives import serialization + + import flytekit + + 
pk_string = flytekit.current_context().secrets.get("snowflake", "private_key", encode_mode="rb") + p_key = serialization.load_pem_private_key(pk_string, password=None, backend=default_backend()) + + pkb = p_key.private_bytes( + encoding=serialization.Encoding.DER, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption(), + ) + + return pkb + + +def _write_to_sf(structured_dataset: StructuredDataset): + if structured_dataset.uri is None: + raise ValueError("structured_dataset.uri cannot be None.") + + uri = structured_dataset.uri + _, user, account, warehouse, database, schema, table = re.split("\\/|://|:", uri) + df = structured_dataset.dataframe + + conn = snowflake.connector.connect( + user=user, account=account, private_key=get_private_key(), database=database, schema=schema, warehouse=warehouse + ) + + write_pandas(conn, df, table) + + +def _read_from_sf( + flyte_value: literals.StructuredDataset, current_task_metadata: StructuredDatasetMetadata +) -> pd.DataFrame: + if flyte_value.uri is None: + raise ValueError("structured_dataset.uri cannot be None.") + + uri = flyte_value.uri + _, user, account, warehouse, database, schema, table = re.split("\\/|://|:", uri) + + conn = snowflake.connector.connect( + user=user, account=account, private_key=get_private_key(), database=database, schema=schema, warehouse=warehouse + ) + + cs = conn.cursor() + cs.execute(f"select * from {table}") + + return cs.fetch_pandas_all() + + +class PandasToSnowflakeEncodingHandlers(StructuredDatasetEncoder): + def __init__(self): + super().__init__(pd.DataFrame, SNOWFLAKE, supported_format="", additional_protocols=["sf"]) + + def encode( + self, + ctx: FlyteContext, + structured_dataset: StructuredDataset, + structured_dataset_type: StructuredDatasetType, + ) -> literals.StructuredDataset: + _write_to_sf(structured_dataset) + return literals.StructuredDataset( + uri=typing.cast(str, structured_dataset.uri), 
metadata=StructuredDatasetMetadata(structured_dataset_type) + ) + + +class SnowflakeToPandasDecodingHandler(StructuredDatasetDecoder): + def __init__(self): + super().__init__(pd.DataFrame, SNOWFLAKE, supported_format="", additional_protocols=["sf"]) + + def decode( + self, + ctx: FlyteContext, + flyte_value: literals.StructuredDataset, + current_task_metadata: StructuredDatasetMetadata, + ) -> pd.DataFrame: + return _read_from_sf(flyte_value, current_task_metadata) diff --git a/flytekit/types/structured/structured_dataset.py b/flytekit/types/structured/structured_dataset.py index c11519462e..93be134505 100644 --- a/flytekit/types/structured/structured_dataset.py +++ b/flytekit/types/structured/structured_dataset.py @@ -1,12 +1,11 @@ from __future__ import annotations - import _datetime import collections import types import typing from abc import ABC, abstractmethod from dataclasses import dataclass, field, is_dataclass -from typing import Dict, Generator, Optional, Type, Union +from typing import Dict, Generator, Optional, Type, Union, List from dataclasses_json import config from fsspec.utils import get_protocol @@ -222,7 +221,7 @@ def extract_cols_and_format( class StructuredDatasetEncoder(ABC): - def __init__(self, python_type: Type[T], protocol: Optional[str] = None, supported_format: Optional[str] = None): + def __init__(self, python_type: Type[T], protocol: Optional[str] = None, supported_format: Optional[str] = None, additional_protocols: Optional[List[str]] = None): """ Extend this abstract class, implement the encode function, and register your concrete class with the StructuredDatasetTransformerEngine class in order for the core flytekit type engine to handle @@ -238,9 +237,15 @@ def __init__(self, python_type: Type[T], protocol: Optional[str] = None, support :param supported_format: Arbitrary string representing the format. If not supplied then an empty string will be used. An empty string implies that the encoder works with any format. 
If the format being asked for does not exist, the transformer engine will look for the "" encoder instead and write a warning. + :param additional_protocols: Support many protocols to let user is able to connect to the service with various options. """ self._python_type = python_type self._protocol = protocol.replace("://", "") if protocol else None + self._additional_protocols = ( + [additional_protocol.replace("://", "") for additional_protocol in additional_protocols] + if additional_protocols + else None + ) self._supported_format = supported_format or "" @property @@ -251,6 +256,10 @@ def python_type(self) -> Type[T]: def protocol(self) -> Optional[str]: return self._protocol + @property + def additional_protocols(self) -> Optional[List[str]]: + return self._additional_protocols + @property def supported_format(self) -> str: return self._supported_format @@ -284,7 +293,7 @@ def encode( class StructuredDatasetDecoder(ABC): - def __init__(self, python_type: Type[DF], protocol: Optional[str] = None, supported_format: Optional[str] = None): + def __init__(self, python_type: Type[DF], protocol: Optional[str] = None, supported_format: Optional[str] = None, additional_protocols: Optional[List[str]] = None): """ Extend this abstract class, implement the decode function, and register your concrete class with the StructuredDatasetTransformerEngine class in order for the core flytekit type engine to handle @@ -299,9 +308,15 @@ def __init__(self, python_type: Type[DF], protocol: Optional[str] = None, suppor :param supported_format: Arbitrary string representing the format. If not supplied then an empty string will be used. An empty string implies that the decoder works with any format. If the format being asked for does not exist, the transformer enginer will look for the "" decoder instead and write a warning. + :param additional_protocols: Support many protocols to let user is able to connect to the service with various options. 
""" self._python_type = python_type self._protocol = protocol.replace("://", "") if protocol else None + self._additional_protocols = ( + [additional_protocol.replace("://", "") for additional_protocol in additional_protocols] + if additional_protocols + else None + ) self._supported_format = supported_format or "" @property @@ -312,6 +327,10 @@ def python_type(self) -> Type[DF]: def protocol(self) -> Optional[str]: return self._protocol + @property + def additional_protocols(self) -> Optional[List[str]]: + return self._additional_protocols + @property def supported_format(self) -> str: return self._supported_format @@ -529,6 +548,17 @@ def register( h, h.protocol, default_for_type, override, default_format_for_type, default_storage_for_type ) + if h.additional_protocols is not None: + for additional_protocol in h.additional_protocols: + cls.register_for_protocol( + h, + additional_protocol, + default_for_type, + override, + default_format_for_type, + default_storage_for_type, + ) + @classmethod def register_for_protocol( cls, diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index 1d4dafc783..cd53fd5873 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -24,6 +24,7 @@ class SnowflakeJobMetadata(ResourceMeta): database: str schema: str warehouse: str + table: str query_id: str @@ -89,7 +90,7 @@ async def create( database=config["database"], schema=config["schema"], warehouse=config["warehouse"], - # table=config["table"], + table=config["table"], query_id=str(cs.sfqid), ) @@ -108,7 +109,7 @@ async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: if cur_phase == TaskExecution.SUCCEEDED: ctx = FlyteContextManager.current_context() - uri = 
f"snowflake://{resource_meta.user}:{resource_meta.account}/{resource_meta.warehouse}/{resource_meta.database}/{resource_meta.schema}" + uri = f"snowflake://{resource_meta.user}:{resource_meta.account}/{resource_meta.warehouse}/{resource_meta.database}/{resource_meta.schema}/{resource_meta.table}" res = literals.LiteralMap( { "results": TypeEngine.to_literal( diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py index 222ce58be8..cb5a89ce1b 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py @@ -12,7 +12,7 @@ _DATABASE_FIELD = "database" _SCHEMA_FIELD = "schema" _WAREHOUSE_FIELD = "warehouse" - +_TABLE_FIELD = "table" @dataclass class SnowflakeConfig(object): @@ -20,16 +20,12 @@ class SnowflakeConfig(object): SnowflakeConfig should be used to configure a Snowflake Task. """ - # The user to query against - user: Optional[str] = None - # The account to query against - account: Optional[str] = None - # The database to query against - database: Optional[str] = None - # The optional schema to separate query execution. 
- schema: Optional[str] = None - # The optional warehouse to set for the given Snowflake query - warehouse: Optional[str] = None + user: str + account: str + database: str + schema: str + warehouse: str + table: str class SnowflakeTask(AsyncAgentExecutorMixin, SQLTask[SnowflakeConfig]): @@ -85,6 +81,7 @@ def get_config(self, settings: SerializationSettings) -> Dict[str, str]: _DATABASE_FIELD: self.task_config.database, _SCHEMA_FIELD: self.task_config.schema, _WAREHOUSE_FIELD: self.task_config.warehouse, + _TABLE_FIELD: self.task_config.table, } def get_sql(self, settings: SerializationSettings) -> Optional[_task_model.Sql]: From 804052a9fee5d89a5ccd30cdc5979bd79d8eeda8 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 24 Jul 2024 20:35:05 -0700 Subject: [PATCH 03/38] Snowflake work version Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 8 ++++---- .../flytekit-snowflake/flytekitplugins/snowflake/agent.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index 7dd04ad9f9..80f63e4205 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -58,15 +58,15 @@ def _read_from_sf( raise ValueError("structured_dataset.uri cannot be None.") uri = flyte_value.uri - _, user, account, warehouse, database, schema, table = re.split("\\/|://|:", uri) + _, user, account, warehouse, database, schema, query_id = re.split("\\/|://|:", uri) conn = snowflake.connector.connect( - user=user, account=account, private_key=get_private_key(), database=database, schema=schema, warehouse=warehouse + user=user, account=account, private_key=get_private_key(), database=database, schema=schema, warehouse=warehouse, + table="FLYTEAGENT.PUBLIC.TEST" ) cs = conn.cursor() - cs.execute(f"select * from {table}") - + cs.get_results_from_sfqid(query_id) return cs.fetch_pandas_all() diff --git 
a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index cd53fd5873..945782a524 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -91,7 +91,7 @@ async def create( schema=config["schema"], warehouse=config["warehouse"], table=config["table"], - query_id=str(cs.sfqid), + query_id=cs.sfqid, ) async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: @@ -109,7 +109,7 @@ async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: if cur_phase == TaskExecution.SUCCEEDED: ctx = FlyteContextManager.current_context() - uri = f"snowflake://{resource_meta.user}:{resource_meta.account}/{resource_meta.warehouse}/{resource_meta.database}/{resource_meta.schema}/{resource_meta.table}" + uri = f"snowflake://{resource_meta.user}:{resource_meta.account}/{resource_meta.warehouse}/{resource_meta.database}/{resource_meta.schema}/{resource_meta.query_id}" res = literals.LiteralMap( { "results": TypeEngine.to_literal( From 90f08dc289c5f5d15ae3720a65fba78bcfad8c4c Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 24 Jul 2024 21:36:27 -0700 Subject: [PATCH 04/38] fix secret encode Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index 80f63e4205..46478f4f8f 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -4,7 +4,7 @@ import pandas as pd import snowflake.connector from snowflake.connector.pandas_tools import write_pandas - +import flytekit from flytekit import FlyteContext from flytekit.models import literals from flytekit.models.types import StructuredDatasetType @@ -22,9 +22,8 @@ def get_private_key(): from cryptography.hazmat.backends import 
default_backend from cryptography.hazmat.primitives import serialization - import flytekit - - pk_string = flytekit.current_context().secrets.get("snowflake", "private_key", encode_mode="rb") + pk_string = flytekit.current_context().secrets.get("snowflake", "private_key", encode_mode="r") + pk_string = pk_string.strip().encode() p_key = serialization.load_pem_private_key(pk_string, password=None, backend=default_backend()) pkb = p_key.private_bytes( From c0f84f2283bcfb1b7536ea38350bdf38a305f142 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 24 Jul 2024 22:31:10 -0700 Subject: [PATCH 05/38] all works, I am so happy Signed-off-by: Future-Outlier --- flytekit/core/type_engine.py | 1 - flytekit/types/structured/__init__.py | 1 + flytekit/types/structured/snowflake.py | 11 +++++++++-- .../types/structured/structured_dataset.py | 19 ++++++++++++++++--- .../flytekitplugins/snowflake/agent.py | 16 +++++++++------- .../flytekitplugins/snowflake/task.py | 4 ++++ 6 files changed, 39 insertions(+), 13 deletions(-) diff --git a/flytekit/core/type_engine.py b/flytekit/core/type_engine.py index 65601cfce4..5c18c6b9b7 100644 --- a/flytekit/core/type_engine.py +++ b/flytekit/core/type_engine.py @@ -1022,7 +1022,6 @@ def lazy_import_transformers(cls): except DuplicateHandlerError: logger.debug("Transformer for snowflake is already registered.") - @classmethod def to_literal_type(cls, python_type: Type) -> LiteralType: """ diff --git a/flytekit/types/structured/__init__.py b/flytekit/types/structured/__init__.py index 33ef982cfb..05d1fa86e3 100644 --- a/flytekit/types/structured/__init__.py +++ b/flytekit/types/structured/__init__.py @@ -69,6 +69,7 @@ def register_bigquery_handlers(): "we can't find the packages google-cloud-bigquery-storage and google-cloud-bigquery" ) + def register_snowflake_handlers(): try: from .snowflake import PandasToSnowflakeEncodingHandlers, SnowflakeToPandasDecodingHandler diff --git a/flytekit/types/structured/snowflake.py 
b/flytekit/types/structured/snowflake.py index 46478f4f8f..5d314b9b2f 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -4,6 +4,7 @@ import pandas as pd import snowflake.connector from snowflake.connector.pandas_tools import write_pandas + import flytekit from flytekit import FlyteContext from flytekit.models import literals @@ -23,6 +24,7 @@ def get_private_key(): from cryptography.hazmat.primitives import serialization pk_string = flytekit.current_context().secrets.get("snowflake", "private_key", encode_mode="r") + # cryptography needs str to be stripped and converted to bytes pk_string = pk_string.strip().encode() p_key = serialization.load_pem_private_key(pk_string, password=None, backend=default_backend()) @@ -60,8 +62,13 @@ def _read_from_sf( _, user, account, warehouse, database, schema, query_id = re.split("\\/|://|:", uri) conn = snowflake.connector.connect( - user=user, account=account, private_key=get_private_key(), database=database, schema=schema, warehouse=warehouse, - table="FLYTEAGENT.PUBLIC.TEST" + user=user, + account=account, + private_key=get_private_key(), + database=database, + schema=schema, + warehouse=warehouse, + table="FLYTEAGENT.PUBLIC.TEST", ) cs = conn.cursor() diff --git a/flytekit/types/structured/structured_dataset.py b/flytekit/types/structured/structured_dataset.py index 93be134505..e8c1eb03ed 100644 --- a/flytekit/types/structured/structured_dataset.py +++ b/flytekit/types/structured/structured_dataset.py @@ -1,11 +1,12 @@ from __future__ import annotations + import _datetime import collections import types import typing from abc import ABC, abstractmethod from dataclasses import dataclass, field, is_dataclass -from typing import Dict, Generator, Optional, Type, Union, List +from typing import Dict, Generator, List, Optional, Type, Union from dataclasses_json import config from fsspec.utils import get_protocol @@ -221,7 +222,13 @@ def extract_cols_and_format( class 
StructuredDatasetEncoder(ABC): - def __init__(self, python_type: Type[T], protocol: Optional[str] = None, supported_format: Optional[str] = None, additional_protocols: Optional[List[str]] = None): + def __init__( + self, + python_type: Type[T], + protocol: Optional[str] = None, + supported_format: Optional[str] = None, + additional_protocols: Optional[List[str]] = None, + ): """ Extend this abstract class, implement the encode function, and register your concrete class with the StructuredDatasetTransformerEngine class in order for the core flytekit type engine to handle @@ -293,7 +300,13 @@ def encode( class StructuredDatasetDecoder(ABC): - def __init__(self, python_type: Type[DF], protocol: Optional[str] = None, supported_format: Optional[str] = None, additional_protocols: Optional[List[str]] = None): + def __init__( + self, + python_type: Type[DF], + protocol: Optional[str] = None, + supported_format: Optional[str] = None, + additional_protocols: Optional[List[str]] = None, + ): """ Extend this abstract class, implement the decode function, and register your concrete class with the StructuredDatasetTransformerEngine class in order for the core flytekit type engine to handle diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index 945782a524..4b86c35177 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -3,7 +3,8 @@ from flyteidl.core.execution_pb2 import TaskExecution -from flytekit import FlyteContextManager, StructuredDataset, lazy_module, logger +import flytekit +from flytekit import FlyteContextManager, StructuredDataset, logger from flytekit.core.type_engine import TypeEngine from flytekit.extend.backend.base_agent import AgentRegistry, AsyncAgentBase, Resource, ResourceMeta from flytekit.extend.backend.utils import convert_to_flyte_phase @@ -26,15 +27,16 @@ class 
SnowflakeJobMetadata(ResourceMeta): warehouse: str table: str query_id: str + output: bool def get_private_key(): from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization - import flytekit - - pk_string = flytekit.current_context().secrets.get(SNOWFLAKE_PRIVATE_KEY, encode_mode="rb") + pk_string = flytekit.current_context().secrets.get(SNOWFLAKE_PRIVATE_KEY, encode_mode="r") + # cryptography needs str to be stripped and converted to bytes + pk_string = pk_string.strip().encode() p_key = serialization.load_pem_private_key(pk_string, password=None, backend=default_backend()) pkb = p_key.private_bytes( @@ -82,7 +84,7 @@ async def create( ) cs = conn.cursor() - cs.execute_async(task_template.sql.statement, params=params) + cs.execute_async(task_template.sql.statement, params) return SnowflakeJobMetadata( user=config["user"], @@ -92,6 +94,7 @@ async def create( warehouse=config["warehouse"], table=config["table"], query_id=cs.sfqid, + output=len(task_template.interface.outputs) > 0, ) async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: @@ -107,7 +110,7 @@ async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: cur_phase = convert_to_flyte_phase(str(query_status.name)) res = None - if cur_phase == TaskExecution.SUCCEEDED: + if cur_phase == TaskExecution.SUCCEEDED and resource_meta.output: ctx = FlyteContextManager.current_context() uri = f"snowflake://{resource_meta.user}:{resource_meta.account}/{resource_meta.warehouse}/{resource_meta.database}/{resource_meta.schema}/{resource_meta.query_id}" res = literals.LiteralMap( @@ -120,7 +123,6 @@ async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: ) } ) - return Resource(phase=cur_phase, outputs=res) async def delete(self, resource_meta: SnowflakeJobMetadata, **kwargs): diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py 
b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py index cb5a89ce1b..422bd8ce0b 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py @@ -14,6 +14,7 @@ _WAREHOUSE_FIELD = "warehouse" _TABLE_FIELD = "table" + @dataclass class SnowflakeConfig(object): """ @@ -56,13 +57,16 @@ def __init__( :param output_schema_type: If some data is produced by this query, then you can specify the output schema type :param kwargs: All other args required by Parent type - SQLTask """ + outputs = None if output_schema_type is not None: outputs = { "results": output_schema_type, } + if task_config is None: task_config = SnowflakeConfig() + super().__init__( name=name, task_config=task_config, From 05adfd1eb7f7c15248d4a1f93d5cb8e782a2bb5c Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Thu, 25 Jul 2024 12:14:09 -0700 Subject: [PATCH 06/38] improve additional protocol Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 6 ++-- .../types/structured/structured_dataset.py | 32 ------------------- 2 files changed, 3 insertions(+), 35 deletions(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index 5d314b9b2f..17c6472bb9 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -19,7 +19,7 @@ SNOWFLAKE = "snowflake" -def get_private_key(): +def get_private_key() -> bytes: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization @@ -78,7 +78,7 @@ def _read_from_sf( class PandasToSnowflakeEncodingHandlers(StructuredDatasetEncoder): def __init__(self): - super().__init__(pd.DataFrame, SNOWFLAKE, supported_format="", additional_protocols=["sf"]) + super().__init__(python_type=pd.DataFrame, protocol=SNOWFLAKE, supported_format="") def encode( self, @@ -94,7 +94,7 @@ def encode( class 
SnowflakeToPandasDecodingHandler(StructuredDatasetDecoder): def __init__(self): - super().__init__(pd.DataFrame, SNOWFLAKE, supported_format="", additional_protocols=["sf"]) + super().__init__(pd.DataFrame, protocol=SNOWFLAKE, supported_format="") def decode( self, diff --git a/flytekit/types/structured/structured_dataset.py b/flytekit/types/structured/structured_dataset.py index e8c1eb03ed..128ddab168 100644 --- a/flytekit/types/structured/structured_dataset.py +++ b/flytekit/types/structured/structured_dataset.py @@ -227,7 +227,6 @@ def __init__( python_type: Type[T], protocol: Optional[str] = None, supported_format: Optional[str] = None, - additional_protocols: Optional[List[str]] = None, ): """ Extend this abstract class, implement the encode function, and register your concrete class with the @@ -244,15 +243,9 @@ def __init__( :param supported_format: Arbitrary string representing the format. If not supplied then an empty string will be used. An empty string implies that the encoder works with any format. If the format being asked for does not exist, the transformer engine will look for the "" encoder instead and write a warning. - :param additional_protocols: Support many protocols to let user is able to connect to the service with various options. """ self._python_type = python_type self._protocol = protocol.replace("://", "") if protocol else None - self._additional_protocols = ( - [additional_protocol.replace("://", "") for additional_protocol in additional_protocols] - if additional_protocols - else None - ) self._supported_format = supported_format or "" @property @@ -263,10 +256,6 @@ def python_type(self) -> Type[T]: def protocol(self) -> Optional[str]: return self._protocol - @property - def additional_protocols(self) -> Optional[List[str]]: - return self._additional_protocols - @property def supported_format(self) -> str: return self._supported_format @@ -321,15 +310,9 @@ def __init__( :param supported_format: Arbitrary string representing the format. 
If not supplied then an empty string will be used. An empty string implies that the decoder works with any format. If the format being asked for does not exist, the transformer enginer will look for the "" decoder instead and write a warning. - :param additional_protocols: Support many protocols to let user is able to connect to the service with various options. """ self._python_type = python_type self._protocol = protocol.replace("://", "") if protocol else None - self._additional_protocols = ( - [additional_protocol.replace("://", "") for additional_protocol in additional_protocols] - if additional_protocols - else None - ) self._supported_format = supported_format or "" @property @@ -340,10 +323,6 @@ def python_type(self) -> Type[DF]: def protocol(self) -> Optional[str]: return self._protocol - @property - def additional_protocols(self) -> Optional[List[str]]: - return self._additional_protocols - @property def supported_format(self) -> str: return self._supported_format @@ -561,17 +540,6 @@ def register( h, h.protocol, default_for_type, override, default_format_for_type, default_storage_for_type ) - if h.additional_protocols is not None: - for additional_protocol in h.additional_protocols: - cls.register_for_protocol( - h, - additional_protocol, - default_for_type, - override, - default_format_for_type, - default_storage_for_type, - ) - @classmethod def register_for_protocol( cls, From 89d633fee1f1e9120ff0aa7bed89d1b5e3c6957d Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Thu, 25 Jul 2024 13:25:45 -0700 Subject: [PATCH 07/38] fix tests Signed-off-by: Future-Outlier --- plugins/flytekit-bigquery/flytekitplugins/bigquery/task.py | 2 +- .../flytekit-snowflake/flytekitplugins/snowflake/task.py | 5 +---- plugins/flytekit-snowflake/tests/test_agent.py | 6 ++---- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/plugins/flytekit-bigquery/flytekitplugins/bigquery/task.py b/plugins/flytekit-bigquery/flytekitplugins/bigquery/task.py index 
5ae03b3f88..c1707f09af 100644 --- a/plugins/flytekit-bigquery/flytekitplugins/bigquery/task.py +++ b/plugins/flytekit-bigquery/flytekitplugins/bigquery/task.py @@ -38,7 +38,7 @@ def __init__( self, name: str, query_template: str, - task_config: Optional[BigQueryConfig], + task_config: BigQueryConfig, inputs: Optional[Dict[str, Type]] = None, output_structured_dataset_type: Optional[Type[StructuredDataset]] = None, **kwargs, diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py index 422bd8ce0b..9bacc660fa 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py @@ -41,7 +41,7 @@ def __init__( self, name: str, query_template: str, - task_config: Optional[SnowflakeConfig] = None, + task_config: SnowflakeConfig, inputs: Optional[Dict[str, Type]] = None, output_schema_type: Optional[Type[StructuredDataset]] = None, **kwargs, @@ -64,9 +64,6 @@ def __init__( "results": output_schema_type, } - if task_config is None: - task_config = SnowflakeConfig() - super().__init__( name=name, task_config=task_config, diff --git a/plugins/flytekit-snowflake/tests/test_agent.py b/plugins/flytekit-snowflake/tests/test_agent.py index adc699061f..c42519917a 100644 --- a/plugins/flytekit-snowflake/tests/test_agent.py +++ b/plugins/flytekit-snowflake/tests/test_agent.py @@ -91,6 +91,7 @@ async def test_snowflake_agent(mock_get_private_key): schema="dummy_schema", warehouse="dummy_warehouse", query_id="dummy_id", + output=False, ) metadata = await agent.create(dummy_template, task_inputs) @@ -98,10 +99,7 @@ async def test_snowflake_agent(mock_get_private_key): resource = await agent.get(metadata) assert resource.phase == TaskExecution.SUCCEEDED - assert ( - resource.outputs.literals["results"].scalar.structured_dataset.uri - == "snowflake://dummy_user:dummy_account/dummy_warehouse/dummy_database/dummy_schema/dummy_table" - ) + 
assert resource.outputs == None delete_response = await agent.delete(snowflake_metadata) assert delete_response is None From d1d802449c8f2f48c0dabcdc36ad21f0a94c525f Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Thu, 25 Jul 2024 13:59:39 -0700 Subject: [PATCH 08/38] Fix Tests Signed-off-by: Future-Outlier --- .../tests/test_snowflake.py | 27 +++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/plugins/flytekit-snowflake/tests/test_snowflake.py b/plugins/flytekit-snowflake/tests/test_snowflake.py index 672f4a19ad..a84cc0ac27 100644 --- a/plugins/flytekit-snowflake/tests/test_snowflake.py +++ b/plugins/flytekit-snowflake/tests/test_snowflake.py @@ -21,7 +21,12 @@ def test_serialization(): name="flytekit.demo.snowflake_task.query", inputs=kwtypes(ds=str), task_config=SnowflakeConfig( - account="snowflake", warehouse="my_warehouse", schema="my_schema", database="my_database" + account="snowflake", + user="my_user", + warehouse="my_warehouse", + schema="my_schema", + database="my_database", + table="my_table", ), query_template=query_template, # the schema literal's backend uri will be equal to the value of .raw_output_data @@ -64,6 +69,14 @@ def test_local_exec(): snowflake_task = SnowflakeTask( name="flytekit.demo.snowflake_task.query2", inputs=kwtypes(ds=str), + task_config=SnowflakeConfig( + account="TEST-ACCOUNT", + user="FLYTE", + database="FLYTEAGENT", + schema="PUBLIC", + warehouse="COMPUTE_WH", + table="FLYTEAGENT.PUBLIC.TEST", + ), query_template="select 1\n", # the schema literal's backend uri will be equal to the value of .raw_output_data output_schema_type=FlyteSchema, @@ -73,15 +86,19 @@ def test_local_exec(): assert snowflake_task.query_template == "select 1" assert len(snowflake_task.interface.outputs) == 1 - # will not run locally - with pytest.raises(Exception): - snowflake_task() - def test_sql_template(): snowflake_task = SnowflakeTask( name="flytekit.demo.snowflake_task.query2", inputs=kwtypes(ds=str), + 
task_config=SnowflakeConfig( + account="TEST-ACCOUNT", + user="FLYTE", + database="FLYTEAGENT", + schema="PUBLIC", + warehouse="COMPUTE_WH", + table="FLYTEAGENT.PUBLIC.TEST", + ), query_template="""select 1 from\t custom where column = 1""", output_schema_type=FlyteSchema, From a17f28d05da51cfed9ad94c3a2931c33ae787481 Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Thu, 25 Jul 2024 14:24:24 -0700 Subject: [PATCH 09/38] update agent Signed-off-by: Kevin Su --- plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index 4b86c35177..239f478998 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -94,7 +94,7 @@ async def create( warehouse=config["warehouse"], table=config["table"], query_id=cs.sfqid, - output=len(task_template.interface.outputs) > 0, + output=task_template.interface.outputs and len(task_template.interface.outputs) > 0, ) async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: From 547a801633e7ee7ef63eb10172e50107b96bd761 Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Thu, 25 Jul 2024 14:49:37 -0700 Subject: [PATCH 10/38] Add snowflake test Signed-off-by: Kevin Su --- plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index 239f478998..5a34f585ce 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -27,7 +27,7 @@ class SnowflakeJobMetadata(ResourceMeta): warehouse: str table: str query_id: str - output: bool + has_output: bool def get_private_key(): 
@@ -94,7 +94,7 @@ async def create( warehouse=config["warehouse"], table=config["table"], query_id=cs.sfqid, - output=task_template.interface.outputs and len(task_template.interface.outputs) > 0, + has_output=task_template.interface.outputs is not None and len(task_template.interface.outputs) > 0, ) async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: From a6de45c4f181f6e6351be7e9d5a8facaadbd950b Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Thu, 25 Jul 2024 14:59:00 -0700 Subject: [PATCH 11/38] nit Signed-off-by: Kevin Su --- plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index 5a34f585ce..b45e555f20 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -110,7 +110,7 @@ async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: cur_phase = convert_to_flyte_phase(str(query_status.name)) res = None - if cur_phase == TaskExecution.SUCCEEDED and resource_meta.output: + if cur_phase == TaskExecution.SUCCEEDED and resource_meta.has_output: ctx = FlyteContextManager.current_context() uri = f"snowflake://{resource_meta.user}:{resource_meta.account}/{resource_meta.warehouse}/{resource_meta.database}/{resource_meta.schema}/{resource_meta.query_id}" res = literals.LiteralMap( From 14c4318f87615961c0d703bdf1b5949b73544d1f Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Thu, 25 Jul 2024 15:20:09 -0700 Subject: [PATCH 12/38] sd Signed-off-by: Kevin Su --- flytekit/types/structured/snowflake.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index 17c6472bb9..f1e42e5fec 100644 --- a/flytekit/types/structured/snowflake.py +++ 
b/flytekit/types/structured/snowflake.py @@ -23,7 +23,7 @@ def get_private_key() -> bytes: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization - pk_string = flytekit.current_context().secrets.get("snowflake", "private_key", encode_mode="r") + pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") # cryptography needs str to be stripped and converted to bytes pk_string = pk_string.strip().encode() p_key = serialization.load_pem_private_key(pk_string, password=None, backend=default_backend()) From 76637e8f4b9eef24330ad5b3d488178ee2c2e05d Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Thu, 25 Jul 2024 16:00:15 -0700 Subject: [PATCH 13/38] snowflake loglinks Signed-off-by: Future-Outlier --- .../flytekitplugins/snowflake/agent.py | 21 +++++++++++++++++-- plugins/flytekit-snowflake/setup.py | 2 +- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index b45e555f20..4b0304af89 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -1,7 +1,7 @@ from dataclasses import dataclass from typing import Optional -from flyteidl.core.execution_pb2 import TaskExecution +from flyteidl.core.execution_pb2 import TaskExecution, TaskLog import flytekit from flytekit import FlyteContextManager, StructuredDataset, logger @@ -105,6 +105,10 @@ async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: logger.error("Failed to get snowflake job status with error:", err.msg) return Resource(phase=TaskExecution.FAILED) + log_link = TaskLog( + uri=construct_query_link(resource_meta=resource_meta), + name="Snowflake Query Details", + ) # The snowflake job's state is determined by query status. 
# https://github.com/snowflakedb/snowflake-connector-python/blob/main/src/snowflake/connector/constants.py#L373 cur_phase = convert_to_flyte_phase(str(query_status.name)) @@ -123,7 +127,8 @@ async def get(self, resource_meta: SnowflakeJobMetadata, **kwargs) -> Resource: ) } ) - return Resource(phase=cur_phase, outputs=res) + + return Resource(phase=cur_phase, outputs=res, log_links=[log_link]) async def delete(self, resource_meta: SnowflakeJobMetadata, **kwargs): conn = get_connection(resource_meta) @@ -135,5 +140,17 @@ async def delete(self, resource_meta: SnowflakeJobMetadata, **kwargs): cs.close() conn.close() +def construct_query_link(resource_meta: SnowflakeJobMetadata) -> str: + base_url = "https://app.snowflake.com" + + # Extract the account and region (assuming the format is account-region, you might need to adjust this based on your actual account format) + account_parts = resource_meta.account.split('-') + account = account_parts[0] + region = account_parts[1] if len(account_parts) > 1 else "" + + url = f"{base_url}/{region}/{account}/#/compute/history/queries/{resource_meta.query_id}/detail" + + return url + AgentRegistry.register(SnowflakeAgent()) diff --git a/plugins/flytekit-snowflake/setup.py b/plugins/flytekit-snowflake/setup.py index b5265c299e..c2c42044a9 100644 --- a/plugins/flytekit-snowflake/setup.py +++ b/plugins/flytekit-snowflake/setup.py @@ -4,7 +4,7 @@ microlib_name = f"flytekitplugins-{PLUGIN_NAME}" -plugin_requires = ["flytekit>1.10.7", "snowflake-connector-python>=3.1.0"] +plugin_requires = ["flytekit>1.10.7", "snowflake-connector-python>=3.11.0"] __version__ = "0.0.0+develop" From 762ad0bc43ba16fc5b258c4861fbed07bf26b503 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Fri, 26 Jul 2024 01:27:03 -0700 Subject: [PATCH 14/38] add metadata Signed-off-by: Future-Outlier --- .../flytekit-snowflake/flytekitplugins/snowflake/task.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git 
a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py index 9bacc660fa..adf083fc0b 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py @@ -19,6 +19,14 @@ class SnowflakeConfig(object): """ SnowflakeConfig should be used to configure a Snowflake Task. + You can use the query below to retrieve all metadata for this config. + + SELECT + CURRENT_USER() AS "User", + CONCAT(CURRENT_ORGANIZATION_NAME(), '-', CURRENT_ACCOUNT_NAME()) AS "Account", + CURRENT_DATABASE() AS "Database", + CURRENT_SCHEMA() AS "Schema", + CURRENT_WAREHOUSE() AS "Warehouse"; """ user: str From 1fcd2de309e57d4399ef156c38e1b0925496e626 Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Mon, 29 Jul 2024 11:42:30 -0700 Subject: [PATCH 15/38] secret Signed-off-by: Kevin Su --- flytekit/types/structured/snowflake.py | 6 +++++- .../flytekit-snowflake/flytekitplugins/snowflake/agent.py | 3 ++- plugins/flytekit-snowflake/tests/test_agent.py | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index f1e42e5fec..9905b1954e 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -7,6 +7,7 @@ import flytekit from flytekit import FlyteContext +from flytekit.configuration.plugin import get_plugin from flytekit.models import literals from flytekit.models.types import StructuredDatasetType from flytekit.types.structured.structured_dataset import ( @@ -23,7 +24,10 @@ def get_private_key() -> bytes: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization - pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") + if get_plugin().secret_requires_group(): + pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", 
encode_mode="r") + else: + pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") # cryptography needs str to be stripped and converted to bytes pk_string = pk_string.strip().encode() p_key = serialization.load_pem_private_key(pk_string, password=None, backend=default_backend()) diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index 4b0304af89..2cbde4513f 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -140,11 +140,12 @@ async def delete(self, resource_meta: SnowflakeJobMetadata, **kwargs): cs.close() conn.close() + def construct_query_link(resource_meta: SnowflakeJobMetadata) -> str: base_url = "https://app.snowflake.com" # Extract the account and region (assuming the format is account-region, you might need to adjust this based on your actual account format) - account_parts = resource_meta.account.split('-') + account_parts = resource_meta.account.split("-") account = account_parts[0] region = account_parts[1] if len(account_parts) > 1 else "" diff --git a/plugins/flytekit-snowflake/tests/test_agent.py b/plugins/flytekit-snowflake/tests/test_agent.py index c42519917a..93473459f5 100644 --- a/plugins/flytekit-snowflake/tests/test_agent.py +++ b/plugins/flytekit-snowflake/tests/test_agent.py @@ -91,7 +91,7 @@ async def test_snowflake_agent(mock_get_private_key): schema="dummy_schema", warehouse="dummy_warehouse", query_id="dummy_id", - output=False, + has_output=False, ) metadata = await agent.create(dummy_template, task_inputs) From 4a8c8ba18083d346a34af6d1ea4d8bc6ac03b35b Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Mon, 29 Jul 2024 11:53:21 -0700 Subject: [PATCH 16/38] nit Signed-off-by: Kevin Su --- flytekit/types/structured/snowflake.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git 
a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index 9905b1954e..f1e42e5fec 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -7,7 +7,6 @@ import flytekit from flytekit import FlyteContext -from flytekit.configuration.plugin import get_plugin from flytekit.models import literals from flytekit.models.types import StructuredDatasetType from flytekit.types.structured.structured_dataset import ( @@ -24,10 +23,7 @@ def get_private_key() -> bytes: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization - if get_plugin().secret_requires_group(): - pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") - else: - pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") + pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") # cryptography needs str to be stripped and converted to bytes pk_string = pk_string.strip().encode() p_key = serialization.load_pem_private_key(pk_string, password=None, backend=default_backend()) From 3a7a9cd591fa642a1fbf175374cf86090e76138f Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 10:44:27 +0800 Subject: [PATCH 17/38] remove table Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 15 ++++++++++----- .../flytekitplugins/snowflake/agent.py | 2 -- .../flytekitplugins/snowflake/task.py | 3 --- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index f1e42e5fec..95bca5d470 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -6,7 +6,7 @@ from snowflake.connector.pandas_tools import write_pandas import flytekit -from flytekit import FlyteContext +from flytekit import FlyteContext, logger from flytekit.models import literals 
from flytekit.models.types import StructuredDatasetType from flytekit.types.structured.structured_dataset import ( @@ -17,13 +17,19 @@ ) SNOWFLAKE = "snowflake" +PROTOCOL_SEP = "\\/|://|:" def get_private_key() -> bytes: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization - pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") + try: + pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") + except Exception as e: + logger.info(f"Failed to get private key from secrets manager: {e}") + pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") + # cryptography needs str to be stripped and converted to bytes pk_string = pk_string.strip().encode() p_key = serialization.load_pem_private_key(pk_string, password=None, backend=default_backend()) @@ -42,7 +48,7 @@ def _write_to_sf(structured_dataset: StructuredDataset): raise ValueError("structured_dataset.uri cannot be None.") uri = structured_dataset.uri - _, user, account, warehouse, database, schema, table = re.split("\\/|://|:", uri) + _, user, account, warehouse, database, schema, table = re.split(PROTOCOL_SEP, uri) df = structured_dataset.dataframe conn = snowflake.connector.connect( @@ -59,7 +65,7 @@ def _read_from_sf( raise ValueError("structured_dataset.uri cannot be None.") uri = flyte_value.uri - _, user, account, warehouse, database, schema, query_id = re.split("\\/|://|:", uri) + _, user, account, warehouse, database, schema, query_id = re.split(PROTOCOL_SEP, uri) conn = snowflake.connector.connect( user=user, @@ -68,7 +74,6 @@ def _read_from_sf( database=database, schema=schema, warehouse=warehouse, - table="FLYTEAGENT.PUBLIC.TEST", ) cs = conn.cursor() diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index 2cbde4513f..38f7f49c04 100644 --- 
a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -25,7 +25,6 @@ class SnowflakeJobMetadata(ResourceMeta): database: str schema: str warehouse: str - table: str query_id: str has_output: bool @@ -92,7 +91,6 @@ async def create( database=config["database"], schema=config["schema"], warehouse=config["warehouse"], - table=config["table"], query_id=cs.sfqid, has_output=task_template.interface.outputs is not None and len(task_template.interface.outputs) > 0, ) diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py index adf083fc0b..13cd15bee0 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/task.py @@ -12,7 +12,6 @@ _DATABASE_FIELD = "database" _SCHEMA_FIELD = "schema" _WAREHOUSE_FIELD = "warehouse" -_TABLE_FIELD = "table" @dataclass @@ -34,7 +33,6 @@ class SnowflakeConfig(object): database: str schema: str warehouse: str - table: str class SnowflakeTask(AsyncAgentExecutorMixin, SQLTask[SnowflakeConfig]): @@ -90,7 +88,6 @@ def get_config(self, settings: SerializationSettings) -> Dict[str, str]: _DATABASE_FIELD: self.task_config.database, _SCHEMA_FIELD: self.task_config.schema, _WAREHOUSE_FIELD: self.task_config.warehouse, - _TABLE_FIELD: self.task_config.table, } def get_sql(self, settings: SerializationSettings) -> Optional[_task_model.Sql]: From 2704555ed809641fa84c2d19e8d78904c954ee40 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 10:49:10 +0800 Subject: [PATCH 18/38] add comment for get private key Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index 95bca5d470..cc13536ff3 100644 --- a/flytekit/types/structured/snowflake.py 
+++ b/flytekit/types/structured/snowflake.py @@ -24,13 +24,18 @@ def get_private_key() -> bytes: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization + """ + The first method is for Union SDK, the second is for Open Source. + The second method to get the secret is not recommended. + It will be removed once we complete the Flyte connection, which provides a new way to get the secret. + """ try: pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") except Exception as e: logger.info(f"Failed to get private key from secrets manager: {e}") pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") - # cryptography needs str to be stripped and converted to bytes + # Cryptography needs the string to be stripped and converted to bytes pk_string = pk_string.strip().encode() p_key = serialization.load_pem_private_key(pk_string, password=None, backend=default_backend()) From 469b86cec16a6788ca316fe95da8b24d2c3bb441 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 10:53:11 +0800 Subject: [PATCH 19/38] update comments: Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index cc13536ff3..b0178bc356 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -25,7 +25,7 @@ def get_private_key() -> bytes: from cryptography.hazmat.primitives import serialization """ - The first method is for Union SDK, the second is for Open Source. + The first method is for Union Products, the second is for Open Source. The second method to get the secret is not recommended. It will be removed once we complete the Flyte connection, which provides a new way to get the secret. 
""" From 378327f8e69ae453630da65778119021d7700e4d Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 10:54:06 +0800 Subject: [PATCH 20/38] Fix Tests Signed-off-by: Future-Outlier --- plugins/flytekit-snowflake/tests/test_agent.py | 2 -- plugins/flytekit-snowflake/tests/test_snowflake.py | 3 --- 2 files changed, 5 deletions(-) diff --git a/plugins/flytekit-snowflake/tests/test_agent.py b/plugins/flytekit-snowflake/tests/test_agent.py index 93473459f5..e63ddb9f85 100644 --- a/plugins/flytekit-snowflake/tests/test_agent.py +++ b/plugins/flytekit-snowflake/tests/test_agent.py @@ -55,7 +55,6 @@ async def test_snowflake_agent(mock_get_private_key): "database": "dummy_database", "schema": "dummy_schema", "warehouse": "dummy_warehouse", - "table": "dummy_table", } int_type = types.LiteralType(types.SimpleType.INTEGER) @@ -86,7 +85,6 @@ async def test_snowflake_agent(mock_get_private_key): snowflake_metadata = SnowflakeJobMetadata( user="dummy_user", account="dummy_account", - table="dummy_table", database="dummy_database", schema="dummy_schema", warehouse="dummy_warehouse", diff --git a/plugins/flytekit-snowflake/tests/test_snowflake.py b/plugins/flytekit-snowflake/tests/test_snowflake.py index a84cc0ac27..61db311c68 100644 --- a/plugins/flytekit-snowflake/tests/test_snowflake.py +++ b/plugins/flytekit-snowflake/tests/test_snowflake.py @@ -26,7 +26,6 @@ def test_serialization(): warehouse="my_warehouse", schema="my_schema", database="my_database", - table="my_table", ), query_template=query_template, # the schema literal's backend uri will be equal to the value of .raw_output_data @@ -75,7 +74,6 @@ def test_local_exec(): database="FLYTEAGENT", schema="PUBLIC", warehouse="COMPUTE_WH", - table="FLYTEAGENT.PUBLIC.TEST", ), query_template="select 1\n", # the schema literal's backend uri will be equal to the value of .raw_output_data @@ -97,7 +95,6 @@ def test_sql_template(): database="FLYTEAGENT", schema="PUBLIC", warehouse="COMPUTE_WH", - 
table="FLYTEAGENT.PUBLIC.TEST", ), query_template="""select 1 from\t custom where column = 1""", From d71ef8ffcf997bb57e9e3e9f4156f6867fb44144 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 10:57:06 +0800 Subject: [PATCH 21/38] update comments Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index b0178bc356..d70501c990 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -28,6 +28,7 @@ def get_private_key() -> bytes: The first method is for Union Products, the second is for Open Source. The second method to get the secret is not recommended. It will be removed once we complete the Flyte connection, which provides a new way to get the secret. + Note: This is discussed with Ketan Umare, the creator of Flyte. """ try: pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") From 6a8cd9a624ead73d84cb32871d1ac23a989d7658 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 10:57:48 +0800 Subject: [PATCH 22/38] update comments Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index d70501c990..0ca6c940c0 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -26,7 +26,7 @@ def get_private_key() -> bytes: """ The first method is for Union Products, the second is for Open Source. - The second method to get the secret is not recommended. + The second method to get the secret is not recommended. It will be removed once we complete the Flyte connection, which provides a new way to get the secret. Note: This is discussed with Ketan Umare, the creator of Flyte. 
""" From 503506321e89bf5c4a153766570633472f981306 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 14:55:15 +0800 Subject: [PATCH 23/38] Better Secrets Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index 0ca6c940c0..36bad8b557 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -24,17 +24,7 @@ def get_private_key() -> bytes: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization - """ - The first method is for Union Products, the second is for Open Source. - The second method to get the secret is not recommended. - It will be removed once we complete the Flyte connection, which provides a new way to get the secret. - Note: This is discussed with Ketan Umare, the creator of Flyte. 
- """ - try: - pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") - except Exception as e: - logger.info(f"Failed to get private key from secrets manager: {e}") - pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") + pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") # Cryptography needs the string to be stripped and converted to bytes pk_string = pk_string.strip().encode() From aaff3d2fb357c0435f964a6ccea2584a909e2169 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 15:55:36 +0800 Subject: [PATCH 24/38] use union secret Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index 36bad8b557..ca7ba8806d 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -6,7 +6,7 @@ from snowflake.connector.pandas_tools import write_pandas import flytekit -from flytekit import FlyteContext, logger +from flytekit import FlyteContext from flytekit.models import literals from flytekit.models.types import StructuredDatasetType from flytekit.types.structured.structured_dataset import ( @@ -24,7 +24,7 @@ def get_private_key() -> bytes: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization - pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") + pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") # Cryptography needs the string to be stripped and converted to bytes pk_string = pk_string.strip().encode() From 45a788d13a6bab213114744e3ca52b8cd5bb8a6f Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 16:05:55 +0800 Subject: [PATCH 25/38] Update Changes Signed-off-by: Future-Outlier 
--- flytekit/models/security.py | 2 ++ flytekit/types/structured/snowflake.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/flytekit/models/security.py b/flytekit/models/security.py index e210c910b7..f39f2fb7ab 100644 --- a/flytekit/models/security.py +++ b/flytekit/models/security.py @@ -47,6 +47,8 @@ def __post_init__(self): # Only check for the groups during registration. execution = FlyteContextManager.current_context().execution_state in_registration_context = execution.mode is None + if get_plugin().secret_requires_group() is False: + self.group = None if in_registration_context and get_plugin().secret_requires_group() and self.group is None: raise ValueError("Group is a required parameter") diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index ca7ba8806d..c603b55669 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -24,7 +24,7 @@ def get_private_key() -> bytes: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization - pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") + pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") # Cryptography needs the string to be stripped and converted to bytes pk_string = pk_string.strip().encode() From dfe6f97bddf0155a7a47a2693645cdfcc8c8f8ef Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 16:13:48 +0800 Subject: [PATCH 26/38] use if not get_plugin().secret_requires_group() Signed-off-by: Future-Outlier --- flytekit/core/context_manager.py | 3 +++ flytekit/models/security.py | 2 -- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/flytekit/core/context_manager.py b/flytekit/core/context_manager.py index 340046e941..49adc21e97 100644 --- a/flytekit/core/context_manager.py +++ b/flytekit/core/context_manager.py @@ -367,6 +367,9 @@ def get( 
Retrieves a secret using the resolution order -> Env followed by file. If not found raises a ValueError param encode_mode, defines the mode to open files, it can either be "r" to read file, or "rb" to read binary file """ + from flytekit.configuration.plugin import get_plugin + if not get_plugin().secret_requires_group(): + group = None env_var = self.get_secrets_env_var(group, key, group_version) fpath = self.get_secrets_file(group, key, group_version) v = os.environ.get(env_var) diff --git a/flytekit/models/security.py b/flytekit/models/security.py index f39f2fb7ab..e210c910b7 100644 --- a/flytekit/models/security.py +++ b/flytekit/models/security.py @@ -47,8 +47,6 @@ def __post_init__(self): # Only check for the groups during registration. execution = FlyteContextManager.current_context().execution_state in_registration_context = execution.mode is None - if get_plugin().secret_requires_group() is False: - self.group = None if in_registration_context and get_plugin().secret_requires_group() and self.group is None: raise ValueError("Group is a required parameter") From 03e8b6989542bfbc7c3cb7e8bd097534c8895016 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 16:44:25 +0800 Subject: [PATCH 27/38] Use Union SDK Signed-off-by: Future-Outlier --- flytekit/core/context_manager.py | 3 --- flytekit/types/structured/snowflake.py | 7 ++++++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/flytekit/core/context_manager.py b/flytekit/core/context_manager.py index 49adc21e97..340046e941 100644 --- a/flytekit/core/context_manager.py +++ b/flytekit/core/context_manager.py @@ -367,9 +367,6 @@ def get( Retrieves a secret using the resolution order -> Env followed by file. 
If not found raises a ValueError param encode_mode, defines the mode to open files, it can either be "r" to read file, or "rb" to read binary file """ - from flytekit.configuration.plugin import get_plugin - if not get_plugin().secret_requires_group(): - group = None env_var = self.get_secrets_env_var(group, key, group_version) fpath = self.get_secrets_file(group, key, group_version) v = os.environ.get(env_var) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index c603b55669..db8608e838 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -24,7 +24,12 @@ def get_private_key() -> bytes: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization - pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") + from flytekit.configuration.plugin import get_plugin + + if get_plugin().secret_requires_group(): + pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") + else: + pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") # Cryptography needs the string to be stripped and converted to bytes pk_string = pk_string.strip().encode() From 41e2a195141a5721a07e5e2068601ade71faa745 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 30 Jul 2024 16:53:05 +0800 Subject: [PATCH 28/38] Update Signed-off-by: Future-Outlier --- .../flytekit-snowflake/flytekitplugins/snowflake/agent.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py index 38f7f49c04..831b431afa 100644 --- a/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py +++ b/plugins/flytekit-snowflake/flytekitplugins/snowflake/agent.py @@ -3,11 +3,10 @@ from flyteidl.core.execution_pb2 import TaskExecution, 
TaskLog -import flytekit from flytekit import FlyteContextManager, StructuredDataset, logger from flytekit.core.type_engine import TypeEngine from flytekit.extend.backend.base_agent import AgentRegistry, AsyncAgentBase, Resource, ResourceMeta -from flytekit.extend.backend.utils import convert_to_flyte_phase +from flytekit.extend.backend.utils import convert_to_flyte_phase, get_agent_secret from flytekit.models import literals from flytekit.models.literals import LiteralMap from flytekit.models.task import TaskTemplate @@ -33,7 +32,7 @@ def get_private_key(): from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization - pk_string = flytekit.current_context().secrets.get(SNOWFLAKE_PRIVATE_KEY, encode_mode="r") + pk_string = get_agent_secret(SNOWFLAKE_PRIVATE_KEY) # cryptography needs str to be stripped and converted to bytes pk_string = pk_string.strip().encode() p_key = serialization.load_pem_private_key(pk_string, password=None, backend=default_backend()) From af5a2f169119bddbf2554ab5a6274e161209dd1d Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 31 Jul 2024 07:08:14 +0800 Subject: [PATCH 29/38] Fix Secrets Signed-off-by: Future-Outlier --- flytekit/core/context_manager.py | 6 + flytekit/types/structured/snowflake.py | 5 +- node_modules/.package-lock.json | 149 ++ node_modules/base64-js/LICENSE | 21 + node_modules/base64-js/README.md | 34 + node_modules/base64-js/base64js.min.js | 1 + node_modules/base64-js/index.d.ts | 3 + node_modules/base64-js/index.js | 150 ++ node_modules/base64-js/package.json | 47 + node_modules/bl/.github/dependabot.yml | 16 + .../bl/.github/workflows/test-and-release.yml | 61 + node_modules/bl/BufferList.d.ts | 382 +++ node_modules/bl/BufferList.js | 396 ++++ node_modules/bl/CHANGELOG.md | 17 + node_modules/bl/LICENSE.md | 13 + node_modules/bl/README.md | 247 ++ node_modules/bl/bl.js | 84 + node_modules/bl/index.d.ts | 88 + node_modules/bl/package.json | 123 + 
node_modules/bl/test/convert.js | 21 + node_modules/bl/test/indexOf.js | 492 ++++ node_modules/bl/test/isBufferList.js | 32 + node_modules/bl/test/test.js | 914 +++++++ node_modules/buffer/AUTHORS.md | 73 + node_modules/buffer/LICENSE | 21 + node_modules/buffer/README.md | 410 ++++ node_modules/buffer/index.d.ts | 194 ++ node_modules/buffer/index.js | 2106 +++++++++++++++++ node_modules/buffer/package.json | 93 + node_modules/ieee754/LICENSE | 11 + node_modules/ieee754/README.md | 51 + node_modules/ieee754/index.d.ts | 10 + node_modules/ieee754/index.js | 85 + node_modules/ieee754/package.json | 52 + node_modules/inherits/LICENSE | 16 + node_modules/inherits/README.md | 42 + node_modules/inherits/inherits.js | 9 + node_modules/inherits/inherits_browser.js | 27 + node_modules/inherits/package.json | 29 + .../msgpack5/.github/workflows/ci.yml | 30 + node_modules/msgpack5/.jshintrc | 7 + node_modules/msgpack5/CONTRIBUTING.md | 41 + node_modules/msgpack5/LICENSE | 21 + node_modules/msgpack5/README.md | 250 ++ .../msgpack5/benchmarks/encodedecode.js | 21 + .../msgpack5/benchmarks/parseshortmap.js | 21 + node_modules/msgpack5/example.js | 44 + node_modules/msgpack5/index.js | 91 + node_modules/msgpack5/lib/codecs/DateCodec.js | 131 + node_modules/msgpack5/lib/decoder.js | 268 +++ node_modules/msgpack5/lib/encoder.js | 298 +++ node_modules/msgpack5/lib/helpers.js | 20 + node_modules/msgpack5/lib/streams.js | 90 + node_modules/msgpack5/package.json | 52 + node_modules/msgpack5/spec.html | 459 ++++ node_modules/msgpack5/spec.md | 499 ++++ .../msgpack5/test/1-byte-length-buffers.js | 79 + .../msgpack5/test/1-byte-length-exts.js | 102 + .../msgpack5/test/1-byte-length-strings.js | 80 + .../test/1-byte-length-uint8arrays.js | 43 + .../msgpack5/test/15-elements-arrays.js | 84 + .../msgpack5/test/15-elements-maps.js | 119 + .../msgpack5/test/16-bits-signed-integers.js | 56 + .../test/16-bits-unsigned-integers.js | 56 + .../msgpack5/test/2-bytes-length-arrays.js | 84 + 
.../msgpack5/test/2-bytes-length-buffers.js | 79 + .../msgpack5/test/2-bytes-length-exts.js | 86 + .../msgpack5/test/2-bytes-length-maps.js | 85 + .../msgpack5/test/2-bytes-length-strings.js | 87 + .../test/2-bytes-length-uint8arrays.js | 43 + .../msgpack5/test/31-chars-strings.js | 59 + .../msgpack5/test/32-bits-signed-integers.js | 55 + .../test/32-bits-unsigned-integers.js | 56 + .../msgpack5/test/32-bytes-strings.js | 39 + .../msgpack5/test/4-bytes-length-arrays.js | 78 + .../msgpack5/test/4-bytes-length-buffers.js | 78 + .../msgpack5/test/4-bytes-length-exts.js | 86 + .../msgpack5/test/4-bytes-length-strings.js | 83 + .../test/4-bytes-length-uint8arrays.js | 42 + .../msgpack5/test/5-bits-negative-integers.js | 36 + .../msgpack5/test/64-bits-signed-integers.js | 48 + .../test/64-bits-unsigned-integers.js | 48 + .../msgpack5/test/7-bits-positive-integers.js | 36 + .../msgpack5/test/8-bits-positive-integers.js | 51 + .../msgpack5/test/8-bits-signed-integers.js | 53 + node_modules/msgpack5/test/NaN.js | 52 + node_modules/msgpack5/test/booleans.js | 21 + .../msgpack5/test/compatibility-mode.js | 73 + node_modules/msgpack5/test/datenull.js | 13 + node_modules/msgpack5/test/doubles.js | 57 + .../msgpack5/test/ext-custom-encode-check.js | 64 + node_modules/msgpack5/test/fixexts.js | 497 ++++ node_modules/msgpack5/test/floats.js | 117 + node_modules/msgpack5/test/functions.js | 19 + .../msgpack5/test/levelup-encoding.js | 69 + .../msgpack5/test/map-with-object-key.js | 25 + .../msgpack5/test/nested-containers.js | 44 + node_modules/msgpack5/test/null.js | 16 + .../msgpack5/test/numerictypeasserts.js | 49 + .../test/object-prototype-poisoning.js | 49 + .../msgpack5/test/object-with-arrays.js | 69 + .../msgpack5/test/object-with-buffers.js | 33 + .../msgpack5/test/object-with-many-keys.js | 71 + .../msgpack5/test/object-with-strings.js | 32 + node_modules/msgpack5/test/prefer-map.js | 71 + node_modules/msgpack5/test/sparse-arrays.js | 18 + 
node_modules/msgpack5/test/streams.js | 261 ++ node_modules/msgpack5/test/timestamps.js | 116 + node_modules/readable-stream/CONTRIBUTING.md | 38 + node_modules/readable-stream/GOVERNANCE.md | 136 ++ node_modules/readable-stream/LICENSE | 47 + node_modules/readable-stream/README.md | 106 + .../readable-stream/errors-browser.js | 127 + node_modules/readable-stream/errors.js | 116 + .../readable-stream/experimentalWarning.js | 17 + .../readable-stream/lib/_stream_duplex.js | 126 + .../lib/_stream_passthrough.js | 37 + .../readable-stream/lib/_stream_readable.js | 1027 ++++++++ .../readable-stream/lib/_stream_transform.js | 190 ++ .../readable-stream/lib/_stream_writable.js | 641 +++++ .../lib/internal/streams/async_iterator.js | 180 ++ .../lib/internal/streams/buffer_list.js | 183 ++ .../lib/internal/streams/destroy.js | 96 + .../lib/internal/streams/end-of-stream.js | 86 + .../lib/internal/streams/from-browser.js | 3 + .../lib/internal/streams/from.js | 52 + .../lib/internal/streams/pipeline.js | 86 + .../lib/internal/streams/state.js | 22 + .../lib/internal/streams/stream-browser.js | 1 + .../lib/internal/streams/stream.js | 1 + node_modules/readable-stream/package.json | 68 + .../readable-stream/readable-browser.js | 9 + node_modules/readable-stream/readable.js | 16 + node_modules/safe-buffer/LICENSE | 21 + node_modules/safe-buffer/README.md | 584 +++++ node_modules/safe-buffer/index.d.ts | 187 ++ node_modules/safe-buffer/index.js | 65 + node_modules/safe-buffer/package.json | 51 + node_modules/string_decoder/LICENSE | 48 + node_modules/string_decoder/README.md | 47 + .../string_decoder/lib/string_decoder.js | 296 +++ node_modules/string_decoder/package.json | 34 + node_modules/util-deprecate/History.md | 16 + node_modules/util-deprecate/LICENSE | 24 + node_modules/util-deprecate/README.md | 53 + node_modules/util-deprecate/browser.js | 67 + node_modules/util-deprecate/node.js | 6 + node_modules/util-deprecate/package.json | 27 + package-lock.json | 154 ++ 
package.json | 5 + 150 files changed, 17733 insertions(+), 4 deletions(-) create mode 100644 node_modules/.package-lock.json create mode 100644 node_modules/base64-js/LICENSE create mode 100644 node_modules/base64-js/README.md create mode 100644 node_modules/base64-js/base64js.min.js create mode 100644 node_modules/base64-js/index.d.ts create mode 100644 node_modules/base64-js/index.js create mode 100644 node_modules/base64-js/package.json create mode 100644 node_modules/bl/.github/dependabot.yml create mode 100644 node_modules/bl/.github/workflows/test-and-release.yml create mode 100644 node_modules/bl/BufferList.d.ts create mode 100644 node_modules/bl/BufferList.js create mode 100644 node_modules/bl/CHANGELOG.md create mode 100644 node_modules/bl/LICENSE.md create mode 100644 node_modules/bl/README.md create mode 100644 node_modules/bl/bl.js create mode 100644 node_modules/bl/index.d.ts create mode 100644 node_modules/bl/package.json create mode 100644 node_modules/bl/test/convert.js create mode 100644 node_modules/bl/test/indexOf.js create mode 100644 node_modules/bl/test/isBufferList.js create mode 100644 node_modules/bl/test/test.js create mode 100644 node_modules/buffer/AUTHORS.md create mode 100644 node_modules/buffer/LICENSE create mode 100644 node_modules/buffer/README.md create mode 100644 node_modules/buffer/index.d.ts create mode 100644 node_modules/buffer/index.js create mode 100644 node_modules/buffer/package.json create mode 100644 node_modules/ieee754/LICENSE create mode 100644 node_modules/ieee754/README.md create mode 100644 node_modules/ieee754/index.d.ts create mode 100644 node_modules/ieee754/index.js create mode 100644 node_modules/ieee754/package.json create mode 100644 node_modules/inherits/LICENSE create mode 100644 node_modules/inherits/README.md create mode 100644 node_modules/inherits/inherits.js create mode 100644 node_modules/inherits/inherits_browser.js create mode 100644 node_modules/inherits/package.json create mode 100644 
node_modules/msgpack5/.github/workflows/ci.yml create mode 100644 node_modules/msgpack5/.jshintrc create mode 100644 node_modules/msgpack5/CONTRIBUTING.md create mode 100644 node_modules/msgpack5/LICENSE create mode 100644 node_modules/msgpack5/README.md create mode 100644 node_modules/msgpack5/benchmarks/encodedecode.js create mode 100644 node_modules/msgpack5/benchmarks/parseshortmap.js create mode 100644 node_modules/msgpack5/example.js create mode 100644 node_modules/msgpack5/index.js create mode 100644 node_modules/msgpack5/lib/codecs/DateCodec.js create mode 100644 node_modules/msgpack5/lib/decoder.js create mode 100644 node_modules/msgpack5/lib/encoder.js create mode 100644 node_modules/msgpack5/lib/helpers.js create mode 100644 node_modules/msgpack5/lib/streams.js create mode 100644 node_modules/msgpack5/package.json create mode 100644 node_modules/msgpack5/spec.html create mode 100644 node_modules/msgpack5/spec.md create mode 100644 node_modules/msgpack5/test/1-byte-length-buffers.js create mode 100644 node_modules/msgpack5/test/1-byte-length-exts.js create mode 100644 node_modules/msgpack5/test/1-byte-length-strings.js create mode 100644 node_modules/msgpack5/test/1-byte-length-uint8arrays.js create mode 100644 node_modules/msgpack5/test/15-elements-arrays.js create mode 100644 node_modules/msgpack5/test/15-elements-maps.js create mode 100644 node_modules/msgpack5/test/16-bits-signed-integers.js create mode 100644 node_modules/msgpack5/test/16-bits-unsigned-integers.js create mode 100644 node_modules/msgpack5/test/2-bytes-length-arrays.js create mode 100644 node_modules/msgpack5/test/2-bytes-length-buffers.js create mode 100644 node_modules/msgpack5/test/2-bytes-length-exts.js create mode 100644 node_modules/msgpack5/test/2-bytes-length-maps.js create mode 100644 node_modules/msgpack5/test/2-bytes-length-strings.js create mode 100644 node_modules/msgpack5/test/2-bytes-length-uint8arrays.js create mode 100644 node_modules/msgpack5/test/31-chars-strings.js 
create mode 100644 node_modules/msgpack5/test/32-bits-signed-integers.js create mode 100644 node_modules/msgpack5/test/32-bits-unsigned-integers.js create mode 100644 node_modules/msgpack5/test/32-bytes-strings.js create mode 100644 node_modules/msgpack5/test/4-bytes-length-arrays.js create mode 100644 node_modules/msgpack5/test/4-bytes-length-buffers.js create mode 100644 node_modules/msgpack5/test/4-bytes-length-exts.js create mode 100644 node_modules/msgpack5/test/4-bytes-length-strings.js create mode 100644 node_modules/msgpack5/test/4-bytes-length-uint8arrays.js create mode 100644 node_modules/msgpack5/test/5-bits-negative-integers.js create mode 100644 node_modules/msgpack5/test/64-bits-signed-integers.js create mode 100644 node_modules/msgpack5/test/64-bits-unsigned-integers.js create mode 100644 node_modules/msgpack5/test/7-bits-positive-integers.js create mode 100644 node_modules/msgpack5/test/8-bits-positive-integers.js create mode 100644 node_modules/msgpack5/test/8-bits-signed-integers.js create mode 100644 node_modules/msgpack5/test/NaN.js create mode 100644 node_modules/msgpack5/test/booleans.js create mode 100644 node_modules/msgpack5/test/compatibility-mode.js create mode 100644 node_modules/msgpack5/test/datenull.js create mode 100644 node_modules/msgpack5/test/doubles.js create mode 100644 node_modules/msgpack5/test/ext-custom-encode-check.js create mode 100644 node_modules/msgpack5/test/fixexts.js create mode 100644 node_modules/msgpack5/test/floats.js create mode 100644 node_modules/msgpack5/test/functions.js create mode 100644 node_modules/msgpack5/test/levelup-encoding.js create mode 100644 node_modules/msgpack5/test/map-with-object-key.js create mode 100644 node_modules/msgpack5/test/nested-containers.js create mode 100644 node_modules/msgpack5/test/null.js create mode 100644 node_modules/msgpack5/test/numerictypeasserts.js create mode 100644 node_modules/msgpack5/test/object-prototype-poisoning.js create mode 100644 
node_modules/msgpack5/test/object-with-arrays.js create mode 100644 node_modules/msgpack5/test/object-with-buffers.js create mode 100644 node_modules/msgpack5/test/object-with-many-keys.js create mode 100644 node_modules/msgpack5/test/object-with-strings.js create mode 100644 node_modules/msgpack5/test/prefer-map.js create mode 100644 node_modules/msgpack5/test/sparse-arrays.js create mode 100644 node_modules/msgpack5/test/streams.js create mode 100644 node_modules/msgpack5/test/timestamps.js create mode 100644 node_modules/readable-stream/CONTRIBUTING.md create mode 100644 node_modules/readable-stream/GOVERNANCE.md create mode 100644 node_modules/readable-stream/LICENSE create mode 100644 node_modules/readable-stream/README.md create mode 100644 node_modules/readable-stream/errors-browser.js create mode 100644 node_modules/readable-stream/errors.js create mode 100644 node_modules/readable-stream/experimentalWarning.js create mode 100644 node_modules/readable-stream/lib/_stream_duplex.js create mode 100644 node_modules/readable-stream/lib/_stream_passthrough.js create mode 100644 node_modules/readable-stream/lib/_stream_readable.js create mode 100644 node_modules/readable-stream/lib/_stream_transform.js create mode 100644 node_modules/readable-stream/lib/_stream_writable.js create mode 100644 node_modules/readable-stream/lib/internal/streams/async_iterator.js create mode 100644 node_modules/readable-stream/lib/internal/streams/buffer_list.js create mode 100644 node_modules/readable-stream/lib/internal/streams/destroy.js create mode 100644 node_modules/readable-stream/lib/internal/streams/end-of-stream.js create mode 100644 node_modules/readable-stream/lib/internal/streams/from-browser.js create mode 100644 node_modules/readable-stream/lib/internal/streams/from.js create mode 100644 node_modules/readable-stream/lib/internal/streams/pipeline.js create mode 100644 node_modules/readable-stream/lib/internal/streams/state.js create mode 100644 
node_modules/readable-stream/lib/internal/streams/stream-browser.js create mode 100644 node_modules/readable-stream/lib/internal/streams/stream.js create mode 100644 node_modules/readable-stream/package.json create mode 100644 node_modules/readable-stream/readable-browser.js create mode 100644 node_modules/readable-stream/readable.js create mode 100644 node_modules/safe-buffer/LICENSE create mode 100644 node_modules/safe-buffer/README.md create mode 100644 node_modules/safe-buffer/index.d.ts create mode 100644 node_modules/safe-buffer/index.js create mode 100644 node_modules/safe-buffer/package.json create mode 100644 node_modules/string_decoder/LICENSE create mode 100644 node_modules/string_decoder/README.md create mode 100644 node_modules/string_decoder/lib/string_decoder.js create mode 100644 node_modules/string_decoder/package.json create mode 100644 node_modules/util-deprecate/History.md create mode 100644 node_modules/util-deprecate/LICENSE create mode 100644 node_modules/util-deprecate/README.md create mode 100644 node_modules/util-deprecate/browser.js create mode 100644 node_modules/util-deprecate/node.js create mode 100644 node_modules/util-deprecate/package.json create mode 100644 package-lock.json create mode 100644 package.json diff --git a/flytekit/core/context_manager.py b/flytekit/core/context_manager.py index 340046e941..13691162d5 100644 --- a/flytekit/core/context_manager.py +++ b/flytekit/core/context_manager.py @@ -367,6 +367,12 @@ def get( Retrieves a secret using the resolution order -> Env followed by file. 
If not found raises a ValueError param encode_mode, defines the mode to open files, it can either be "r" to read file, or "rb" to read binary file """ + + from flytekit.configuration.plugin import get_plugin + + if not get_plugin().secret_requires_group(): + group, group_version = None, None + env_var = self.get_secrets_env_var(group, key, group_version) fpath = self.get_secrets_file(group, key, group_version) v = os.environ.get(env_var) diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index db8608e838..5b2104041d 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -26,10 +26,7 @@ def get_private_key() -> bytes: from flytekit.configuration.plugin import get_plugin - if get_plugin().secret_requires_group(): - pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") - else: - pk_string = flytekit.current_context().secrets.get(None, "snowflake", encode_mode="r") + pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") # Cryptography needs the string to be stripped and converted to bytes pk_string = pk_string.strip().encode() diff --git a/node_modules/.package-lock.json b/node_modules/.package-lock.json new file mode 100644 index 0000000000..d0d7582808 --- /dev/null +++ b/node_modules/.package-lock.json @@ -0,0 +1,149 @@ +{ + "name": "flytekit", + "lockfileVersion": 3, + "requires": true, + "packages": { + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + 
"node_modules/bl": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-5.1.0.tgz", + "integrity": "sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ==", + "license": "MIT", + "dependencies": { + "buffer": "^6.0.3", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/msgpack5": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/msgpack5/-/msgpack5-6.0.2.tgz", + "integrity": "sha512-kBSpECAWslrciRF3jy6HkMckNa14j3VZwNUUe1ONO/yihs19MskiFnsWXm0Q0aPkDYDBRFvTKkEuEDY+HVxBvQ==", + "license": "MIT", + "dependencies": { + "bl": "^5.0.0", + "inherits": "^2.0.3", 
+ "readable-stream": "^3.0.0", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + } + } +} diff --git a/node_modules/base64-js/LICENSE b/node_modules/base64-js/LICENSE new file mode 100644 index 0000000000..6d52b8acfb --- /dev/null +++ b/node_modules/base64-js/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Jameson Little + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated 
documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/node_modules/base64-js/README.md b/node_modules/base64-js/README.md new file mode 100644 index 0000000000..b42a48f416 --- /dev/null +++ b/node_modules/base64-js/README.md @@ -0,0 +1,34 @@ +base64-js +========= + +`base64-js` does basic base64 encoding/decoding in pure JS. + +[![build status](https://secure.travis-ci.org/beatgammit/base64-js.png)](http://travis-ci.org/beatgammit/base64-js) + +Many browsers already have base64 encoding/decoding functionality, but it is for text data, not all-purpose binary data. + +Sometimes encoding/decoding binary data in the browser is useful, and that is what this module does. 
+ +## install + +With [npm](https://npmjs.org) do: + +`npm install base64-js` and `var base64js = require('base64-js')` + +For use in web browsers do: + +`` + +[Get supported base64-js with the Tidelift Subscription](https://tidelift.com/subscription/pkg/npm-base64-js?utm_source=npm-base64-js&utm_medium=referral&utm_campaign=readme) + +## methods + +`base64js` has three exposed functions, `byteLength`, `toByteArray` and `fromByteArray`, which both take a single argument. + +* `byteLength` - Takes a base64 string and returns length of byte array +* `toByteArray` - Takes a base64 string and returns a byte array +* `fromByteArray` - Takes a byte array and returns a base64 string + +## license + +MIT diff --git a/node_modules/base64-js/base64js.min.js b/node_modules/base64-js/base64js.min.js new file mode 100644 index 0000000000..908ac83fd1 --- /dev/null +++ b/node_modules/base64-js/base64js.min.js @@ -0,0 +1 @@ +(function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;b="undefined"==typeof window?"undefined"==typeof global?"undefined"==typeof self?this:self:global:window,b.base64js=a()}})(function(){return function(){function b(d,e,g){function a(j,i){if(!e[j]){if(!d[j]){var f="function"==typeof require&&require;if(!i&&f)return f(j,!0);if(h)return h(j,!0);var c=new Error("Cannot find module '"+j+"'");throw c.code="MODULE_NOT_FOUND",c}var k=e[j]={exports:{}};d[j][0].call(k.exports,function(b){var c=d[j][1][b];return a(c||b)},k,k.exports,b,d,e,g)}return e[j].exports}for(var h="function"==typeof require&&require,c=0;c>16,j[k++]=255&b>>8,j[k++]=255&b;return 2===h&&(b=l[a.charCodeAt(c)]<<2|l[a.charCodeAt(c+1)]>>4,j[k++]=255&b),1===h&&(b=l[a.charCodeAt(c)]<<10|l[a.charCodeAt(c+1)]<<4|l[a.charCodeAt(c+2)]>>2,j[k++]=255&b>>8,j[k++]=255&b),j}function g(a){return k[63&a>>18]+k[63&a>>12]+k[63&a>>6]+k[63&a]}function h(a,b,c){for(var d,e=[],f=b;fj?j:g+f));return 
1===d?(b=a[c-1],e.push(k[b>>2]+k[63&b<<4]+"==")):2===d&&(b=(a[c-2]<<8)+a[c-1],e.push(k[b>>10]+k[63&b>>4]+k[63&b<<2]+"=")),e.join("")}c.byteLength=function(a){var b=d(a),c=b[0],e=b[1];return 3*(c+e)/4-e},c.toByteArray=f,c.fromByteArray=j;for(var k=[],l=[],m="undefined"==typeof Uint8Array?Array:Uint8Array,n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",o=0,p=n.length;o 0) { + throw new Error('Invalid string. Length must be a multiple of 4') + } + + // Trim off extra bytes after placeholder bytes are found + // See: https://github.com/beatgammit/base64-js/issues/42 + var validLen = b64.indexOf('=') + if (validLen === -1) validLen = len + + var placeHoldersLen = validLen === len + ? 0 + : 4 - (validLen % 4) + + return [validLen, placeHoldersLen] +} + +// base64 is 4/3 + up to two characters of the original data +function byteLength (b64) { + var lens = getLens(b64) + var validLen = lens[0] + var placeHoldersLen = lens[1] + return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen +} + +function _byteLength (b64, validLen, placeHoldersLen) { + return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen +} + +function toByteArray (b64) { + var tmp + var lens = getLens(b64) + var validLen = lens[0] + var placeHoldersLen = lens[1] + + var arr = new Arr(_byteLength(b64, validLen, placeHoldersLen)) + + var curByte = 0 + + // if there are placeholders, only get up to the last complete 4 chars + var len = placeHoldersLen > 0 + ? 
validLen - 4 + : validLen + + var i + for (i = 0; i < len; i += 4) { + tmp = + (revLookup[b64.charCodeAt(i)] << 18) | + (revLookup[b64.charCodeAt(i + 1)] << 12) | + (revLookup[b64.charCodeAt(i + 2)] << 6) | + revLookup[b64.charCodeAt(i + 3)] + arr[curByte++] = (tmp >> 16) & 0xFF + arr[curByte++] = (tmp >> 8) & 0xFF + arr[curByte++] = tmp & 0xFF + } + + if (placeHoldersLen === 2) { + tmp = + (revLookup[b64.charCodeAt(i)] << 2) | + (revLookup[b64.charCodeAt(i + 1)] >> 4) + arr[curByte++] = tmp & 0xFF + } + + if (placeHoldersLen === 1) { + tmp = + (revLookup[b64.charCodeAt(i)] << 10) | + (revLookup[b64.charCodeAt(i + 1)] << 4) | + (revLookup[b64.charCodeAt(i + 2)] >> 2) + arr[curByte++] = (tmp >> 8) & 0xFF + arr[curByte++] = tmp & 0xFF + } + + return arr +} + +function tripletToBase64 (num) { + return lookup[num >> 18 & 0x3F] + + lookup[num >> 12 & 0x3F] + + lookup[num >> 6 & 0x3F] + + lookup[num & 0x3F] +} + +function encodeChunk (uint8, start, end) { + var tmp + var output = [] + for (var i = start; i < end; i += 3) { + tmp = + ((uint8[i] << 16) & 0xFF0000) + + ((uint8[i + 1] << 8) & 0xFF00) + + (uint8[i + 2] & 0xFF) + output.push(tripletToBase64(tmp)) + } + return output.join('') +} + +function fromByteArray (uint8) { + var tmp + var len = uint8.length + var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes + var parts = [] + var maxChunkLength = 16383 // must be multiple of 3 + + // go through the array every three bytes, we'll deal with trailing stuff later + for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) { + parts.push(encodeChunk(uint8, i, (i + maxChunkLength) > len2 ? 
len2 : (i + maxChunkLength))) + } + + // pad the end with zeros, but make sure to not forget the extra bytes + if (extraBytes === 1) { + tmp = uint8[len - 1] + parts.push( + lookup[tmp >> 2] + + lookup[(tmp << 4) & 0x3F] + + '==' + ) + } else if (extraBytes === 2) { + tmp = (uint8[len - 2] << 8) + uint8[len - 1] + parts.push( + lookup[tmp >> 10] + + lookup[(tmp >> 4) & 0x3F] + + lookup[(tmp << 2) & 0x3F] + + '=' + ) + } + + return parts.join('') +} diff --git a/node_modules/base64-js/package.json b/node_modules/base64-js/package.json new file mode 100644 index 0000000000..c3972e39f2 --- /dev/null +++ b/node_modules/base64-js/package.json @@ -0,0 +1,47 @@ +{ + "name": "base64-js", + "description": "Base64 encoding/decoding in pure JS", + "version": "1.5.1", + "author": "T. Jameson Little ", + "typings": "index.d.ts", + "bugs": { + "url": "https://github.com/beatgammit/base64-js/issues" + }, + "devDependencies": { + "babel-minify": "^0.5.1", + "benchmark": "^2.1.4", + "browserify": "^16.3.0", + "standard": "*", + "tape": "4.x" + }, + "homepage": "https://github.com/beatgammit/base64-js", + "keywords": [ + "base64" + ], + "license": "MIT", + "main": "index.js", + "repository": { + "type": "git", + "url": "git://github.com/beatgammit/base64-js.git" + }, + "scripts": { + "build": "browserify -s base64js -r ./ | minify > base64js.min.js", + "lint": "standard", + "test": "npm run lint && npm run unit", + "unit": "tape test/*.js" + }, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] +} diff --git a/node_modules/bl/.github/dependabot.yml b/node_modules/bl/.github/dependabot.yml new file mode 100644 index 0000000000..f4689933fb --- /dev/null +++ b/node_modules/bl/.github/dependabot.yml @@ -0,0 +1,16 @@ +version: 2 +updates: + - package-ecosystem: 'github-actions' + directory: '/' + schedule: 
+ interval: 'daily' + commit-message: + prefix: 'chore' + include: 'scope' + - package-ecosystem: 'npm' + directory: '/' + schedule: + interval: 'daily' + commit-message: + prefix: 'chore' + include: 'scope' diff --git a/node_modules/bl/.github/workflows/test-and-release.yml b/node_modules/bl/.github/workflows/test-and-release.yml new file mode 100644 index 0000000000..65887a0296 --- /dev/null +++ b/node_modules/bl/.github/workflows/test-and-release.yml @@ -0,0 +1,61 @@ +name: Test & Maybe Release +on: [push, pull_request] +jobs: + test: + strategy: + fail-fast: false + matrix: + node: [14.x, 16.x, 18.x, lts/*, current] + os: [macos-latest, ubuntu-latest, windows-latest] + runs-on: ${{ matrix.os }} + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + - name: Use Node.js ${{ matrix.node }} + uses: actions/setup-node@v3.5.1 + with: + node-version: ${{ matrix.node }} + - name: Install Dependencies + run: | + npm install --no-progress + - name: Run tests + run: | + npm config set script-shell bash + npm run test:ci + release: + name: Release + needs: test + runs-on: ubuntu-latest + if: github.event_name == 'push' && github.ref == 'refs/heads/master' + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Setup Node.js + uses: actions/setup-node@v3.5.1 + with: + node-version: 14 + - name: Install dependencies + run: | + npm install --no-progress --no-package-lock --no-save + - name: Build + run: | + npm run build + - name: Install plugins + run: | + npm install \ + @semantic-release/commit-analyzer \ + conventional-changelog-conventionalcommits \ + @semantic-release/release-notes-generator \ + @semantic-release/npm \ + @semantic-release/github \ + @semantic-release/git \ + @semantic-release/changelog \ + --no-progress --no-package-lock --no-save + - name: Release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NPM_TOKEN: ${{ secrets.NPM_TOKEN }} + run: npx semantic-release + diff --git 
a/node_modules/bl/BufferList.d.ts b/node_modules/bl/BufferList.d.ts new file mode 100644 index 0000000000..1561583997 --- /dev/null +++ b/node_modules/bl/BufferList.d.ts @@ -0,0 +1,382 @@ +export type BufferListAcceptedTypes = + | Buffer + | BufferList + | Uint8Array + | BufferListAcceptedTypes[] + | string + | number; + +export interface BufferListConstructor { + new (initData?: BufferListAcceptedTypes): BufferList; + (initData?: BufferListAcceptedTypes): BufferList; + + /** + * Determines if the passed object is a BufferList. It will return true + * if the passed object is an instance of BufferList or BufferListStream + * and false otherwise. + * + * N.B. this won't return true for BufferList or BufferListStream instances + * created by versions of this library before this static method was added. + * + * @param other + */ + + isBufferList(other: unknown): boolean; +} + +interface BufferList { + prototype: Object + + /** + * Get the length of the list in bytes. This is the sum of the lengths + * of all of the buffers contained in the list, minus any initial offset + * for a semi-consumed buffer at the beginning. Should accurately + * represent the total number of bytes that can be read from the list. + */ + + length: number; + + /** + * Adds an additional buffer or BufferList to the internal list. + * this is returned so it can be chained. + * + * @param buffer + */ + + append(buffer: BufferListAcceptedTypes): this; + + /** + * Will return the byte at the specified index. + * @param index + */ + + get(index: number): number; + + /** + * Returns a new Buffer object containing the bytes within the + * range specified. Both start and end are optional and will + * default to the beginning and end of the list respectively. + * + * If the requested range spans a single internal buffer then a + * slice of that buffer will be returned which shares the original + * memory range of that Buffer. 
If the range spans multiple buffers + * then copy operations will likely occur to give you a uniform Buffer. + * + * @param start + * @param end + */ + + slice(start?: number, end?: number): Buffer; + + /** + * Returns a new BufferList object containing the bytes within the + * range specified. Both start and end are optional and will default + * to the beginning and end of the list respectively. + * + * No copies will be performed. All buffers in the result share + * memory with the original list. + * + * @param start + * @param end + */ + + shallowSlice(start?: number, end?: number): this; + + /** + * Copies the content of the list in the `dest` buffer, starting from + * `destStart` and containing the bytes within the range specified + * with `srcStart` to `srcEnd`. + * + * `destStart`, `start` and `end` are optional and will default to the + * beginning of the dest buffer, and the beginning and end of the + * list respectively. + * + * @param dest + * @param destStart + * @param srcStart + * @param srcEnd + */ + + copy( + dest: Buffer, + destStart?: number, + srcStart?: number, + srcEnd?: number + ): Buffer; + + /** + * Performs a shallow-copy of the list. The internal Buffers remains the + * same, so if you change the underlying Buffers, the change will be + * reflected in both the original and the duplicate. + * + * This method is needed if you want to call consume() or pipe() and + * still keep the original list. + * + * @example + * + * ```js + * var bl = new BufferListStream(); + * bl.append('hello'); + * bl.append(' world'); + * bl.append('\n'); + * bl.duplicate().pipe(process.stdout, { end: false }); + * + * console.log(bl.toString()) + * ``` + */ + + duplicate(): this; + + /** + * Will shift bytes off the start of the list. The number of bytes + * consumed don't need to line up with the sizes of the internal + * Buffers—initial offsets will be calculated accordingly in order + * to give you a consistent view of the data. 
+ * + * @param bytes + */ + + consume(bytes?: number): void; + + /** + * Will return a string representation of the buffer. The optional + * `start` and `end` arguments are passed on to `slice()`, while + * the encoding is passed on to `toString()` of the resulting Buffer. + * + * See the [`Buffer#toString()`](http://nodejs.org/docs/latest/api/buffer.html#buffer_buf_tostring_encoding_start_end) + * documentation for more information. + * + * @param encoding + * @param start + * @param end + */ + + toString(encoding?: string, start?: number, end?: number): string; + + /** + * Will return the byte at the specified index. indexOf() method + * returns the first index at which a given element can be found + * in the BufferList, or -1 if it is not present. + * + * @param value + * @param byteOffset + * @param encoding + */ + + indexOf( + value: string | number | Uint8Array | BufferList | Buffer, + byteOffset?: number, + encoding?: string + ): number; + + /** + * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. + * + * @param offset + */ + + readDoubleBE: Buffer['readDoubleBE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. + * + * @param offset + */ + + readDoubleLE: Buffer['readDoubleLE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. 
+ * + * @param offset + */ + + readFloatBE: Buffer['readFloatBE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. + * + * @param offset + */ + + readFloatLE: Buffer['readFloatLE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. + * + * @param offset + */ + + readInt32BE: Buffer['readInt32BE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. + * + * @param offset + */ + + readInt32LE: Buffer['readInt32LE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. + * + * @param offset + */ + + readUInt32BE: Buffer['readUInt32BE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. + * + * @param offset + */ + + readUInt32LE: Buffer['readUInt32LE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. 
+ * + * @param offset + */ + + readInt16BE: Buffer['readInt16BE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are + * implemented and will operate across internal Buffer boundaries transparently. + * + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) + * documentation for how these work. + * + * @param offset + */ + + readInt16LE: Buffer['readInt16LE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are + * implemented and will operate across internal Buffer boundaries transparently. + * + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) + * documentation for how these work. + * + * @param offset + */ + + readUInt16BE: Buffer['readUInt16BE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are + * implemented and will operate across internal Buffer boundaries transparently. + * + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) + * documentation for how these work. + * + * @param offset + */ + + readUInt16LE: Buffer['readUInt16LE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are + * implemented and will operate across internal Buffer boundaries transparently. + * + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) + * documentation for how these work. + * + * @param offset + */ + + readInt8: Buffer['readInt8']; + + /** + * All of the standard byte-reading methods of the Buffer interface are + * implemented and will operate across internal Buffer boundaries transparently. + * + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) + * documentation for how these work. + * + * @param offset + */ + + readUInt8: Buffer['readUInt8']; + + /** + * All of the standard byte-reading methods of the Buffer interface are + * implemented and will operate across internal Buffer boundaries transparently. 
+ * + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) + * documentation for how these work. + * + * @param offset + */ + + readIntBE: Buffer['readIntBE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are + * implemented and will operate across internal Buffer boundaries transparently. + * + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) + * documentation for how these work. + * + * @param offset + */ + + readIntLE: Buffer['readIntLE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are + * implemented and will operate across internal Buffer boundaries transparently. + * + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) + * documentation for how these work. + * + * @param offset + */ + + readUIntBE: Buffer['readUIntBE']; + + /** + * All of the standard byte-reading methods of the Buffer interface are + * implemented and will operate across internal Buffer boundaries transparently. + * + * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) + * documentation for how these work. + * + * @param offset + */ + + readUIntLE: Buffer['readUIntLE']; +} + +/** + * No arguments are required for the constructor, but you can initialise + * the list by passing in a single Buffer object or an array of Buffer + * objects. 
+ * + * `new` is not strictly required, if you don't instantiate a new object, + * it will be done automatically for you so you can create a new instance + * simply with: + * + * ```js + * const { BufferList } = require('bl') + * const bl = BufferList() + * + * // equivalent to: + * + * const { BufferList } = require('bl') + * const bl = new BufferList() + * ``` + */ + +declare const BufferList: BufferListConstructor; diff --git a/node_modules/bl/BufferList.js b/node_modules/bl/BufferList.js new file mode 100644 index 0000000000..471ee77889 --- /dev/null +++ b/node_modules/bl/BufferList.js @@ -0,0 +1,396 @@ +'use strict' + +const { Buffer } = require('buffer') +const symbol = Symbol.for('BufferList') + +function BufferList (buf) { + if (!(this instanceof BufferList)) { + return new BufferList(buf) + } + + BufferList._init.call(this, buf) +} + +BufferList._init = function _init (buf) { + Object.defineProperty(this, symbol, { value: true }) + + this._bufs = [] + this.length = 0 + + if (buf) { + this.append(buf) + } +} + +BufferList.prototype._new = function _new (buf) { + return new BufferList(buf) +} + +BufferList.prototype._offset = function _offset (offset) { + if (offset === 0) { + return [0, 0] + } + + let tot = 0 + + for (let i = 0; i < this._bufs.length; i++) { + const _t = tot + this._bufs[i].length + if (offset < _t || i === this._bufs.length - 1) { + return [i, offset - tot] + } + tot = _t + } +} + +BufferList.prototype._reverseOffset = function (blOffset) { + const bufferId = blOffset[0] + let offset = blOffset[1] + + for (let i = 0; i < bufferId; i++) { + offset += this._bufs[i].length + } + + return offset +} + +BufferList.prototype.get = function get (index) { + if (index > this.length || index < 0) { + return undefined + } + + const offset = this._offset(index) + + return this._bufs[offset[0]][offset[1]] +} + +BufferList.prototype.slice = function slice (start, end) { + if (typeof start === 'number' && start < 0) { + start += this.length + } + + if 
(typeof end === 'number' && end < 0) { + end += this.length + } + + return this.copy(null, 0, start, end) +} + +BufferList.prototype.copy = function copy (dst, dstStart, srcStart, srcEnd) { + if (typeof srcStart !== 'number' || srcStart < 0) { + srcStart = 0 + } + + if (typeof srcEnd !== 'number' || srcEnd > this.length) { + srcEnd = this.length + } + + if (srcStart >= this.length) { + return dst || Buffer.alloc(0) + } + + if (srcEnd <= 0) { + return dst || Buffer.alloc(0) + } + + const copy = !!dst + const off = this._offset(srcStart) + const len = srcEnd - srcStart + let bytes = len + let bufoff = (copy && dstStart) || 0 + let start = off[1] + + // copy/slice everything + if (srcStart === 0 && srcEnd === this.length) { + if (!copy) { + // slice, but full concat if multiple buffers + return this._bufs.length === 1 + ? this._bufs[0] + : Buffer.concat(this._bufs, this.length) + } + + // copy, need to copy individual buffers + for (let i = 0; i < this._bufs.length; i++) { + this._bufs[i].copy(dst, bufoff) + bufoff += this._bufs[i].length + } + + return dst + } + + // easy, cheap case where it's a subset of one of the buffers + if (bytes <= this._bufs[off[0]].length - start) { + return copy + ? 
this._bufs[off[0]].copy(dst, dstStart, start, start + bytes) + : this._bufs[off[0]].slice(start, start + bytes) + } + + if (!copy) { + // a slice, we need something to copy in to + dst = Buffer.allocUnsafe(len) + } + + for (let i = off[0]; i < this._bufs.length; i++) { + const l = this._bufs[i].length - start + + if (bytes > l) { + this._bufs[i].copy(dst, bufoff, start) + bufoff += l + } else { + this._bufs[i].copy(dst, bufoff, start, start + bytes) + bufoff += l + break + } + + bytes -= l + + if (start) { + start = 0 + } + } + + // safeguard so that we don't return uninitialized memory + if (dst.length > bufoff) return dst.slice(0, bufoff) + + return dst +} + +BufferList.prototype.shallowSlice = function shallowSlice (start, end) { + start = start || 0 + end = typeof end !== 'number' ? this.length : end + + if (start < 0) { + start += this.length + } + + if (end < 0) { + end += this.length + } + + if (start === end) { + return this._new() + } + + const startOffset = this._offset(start) + const endOffset = this._offset(end) + const buffers = this._bufs.slice(startOffset[0], endOffset[0] + 1) + + if (endOffset[1] === 0) { + buffers.pop() + } else { + buffers[buffers.length - 1] = buffers[buffers.length - 1].slice(0, endOffset[1]) + } + + if (startOffset[1] !== 0) { + buffers[0] = buffers[0].slice(startOffset[1]) + } + + return this._new(buffers) +} + +BufferList.prototype.toString = function toString (encoding, start, end) { + return this.slice(start, end).toString(encoding) +} + +BufferList.prototype.consume = function consume (bytes) { + // first, normalize the argument, in accordance with how Buffer does it + bytes = Math.trunc(bytes) + // do nothing if not a positive number + if (Number.isNaN(bytes) || bytes <= 0) return this + + while (this._bufs.length) { + if (bytes >= this._bufs[0].length) { + bytes -= this._bufs[0].length + this.length -= this._bufs[0].length + this._bufs.shift() + } else { + this._bufs[0] = this._bufs[0].slice(bytes) + this.length -= bytes 
+ break + } + } + + return this +} + +BufferList.prototype.duplicate = function duplicate () { + const copy = this._new() + + for (let i = 0; i < this._bufs.length; i++) { + copy.append(this._bufs[i]) + } + + return copy +} + +BufferList.prototype.append = function append (buf) { + if (buf == null) { + return this + } + + if (buf.buffer) { + // append a view of the underlying ArrayBuffer + this._appendBuffer(Buffer.from(buf.buffer, buf.byteOffset, buf.byteLength)) + } else if (Array.isArray(buf)) { + for (let i = 0; i < buf.length; i++) { + this.append(buf[i]) + } + } else if (this._isBufferList(buf)) { + // unwrap argument into individual BufferLists + for (let i = 0; i < buf._bufs.length; i++) { + this.append(buf._bufs[i]) + } + } else { + // coerce number arguments to strings, since Buffer(number) does + // uninitialized memory allocation + if (typeof buf === 'number') { + buf = buf.toString() + } + + this._appendBuffer(Buffer.from(buf)) + } + + return this +} + +BufferList.prototype._appendBuffer = function appendBuffer (buf) { + this._bufs.push(buf) + this.length += buf.length +} + +BufferList.prototype.indexOf = function (search, offset, encoding) { + if (encoding === undefined && typeof offset === 'string') { + encoding = offset + offset = undefined + } + + if (typeof search === 'function' || Array.isArray(search)) { + throw new TypeError('The "value" argument must be one of type string, Buffer, BufferList, or Uint8Array.') + } else if (typeof search === 'number') { + search = Buffer.from([search]) + } else if (typeof search === 'string') { + search = Buffer.from(search, encoding) + } else if (this._isBufferList(search)) { + search = search.slice() + } else if (Array.isArray(search.buffer)) { + search = Buffer.from(search.buffer, search.byteOffset, search.byteLength) + } else if (!Buffer.isBuffer(search)) { + search = Buffer.from(search) + } + + offset = Number(offset || 0) + + if (isNaN(offset)) { + offset = 0 + } + + if (offset < 0) { + offset = 
this.length + offset + } + + if (offset < 0) { + offset = 0 + } + + if (search.length === 0) { + return offset > this.length ? this.length : offset + } + + const blOffset = this._offset(offset) + let blIndex = blOffset[0] // index of which internal buffer we're working on + let buffOffset = blOffset[1] // offset of the internal buffer we're working on + + // scan over each buffer + for (; blIndex < this._bufs.length; blIndex++) { + const buff = this._bufs[blIndex] + + while (buffOffset < buff.length) { + const availableWindow = buff.length - buffOffset + + if (availableWindow >= search.length) { + const nativeSearchResult = buff.indexOf(search, buffOffset) + + if (nativeSearchResult !== -1) { + return this._reverseOffset([blIndex, nativeSearchResult]) + } + + buffOffset = buff.length - search.length + 1 // end of native search window + } else { + const revOffset = this._reverseOffset([blIndex, buffOffset]) + + if (this._match(revOffset, search)) { + return revOffset + } + + buffOffset++ + } + } + + buffOffset = 0 + } + + return -1 +} + +BufferList.prototype._match = function (offset, search) { + if (this.length - offset < search.length) { + return false + } + + for (let searchOffset = 0; searchOffset < search.length; searchOffset++) { + if (this.get(offset + searchOffset) !== search[searchOffset]) { + return false + } + } + return true +} + +;(function () { + const methods = { + readDoubleBE: 8, + readDoubleLE: 8, + readFloatBE: 4, + readFloatLE: 4, + readInt32BE: 4, + readInt32LE: 4, + readUInt32BE: 4, + readUInt32LE: 4, + readInt16BE: 2, + readInt16LE: 2, + readUInt16BE: 2, + readUInt16LE: 2, + readInt8: 1, + readUInt8: 1, + readIntBE: null, + readIntLE: null, + readUIntBE: null, + readUIntLE: null + } + + for (const m in methods) { + (function (m) { + if (methods[m] === null) { + BufferList.prototype[m] = function (offset, byteLength) { + return this.slice(offset, offset + byteLength)[m](0, byteLength) + } + } else { + BufferList.prototype[m] = function (offset 
= 0) { + return this.slice(offset, offset + methods[m])[m](0) + } + } + }(m)) + } +}()) + +// Used internally by the class and also as an indicator of this object being +// a `BufferList`. It's not possible to use `instanceof BufferList` in a browser +// environment because there could be multiple different copies of the +// BufferList class and some `BufferList`s might be `BufferList`s. +BufferList.prototype._isBufferList = function _isBufferList (b) { + return b instanceof BufferList || BufferList.isBufferList(b) +} + +BufferList.isBufferList = function isBufferList (b) { + return b != null && b[symbol] +} + +module.exports = BufferList diff --git a/node_modules/bl/CHANGELOG.md b/node_modules/bl/CHANGELOG.md new file mode 100644 index 0000000000..a6156dcf56 --- /dev/null +++ b/node_modules/bl/CHANGELOG.md @@ -0,0 +1,17 @@ +## [5.1.0](https://github.com/rvagg/bl/compare/v5.0.0...v5.1.0) (2022-10-18) + + +### Features + +* added integrated TypeScript typings ([#108](https://github.com/rvagg/bl/issues/108)) ([433ff89](https://github.com/rvagg/bl/commit/433ff8942f47fab8a5c9d13b2c00989ccf8d0710)) + + +### Bug Fixes + +* windows support in tests ([387dfaf](https://github.com/rvagg/bl/commit/387dfaf9b2bca7849f12785436ceb01e42adac2c)) + + +### Trivial Changes + +* GH Actions, Dependabot, auto-release, remove Travis ([997f058](https://github.com/rvagg/bl/commit/997f058357de8f2a7f66998e80a72b491835573f)) +* **no-release:** bump standard from 16.0.4 to 17.0.0 ([#112](https://github.com/rvagg/bl/issues/112)) ([078bfe3](https://github.com/rvagg/bl/commit/078bfe33390d125297b1c946e5989c4aa9228961)) diff --git a/node_modules/bl/LICENSE.md b/node_modules/bl/LICENSE.md new file mode 100644 index 0000000000..ecbe516374 --- /dev/null +++ b/node_modules/bl/LICENSE.md @@ -0,0 +1,13 @@ +The MIT License (MIT) +===================== + +Copyright (c) 2013-2019 bl contributors +---------------------------------- + +*bl contributors listed at * + +Permission is hereby granted, free of 
charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/node_modules/bl/README.md b/node_modules/bl/README.md new file mode 100644 index 0000000000..9680b1dcb4 --- /dev/null +++ b/node_modules/bl/README.md @@ -0,0 +1,247 @@ +# bl *(BufferList)* + +[![Build Status](https://api.travis-ci.com/rvagg/bl.svg?branch=master)](https://travis-ci.com/rvagg/bl/) + +**A Node.js Buffer list collector, reader and streamer thingy.** + +[![NPM](https://nodei.co/npm/bl.svg)](https://nodei.co/npm/bl/) + +**bl** is a storage object for collections of Node Buffers, exposing them with the main Buffer readable API. Also works as a duplex stream so you can collect buffers from a stream that emits them and emit buffers to a stream that consumes them! + +The original buffers are kept intact and copies are only done as necessary. Any reads that require the use of a single original buffer will return a slice of that buffer only (which references the same memory as the original buffer). 
Reads that span buffers perform concatenation as required and return the results transparently. + +```js +const { BufferList } = require('bl') + +const bl = new BufferList() +bl.append(Buffer.from('abcd')) +bl.append(Buffer.from('efg')) +bl.append('hi') // bl will also accept & convert Strings +bl.append(Buffer.from('j')) +bl.append(Buffer.from([ 0x3, 0x4 ])) + +console.log(bl.length) // 12 + +console.log(bl.slice(0, 10).toString('ascii')) // 'abcdefghij' +console.log(bl.slice(3, 10).toString('ascii')) // 'defghij' +console.log(bl.slice(3, 6).toString('ascii')) // 'def' +console.log(bl.slice(3, 8).toString('ascii')) // 'defgh' +console.log(bl.slice(5, 10).toString('ascii')) // 'fghij' + +console.log(bl.indexOf('def')) // 3 +console.log(bl.indexOf('asdf')) // -1 + +// or just use toString! +console.log(bl.toString()) // 'abcdefghij\u0003\u0004' +console.log(bl.toString('ascii', 3, 8)) // 'defgh' +console.log(bl.toString('ascii', 5, 10)) // 'fghij' + +// other standard Buffer readables +console.log(bl.readUInt16BE(10)) // 0x0304 +console.log(bl.readUInt16LE(10)) // 0x0403 +``` + +Give it a callback in the constructor and use it just like **[concat-stream](https://github.com/maxogden/node-concat-stream)**: + +```js +const { BufferListStream } = require('bl') +const fs = require('fs') + +fs.createReadStream('README.md') + .pipe(BufferListStream((err, data) => { // note 'new' isn't strictly required + // `data` is a complete Buffer object containing the full data + console.log(data.toString()) + })) +``` + +Note that when you use the *callback* method like this, the resulting `data` parameter is a concatenation of all `Buffer` objects in the list. If you want to avoid the overhead of this concatenation (in cases of extreme performance consciousness), then avoid the *callback* method and just listen to `'end'` instead, like a standard Stream. 
+ +Or to fetch a URL using [hyperquest](https://github.com/substack/hyperquest) (should work with [request](http://github.com/mikeal/request) and even plain Node http too!): + +```js +const hyperquest = require('hyperquest') +const { BufferListStream } = require('bl') + +const url = 'https://raw.github.com/rvagg/bl/master/README.md' + +hyperquest(url).pipe(BufferListStream((err, data) => { + console.log(data.toString()) +})) +``` + +Or, use it as a readable stream to recompose a list of Buffers to an output source: + +```js +const { BufferListStream } = require('bl') +const fs = require('fs') + +var bl = new BufferListStream() +bl.append(Buffer.from('abcd')) +bl.append(Buffer.from('efg')) +bl.append(Buffer.from('hi')) +bl.append(Buffer.from('j')) + +bl.pipe(fs.createWriteStream('gibberish.txt')) +``` + +## API + + * new BufferList([ buf ]) + * BufferList.isBufferList(obj) + * bl.length + * bl.append(buffer) + * bl.get(index) + * bl.indexOf(value[, byteOffset][, encoding]) + * bl.slice([ start[, end ] ]) + * bl.shallowSlice([ start[, end ] ]) + * bl.copy(dest, [ destStart, [ srcStart [, srcEnd ] ] ]) + * bl.duplicate() + * bl.consume(bytes) + * bl.toString([encoding, [ start, [ end ]]]) + * bl.readDoubleBE(), bl.readDoubleLE(), bl.readFloatBE(), bl.readFloatLE(), bl.readInt32BE(), bl.readInt32LE(), bl.readUInt32BE(), bl.readUInt32LE(), bl.readInt16BE(), bl.readInt16LE(), bl.readUInt16BE(), bl.readUInt16LE(), bl.readInt8(), bl.readUInt8() + * new BufferListStream([ callback ]) + +-------------------------------------------------------- + +### new BufferList([ Buffer | Buffer array | BufferList | BufferList array | String ]) +No arguments are _required_ for the constructor, but you can initialise the list by passing in a single `Buffer` object or an array of `Buffer` objects. 
+ +`new` is not strictly required, if you don't instantiate a new object, it will be done automatically for you so you can create a new instance simply with: + +```js +const { BufferList } = require('bl') +const bl = BufferList() + +// equivalent to: + +const { BufferList } = require('bl') +const bl = new BufferList() +``` + +-------------------------------------------------------- + +### BufferList.isBufferList(obj) +Determines if the passed object is a `BufferList`. It will return `true` if the passed object is an instance of `BufferList` **or** `BufferListStream` and `false` otherwise. + +N.B. this won't return `true` for `BufferList` or `BufferListStream` instances created by versions of this library before this static method was added. + +-------------------------------------------------------- + +### bl.length +Get the length of the list in bytes. This is the sum of the lengths of all of the buffers contained in the list, minus any initial offset for a semi-consumed buffer at the beginning. Should accurately represent the total number of bytes that can be read from the list. + +-------------------------------------------------------- + +### bl.append(Buffer | Buffer array | BufferList | BufferList array | String) +`append(buffer)` adds an additional buffer or BufferList to the internal list. `this` is returned so it can be chained. + +-------------------------------------------------------- + +### bl.get(index) +`get()` will return the byte at the specified index. + +-------------------------------------------------------- + +### bl.indexOf(value[, byteOffset][, encoding]) +`get()` will return the byte at the specified index. +`indexOf()` method returns the first index at which a given element can be found in the BufferList, or -1 if it is not present. + +-------------------------------------------------------- + +### bl.slice([ start, [ end ] ]) +`slice()` returns a new `Buffer` object containing the bytes within the range specified. 
Both `start` and `end` are optional and will default to the beginning and end of the list respectively. + +If the requested range spans a single internal buffer then a slice of that buffer will be returned which shares the original memory range of that Buffer. If the range spans multiple buffers then copy operations will likely occur to give you a uniform Buffer. + +-------------------------------------------------------- + +### bl.shallowSlice([ start, [ end ] ]) +`shallowSlice()` returns a new `BufferList` object containing the bytes within the range specified. Both `start` and `end` are optional and will default to the beginning and end of the list respectively. + +No copies will be performed. All buffers in the result share memory with the original list. + +-------------------------------------------------------- + +### bl.copy(dest, [ destStart, [ srcStart [, srcEnd ] ] ]) +`copy()` copies the content of the list in the `dest` buffer, starting from `destStart` and containing the bytes within the range specified with `srcStart` to `srcEnd`. `destStart`, `start` and `end` are optional and will default to the beginning of the `dest` buffer, and the beginning and end of the list respectively. + +-------------------------------------------------------- + +### bl.duplicate() +`duplicate()` performs a **shallow-copy** of the list. The internal Buffers remains the same, so if you change the underlying Buffers, the change will be reflected in both the original and the duplicate. This method is needed if you want to call `consume()` or `pipe()` and still keep the original list.Example: + +```js +var bl = new BufferListStream() + +bl.append('hello') +bl.append(' world') +bl.append('\n') + +bl.duplicate().pipe(process.stdout, { end: false }) + +console.log(bl.toString()) +``` + +-------------------------------------------------------- + +### bl.consume(bytes) +`consume()` will shift bytes *off the start of the list*. 
The number of bytes consumed doesn't need to line up
This is a convenient method of collecting the entire contents of a stream, particularly when the stream is *chunky*, such as a network stream. + +Normally, no arguments are required for the constructor, but you can initialise the list by passing in a single `Buffer` object or an array of `Buffer` object. + +`new` is not strictly required, if you don't instantiate a new object, it will be done automatically for you so you can create a new instance simply with: + +```js +const { BufferListStream } = require('bl') +const bl = BufferListStream() + +// equivalent to: + +const { BufferListStream } = require('bl') +const bl = new BufferListStream() +``` + +N.B. For backwards compatibility reasons, `BufferListStream` is the **default** export when you `require('bl')`: + +```js +const { BufferListStream } = require('bl') +// equivalent to: +const BufferListStream = require('bl') +``` + +-------------------------------------------------------- + +## Contributors + +**bl** is brought to you by the following hackers: + + * [Rod Vagg](https://github.com/rvagg) + * [Matteo Collina](https://github.com/mcollina) + * [Jarett Cruger](https://github.com/jcrugzz) + + +## License & copyright + +Copyright (c) 2013-2019 bl contributors (listed above). + +bl is licensed under the MIT license. All rights not explicitly granted in the MIT license are reserved. See the included LICENSE.md file for more details. 
diff --git a/node_modules/bl/bl.js b/node_modules/bl/bl.js new file mode 100644 index 0000000000..40228f8799 --- /dev/null +++ b/node_modules/bl/bl.js @@ -0,0 +1,84 @@ +'use strict' + +const DuplexStream = require('readable-stream').Duplex +const inherits = require('inherits') +const BufferList = require('./BufferList') + +function BufferListStream (callback) { + if (!(this instanceof BufferListStream)) { + return new BufferListStream(callback) + } + + if (typeof callback === 'function') { + this._callback = callback + + const piper = function piper (err) { + if (this._callback) { + this._callback(err) + this._callback = null + } + }.bind(this) + + this.on('pipe', function onPipe (src) { + src.on('error', piper) + }) + this.on('unpipe', function onUnpipe (src) { + src.removeListener('error', piper) + }) + + callback = null + } + + BufferList._init.call(this, callback) + DuplexStream.call(this) +} + +inherits(BufferListStream, DuplexStream) +Object.assign(BufferListStream.prototype, BufferList.prototype) + +BufferListStream.prototype._new = function _new (callback) { + return new BufferListStream(callback) +} + +BufferListStream.prototype._write = function _write (buf, encoding, callback) { + this._appendBuffer(buf) + + if (typeof callback === 'function') { + callback() + } +} + +BufferListStream.prototype._read = function _read (size) { + if (!this.length) { + return this.push(null) + } + + size = Math.min(size, this.length) + this.push(this.slice(0, size)) + this.consume(size) +} + +BufferListStream.prototype.end = function end (chunk) { + DuplexStream.prototype.end.call(this, chunk) + + if (this._callback) { + this._callback(null, this.slice()) + this._callback = null + } +} + +BufferListStream.prototype._destroy = function _destroy (err, cb) { + this._bufs.length = 0 + this.length = 0 + cb(err) +} + +BufferListStream.prototype._isBufferList = function _isBufferList (b) { + return b instanceof BufferListStream || b instanceof BufferList || 
BufferListStream.isBufferList(b) +} + +BufferListStream.isBufferList = BufferList.isBufferList + +module.exports = BufferListStream +module.exports.BufferListStream = BufferListStream +module.exports.BufferList = BufferList diff --git a/node_modules/bl/index.d.ts b/node_modules/bl/index.d.ts new file mode 100644 index 0000000000..07a8ee3d53 --- /dev/null +++ b/node_modules/bl/index.d.ts @@ -0,0 +1,88 @@ +import { Duplex } from "readable-stream"; +import { + BufferList as BL, + BufferListConstructor, + BufferListAcceptedTypes, +} from "./BufferList"; + +type BufferListStreamInit = + | ((err: Error, buffer: Buffer) => void) + | BufferListAcceptedTypes; + +interface BufferListStreamConstructor { + new (initData?: BufferListStreamInit): BufferListStream; + (callback?: BufferListStreamInit): BufferListStream; + + /** + * Determines if the passed object is a BufferList. It will return true + * if the passed object is an instance of BufferList or BufferListStream + * and false otherwise. + * + * N.B. this won't return true for BufferList or BufferListStream instances + * created by versions of this library before this static method was added. + * + * @param other + */ + + isBufferList(other: unknown): boolean; + + /** + * Rexporting BufferList and BufferListStream to fix + * issue with require/commonjs import and "export = " below. + */ + + BufferList: BufferListConstructor; + BufferListStream: BufferListStreamConstructor; +} + +interface BufferListStream extends Duplex, BL { + prototype: BufferListStream & BL; +} + +/** + * BufferListStream is a Node Duplex Stream, so it can be read from + * and written to like a standard Node stream. You can also pipe() + * to and from a BufferListStream instance. + * + * The constructor takes an optional callback, if supplied, the + * callback will be called with an error argument followed by a + * reference to the bl instance, when bl.end() is called + * (i.e. from a piped stream). 
+ * + * This is a convenient method of collecting the entire contents of + * a stream, particularly when the stream is chunky, such as a network + * stream. + * + * Normally, no arguments are required for the constructor, but you can + * initialise the list by passing in a single Buffer object or an array + * of Buffer object. + * + * `new` is not strictly required, if you don't instantiate a new object, + * it will be done automatically for you so you can create a new instance + * simply with: + * + * ```js + * const { BufferListStream } = require('bl'); + * const bl = BufferListStream(); + * + * // equivalent to: + * + * const { BufferListStream } = require('bl'); + * const bl = new BufferListStream(); + * ``` + * + * N.B. For backwards compatibility reasons, BufferListStream is the default + * export when you `require('bl')`: + * + * ```js + * const { BufferListStream } = require('bl') + * + * // equivalent to: + * + * const BufferListStream = require('bl') + * ``` + */ + +declare const BufferListStream: BufferListStreamConstructor; + +export = BufferListStream; diff --git a/node_modules/bl/package.json b/node_modules/bl/package.json new file mode 100644 index 0000000000..92a08e3089 --- /dev/null +++ b/node_modules/bl/package.json @@ -0,0 +1,123 @@ +{ + "name": "bl", + "version": "5.1.0", + "description": "Buffer List: collect buffers and access with a standard readable Buffer interface, streamable too!", + "license": "MIT", + "main": "bl.js", + "scripts": { + "lint": "standard *.js test/*.js", + "test": "npm run lint && npm run test:types && node test/test.js | faucet", + "test:ci": "npm run lint && node test/test.js && npm run test:types", + "test:types": "tsc --allowJs --noEmit test/test.js", + "build": "true" + }, + "repository": { + "type": "git", + "url": "https://github.com/rvagg/bl.git" + }, + "homepage": "https://github.com/rvagg/bl", + "authors": [ + "Rod Vagg (https://github.com/rvagg)", + "Matteo Collina (https://github.com/mcollina)", + "Jarett 
Cruger (https://github.com/jcrugzz)" + ], + "keywords": [ + "buffer", + "buffers", + "stream", + "awesomesauce" + ], + "dependencies": { + "buffer": "^6.0.3", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + }, + "devDependencies": { + "@types/readable-stream": "^2.3.13", + "faucet": "~0.0.1", + "standard": "^17.0.0", + "tape": "^5.2.2", + "typescript": "~4.7.3" + }, + "release": { + "branches": [ + "master" + ], + "plugins": [ + [ + "@semantic-release/commit-analyzer", + { + "preset": "conventionalcommits", + "releaseRules": [ + { + "breaking": true, + "release": "major" + }, + { + "revert": true, + "release": "patch" + }, + { + "type": "feat", + "release": "minor" + }, + { + "type": "fix", + "release": "patch" + }, + { + "type": "chore", + "release": "patch" + }, + { + "type": "docs", + "release": "patch" + }, + { + "type": "test", + "release": "patch" + }, + { + "scope": "no-release", + "release": false + } + ] + } + ], + [ + "@semantic-release/release-notes-generator", + { + "preset": "conventionalcommits", + "presetConfig": { + "types": [ + { + "type": "feat", + "section": "Features" + }, + { + "type": "fix", + "section": "Bug Fixes" + }, + { + "type": "chore", + "section": "Trivial Changes" + }, + { + "type": "docs", + "section": "Trivial Changes" + }, + { + "type": "test", + "section": "Tests" + } + ] + } + } + ], + "@semantic-release/changelog", + "@semantic-release/npm", + "@semantic-release/github", + "@semantic-release/git" + ] + } +} diff --git a/node_modules/bl/test/convert.js b/node_modules/bl/test/convert.js new file mode 100644 index 0000000000..9f3e23599e --- /dev/null +++ b/node_modules/bl/test/convert.js @@ -0,0 +1,21 @@ +'use strict' + +const tape = require('tape') +const { BufferList, BufferListStream } = require('../') +const { Buffer } = require('buffer') + +tape('convert from BufferList to BufferListStream', (t) => { + const data = Buffer.from(`TEST-${Date.now()}`) + const bl = new BufferList(data) + const bls = new 
BufferListStream(bl) + t.ok(bl.slice().equals(bls.slice())) + t.end() +}) + +tape('convert from BufferListStream to BufferList', (t) => { + const data = Buffer.from(`TEST-${Date.now()}`) + const bls = new BufferListStream(data) + const bl = new BufferList(bls) + t.ok(bl.slice().equals(bls.slice())) + t.end() +}) diff --git a/node_modules/bl/test/indexOf.js b/node_modules/bl/test/indexOf.js new file mode 100644 index 0000000000..62dcb01f3b --- /dev/null +++ b/node_modules/bl/test/indexOf.js @@ -0,0 +1,492 @@ +'use strict' + +const tape = require('tape') +const BufferList = require('../') +const { Buffer } = require('buffer') + +tape('indexOf single byte needle', (t) => { + const bl = new BufferList(['abcdefg', 'abcdefg', '12345']) + + t.equal(bl.indexOf('e'), 4) + t.equal(bl.indexOf('e', 5), 11) + t.equal(bl.indexOf('e', 12), -1) + t.equal(bl.indexOf('5'), 18) + + t.end() +}) + +tape('indexOf multiple byte needle', (t) => { + const bl = new BufferList(['abcdefg', 'abcdefg']) + + t.equal(bl.indexOf('ef'), 4) + t.equal(bl.indexOf('ef', 5), 11) + + t.end() +}) + +tape('indexOf multiple byte needles across buffer boundaries', (t) => { + const bl = new BufferList(['abcdefg', 'abcdefg']) + + t.equal(bl.indexOf('fgabc'), 5) + + t.end() +}) + +tape('indexOf takes a Uint8Array search', (t) => { + const bl = new BufferList(['abcdefg', 'abcdefg']) + const search = new Uint8Array([102, 103, 97, 98, 99]) // fgabc + + t.equal(bl.indexOf(search), 5) + + t.end() +}) + +tape('indexOf takes a buffer list search', (t) => { + const bl = new BufferList(['abcdefg', 'abcdefg']) + const search = new BufferList('fgabc') + + t.equal(bl.indexOf(search), 5) + + t.end() +}) + +tape('indexOf a zero byte needle', (t) => { + const b = new BufferList('abcdef') + const bufEmpty = Buffer.from('') + + t.equal(b.indexOf(''), 0) + t.equal(b.indexOf('', 1), 1) + t.equal(b.indexOf('', b.length + 1), b.length) + t.equal(b.indexOf('', Infinity), b.length) + t.equal(b.indexOf(bufEmpty), 0) + 
t.equal(b.indexOf(bufEmpty, 1), 1) + t.equal(b.indexOf(bufEmpty, b.length + 1), b.length) + t.equal(b.indexOf(bufEmpty, Infinity), b.length) + + t.end() +}) + +tape('indexOf buffers smaller and larger than the needle', (t) => { + const bl = new BufferList(['abcdefg', 'a', 'bcdefg', 'a', 'bcfgab']) + + t.equal(bl.indexOf('fgabc'), 5) + t.equal(bl.indexOf('fgabc', 6), 12) + t.equal(bl.indexOf('fgabc', 13), -1) + + t.end() +}) + +// only present in node 6+ +;(process.version.substr(1).split('.')[0] >= 6) && tape('indexOf latin1 and binary encoding', (t) => { + const b = new BufferList('abcdef') + + // test latin1 encoding + t.equal( + new BufferList(Buffer.from(b.toString('latin1'), 'latin1')) + .indexOf('d', 0, 'latin1'), + 3 + ) + t.equal( + new BufferList(Buffer.from(b.toString('latin1'), 'latin1')) + .indexOf(Buffer.from('d', 'latin1'), 0, 'latin1'), + 3 + ) + t.equal( + new BufferList(Buffer.from('aa\u00e8aa', 'latin1')) + .indexOf('\u00e8', 'latin1'), + 2 + ) + t.equal( + new BufferList(Buffer.from('\u00e8', 'latin1')) + .indexOf('\u00e8', 'latin1'), + 0 + ) + t.equal( + new BufferList(Buffer.from('\u00e8', 'latin1')) + .indexOf(Buffer.from('\u00e8', 'latin1'), 'latin1'), + 0 + ) + + // test binary encoding + t.equal( + new BufferList(Buffer.from(b.toString('binary'), 'binary')) + .indexOf('d', 0, 'binary'), + 3 + ) + t.equal( + new BufferList(Buffer.from(b.toString('binary'), 'binary')) + .indexOf(Buffer.from('d', 'binary'), 0, 'binary'), + 3 + ) + t.equal( + new BufferList(Buffer.from('aa\u00e8aa', 'binary')) + .indexOf('\u00e8', 'binary'), + 2 + ) + t.equal( + new BufferList(Buffer.from('\u00e8', 'binary')) + .indexOf('\u00e8', 'binary'), + 0 + ) + t.equal( + new BufferList(Buffer.from('\u00e8', 'binary')) + .indexOf(Buffer.from('\u00e8', 'binary'), 'binary'), + 0 + ) + + t.end() +}) + +tape('indexOf the entire nodejs10 buffer test suite', (t) => { + const b = new BufferList('abcdef') + const bufA = Buffer.from('a') + const bufBc = Buffer.from('bc') + const 
bufF = Buffer.from('f') + const bufZ = Buffer.from('z') + + const stringComparison = 'abcdef' + + t.equal(b.indexOf('a'), 0) + t.equal(b.indexOf('a', 1), -1) + t.equal(b.indexOf('a', -1), -1) + t.equal(b.indexOf('a', -4), -1) + t.equal(b.indexOf('a', -b.length), 0) + t.equal(b.indexOf('a', NaN), 0) + t.equal(b.indexOf('a', -Infinity), 0) + t.equal(b.indexOf('a', Infinity), -1) + t.equal(b.indexOf('bc'), 1) + t.equal(b.indexOf('bc', 2), -1) + t.equal(b.indexOf('bc', -1), -1) + t.equal(b.indexOf('bc', -3), -1) + t.equal(b.indexOf('bc', -5), 1) + t.equal(b.indexOf('bc', NaN), 1) + t.equal(b.indexOf('bc', -Infinity), 1) + t.equal(b.indexOf('bc', Infinity), -1) + t.equal(b.indexOf('f'), b.length - 1) + t.equal(b.indexOf('z'), -1) + + // empty search tests + t.equal(b.indexOf(bufA), 0) + t.equal(b.indexOf(bufA, 1), -1) + t.equal(b.indexOf(bufA, -1), -1) + t.equal(b.indexOf(bufA, -4), -1) + t.equal(b.indexOf(bufA, -b.length), 0) + t.equal(b.indexOf(bufA, NaN), 0) + t.equal(b.indexOf(bufA, -Infinity), 0) + t.equal(b.indexOf(bufA, Infinity), -1) + t.equal(b.indexOf(bufBc), 1) + t.equal(b.indexOf(bufBc, 2), -1) + t.equal(b.indexOf(bufBc, -1), -1) + t.equal(b.indexOf(bufBc, -3), -1) + t.equal(b.indexOf(bufBc, -5), 1) + t.equal(b.indexOf(bufBc, NaN), 1) + t.equal(b.indexOf(bufBc, -Infinity), 1) + t.equal(b.indexOf(bufBc, Infinity), -1) + t.equal(b.indexOf(bufF), b.length - 1) + t.equal(b.indexOf(bufZ), -1) + t.equal(b.indexOf(0x61), 0) + t.equal(b.indexOf(0x61, 1), -1) + t.equal(b.indexOf(0x61, -1), -1) + t.equal(b.indexOf(0x61, -4), -1) + t.equal(b.indexOf(0x61, -b.length), 0) + t.equal(b.indexOf(0x61, NaN), 0) + t.equal(b.indexOf(0x61, -Infinity), 0) + t.equal(b.indexOf(0x61, Infinity), -1) + t.equal(b.indexOf(0x0), -1) + + // test offsets + t.equal(b.indexOf('d', 2), 3) + t.equal(b.indexOf('f', 5), 5) + t.equal(b.indexOf('f', -1), 5) + t.equal(b.indexOf('f', 6), -1) + + t.equal(b.indexOf(Buffer.from('d'), 2), 3) + t.equal(b.indexOf(Buffer.from('f'), 5), 5) + 
t.equal(b.indexOf(Buffer.from('f'), -1), 5) + t.equal(b.indexOf(Buffer.from('f'), 6), -1) + + t.equal(Buffer.from('ff').indexOf(Buffer.from('f'), 1, 'ucs2'), -1) + + // test invalid and uppercase encoding + t.equal(b.indexOf('b', 'utf8'), 1) + t.equal(b.indexOf('b', 'UTF8'), 1) + t.equal(b.indexOf('62', 'HEX'), 1) + t.throws(() => b.indexOf('bad', 'enc'), TypeError) + + // test hex encoding + t.equal( + Buffer.from(b.toString('hex'), 'hex') + .indexOf('64', 0, 'hex'), + 3 + ) + t.equal( + Buffer.from(b.toString('hex'), 'hex') + .indexOf(Buffer.from('64', 'hex'), 0, 'hex'), + 3 + ) + + // test base64 encoding + t.equal( + Buffer.from(b.toString('base64'), 'base64') + .indexOf('ZA==', 0, 'base64'), + 3 + ) + t.equal( + Buffer.from(b.toString('base64'), 'base64') + .indexOf(Buffer.from('ZA==', 'base64'), 0, 'base64'), + 3 + ) + + // test ascii encoding + t.equal( + Buffer.from(b.toString('ascii'), 'ascii') + .indexOf('d', 0, 'ascii'), + 3 + ) + t.equal( + Buffer.from(b.toString('ascii'), 'ascii') + .indexOf(Buffer.from('d', 'ascii'), 0, 'ascii'), + 3 + ) + + // test optional offset with passed encoding + t.equal(Buffer.from('aaaa0').indexOf('30', 'hex'), 4) + t.equal(Buffer.from('aaaa00a').indexOf('3030', 'hex'), 4) + + { + // test usc2 encoding + const twoByteString = Buffer.from('\u039a\u0391\u03a3\u03a3\u0395', 'ucs2') + + t.equal(8, twoByteString.indexOf('\u0395', 4, 'ucs2')) + t.equal(6, twoByteString.indexOf('\u03a3', -4, 'ucs2')) + t.equal(4, twoByteString.indexOf('\u03a3', -6, 'ucs2')) + t.equal(4, twoByteString.indexOf( + Buffer.from('\u03a3', 'ucs2'), -6, 'ucs2')) + t.equal(-1, twoByteString.indexOf('\u03a3', -2, 'ucs2')) + } + + const mixedByteStringUcs2 = + Buffer.from('\u039a\u0391abc\u03a3\u03a3\u0395', 'ucs2') + + t.equal(6, mixedByteStringUcs2.indexOf('bc', 0, 'ucs2')) + t.equal(10, mixedByteStringUcs2.indexOf('\u03a3', 0, 'ucs2')) + t.equal(-1, mixedByteStringUcs2.indexOf('\u0396', 0, 'ucs2')) + + t.equal( + 6, 
mixedByteStringUcs2.indexOf(Buffer.from('bc', 'ucs2'), 0, 'ucs2')) + t.equal( + 10, mixedByteStringUcs2.indexOf(Buffer.from('\u03a3', 'ucs2'), 0, 'ucs2')) + t.equal( + -1, mixedByteStringUcs2.indexOf(Buffer.from('\u0396', 'ucs2'), 0, 'ucs2')) + + { + const twoByteString = Buffer.from('\u039a\u0391\u03a3\u03a3\u0395', 'ucs2') + + // Test single char pattern + t.equal(0, twoByteString.indexOf('\u039a', 0, 'ucs2')) + let index = twoByteString.indexOf('\u0391', 0, 'ucs2') + t.equal(2, index, `Alpha - at index ${index}`) + index = twoByteString.indexOf('\u03a3', 0, 'ucs2') + t.equal(4, index, `First Sigma - at index ${index}`) + index = twoByteString.indexOf('\u03a3', 6, 'ucs2') + t.equal(6, index, `Second Sigma - at index ${index}`) + index = twoByteString.indexOf('\u0395', 0, 'ucs2') + t.equal(8, index, `Epsilon - at index ${index}`) + index = twoByteString.indexOf('\u0392', 0, 'ucs2') + t.equal(-1, index, `Not beta - at index ${index}`) + + // Test multi-char pattern + index = twoByteString.indexOf('\u039a\u0391', 0, 'ucs2') + t.equal(0, index, `Lambda Alpha - at index ${index}`) + index = twoByteString.indexOf('\u0391\u03a3', 0, 'ucs2') + t.equal(2, index, `Alpha Sigma - at index ${index}`) + index = twoByteString.indexOf('\u03a3\u03a3', 0, 'ucs2') + t.equal(4, index, `Sigma Sigma - at index ${index}`) + index = twoByteString.indexOf('\u03a3\u0395', 0, 'ucs2') + t.equal(6, index, `Sigma Epsilon - at index ${index}`) + } + + const mixedByteStringUtf8 = Buffer.from('\u039a\u0391abc\u03a3\u03a3\u0395') + + t.equal(5, mixedByteStringUtf8.indexOf('bc')) + t.equal(5, mixedByteStringUtf8.indexOf('bc', 5)) + t.equal(5, mixedByteStringUtf8.indexOf('bc', -8)) + t.equal(7, mixedByteStringUtf8.indexOf('\u03a3')) + t.equal(-1, mixedByteStringUtf8.indexOf('\u0396')) + + // Test complex string indexOf algorithms. Only trigger for long strings. + // Long string that isn't a simple repeat of a shorter string. 
+ let longString = 'A' + for (let i = 66; i < 76; i++) { // from 'B' to 'K' + longString = longString + String.fromCharCode(i) + longString + } + + const longBufferString = Buffer.from(longString) + + // pattern of 15 chars, repeated every 16 chars in long + let pattern = 'ABACABADABACABA' + for (let i = 0; i < longBufferString.length - pattern.length; i += 7) { + const index = longBufferString.indexOf(pattern, i) + t.equal((i + 15) & ~0xf, index, + `Long ABACABA...-string at index ${i}`) + } + + let index = longBufferString.indexOf('AJABACA') + t.equal(510, index, `Long AJABACA, First J - at index ${index}`) + index = longBufferString.indexOf('AJABACA', 511) + t.equal(1534, index, `Long AJABACA, Second J - at index ${index}`) + + pattern = 'JABACABADABACABA' + index = longBufferString.indexOf(pattern) + t.equal(511, index, `Long JABACABA..., First J - at index ${index}`) + index = longBufferString.indexOf(pattern, 512) + t.equal( + 1535, index, `Long JABACABA..., Second J - at index ${index}`) + + // Search for a non-ASCII string in a pure ASCII string. + const asciiString = Buffer.from( + 'somethingnotatallsinisterwhichalsoworks') + t.equal(-1, asciiString.indexOf('\x2061')) + t.equal(3, asciiString.indexOf('eth', 0)) + + // Search in string containing many non-ASCII chars. + const allCodePoints = [] + for (let i = 0; i < 65536; i++) { + allCodePoints[i] = i + } + + const allCharsString = String.fromCharCode.apply(String, allCodePoints) + const allCharsBufferUtf8 = Buffer.from(allCharsString) + const allCharsBufferUcs2 = Buffer.from(allCharsString, 'ucs2') + + // Search for string long enough to trigger complex search with ASCII pattern + // and UC16 subject. 
+ t.equal(-1, allCharsBufferUtf8.indexOf('notfound')) + t.equal(-1, allCharsBufferUcs2.indexOf('notfound')) + + // Needle is longer than haystack, but only because it's encoded as UTF-16 + t.equal(Buffer.from('aaaa').indexOf('a'.repeat(4), 'ucs2'), -1) + + t.equal(Buffer.from('aaaa').indexOf('a'.repeat(4), 'utf8'), 0) + t.equal(Buffer.from('aaaa').indexOf('你好', 'ucs2'), -1) + + // Haystack has odd length, but the needle is UCS2. + t.equal(Buffer.from('aaaaa').indexOf('b', 'ucs2'), -1) + + { + // Find substrings in Utf8. + const lengths = [1, 3, 15] // Single char, simple and complex. + const indices = [0x5, 0x60, 0x400, 0x680, 0x7ee, 0xFF02, 0x16610, 0x2f77b] + for (let lengthIndex = 0; lengthIndex < lengths.length; lengthIndex++) { + for (let i = 0; i < indices.length; i++) { + const index = indices[i] + let length = lengths[lengthIndex] + + if (index + length > 0x7F) { + length = 2 * length + } + + if (index + length > 0x7FF) { + length = 3 * length + } + + if (index + length > 0xFFFF) { + length = 4 * length + } + + const patternBufferUtf8 = allCharsBufferUtf8.slice(index, index + length) + t.equal(index, allCharsBufferUtf8.indexOf(patternBufferUtf8)) + + const patternStringUtf8 = patternBufferUtf8.toString() + t.equal(index, allCharsBufferUtf8.indexOf(patternStringUtf8)) + } + } + } + + { + // Find substrings in Usc2. + const lengths = [2, 4, 16] // Single char, simple and complex. 
+ const indices = [0x5, 0x65, 0x105, 0x205, 0x285, 0x2005, 0x2085, 0xfff0] + + for (let lengthIndex = 0; lengthIndex < lengths.length; lengthIndex++) { + for (let i = 0; i < indices.length; i++) { + const index = indices[i] * 2 + const length = lengths[lengthIndex] + + const patternBufferUcs2 = + allCharsBufferUcs2.slice(index, index + length) + t.equal( + index, allCharsBufferUcs2.indexOf(patternBufferUcs2, 0, 'ucs2')) + + const patternStringUcs2 = patternBufferUcs2.toString('ucs2') + t.equal( + index, allCharsBufferUcs2.indexOf(patternStringUcs2, 0, 'ucs2')) + } + } + } + + [ + () => {}, + {}, + [] + ].forEach((val) => { + t.throws(() => b.indexOf(val), TypeError, `"${JSON.stringify(val)}" should throw`) + }) + + // Test weird offset arguments. + // The following offsets coerce to NaN or 0, searching the whole Buffer + t.equal(b.indexOf('b', undefined), 1) + t.equal(b.indexOf('b', {}), 1) + t.equal(b.indexOf('b', 0), 1) + t.equal(b.indexOf('b', null), 1) + t.equal(b.indexOf('b', []), 1) + + // The following offset coerces to 2, in other words +[2] === 2 + t.equal(b.indexOf('b', [2]), -1) + + // Behavior should match String.indexOf() + t.equal( + b.indexOf('b', undefined), + stringComparison.indexOf('b', undefined)) + t.equal( + b.indexOf('b', {}), + stringComparison.indexOf('b', {})) + t.equal( + b.indexOf('b', 0), + stringComparison.indexOf('b', 0)) + t.equal( + b.indexOf('b', null), + stringComparison.indexOf('b', null)) + t.equal( + b.indexOf('b', []), + stringComparison.indexOf('b', [])) + t.equal( + b.indexOf('b', [2]), + stringComparison.indexOf('b', [2])) + + // test truncation of Number arguments to uint8 + { + const buf = Buffer.from('this is a test') + + t.equal(buf.indexOf(0x6973), 3) + t.equal(buf.indexOf(0x697320), 4) + t.equal(buf.indexOf(0x69732069), 2) + t.equal(buf.indexOf(0x697374657374), 0) + t.equal(buf.indexOf(0x69737374), 0) + t.equal(buf.indexOf(0x69737465), 11) + t.equal(buf.indexOf(0x69737465), 11) + t.equal(buf.indexOf(-140), 0) + 
t.equal(buf.indexOf(-152), 1) + t.equal(buf.indexOf(0xff), -1) + t.equal(buf.indexOf(0xffff), -1) + } + + // Test that Uint8Array arguments are okay. + { + const needle = new Uint8Array([0x66, 0x6f, 0x6f]) + const haystack = new BufferList(Buffer.from('a foo b foo')) + t.equal(haystack.indexOf(needle), 2) + } + + t.end() +}) diff --git a/node_modules/bl/test/isBufferList.js b/node_modules/bl/test/isBufferList.js new file mode 100644 index 0000000000..9d895d59b3 --- /dev/null +++ b/node_modules/bl/test/isBufferList.js @@ -0,0 +1,32 @@ +'use strict' + +const tape = require('tape') +const { BufferList, BufferListStream } = require('../') +const { Buffer } = require('buffer') + +tape('isBufferList positives', (t) => { + t.ok(BufferList.isBufferList(new BufferList())) + t.ok(BufferList.isBufferList(new BufferListStream())) + + t.end() +}) + +tape('isBufferList negatives', (t) => { + const types = [ + null, + undefined, + NaN, + true, + false, + {}, + [], + Buffer.alloc(0), + [Buffer.alloc(0)] + ] + + for (const obj of types) { + t.notOk(BufferList.isBufferList(obj)) + } + + t.end() +}) diff --git a/node_modules/bl/test/test.js b/node_modules/bl/test/test.js new file mode 100644 index 0000000000..668dc170b3 --- /dev/null +++ b/node_modules/bl/test/test.js @@ -0,0 +1,914 @@ +// @ts-check +'use strict' + +const tape = require('tape') +const crypto = require('crypto') +const fs = require('fs') +const path = require('path') +const os = require('os') +const BufferListStream = require('../') +const { Buffer } = require('buffer') + +/** + * This typedef allows us to add _bufs to the API without declaring it publicly on types. 
+ * @typedef { BufferListStream & { _bufs?: Buffer[] }} BufferListStreamWithPrivate + */ + +/** + * Just for typechecking in js + * @type { NodeJS.Process & { browser?: boolean }} + */ + +const process = globalThis.process + +/** @type {BufferEncoding[]} */ +const encodings = ['ascii', 'utf8', 'utf-8', 'hex', 'binary', 'base64'] + +if (process.browser) { + encodings.push( + 'ucs2', + 'ucs-2', + 'utf16le', + /** + * This alias is not in typescript typings for BufferEncoding. Still have to fix + * @see https://nodejs.org/api/buffer.html#buffers-and-character-encodings + */ + // @ts-ignore + 'utf-16le' + ) +} + +require('./indexOf') +require('./isBufferList') +require('./convert') + +tape('single bytes from single buffer', function (t) { + const bl = new BufferListStream() + + bl.append(Buffer.from('abcd')) + + t.equal(bl.length, 4) + t.equal(bl.get(-1), undefined) + t.equal(bl.get(0), 97) + t.equal(bl.get(1), 98) + t.equal(bl.get(2), 99) + t.equal(bl.get(3), 100) + t.equal(bl.get(4), undefined) + + t.end() +}) + +tape('single bytes from multiple buffers', function (t) { + const bl = new BufferListStream() + + bl.append(Buffer.from('abcd')) + bl.append(Buffer.from('efg')) + bl.append(Buffer.from('hi')) + bl.append(Buffer.from('j')) + + t.equal(bl.length, 10) + + t.equal(bl.get(0), 97) + t.equal(bl.get(1), 98) + t.equal(bl.get(2), 99) + t.equal(bl.get(3), 100) + t.equal(bl.get(4), 101) + t.equal(bl.get(5), 102) + t.equal(bl.get(6), 103) + t.equal(bl.get(7), 104) + t.equal(bl.get(8), 105) + t.equal(bl.get(9), 106) + + t.end() +}) + +tape('multi bytes from single buffer', function (t) { + const bl = new BufferListStream() + + bl.append(Buffer.from('abcd')) + + t.equal(bl.length, 4) + + t.equal(bl.slice(0, 4).toString('ascii'), 'abcd') + t.equal(bl.slice(0, 3).toString('ascii'), 'abc') + t.equal(bl.slice(1, 4).toString('ascii'), 'bcd') + t.equal(bl.slice(-4, -1).toString('ascii'), 'abc') + + t.end() +}) + +tape('multi bytes from single buffer (negative indexes)', function 
(t) { + const bl = new BufferListStream() + + bl.append(Buffer.from('buffer')) + + t.equal(bl.length, 6) + + t.equal(bl.slice(-6, -1).toString('ascii'), 'buffe') + t.equal(bl.slice(-6, -2).toString('ascii'), 'buff') + t.equal(bl.slice(-5, -2).toString('ascii'), 'uff') + + t.end() +}) + +tape('multiple bytes from multiple buffers', function (t) { + const bl = new BufferListStream() + + bl.append(Buffer.from('abcd')) + bl.append(Buffer.from('efg')) + bl.append(Buffer.from('hi')) + bl.append(Buffer.from('j')) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') + t.equal(bl.slice(3, 6).toString('ascii'), 'def') + t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') + t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') + t.equal(bl.slice(-7, -4).toString('ascii'), 'def') + + t.end() +}) + +tape('multiple bytes from multiple buffer lists', function (t) { + const bl = new BufferListStream() + + bl.append(new BufferListStream([Buffer.from('abcd'), Buffer.from('efg')])) + bl.append(new BufferListStream([Buffer.from('hi'), Buffer.from('j')])) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + + t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') + t.equal(bl.slice(3, 6).toString('ascii'), 'def') + t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') + t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') + + t.end() +}) + +// same data as previous test, just using nested constructors +tape('multiple bytes from crazy nested buffer lists', function (t) { + const bl = new BufferListStream() + + bl.append( + new BufferListStream([ + new BufferListStream([ + new BufferListStream(Buffer.from('abc')), + Buffer.from('d'), + new BufferListStream(Buffer.from('efg')) + ]), + new BufferListStream([Buffer.from('hi')]), + new BufferListStream(Buffer.from('j')) + ]) + ) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + + 
t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') + t.equal(bl.slice(3, 6).toString('ascii'), 'def') + t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') + t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') + + t.end() +}) + +tape('append accepts arrays of Buffers', function (t) { + const bl = new BufferListStream() + + bl.append(Buffer.from('abc')) + bl.append([Buffer.from('def')]) + bl.append([Buffer.from('ghi'), Buffer.from('jkl')]) + bl.append([Buffer.from('mnop'), Buffer.from('qrstu'), Buffer.from('vwxyz')]) + t.equal(bl.length, 26) + t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') + + t.end() +}) + +tape('append accepts arrays of Uint8Arrays', function (t) { + const bl = new BufferListStream() + + bl.append(new Uint8Array([97, 98, 99])) + bl.append([Uint8Array.from([100, 101, 102])]) + bl.append([new Uint8Array([103, 104, 105]), new Uint8Array([106, 107, 108])]) + bl.append([new Uint8Array([109, 110, 111, 112]), new Uint8Array([113, 114, 115, 116, 117]), new Uint8Array([118, 119, 120, 121, 122])]) + t.equal(bl.length, 26) + t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') + + t.end() +}) + +tape('append accepts arrays of BufferLists', function (t) { + const bl = new BufferListStream() + + bl.append(Buffer.from('abc')) + bl.append([new BufferListStream('def')]) + bl.append( + new BufferListStream([Buffer.from('ghi'), new BufferListStream('jkl')]) + ) + bl.append([ + Buffer.from('mnop'), + new BufferListStream([Buffer.from('qrstu'), Buffer.from('vwxyz')]) + ]) + t.equal(bl.length, 26) + t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') + + t.end() +}) + +tape('append chainable', function (t) { + const bl = new BufferListStream() + + t.ok(bl.append(Buffer.from('abcd')) === bl) + t.ok(bl.append([Buffer.from('abcd')]) === bl) + t.ok(bl.append(new BufferListStream(Buffer.from('abcd'))) === bl) + t.ok(bl.append([new BufferListStream(Buffer.from('abcd'))]) === bl) + + t.end() +}) + +tape('append chainable 
(test results)', function (t) { + const bl = new BufferListStream('abc') + .append([new BufferListStream('def')]) + .append( + new BufferListStream([Buffer.from('ghi'), new BufferListStream('jkl')]) + ) + .append([ + Buffer.from('mnop'), + new BufferListStream([Buffer.from('qrstu'), Buffer.from('vwxyz')]) + ]) + + t.equal(bl.length, 26) + t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') + + t.end() +}) + +tape('consuming from multiple buffers', function (t) { + const bl = new BufferListStream() + + bl.append(Buffer.from('abcd')) + bl.append(Buffer.from('efg')) + bl.append(Buffer.from('hi')) + bl.append(Buffer.from('j')) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + + bl.consume(3) + t.equal(bl.length, 7) + t.equal(bl.slice(0, 7).toString('ascii'), 'defghij') + + bl.consume(2) + t.equal(bl.length, 5) + t.equal(bl.slice(0, 5).toString('ascii'), 'fghij') + + bl.consume(1) + t.equal(bl.length, 4) + t.equal(bl.slice(0, 4).toString('ascii'), 'ghij') + + bl.consume(1) + t.equal(bl.length, 3) + t.equal(bl.slice(0, 3).toString('ascii'), 'hij') + + bl.consume(2) + t.equal(bl.length, 1) + t.equal(bl.slice(0, 1).toString('ascii'), 'j') + + t.end() +}) + +tape('complete consumption', function (t) { + /** @type {BufferListStreamWithPrivate} */ + const bl = new BufferListStream() + + bl.append(Buffer.from('a')) + bl.append(Buffer.from('b')) + + bl.consume(2) + + t.equal(bl.length, 0) + t.equal(bl._bufs.length, 0) + + t.end() +}) + +tape('test readUInt8 / readInt8', function (t) { + const buf1 = Buffer.alloc(1) + const buf2 = Buffer.alloc(3) + const buf3 = Buffer.alloc(3) + const bl = new BufferListStream() + + buf1[0] = 0x1 + buf2[1] = 0x3 + buf2[2] = 0x4 + buf3[0] = 0x23 + buf3[1] = 0x42 + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readUInt8(), 0x1) + t.equal(bl.readUInt8(2), 0x3) + t.equal(bl.readInt8(2), 0x3) + t.equal(bl.readUInt8(3), 0x4) + t.equal(bl.readInt8(3), 0x4) + 
t.equal(bl.readUInt8(4), 0x23) + t.equal(bl.readInt8(4), 0x23) + t.equal(bl.readUInt8(5), 0x42) + t.equal(bl.readInt8(5), 0x42) + + t.end() +}) + +tape('test readUInt16LE / readUInt16BE / readInt16LE / readInt16BE', function (t) { + const buf1 = Buffer.alloc(1) + const buf2 = Buffer.alloc(3) + const buf3 = Buffer.alloc(3) + const bl = new BufferListStream() + + buf1[0] = 0x1 + buf2[1] = 0x3 + buf2[2] = 0x4 + buf3[0] = 0x23 + buf3[1] = 0x42 + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readUInt16BE(), 0x0100) + t.equal(bl.readUInt16LE(), 0x0001) + t.equal(bl.readUInt16BE(2), 0x0304) + t.equal(bl.readUInt16LE(2), 0x0403) + t.equal(bl.readInt16BE(2), 0x0304) + t.equal(bl.readInt16LE(2), 0x0403) + t.equal(bl.readUInt16BE(3), 0x0423) + t.equal(bl.readUInt16LE(3), 0x2304) + t.equal(bl.readInt16BE(3), 0x0423) + t.equal(bl.readInt16LE(3), 0x2304) + t.equal(bl.readUInt16BE(4), 0x2342) + t.equal(bl.readUInt16LE(4), 0x4223) + t.equal(bl.readInt16BE(4), 0x2342) + t.equal(bl.readInt16LE(4), 0x4223) + + t.end() +}) + +tape('test readUInt32LE / readUInt32BE / readInt32LE / readInt32BE', function (t) { + const buf1 = Buffer.alloc(1) + const buf2 = Buffer.alloc(3) + const buf3 = Buffer.alloc(3) + const bl = new BufferListStream() + + buf1[0] = 0x1 + buf2[1] = 0x3 + buf2[2] = 0x4 + buf3[0] = 0x23 + buf3[1] = 0x42 + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readUInt32BE(), 0x01000304) + t.equal(bl.readUInt32LE(), 0x04030001) + t.equal(bl.readUInt32BE(2), 0x03042342) + t.equal(bl.readUInt32LE(2), 0x42230403) + t.equal(bl.readInt32BE(2), 0x03042342) + t.equal(bl.readInt32LE(2), 0x42230403) + + t.end() +}) + +tape('test readUIntLE / readUIntBE / readIntLE / readIntBE', function (t) { + const buf1 = Buffer.alloc(1) + const buf2 = Buffer.alloc(3) + const buf3 = Buffer.alloc(3) + const bl = new BufferListStream() + + buf2[0] = 0x2 + buf2[1] = 0x3 + buf2[2] = 0x4 + buf3[0] = 0x23 + buf3[1] = 0x42 + buf3[2] = 0x61 + + bl.append(buf1) + 
bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readUIntBE(1, 1), 0x02) + t.equal(bl.readUIntBE(1, 2), 0x0203) + t.equal(bl.readUIntBE(1, 3), 0x020304) + t.equal(bl.readUIntBE(1, 4), 0x02030423) + t.equal(bl.readUIntBE(1, 5), 0x0203042342) + t.equal(bl.readUIntBE(1, 6), 0x020304234261) + t.equal(bl.readUIntLE(1, 1), 0x02) + t.equal(bl.readUIntLE(1, 2), 0x0302) + t.equal(bl.readUIntLE(1, 3), 0x040302) + t.equal(bl.readUIntLE(1, 4), 0x23040302) + t.equal(bl.readUIntLE(1, 5), 0x4223040302) + t.equal(bl.readUIntLE(1, 6), 0x614223040302) + t.equal(bl.readIntBE(1, 1), 0x02) + t.equal(bl.readIntBE(1, 2), 0x0203) + t.equal(bl.readIntBE(1, 3), 0x020304) + t.equal(bl.readIntBE(1, 4), 0x02030423) + t.equal(bl.readIntBE(1, 5), 0x0203042342) + t.equal(bl.readIntBE(1, 6), 0x020304234261) + t.equal(bl.readIntLE(1, 1), 0x02) + t.equal(bl.readIntLE(1, 2), 0x0302) + t.equal(bl.readIntLE(1, 3), 0x040302) + t.equal(bl.readIntLE(1, 4), 0x23040302) + t.equal(bl.readIntLE(1, 5), 0x4223040302) + t.equal(bl.readIntLE(1, 6), 0x614223040302) + + t.end() +}) + +tape('test readFloatLE / readFloatBE', function (t) { + const buf1 = Buffer.alloc(1) + const buf2 = Buffer.alloc(3) + const buf3 = Buffer.alloc(3) + const bl = new BufferListStream() + + buf1[0] = 0x01 + buf2[1] = 0x00 + buf2[2] = 0x00 + buf3[0] = 0x80 + buf3[1] = 0x3f + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + const canonical = Buffer.concat([buf1, buf2, buf3]) + t.equal(bl.readFloatLE(), canonical.readFloatLE()) + t.equal(bl.readFloatBE(), canonical.readFloatBE()) + t.equal(bl.readFloatLE(2), canonical.readFloatLE(2)) + t.equal(bl.readFloatBE(2), canonical.readFloatBE(2)) + + t.end() +}) + +tape('test readDoubleLE / readDoubleBE', function (t) { + const buf1 = Buffer.alloc(1) + const buf2 = Buffer.alloc(3) + const buf3 = Buffer.alloc(10) + const bl = new BufferListStream() + + buf1[0] = 0x01 + buf2[1] = 0x55 + buf2[2] = 0x55 + buf3[0] = 0x55 + buf3[1] = 0x55 + buf3[2] = 0x55 + buf3[3] = 0x55 + buf3[4] = 0xd5 + 
buf3[5] = 0x3f + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + const canonical = Buffer.concat([buf1, buf2, buf3]) + t.equal(bl.readDoubleBE(), canonical.readDoubleBE()) + t.equal(bl.readDoubleLE(), canonical.readDoubleLE()) + t.equal(bl.readDoubleBE(2), canonical.readDoubleBE(2)) + t.equal(bl.readDoubleLE(2), canonical.readDoubleLE(2)) + + t.end() +}) + +tape('test toString', function (t) { + const bl = new BufferListStream() + + bl.append(Buffer.from('abcd')) + bl.append(Buffer.from('efg')) + bl.append(Buffer.from('hi')) + bl.append(Buffer.from('j')) + + t.equal(bl.toString('ascii', 0, 10), 'abcdefghij') + t.equal(bl.toString('ascii', 3, 10), 'defghij') + t.equal(bl.toString('ascii', 3, 6), 'def') + t.equal(bl.toString('ascii', 3, 8), 'defgh') + t.equal(bl.toString('ascii', 5, 10), 'fghij') + + t.end() +}) + +tape('test toString encoding', function (t) { + const bl = new BufferListStream() + const b = Buffer.from('abcdefghij\xff\x00') + + bl.append(Buffer.from('abcd')) + bl.append(Buffer.from('efg')) + bl.append(Buffer.from('hi')) + bl.append(Buffer.from('j')) + bl.append(Buffer.from('\xff\x00')) + + encodings.forEach(function (enc) { + t.equal(bl.toString(enc), b.toString(enc), enc) + }) + + t.end() +}) + +tape('uninitialized memory', function (t) { + const secret = crypto.randomBytes(256) + for (let i = 0; i < 1e6; i++) { + const clone = Buffer.from(secret) + const bl = new BufferListStream() + bl.append(Buffer.from('a')) + bl.consume(-1024) + const buf = bl.slice(1) + if (buf.indexOf(clone) !== -1) { + t.fail(`Match (at ${i})`) + break + } + } + t.end() +}) + +!process.browser && tape('test stream', function (t) { + const random = crypto.randomBytes(65534) + + const bl = new BufferListStream((err, buf) => { + t.ok(Buffer.isBuffer(buf)) + t.ok(err === null) + t.ok(random.equals(bl.slice())) + t.ok(random.equals(buf.slice())) + + bl.pipe(fs.createWriteStream(path.join(os.tmpdir(), 'bl_test_rnd_out.dat'))) + .on('close', function () { + const rndhash 
= crypto.createHash('md5').update(random).digest('hex') + const md5sum = crypto.createHash('md5') + const s = fs.createReadStream(path.join(os.tmpdir(), 'bl_test_rnd_out.dat')) + + s.on('data', md5sum.update.bind(md5sum)) + s.on('end', function () { + t.equal(rndhash, md5sum.digest('hex'), 'woohoo! correct hash!') + t.end() + }) + }) + }) + + fs.writeFileSync(path.join(os.tmpdir(), 'bl_test_rnd.dat'), random) + fs.createReadStream(path.join(os.tmpdir(), 'bl_test_rnd.dat')).pipe(bl) +}) + +tape('instantiation with Buffer', function (t) { + const buf = crypto.randomBytes(1024) + const buf2 = crypto.randomBytes(1024) + let b = BufferListStream(buf) + + t.equal(buf.toString('hex'), b.slice().toString('hex'), 'same buffer') + b = BufferListStream([buf, buf2]) + t.equal(b.slice().toString('hex'), Buffer.concat([buf, buf2]).toString('hex'), 'same buffer') + + t.end() +}) + +tape('test String appendage', function (t) { + const bl = new BufferListStream() + const b = Buffer.from('abcdefghij\xff\x00') + + bl.append('abcd') + bl.append('efg') + bl.append('hi') + bl.append('j') + bl.append('\xff\x00') + + encodings.forEach(function (enc) { + t.equal(bl.toString(enc), b.toString(enc)) + }) + + t.end() +}) + +tape('test Number appendage', function (t) { + const bl = new BufferListStream() + const b = Buffer.from('1234567890') + + bl.append(1234) + bl.append(567) + bl.append(89) + bl.append(0) + + encodings.forEach(function (enc) { + t.equal(bl.toString(enc), b.toString(enc)) + }) + + t.end() +}) + +tape('write nothing, should get empty buffer', function (t) { + t.plan(3) + BufferListStream(function (err, data) { + t.notOk(err, 'no error') + t.ok(Buffer.isBuffer(data), 'got a buffer') + t.equal(0, data.length, 'got a zero-length buffer') + t.end() + }).end() +}) + +tape('unicode string', function (t) { + t.plan(2) + + const inp1 = '\u2600' + const inp2 = '\u2603' + const exp = inp1 + ' and ' + inp2 + const bl = BufferListStream() + + bl.write(inp1) + bl.write(' and ') + 
bl.write(inp2) + t.equal(exp, bl.toString()) + t.equal(Buffer.from(exp).toString('hex'), bl.toString('hex')) +}) + +tape('should emit finish', function (t) { + const source = BufferListStream() + const dest = BufferListStream() + + source.write('hello') + source.pipe(dest) + + dest.on('finish', function () { + t.equal(dest.toString('utf8'), 'hello') + t.end() + }) +}) + +tape('basic copy', function (t) { + const buf = crypto.randomBytes(1024) + const buf2 = Buffer.alloc(1024) + const b = BufferListStream(buf) + + b.copy(buf2) + t.equal(b.slice().toString('hex'), buf2.toString('hex'), 'same buffer') + + t.end() +}) + +tape('copy after many appends', function (t) { + const buf = crypto.randomBytes(512) + const buf2 = Buffer.alloc(1024) + const b = BufferListStream(buf) + + b.append(buf) + b.copy(buf2) + t.equal(b.slice().toString('hex'), buf2.toString('hex'), 'same buffer') + + t.end() +}) + +tape('copy at a precise position', function (t) { + const buf = crypto.randomBytes(1004) + const buf2 = Buffer.alloc(1024) + const b = BufferListStream(buf) + + b.copy(buf2, 20) + t.equal(b.slice().toString('hex'), buf2.slice(20).toString('hex'), 'same buffer') + + t.end() +}) + +tape('copy starting from a precise location', function (t) { + const buf = crypto.randomBytes(10) + const buf2 = Buffer.alloc(5) + const b = BufferListStream(buf) + + b.copy(buf2, 0, 5) + t.equal(b.slice(5).toString('hex'), buf2.toString('hex'), 'same buffer') + + t.end() +}) + +tape('copy in an interval', function (t) { + const rnd = crypto.randomBytes(10) + const b = BufferListStream(rnd) // put the random bytes there + const actual = Buffer.alloc(3) + const expected = Buffer.alloc(3) + + rnd.copy(expected, 0, 5, 8) + b.copy(actual, 0, 5, 8) + + t.equal(actual.toString('hex'), expected.toString('hex'), 'same buffer') + + t.end() +}) + +tape('copy an interval between two buffers', function (t) { + const buf = crypto.randomBytes(10) + const buf2 = Buffer.alloc(10) + const b = BufferListStream(buf) + + 
b.append(buf) + b.copy(buf2, 0, 5, 15) + + t.equal(b.slice(5, 15).toString('hex'), buf2.toString('hex'), 'same buffer') + + t.end() +}) + +tape('shallow slice across buffer boundaries', function (t) { + const bl = new BufferListStream(['First', 'Second', 'Third']) + + t.equal(bl.shallowSlice(3, 13).toString(), 'stSecondTh') + + t.end() +}) + +tape('shallow slice within single buffer', function (t) { + t.plan(2) + + const bl = new BufferListStream(['First', 'Second', 'Third']) + + t.equal(bl.shallowSlice(5, 10).toString(), 'Secon') + t.equal(bl.shallowSlice(7, 10).toString(), 'con') + + t.end() +}) + +tape('shallow slice single buffer', function (t) { + t.plan(3) + + const bl = new BufferListStream(['First', 'Second', 'Third']) + + t.equal(bl.shallowSlice(0, 5).toString(), 'First') + t.equal(bl.shallowSlice(5, 11).toString(), 'Second') + t.equal(bl.shallowSlice(11, 16).toString(), 'Third') +}) + +tape('shallow slice with negative or omitted indices', function (t) { + t.plan(4) + + const bl = new BufferListStream(['First', 'Second', 'Third']) + + t.equal(bl.shallowSlice().toString(), 'FirstSecondThird') + t.equal(bl.shallowSlice(5).toString(), 'SecondThird') + t.equal(bl.shallowSlice(5, -3).toString(), 'SecondTh') + t.equal(bl.shallowSlice(-8).toString(), 'ondThird') +}) + +tape('shallow slice does not make a copy', function (t) { + t.plan(1) + + const buffers = [Buffer.from('First'), Buffer.from('Second'), Buffer.from('Third')] + const bl = new BufferListStream(buffers).shallowSlice(5, -3) + + buffers[1].fill('h') + buffers[2].fill('h') + + t.equal(bl.toString(), 'hhhhhhhh') +}) + +tape('shallow slice with 0 length', function (t) { + t.plan(1) + + const buffers = [Buffer.from('First'), Buffer.from('Second'), Buffer.from('Third')] + const bl = (new BufferListStream(buffers)).shallowSlice(0, 0) + + t.equal(bl.length, 0) +}) + +tape('shallow slice with 0 length from middle', function (t) { + t.plan(1) + + const buffers = [Buffer.from('First'), Buffer.from('Second'), 
Buffer.from('Third')] + const bl = (new BufferListStream(buffers)).shallowSlice(10, 10) + + t.equal(bl.length, 0) +}) + +tape('duplicate', function (t) { + t.plan(2) + + const bl = new BufferListStream('abcdefghij\xff\x00') + const dup = bl.duplicate() + + t.equal(bl.prototype, dup.prototype) + t.equal(bl.toString('hex'), dup.toString('hex')) +}) + +tape('destroy no pipe', function (t) { + t.plan(2) + + /** @type {BufferListStreamWithPrivate} */ + const bl = new BufferListStream('alsdkfja;lsdkfja;lsdk') + + bl.destroy() + + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) +}) + +tape('destroy with error', function (t) { + t.plan(3) + + /** @type {BufferListStreamWithPrivate} */ + const bl = new BufferListStream('alsdkfja;lsdkfja;lsdk') + const err = new Error('kaboom') + + bl.destroy(err) + bl.on('error', function (_err) { + t.equal(_err, err) + }) + + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) +}) + +!process.browser && tape('destroy with pipe before read end', function (t) { + t.plan(2) + + /** @type {BufferListStreamWithPrivate} */ + const bl = new BufferListStream() + fs.createReadStream(path.join(__dirname, '/test.js')) + .pipe(bl) + + bl.destroy() + + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) +}) + +!process.browser && tape('destroy with pipe before read end with race', function (t) { + t.plan(2) + + /** @type {BufferListStreamWithPrivate} */ + const bl = new BufferListStream() + + fs.createReadStream(path.join(__dirname, '/test.js')) + .pipe(bl) + + setTimeout(function () { + bl.destroy() + setTimeout(function () { + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) + }, 500) + }, 500) +}) + +!process.browser && tape('destroy with pipe after read end', function (t) { + t.plan(2) + + /** @type {BufferListStreamWithPrivate} */ + const bl = new BufferListStream() + fs.createReadStream(path.join(__dirname, '/test.js')) + .on('end', onEnd) + .pipe(bl) + + function onEnd () { + bl.destroy() + + t.equal(bl._bufs.length, 0) + 
t.equal(bl.length, 0) + } +}) + +!process.browser && tape('destroy with pipe while writing to a destination', function (t) { + t.plan(4) + + /** @type {BufferListStreamWithPrivate} */ + const bl = new BufferListStream() + const ds = new BufferListStream() + + fs.createReadStream(path.join(__dirname, '/test.js')) + .on('end', onEnd) + .pipe(bl) + + function onEnd () { + bl.pipe(ds) + + setTimeout(function () { + bl.destroy() + + t.equals(bl._bufs.length, 0) + t.equals(bl.length, 0) + + ds.destroy() + + t.equals(bl._bufs.length, 0) + t.equals(bl.length, 0) + }, 100) + } +}) + +!process.browser && tape('handle error', function (t) { + t.plan(2) + + fs.createReadStream('/does/not/exist').pipe(BufferListStream(function (err, data) { + t.ok(err instanceof Error, 'has error') + t.notOk(data, 'no data') + })) +}) diff --git a/node_modules/buffer/AUTHORS.md b/node_modules/buffer/AUTHORS.md new file mode 100644 index 0000000000..468aa1908c --- /dev/null +++ b/node_modules/buffer/AUTHORS.md @@ -0,0 +1,73 @@ +# Authors + +#### Ordered by first contribution. 
+ +- Romain Beauxis (toots@rastageeks.org) +- Tobias Koppers (tobias.koppers@googlemail.com) +- Janus (ysangkok@gmail.com) +- Rainer Dreyer (rdrey1@gmail.com) +- Tõnis Tiigi (tonistiigi@gmail.com) +- James Halliday (mail@substack.net) +- Michael Williamson (mike@zwobble.org) +- elliottcable (github@elliottcable.name) +- rafael (rvalle@livelens.net) +- Andrew Kelley (superjoe30@gmail.com) +- Andreas Madsen (amwebdk@gmail.com) +- Mike Brevoort (mike.brevoort@pearson.com) +- Brian White (mscdex@mscdex.net) +- Feross Aboukhadijeh (feross@feross.org) +- Ruben Verborgh (ruben@verborgh.org) +- eliang (eliang.cs@gmail.com) +- Jesse Tane (jesse.tane@gmail.com) +- Alfonso Boza (alfonso@cloud.com) +- Mathias Buus (mathiasbuus@gmail.com) +- Devon Govett (devongovett@gmail.com) +- Daniel Cousens (github@dcousens.com) +- Joseph Dykstra (josephdykstra@gmail.com) +- Parsha Pourkhomami (parshap+git@gmail.com) +- Damjan Košir (damjan.kosir@gmail.com) +- daverayment (dave.rayment@gmail.com) +- kawanet (u-suke@kawa.net) +- Linus Unnebäck (linus@folkdatorn.se) +- Nolan Lawson (nolan.lawson@gmail.com) +- Calvin Metcalf (calvin.metcalf@gmail.com) +- Koki Takahashi (hakatasiloving@gmail.com) +- Guy Bedford (guybedford@gmail.com) +- Jan Schär (jscissr@gmail.com) +- RaulTsc (tomescu.raul@gmail.com) +- Matthieu Monsch (monsch@alum.mit.edu) +- Dan Ehrenberg (littledan@chromium.org) +- Kirill Fomichev (fanatid@ya.ru) +- Yusuke Kawasaki (u-suke@kawa.net) +- DC (dcposch@dcpos.ch) +- John-David Dalton (john.david.dalton@gmail.com) +- adventure-yunfei (adventure030@gmail.com) +- Emil Bay (github@tixz.dk) +- Sam Sudar (sudar.sam@gmail.com) +- Volker Mische (volker.mische@gmail.com) +- David Walton (support@geekstocks.com) +- Сковорода Никита Андреевич (chalkerx@gmail.com) +- greenkeeper[bot] (greenkeeper[bot]@users.noreply.github.com) +- ukstv (sergey.ukustov@machinomy.com) +- Renée Kooi (renee@kooi.me) +- ranbochen (ranbochen@qq.com) +- Vladimir Borovik (bobahbdb@gmail.com) +- greenkeeper[bot] 
(23040076+greenkeeper[bot]@users.noreply.github.com) +- kumavis (aaron@kumavis.me) +- Sergey Ukustov (sergey.ukustov@machinomy.com) +- Fei Liu (liu.feiwood@gmail.com) +- Blaine Bublitz (blaine.bublitz@gmail.com) +- clement (clement@seald.io) +- Koushik Dutta (koushd@gmail.com) +- Jordan Harband (ljharb@gmail.com) +- Niklas Mischkulnig (mischnic@users.noreply.github.com) +- Nikolai Vavilov (vvnicholas@gmail.com) +- Fedor Nezhivoi (gyzerok@users.noreply.github.com) +- shuse2 (shus.toda@gmail.com) +- Peter Newman (peternewman@users.noreply.github.com) +- mathmakgakpak (44949126+mathmakgakpak@users.noreply.github.com) +- jkkang (jkkang@smartauth.kr) +- Deklan Webster (deklanw@gmail.com) +- Martin Heidegger (martin.heidegger@gmail.com) + +#### Generated by bin/update-authors.sh. diff --git a/node_modules/buffer/LICENSE b/node_modules/buffer/LICENSE new file mode 100644 index 0000000000..d6bf75dcf1 --- /dev/null +++ b/node_modules/buffer/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Feross Aboukhadijeh, and other contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/node_modules/buffer/README.md b/node_modules/buffer/README.md new file mode 100644 index 0000000000..451e23576b --- /dev/null +++ b/node_modules/buffer/README.md @@ -0,0 +1,410 @@ +# buffer [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url] [![javascript style guide][standard-image]][standard-url] + +[travis-image]: https://img.shields.io/travis/feross/buffer/master.svg +[travis-url]: https://travis-ci.org/feross/buffer +[npm-image]: https://img.shields.io/npm/v/buffer.svg +[npm-url]: https://npmjs.org/package/buffer +[downloads-image]: https://img.shields.io/npm/dm/buffer.svg +[downloads-url]: https://npmjs.org/package/buffer +[standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg +[standard-url]: https://standardjs.com + +#### The buffer module from [node.js](https://nodejs.org/), for the browser. + +[![saucelabs][saucelabs-image]][saucelabs-url] + +[saucelabs-image]: https://saucelabs.com/browser-matrix/buffer.svg +[saucelabs-url]: https://saucelabs.com/u/buffer + +With [browserify](http://browserify.org), simply `require('buffer')` or use the `Buffer` global and you will get this module. + +The goal is to provide an API that is 100% identical to +[node's Buffer API](https://nodejs.org/api/buffer.html). Read the +[official docs](https://nodejs.org/api/buffer.html) for the full list of properties, +instance methods, and class methods that are supported. + +## features + +- Manipulate binary data like a boss, in all browsers! +- Super fast. 
Backed by Typed Arrays (`Uint8Array`/`ArrayBuffer`, not `Object`) +- Extremely small bundle size (**6.75KB minified + gzipped**, 51.9KB with comments) +- Excellent browser support (Chrome, Firefox, Edge, Safari 11+, iOS 11+, Android, etc.) +- Preserves Node API exactly, with one minor difference (see below) +- Square-bracket `buf[4]` notation works! +- Does not modify any browser prototypes or put anything on `window` +- Comprehensive test suite (including all buffer tests from node.js core) + +## install + +To use this module directly (without browserify), install it: + +```bash +npm install buffer +``` + +This module was previously called **native-buffer-browserify**, but please use **buffer** +from now on. + +If you do not use a bundler, you can use the [standalone script](https://bundle.run/buffer). + +## usage + +The module's API is identical to node's `Buffer` API. Read the +[official docs](https://nodejs.org/api/buffer.html) for the full list of properties, +instance methods, and class methods that are supported. + +As mentioned above, `require('buffer')` or use the `Buffer` global with +[browserify](http://browserify.org) and this module will automatically be included +in your bundle. Almost any npm module will work in the browser, even if it assumes that +the node `Buffer` API will be available. + +To depend on this module explicitly (without browserify), require it like this: + +```js +var Buffer = require('buffer/').Buffer // note: the trailing slash is important! +``` + +To require this module explicitly, use `require('buffer/')` which tells the node.js module +lookup algorithm (also used by browserify) to use the **npm module** named `buffer` +instead of the **node.js core** module named `buffer`! + + +## how does it work? + +The Buffer constructor returns instances of `Uint8Array` that have their prototype +changed to `Buffer.prototype`. 
Furthermore, `Buffer` is a subclass of `Uint8Array`, +so the returned instances will have all the node `Buffer` methods and the +`Uint8Array` methods. Square bracket notation works as expected -- it returns a +single octet. + +The `Uint8Array` prototype remains unmodified. + + +## tracking the latest node api + +This module tracks the Buffer API in the latest (unstable) version of node.js. The Buffer +API is considered **stable** in the +[node stability index](https://nodejs.org/docs/latest/api/documentation.html#documentation_stability_index), +so it is unlikely that there will ever be breaking changes. +Nonetheless, when/if the Buffer API changes in node, this module's API will change +accordingly. + +## related packages + +- [`buffer-reverse`](https://www.npmjs.com/package/buffer-reverse) - Reverse a buffer +- [`buffer-xor`](https://www.npmjs.com/package/buffer-xor) - Bitwise xor a buffer +- [`is-buffer`](https://www.npmjs.com/package/is-buffer) - Determine if an object is a Buffer without including the whole `Buffer` package + +## conversion packages + +### convert typed array to buffer + +Use [`typedarray-to-buffer`](https://www.npmjs.com/package/typedarray-to-buffer) to convert any kind of typed array to a `Buffer`. Does not perform a copy, so it's super fast. + +### convert buffer to typed array + +`Buffer` is a subclass of `Uint8Array` (which is a typed array). So there is no need to explicitly convert to typed array. Just use the buffer as a `Uint8Array`. + +### convert blob to buffer + +Use [`blob-to-buffer`](https://www.npmjs.com/package/blob-to-buffer) to convert a `Blob` to a `Buffer`. + +### convert buffer to blob + +To convert a `Buffer` to a `Blob`, use the `Blob` constructor: + +```js +var blob = new Blob([ buffer ]) +``` + +Optionally, specify a mimetype: + +```js +var blob = new Blob([ buffer ], { type: 'text/html' }) +``` + +### convert arraybuffer to buffer + +To convert an `ArrayBuffer` to a `Buffer`, use the `Buffer.from` function. 
Does not perform a copy, so it's super fast. + +```js +var buffer = Buffer.from(arrayBuffer) +``` + +### convert buffer to arraybuffer + +To convert a `Buffer` to an `ArrayBuffer`, use the `.buffer` property (which is present on all `Uint8Array` objects): + +```js +var arrayBuffer = buffer.buffer.slice( + buffer.byteOffset, buffer.byteOffset + buffer.byteLength +) +``` + +Alternatively, use the [`to-arraybuffer`](https://www.npmjs.com/package/to-arraybuffer) module. + +## performance + +See perf tests in `/perf`. + +`BrowserBuffer` is the browser `buffer` module (this repo). `Uint8Array` is included as a +sanity check (since `BrowserBuffer` uses `Uint8Array` under the hood, `Uint8Array` will +always be at least a bit faster). Finally, `NodeBuffer` is the node.js buffer module, +which is included to compare against. + +NOTE: Performance has improved since these benchmarks were taken. PR welcome to update the README. + +### Chrome 38 + +| Method | Operations | Accuracy | Sampled | Fastest | +|:-------|:-----------|:---------|:--------|:-------:| +| BrowserBuffer#bracket-notation | 11,457,464 ops/sec | ±0.86% | 66 | ✓ | +| Uint8Array#bracket-notation | 10,824,332 ops/sec | ±0.74% | 65 | | +| | | | | +| BrowserBuffer#concat | 450,532 ops/sec | ±0.76% | 68 | | +| Uint8Array#concat | 1,368,911 ops/sec | ±1.50% | 62 | ✓ | +| | | | | +| BrowserBuffer#copy(16000) | 903,001 ops/sec | ±0.96% | 67 | | +| Uint8Array#copy(16000) | 1,422,441 ops/sec | ±1.04% | 66 | ✓ | +| | | | | +| BrowserBuffer#copy(16) | 11,431,358 ops/sec | ±0.46% | 69 | | +| Uint8Array#copy(16) | 13,944,163 ops/sec | ±1.12% | 68 | ✓ | +| | | | | +| BrowserBuffer#new(16000) | 106,329 ops/sec | ±6.70% | 44 | | +| Uint8Array#new(16000) | 131,001 ops/sec | ±2.85% | 31 | ✓ | +| | | | | +| BrowserBuffer#new(16) | 1,554,491 ops/sec | ±1.60% | 65 | | +| Uint8Array#new(16) | 6,623,930 ops/sec | ±1.66% | 65 | ✓ | +| | | | | +| BrowserBuffer#readDoubleBE | 112,830 ops/sec | ±0.51% | 69 | ✓ | +| DataView#getFloat64 | 
93,500 ops/sec | ±0.57% | 68 | | +| | | | | +| BrowserBuffer#readFloatBE | 146,678 ops/sec | ±0.95% | 68 | ✓ | +| DataView#getFloat32 | 99,311 ops/sec | ±0.41% | 67 | | +| | | | | +| BrowserBuffer#readUInt32LE | 843,214 ops/sec | ±0.70% | 69 | ✓ | +| DataView#getUint32 | 103,024 ops/sec | ±0.64% | 67 | | +| | | | | +| BrowserBuffer#slice | 1,013,941 ops/sec | ±0.75% | 67 | | +| Uint8Array#subarray | 1,903,928 ops/sec | ±0.53% | 67 | ✓ | +| | | | | +| BrowserBuffer#writeFloatBE | 61,387 ops/sec | ±0.90% | 67 | | +| DataView#setFloat32 | 141,249 ops/sec | ±0.40% | 66 | ✓ | + + +### Firefox 33 + +| Method | Operations | Accuracy | Sampled | Fastest | +|:-------|:-----------|:---------|:--------|:-------:| +| BrowserBuffer#bracket-notation | 20,800,421 ops/sec | ±1.84% | 60 | | +| Uint8Array#bracket-notation | 20,826,235 ops/sec | ±2.02% | 61 | ✓ | +| | | | | +| BrowserBuffer#concat | 153,076 ops/sec | ±2.32% | 61 | | +| Uint8Array#concat | 1,255,674 ops/sec | ±8.65% | 52 | ✓ | +| | | | | +| BrowserBuffer#copy(16000) | 1,105,312 ops/sec | ±1.16% | 63 | | +| Uint8Array#copy(16000) | 1,615,911 ops/sec | ±0.55% | 66 | ✓ | +| | | | | +| BrowserBuffer#copy(16) | 16,357,599 ops/sec | ±0.73% | 68 | | +| Uint8Array#copy(16) | 31,436,281 ops/sec | ±1.05% | 68 | ✓ | +| | | | | +| BrowserBuffer#new(16000) | 52,995 ops/sec | ±6.01% | 35 | | +| Uint8Array#new(16000) | 87,686 ops/sec | ±5.68% | 45 | ✓ | +| | | | | +| BrowserBuffer#new(16) | 252,031 ops/sec | ±1.61% | 66 | | +| Uint8Array#new(16) | 8,477,026 ops/sec | ±0.49% | 68 | ✓ | +| | | | | +| BrowserBuffer#readDoubleBE | 99,871 ops/sec | ±0.41% | 69 | | +| DataView#getFloat64 | 285,663 ops/sec | ±0.70% | 68 | ✓ | +| | | | | +| BrowserBuffer#readFloatBE | 115,540 ops/sec | ±0.42% | 69 | | +| DataView#getFloat32 | 288,722 ops/sec | ±0.82% | 68 | ✓ | +| | | | | +| BrowserBuffer#readUInt32LE | 633,926 ops/sec | ±1.08% | 67 | ✓ | +| DataView#getUint32 | 294,808 ops/sec | ±0.79% | 64 | | +| | | | | +| BrowserBuffer#slice | 349,425 
ops/sec | ±0.46% | 69 | | +| Uint8Array#subarray | 5,965,819 ops/sec | ±0.60% | 65 | ✓ | +| | | | | +| BrowserBuffer#writeFloatBE | 59,980 ops/sec | ±0.41% | 67 | | +| DataView#setFloat32 | 317,634 ops/sec | ±0.63% | 68 | ✓ | + +### Safari 8 + +| Method | Operations | Accuracy | Sampled | Fastest | +|:-------|:-----------|:---------|:--------|:-------:| +| BrowserBuffer#bracket-notation | 10,279,729 ops/sec | ±2.25% | 56 | ✓ | +| Uint8Array#bracket-notation | 10,030,767 ops/sec | ±2.23% | 59 | | +| | | | | +| BrowserBuffer#concat | 144,138 ops/sec | ±1.38% | 65 | | +| Uint8Array#concat | 4,950,764 ops/sec | ±1.70% | 63 | ✓ | +| | | | | +| BrowserBuffer#copy(16000) | 1,058,548 ops/sec | ±1.51% | 64 | | +| Uint8Array#copy(16000) | 1,409,666 ops/sec | ±1.17% | 65 | ✓ | +| | | | | +| BrowserBuffer#copy(16) | 6,282,529 ops/sec | ±1.88% | 58 | | +| Uint8Array#copy(16) | 11,907,128 ops/sec | ±2.87% | 58 | ✓ | +| | | | | +| BrowserBuffer#new(16000) | 101,663 ops/sec | ±3.89% | 57 | | +| Uint8Array#new(16000) | 22,050,818 ops/sec | ±6.51% | 46 | ✓ | +| | | | | +| BrowserBuffer#new(16) | 176,072 ops/sec | ±2.13% | 64 | | +| Uint8Array#new(16) | 24,385,731 ops/sec | ±5.01% | 51 | ✓ | +| | | | | +| BrowserBuffer#readDoubleBE | 41,341 ops/sec | ±1.06% | 67 | | +| DataView#getFloat64 | 322,280 ops/sec | ±0.84% | 68 | ✓ | +| | | | | +| BrowserBuffer#readFloatBE | 46,141 ops/sec | ±1.06% | 65 | | +| DataView#getFloat32 | 337,025 ops/sec | ±0.43% | 69 | ✓ | +| | | | | +| BrowserBuffer#readUInt32LE | 151,551 ops/sec | ±1.02% | 66 | | +| DataView#getUint32 | 308,278 ops/sec | ±0.94% | 67 | ✓ | +| | | | | +| BrowserBuffer#slice | 197,365 ops/sec | ±0.95% | 66 | | +| Uint8Array#subarray | 9,558,024 ops/sec | ±3.08% | 58 | ✓ | +| | | | | +| BrowserBuffer#writeFloatBE | 17,518 ops/sec | ±1.03% | 63 | | +| DataView#setFloat32 | 319,751 ops/sec | ±0.48% | 68 | ✓ | + + +### Node 0.11.14 + +| Method | Operations | Accuracy | Sampled | Fastest | 
+|:-------|:-----------|:---------|:--------|:-------:| +| BrowserBuffer#bracket-notation | 10,489,828 ops/sec | ±3.25% | 90 | | +| Uint8Array#bracket-notation | 10,534,884 ops/sec | ±0.81% | 92 | ✓ | +| NodeBuffer#bracket-notation | 10,389,910 ops/sec | ±0.97% | 87 | | +| | | | | +| BrowserBuffer#concat | 487,830 ops/sec | ±2.58% | 88 | | +| Uint8Array#concat | 1,814,327 ops/sec | ±1.28% | 88 | ✓ | +| NodeBuffer#concat | 1,636,523 ops/sec | ±1.88% | 73 | | +| | | | | +| BrowserBuffer#copy(16000) | 1,073,665 ops/sec | ±0.77% | 90 | | +| Uint8Array#copy(16000) | 1,348,517 ops/sec | ±0.84% | 89 | ✓ | +| NodeBuffer#copy(16000) | 1,289,533 ops/sec | ±0.82% | 93 | | +| | | | | +| BrowserBuffer#copy(16) | 12,782,706 ops/sec | ±0.74% | 85 | | +| Uint8Array#copy(16) | 14,180,427 ops/sec | ±0.93% | 92 | ✓ | +| NodeBuffer#copy(16) | 11,083,134 ops/sec | ±1.06% | 89 | | +| | | | | +| BrowserBuffer#new(16000) | 141,678 ops/sec | ±3.30% | 67 | | +| Uint8Array#new(16000) | 161,491 ops/sec | ±2.96% | 60 | | +| NodeBuffer#new(16000) | 292,699 ops/sec | ±3.20% | 55 | ✓ | +| | | | | +| BrowserBuffer#new(16) | 1,655,466 ops/sec | ±2.41% | 82 | | +| Uint8Array#new(16) | 14,399,926 ops/sec | ±0.91% | 94 | ✓ | +| NodeBuffer#new(16) | 3,894,696 ops/sec | ±0.88% | 92 | | +| | | | | +| BrowserBuffer#readDoubleBE | 109,582 ops/sec | ±0.75% | 93 | ✓ | +| DataView#getFloat64 | 91,235 ops/sec | ±0.81% | 90 | | +| NodeBuffer#readDoubleBE | 88,593 ops/sec | ±0.96% | 81 | | +| | | | | +| BrowserBuffer#readFloatBE | 139,854 ops/sec | ±1.03% | 85 | ✓ | +| DataView#getFloat32 | 98,744 ops/sec | ±0.80% | 89 | | +| NodeBuffer#readFloatBE | 92,769 ops/sec | ±0.94% | 93 | | +| | | | | +| BrowserBuffer#readUInt32LE | 710,861 ops/sec | ±0.82% | 92 | | +| DataView#getUint32 | 117,893 ops/sec | ±0.84% | 91 | | +| NodeBuffer#readUInt32LE | 851,412 ops/sec | ±0.72% | 93 | ✓ | +| | | | | +| BrowserBuffer#slice | 1,673,877 ops/sec | ±0.73% | 94 | | +| Uint8Array#subarray | 6,919,243 ops/sec | ±0.67% | 90 | ✓ | 
+| NodeBuffer#slice | 4,617,604 ops/sec | ±0.79% | 93 | | +| | | | | +| BrowserBuffer#writeFloatBE | 66,011 ops/sec | ±0.75% | 93 | | +| DataView#setFloat32 | 127,760 ops/sec | ±0.72% | 93 | ✓ | +| NodeBuffer#writeFloatBE | 103,352 ops/sec | ±0.83% | 93 | | + +### iojs 1.8.1 + +| Method | Operations | Accuracy | Sampled | Fastest | +|:-------|:-----------|:---------|:--------|:-------:| +| BrowserBuffer#bracket-notation | 10,990,488 ops/sec | ±1.11% | 91 | | +| Uint8Array#bracket-notation | 11,268,757 ops/sec | ±0.65% | 97 | | +| NodeBuffer#bracket-notation | 11,353,260 ops/sec | ±0.83% | 94 | ✓ | +| | | | | +| BrowserBuffer#concat | 378,954 ops/sec | ±0.74% | 94 | | +| Uint8Array#concat | 1,358,288 ops/sec | ±0.97% | 87 | | +| NodeBuffer#concat | 1,934,050 ops/sec | ±1.11% | 78 | ✓ | +| | | | | +| BrowserBuffer#copy(16000) | 894,538 ops/sec | ±0.56% | 84 | | +| Uint8Array#copy(16000) | 1,442,656 ops/sec | ±0.71% | 96 | | +| NodeBuffer#copy(16000) | 1,457,898 ops/sec | ±0.53% | 92 | ✓ | +| | | | | +| BrowserBuffer#copy(16) | 12,870,457 ops/sec | ±0.67% | 95 | | +| Uint8Array#copy(16) | 16,643,989 ops/sec | ±0.61% | 93 | ✓ | +| NodeBuffer#copy(16) | 14,885,848 ops/sec | ±0.74% | 94 | | +| | | | | +| BrowserBuffer#new(16000) | 109,264 ops/sec | ±4.21% | 63 | | +| Uint8Array#new(16000) | 138,916 ops/sec | ±1.87% | 61 | | +| NodeBuffer#new(16000) | 281,449 ops/sec | ±3.58% | 51 | ✓ | +| | | | | +| BrowserBuffer#new(16) | 1,362,935 ops/sec | ±0.56% | 99 | | +| Uint8Array#new(16) | 6,193,090 ops/sec | ±0.64% | 95 | ✓ | +| NodeBuffer#new(16) | 4,745,425 ops/sec | ±1.56% | 90 | | +| | | | | +| BrowserBuffer#readDoubleBE | 118,127 ops/sec | ±0.59% | 93 | ✓ | +| DataView#getFloat64 | 107,332 ops/sec | ±0.65% | 91 | | +| NodeBuffer#readDoubleBE | 116,274 ops/sec | ±0.94% | 95 | | +| | | | | +| BrowserBuffer#readFloatBE | 150,326 ops/sec | ±0.58% | 95 | ✓ | +| DataView#getFloat32 | 110,541 ops/sec | ±0.57% | 98 | | +| NodeBuffer#readFloatBE | 121,599 ops/sec | ±0.60% | 87 | | 
+| | | | | +| BrowserBuffer#readUInt32LE | 814,147 ops/sec | ±0.62% | 93 | | +| DataView#getUint32 | 137,592 ops/sec | ±0.64% | 90 | | +| NodeBuffer#readUInt32LE | 931,650 ops/sec | ±0.71% | 96 | ✓ | +| | | | | +| BrowserBuffer#slice | 878,590 ops/sec | ±0.68% | 93 | | +| Uint8Array#subarray | 2,843,308 ops/sec | ±1.02% | 90 | | +| NodeBuffer#slice | 4,998,316 ops/sec | ±0.68% | 90 | ✓ | +| | | | | +| BrowserBuffer#writeFloatBE | 65,927 ops/sec | ±0.74% | 93 | | +| DataView#setFloat32 | 139,823 ops/sec | ±0.97% | 89 | ✓ | +| NodeBuffer#writeFloatBE | 135,763 ops/sec | ±0.65% | 96 | | +| | | | | + +## Testing the project + +First, install the project: + + npm install + +Then, to run tests in Node.js, run: + + npm run test-node + +To test locally in a browser, you can run: + + npm run test-browser-es5-local # For ES5 browsers that don't support ES6 + npm run test-browser-es6-local # For ES6 compliant browsers + +This will print out a URL that you can then open in a browser to run the tests, using [airtap](https://www.npmjs.com/package/airtap). + +To run automated browser tests using Saucelabs, ensure that your `SAUCE_USERNAME` and `SAUCE_ACCESS_KEY` environment variables are set, then run: + + npm test + +This is what's run in Travis, to check against various browsers. The list of browsers is kept in the `bin/airtap-es5.yml` and `bin/airtap-es6.yml` files. + +## JavaScript Standard Style + +This module uses [JavaScript Standard Style](https://github.com/feross/standard). + +[![JavaScript Style Guide](https://cdn.rawgit.com/feross/standard/master/badge.svg)](https://github.com/feross/standard) + +To test that the code conforms to the style, `npm install` and run: + + ./node_modules/.bin/standard + +## credit + +This was originally forked from [buffer-browserify](https://github.com/toots/buffer-browserify). + +## Security Policies and Procedures + +The `buffer` team and community take all security bugs in `buffer` seriously. 
Please see our [security policies and procedures](https://github.com/feross/security) document to learn how to report issues. + +## license + +MIT. Copyright (C) [Feross Aboukhadijeh](http://feross.org), and other contributors. Originally forked from an MIT-licensed module by Romain Beauxis. diff --git a/node_modules/buffer/index.d.ts b/node_modules/buffer/index.d.ts new file mode 100644 index 0000000000..07096a2f72 --- /dev/null +++ b/node_modules/buffer/index.d.ts @@ -0,0 +1,194 @@ +export class Buffer extends Uint8Array { + length: number + write(string: string, offset?: number, length?: number, encoding?: string): number; + toString(encoding?: string, start?: number, end?: number): string; + toJSON(): { type: 'Buffer', data: any[] }; + equals(otherBuffer: Buffer): boolean; + compare(otherBuffer: Uint8Array, targetStart?: number, targetEnd?: number, sourceStart?: number, sourceEnd?: number): number; + copy(targetBuffer: Buffer, targetStart?: number, sourceStart?: number, sourceEnd?: number): number; + slice(start?: number, end?: number): Buffer; + writeUIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; + writeUIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; + writeIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; + writeIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; + readUIntLE(offset: number, byteLength: number, noAssert?: boolean): number; + readUIntBE(offset: number, byteLength: number, noAssert?: boolean): number; + readIntLE(offset: number, byteLength: number, noAssert?: boolean): number; + readIntBE(offset: number, byteLength: number, noAssert?: boolean): number; + readUInt8(offset: number, noAssert?: boolean): number; + readUInt16LE(offset: number, noAssert?: boolean): number; + readUInt16BE(offset: number, noAssert?: boolean): number; + readUInt32LE(offset: number, noAssert?: boolean): number; + 
readUInt32BE(offset: number, noAssert?: boolean): number; + readBigUInt64LE(offset: number): BigInt; + readBigUInt64BE(offset: number): BigInt; + readInt8(offset: number, noAssert?: boolean): number; + readInt16LE(offset: number, noAssert?: boolean): number; + readInt16BE(offset: number, noAssert?: boolean): number; + readInt32LE(offset: number, noAssert?: boolean): number; + readInt32BE(offset: number, noAssert?: boolean): number; + readBigInt64LE(offset: number): BigInt; + readBigInt64BE(offset: number): BigInt; + readFloatLE(offset: number, noAssert?: boolean): number; + readFloatBE(offset: number, noAssert?: boolean): number; + readDoubleLE(offset: number, noAssert?: boolean): number; + readDoubleBE(offset: number, noAssert?: boolean): number; + reverse(): this; + swap16(): Buffer; + swap32(): Buffer; + swap64(): Buffer; + writeUInt8(value: number, offset: number, noAssert?: boolean): number; + writeUInt16LE(value: number, offset: number, noAssert?: boolean): number; + writeUInt16BE(value: number, offset: number, noAssert?: boolean): number; + writeUInt32LE(value: number, offset: number, noAssert?: boolean): number; + writeUInt32BE(value: number, offset: number, noAssert?: boolean): number; + writeBigUInt64LE(value: number, offset: number): BigInt; + writeBigUInt64BE(value: number, offset: number): BigInt; + writeInt8(value: number, offset: number, noAssert?: boolean): number; + writeInt16LE(value: number, offset: number, noAssert?: boolean): number; + writeInt16BE(value: number, offset: number, noAssert?: boolean): number; + writeInt32LE(value: number, offset: number, noAssert?: boolean): number; + writeInt32BE(value: number, offset: number, noAssert?: boolean): number; + writeBigInt64LE(value: number, offset: number): BigInt; + writeBigInt64BE(value: number, offset: number): BigInt; + writeFloatLE(value: number, offset: number, noAssert?: boolean): number; + writeFloatBE(value: number, offset: number, noAssert?: boolean): number; + writeDoubleLE(value: 
number, offset: number, noAssert?: boolean): number; + writeDoubleBE(value: number, offset: number, noAssert?: boolean): number; + fill(value: any, offset?: number, end?: number): this; + indexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number; + lastIndexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number; + includes(value: string | number | Buffer, byteOffset?: number, encoding?: string): boolean; + + /** + * Allocates a new buffer containing the given {str}. + * + * @param str String to store in buffer. + * @param encoding encoding to use, optional. Default is 'utf8' + */ + constructor (str: string, encoding?: string); + /** + * Allocates a new buffer of {size} octets. + * + * @param size count of octets to allocate. + */ + constructor (size: number); + /** + * Allocates a new buffer containing the given {array} of octets. + * + * @param array The octets to store. + */ + constructor (array: Uint8Array); + /** + * Produces a Buffer backed by the same allocated memory as + * the given {ArrayBuffer}. + * + * + * @param arrayBuffer The ArrayBuffer with which to share memory. + */ + constructor (arrayBuffer: ArrayBuffer); + /** + * Allocates a new buffer containing the given {array} of octets. + * + * @param array The octets to store. + */ + constructor (array: any[]); + /** + * Copies the passed {buffer} data onto a new {Buffer} instance. + * + * @param buffer The buffer to copy. + */ + constructor (buffer: Buffer); + prototype: Buffer; + /** + * Allocates a new Buffer using an {array} of octets. + * + * @param array + */ + static from(array: any[]): Buffer; + /** + * When passed a reference to the .buffer property of a TypedArray instance, + * the newly created Buffer will share the same allocated memory as the TypedArray. + * The optional {byteOffset} and {length} arguments specify a memory range + * within the {arrayBuffer} that will be shared by the Buffer. 
+ * + * @param arrayBuffer The .buffer property of a TypedArray or a new ArrayBuffer() + * @param byteOffset + * @param length + */ + static from(arrayBuffer: ArrayBuffer, byteOffset?: number, length?: number): Buffer; + /** + * Copies the passed {buffer} data onto a new Buffer instance. + * + * @param buffer + */ + static from(buffer: Buffer | Uint8Array): Buffer; + /** + * Creates a new Buffer containing the given JavaScript string {str}. + * If provided, the {encoding} parameter identifies the character encoding. + * If not provided, {encoding} defaults to 'utf8'. + * + * @param str + */ + static from(str: string, encoding?: string): Buffer; + /** + * Returns true if {obj} is a Buffer + * + * @param obj object to test. + */ + static isBuffer(obj: any): obj is Buffer; + /** + * Returns true if {encoding} is a valid encoding argument. + * Valid string encodings in Node 0.12: 'ascii'|'utf8'|'utf16le'|'ucs2'(alias of 'utf16le')|'base64'|'binary'(deprecated)|'hex' + * + * @param encoding string to test. + */ + static isEncoding(encoding: string): boolean; + /** + * Gives the actual byte length of a string. encoding defaults to 'utf8'. + * This is not the same as String.prototype.length since that returns the number of characters in a string. + * + * @param string string to test. + * @param encoding encoding used to evaluate (defaults to 'utf8') + */ + static byteLength(string: string, encoding?: string): number; + /** + * Returns a buffer which is the result of concatenating all the buffers in the list together. + * + * If the list has no items, or if the totalLength is 0, then it returns a zero-length buffer. + * If the list has exactly one item, then the first item of the list is returned. + * If the list has more than one item, then a new Buffer is created. + * + * @param list An array of Buffer objects to concatenate + * @param totalLength Total length of the buffers when concatenated. + * If totalLength is not provided, it is read from the buffers in the list. 
However, this adds an additional loop to the function, so it is faster to provide the length explicitly. + */ + static concat(list: Uint8Array[], totalLength?: number): Buffer; + /** + * The same as buf1.compare(buf2). + */ + static compare(buf1: Uint8Array, buf2: Uint8Array): number; + /** + * Allocates a new buffer of {size} octets. + * + * @param size count of octets to allocate. + * @param fill if specified, buffer will be initialized by calling buf.fill(fill). + * If parameter is omitted, buffer will be filled with zeros. + * @param encoding encoding used for call to buf.fill while initializing + */ + static alloc(size: number, fill?: string | Buffer | number, encoding?: string): Buffer; + /** + * Allocates a new buffer of {size} octets, leaving memory not initialized, so the contents + * of the newly created Buffer are unknown and may contain sensitive data. + * + * @param size count of octets to allocate + */ + static allocUnsafe(size: number): Buffer; + /** + * Allocates a new non-pooled buffer of {size} octets, leaving memory not initialized, so the contents + * of the newly created Buffer are unknown and may contain sensitive data. + * + * @param size count of octets to allocate + */ + static allocUnsafeSlow(size: number): Buffer; +} diff --git a/node_modules/buffer/index.js b/node_modules/buffer/index.js new file mode 100644 index 0000000000..7a0e9c2a12 --- /dev/null +++ b/node_modules/buffer/index.js @@ -0,0 +1,2106 @@ +/*! + * The buffer module from node.js, for the browser. + * + * @author Feross Aboukhadijeh + * @license MIT + */ +/* eslint-disable no-proto */ + +'use strict' + +const base64 = require('base64-js') +const ieee754 = require('ieee754') +const customInspectSymbol = + (typeof Symbol === 'function' && typeof Symbol['for'] === 'function') // eslint-disable-line dot-notation + ? 
Symbol['for']('nodejs.util.inspect.custom') // eslint-disable-line dot-notation + : null + +exports.Buffer = Buffer +exports.SlowBuffer = SlowBuffer +exports.INSPECT_MAX_BYTES = 50 + +const K_MAX_LENGTH = 0x7fffffff +exports.kMaxLength = K_MAX_LENGTH + +/** + * If `Buffer.TYPED_ARRAY_SUPPORT`: + * === true Use Uint8Array implementation (fastest) + * === false Print warning and recommend using `buffer` v4.x which has an Object + * implementation (most compatible, even IE6) + * + * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+, + * Opera 11.6+, iOS 4.2+. + * + * We report that the browser does not support typed arrays if the are not subclassable + * using __proto__. Firefox 4-29 lacks support for adding new properties to `Uint8Array` + * (See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438). IE 10 lacks support + * for __proto__ and has a buggy typed array implementation. + */ +Buffer.TYPED_ARRAY_SUPPORT = typedArraySupport() + +if (!Buffer.TYPED_ARRAY_SUPPORT && typeof console !== 'undefined' && + typeof console.error === 'function') { + console.error( + 'This browser lacks typed array (Uint8Array) support which is required by ' + + '`buffer` v5.x. Use `buffer` v4.x if you require old browser support.' + ) +} + +function typedArraySupport () { + // Can typed array instances can be augmented? 
+ try { + const arr = new Uint8Array(1) + const proto = { foo: function () { return 42 } } + Object.setPrototypeOf(proto, Uint8Array.prototype) + Object.setPrototypeOf(arr, proto) + return arr.foo() === 42 + } catch (e) { + return false + } +} + +Object.defineProperty(Buffer.prototype, 'parent', { + enumerable: true, + get: function () { + if (!Buffer.isBuffer(this)) return undefined + return this.buffer + } +}) + +Object.defineProperty(Buffer.prototype, 'offset', { + enumerable: true, + get: function () { + if (!Buffer.isBuffer(this)) return undefined + return this.byteOffset + } +}) + +function createBuffer (length) { + if (length > K_MAX_LENGTH) { + throw new RangeError('The value "' + length + '" is invalid for option "size"') + } + // Return an augmented `Uint8Array` instance + const buf = new Uint8Array(length) + Object.setPrototypeOf(buf, Buffer.prototype) + return buf +} + +/** + * The Buffer constructor returns instances of `Uint8Array` that have their + * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of + * `Uint8Array`, so the returned instances will have all the node `Buffer` methods + * and the `Uint8Array` methods. Square bracket notation works as expected -- it + * returns a single octet. + * + * The `Uint8Array` prototype remains unmodified. + */ + +function Buffer (arg, encodingOrOffset, length) { + // Common case. + if (typeof arg === 'number') { + if (typeof encodingOrOffset === 'string') { + throw new TypeError( + 'The "string" argument must be of type string. 
Received type number' + ) + } + return allocUnsafe(arg) + } + return from(arg, encodingOrOffset, length) +} + +Buffer.poolSize = 8192 // not used by this implementation + +function from (value, encodingOrOffset, length) { + if (typeof value === 'string') { + return fromString(value, encodingOrOffset) + } + + if (ArrayBuffer.isView(value)) { + return fromArrayView(value) + } + + if (value == null) { + throw new TypeError( + 'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' + + 'or Array-like Object. Received type ' + (typeof value) + ) + } + + if (isInstance(value, ArrayBuffer) || + (value && isInstance(value.buffer, ArrayBuffer))) { + return fromArrayBuffer(value, encodingOrOffset, length) + } + + if (typeof SharedArrayBuffer !== 'undefined' && + (isInstance(value, SharedArrayBuffer) || + (value && isInstance(value.buffer, SharedArrayBuffer)))) { + return fromArrayBuffer(value, encodingOrOffset, length) + } + + if (typeof value === 'number') { + throw new TypeError( + 'The "value" argument must not be of type number. Received type number' + ) + } + + const valueOf = value.valueOf && value.valueOf() + if (valueOf != null && valueOf !== value) { + return Buffer.from(valueOf, encodingOrOffset, length) + } + + const b = fromObject(value) + if (b) return b + + if (typeof Symbol !== 'undefined' && Symbol.toPrimitive != null && + typeof value[Symbol.toPrimitive] === 'function') { + return Buffer.from(value[Symbol.toPrimitive]('string'), encodingOrOffset, length) + } + + throw new TypeError( + 'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' + + 'or Array-like Object. Received type ' + (typeof value) + ) +} + +/** + * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError + * if value is a number. 
+ * Buffer.from(str[, encoding]) + * Buffer.from(array) + * Buffer.from(buffer) + * Buffer.from(arrayBuffer[, byteOffset[, length]]) + **/ +Buffer.from = function (value, encodingOrOffset, length) { + return from(value, encodingOrOffset, length) +} + +// Note: Change prototype *after* Buffer.from is defined to workaround Chrome bug: +// https://github.com/feross/buffer/pull/148 +Object.setPrototypeOf(Buffer.prototype, Uint8Array.prototype) +Object.setPrototypeOf(Buffer, Uint8Array) + +function assertSize (size) { + if (typeof size !== 'number') { + throw new TypeError('"size" argument must be of type number') + } else if (size < 0) { + throw new RangeError('The value "' + size + '" is invalid for option "size"') + } +} + +function alloc (size, fill, encoding) { + assertSize(size) + if (size <= 0) { + return createBuffer(size) + } + if (fill !== undefined) { + // Only pay attention to encoding if it's a string. This + // prevents accidentally sending in a number that would + // be interpreted as a start offset. + return typeof encoding === 'string' + ? createBuffer(size).fill(fill, encoding) + : createBuffer(size).fill(fill) + } + return createBuffer(size) +} + +/** + * Creates a new filled Buffer instance. + * alloc(size[, fill[, encoding]]) + **/ +Buffer.alloc = function (size, fill, encoding) { + return alloc(size, fill, encoding) +} + +function allocUnsafe (size) { + assertSize(size) + return createBuffer(size < 0 ? 0 : checked(size) | 0) +} + +/** + * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance. + * */ +Buffer.allocUnsafe = function (size) { + return allocUnsafe(size) +} +/** + * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance. 
+ */ +Buffer.allocUnsafeSlow = function (size) { + return allocUnsafe(size) +} + +function fromString (string, encoding) { + if (typeof encoding !== 'string' || encoding === '') { + encoding = 'utf8' + } + + if (!Buffer.isEncoding(encoding)) { + throw new TypeError('Unknown encoding: ' + encoding) + } + + const length = byteLength(string, encoding) | 0 + let buf = createBuffer(length) + + const actual = buf.write(string, encoding) + + if (actual !== length) { + // Writing a hex string, for example, that contains invalid characters will + // cause everything after the first invalid character to be ignored. (e.g. + // 'abxxcd' will be treated as 'ab') + buf = buf.slice(0, actual) + } + + return buf +} + +function fromArrayLike (array) { + const length = array.length < 0 ? 0 : checked(array.length) | 0 + const buf = createBuffer(length) + for (let i = 0; i < length; i += 1) { + buf[i] = array[i] & 255 + } + return buf +} + +function fromArrayView (arrayView) { + if (isInstance(arrayView, Uint8Array)) { + const copy = new Uint8Array(arrayView) + return fromArrayBuffer(copy.buffer, copy.byteOffset, copy.byteLength) + } + return fromArrayLike(arrayView) +} + +function fromArrayBuffer (array, byteOffset, length) { + if (byteOffset < 0 || array.byteLength < byteOffset) { + throw new RangeError('"offset" is outside of buffer bounds') + } + + if (array.byteLength < byteOffset + (length || 0)) { + throw new RangeError('"length" is outside of buffer bounds') + } + + let buf + if (byteOffset === undefined && length === undefined) { + buf = new Uint8Array(array) + } else if (length === undefined) { + buf = new Uint8Array(array, byteOffset) + } else { + buf = new Uint8Array(array, byteOffset, length) + } + + // Return an augmented `Uint8Array` instance + Object.setPrototypeOf(buf, Buffer.prototype) + + return buf +} + +function fromObject (obj) { + if (Buffer.isBuffer(obj)) { + const len = checked(obj.length) | 0 + const buf = createBuffer(len) + + if (buf.length === 0) { + 
return buf + } + + obj.copy(buf, 0, 0, len) + return buf + } + + if (obj.length !== undefined) { + if (typeof obj.length !== 'number' || numberIsNaN(obj.length)) { + return createBuffer(0) + } + return fromArrayLike(obj) + } + + if (obj.type === 'Buffer' && Array.isArray(obj.data)) { + return fromArrayLike(obj.data) + } +} + +function checked (length) { + // Note: cannot use `length < K_MAX_LENGTH` here because that fails when + // length is NaN (which is otherwise coerced to zero.) + if (length >= K_MAX_LENGTH) { + throw new RangeError('Attempt to allocate Buffer larger than maximum ' + + 'size: 0x' + K_MAX_LENGTH.toString(16) + ' bytes') + } + return length | 0 +} + +function SlowBuffer (length) { + if (+length != length) { // eslint-disable-line eqeqeq + length = 0 + } + return Buffer.alloc(+length) +} + +Buffer.isBuffer = function isBuffer (b) { + return b != null && b._isBuffer === true && + b !== Buffer.prototype // so Buffer.isBuffer(Buffer.prototype) will be false +} + +Buffer.compare = function compare (a, b) { + if (isInstance(a, Uint8Array)) a = Buffer.from(a, a.offset, a.byteLength) + if (isInstance(b, Uint8Array)) b = Buffer.from(b, b.offset, b.byteLength) + if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) { + throw new TypeError( + 'The "buf1", "buf2" arguments must be one of type Buffer or Uint8Array' + ) + } + + if (a === b) return 0 + + let x = a.length + let y = b.length + + for (let i = 0, len = Math.min(x, y); i < len; ++i) { + if (a[i] !== b[i]) { + x = a[i] + y = b[i] + break + } + } + + if (x < y) return -1 + if (y < x) return 1 + return 0 +} + +Buffer.isEncoding = function isEncoding (encoding) { + switch (String(encoding).toLowerCase()) { + case 'hex': + case 'utf8': + case 'utf-8': + case 'ascii': + case 'latin1': + case 'binary': + case 'base64': + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return true + default: + return false + } +} + +Buffer.concat = function concat (list, length) { + if (!Array.isArray(list)) { 
+ throw new TypeError('"list" argument must be an Array of Buffers') + } + + if (list.length === 0) { + return Buffer.alloc(0) + } + + let i + if (length === undefined) { + length = 0 + for (i = 0; i < list.length; ++i) { + length += list[i].length + } + } + + const buffer = Buffer.allocUnsafe(length) + let pos = 0 + for (i = 0; i < list.length; ++i) { + let buf = list[i] + if (isInstance(buf, Uint8Array)) { + if (pos + buf.length > buffer.length) { + if (!Buffer.isBuffer(buf)) buf = Buffer.from(buf) + buf.copy(buffer, pos) + } else { + Uint8Array.prototype.set.call( + buffer, + buf, + pos + ) + } + } else if (!Buffer.isBuffer(buf)) { + throw new TypeError('"list" argument must be an Array of Buffers') + } else { + buf.copy(buffer, pos) + } + pos += buf.length + } + return buffer +} + +function byteLength (string, encoding) { + if (Buffer.isBuffer(string)) { + return string.length + } + if (ArrayBuffer.isView(string) || isInstance(string, ArrayBuffer)) { + return string.byteLength + } + if (typeof string !== 'string') { + throw new TypeError( + 'The "string" argument must be one of type string, Buffer, or ArrayBuffer. ' + + 'Received type ' + typeof string + ) + } + + const len = string.length + const mustMatch = (arguments.length > 2 && arguments[2] === true) + if (!mustMatch && len === 0) return 0 + + // Use a for loop to avoid recursion + let loweredCase = false + for (;;) { + switch (encoding) { + case 'ascii': + case 'latin1': + case 'binary': + return len + case 'utf8': + case 'utf-8': + return utf8ToBytes(string).length + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return len * 2 + case 'hex': + return len >>> 1 + case 'base64': + return base64ToBytes(string).length + default: + if (loweredCase) { + return mustMatch ? 
-1 : utf8ToBytes(string).length // assume utf8 + } + encoding = ('' + encoding).toLowerCase() + loweredCase = true + } + } +} +Buffer.byteLength = byteLength + +function slowToString (encoding, start, end) { + let loweredCase = false + + // No need to verify that "this.length <= MAX_UINT32" since it's a read-only + // property of a typed array. + + // This behaves neither like String nor Uint8Array in that we set start/end + // to their upper/lower bounds if the value passed is out of range. + // undefined is handled specially as per ECMA-262 6th Edition, + // Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization. + if (start === undefined || start < 0) { + start = 0 + } + // Return early if start > this.length. Done here to prevent potential uint32 + // coercion fail below. + if (start > this.length) { + return '' + } + + if (end === undefined || end > this.length) { + end = this.length + } + + if (end <= 0) { + return '' + } + + // Force coercion to uint32. This will also coerce falsey/NaN values to 0. + end >>>= 0 + start >>>= 0 + + if (end <= start) { + return '' + } + + if (!encoding) encoding = 'utf8' + + while (true) { + switch (encoding) { + case 'hex': + return hexSlice(this, start, end) + + case 'utf8': + case 'utf-8': + return utf8Slice(this, start, end) + + case 'ascii': + return asciiSlice(this, start, end) + + case 'latin1': + case 'binary': + return latin1Slice(this, start, end) + + case 'base64': + return base64Slice(this, start, end) + + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return utf16leSlice(this, start, end) + + default: + if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding) + encoding = (encoding + '').toLowerCase() + loweredCase = true + } + } +} + +// This property is used by `Buffer.isBuffer` (and the `is-buffer` npm package) +// to detect a Buffer instance. 
It's not possible to use `instanceof Buffer` +// reliably in a browserify context because there could be multiple different +// copies of the 'buffer' package in use. This method works even for Buffer +// instances that were created from another copy of the `buffer` package. +// See: https://github.com/feross/buffer/issues/154 +Buffer.prototype._isBuffer = true + +function swap (b, n, m) { + const i = b[n] + b[n] = b[m] + b[m] = i +} + +Buffer.prototype.swap16 = function swap16 () { + const len = this.length + if (len % 2 !== 0) { + throw new RangeError('Buffer size must be a multiple of 16-bits') + } + for (let i = 0; i < len; i += 2) { + swap(this, i, i + 1) + } + return this +} + +Buffer.prototype.swap32 = function swap32 () { + const len = this.length + if (len % 4 !== 0) { + throw new RangeError('Buffer size must be a multiple of 32-bits') + } + for (let i = 0; i < len; i += 4) { + swap(this, i, i + 3) + swap(this, i + 1, i + 2) + } + return this +} + +Buffer.prototype.swap64 = function swap64 () { + const len = this.length + if (len % 8 !== 0) { + throw new RangeError('Buffer size must be a multiple of 64-bits') + } + for (let i = 0; i < len; i += 8) { + swap(this, i, i + 7) + swap(this, i + 1, i + 6) + swap(this, i + 2, i + 5) + swap(this, i + 3, i + 4) + } + return this +} + +Buffer.prototype.toString = function toString () { + const length = this.length + if (length === 0) return '' + if (arguments.length === 0) return utf8Slice(this, 0, length) + return slowToString.apply(this, arguments) +} + +Buffer.prototype.toLocaleString = Buffer.prototype.toString + +Buffer.prototype.equals = function equals (b) { + if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer') + if (this === b) return true + return Buffer.compare(this, b) === 0 +} + +Buffer.prototype.inspect = function inspect () { + let str = '' + const max = exports.INSPECT_MAX_BYTES + str = this.toString('hex', 0, max).replace(/(.{2})/g, '$1 ').trim() + if (this.length > max) str += ' 
... ' + return '' +} +if (customInspectSymbol) { + Buffer.prototype[customInspectSymbol] = Buffer.prototype.inspect +} + +Buffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) { + if (isInstance(target, Uint8Array)) { + target = Buffer.from(target, target.offset, target.byteLength) + } + if (!Buffer.isBuffer(target)) { + throw new TypeError( + 'The "target" argument must be one of type Buffer or Uint8Array. ' + + 'Received type ' + (typeof target) + ) + } + + if (start === undefined) { + start = 0 + } + if (end === undefined) { + end = target ? target.length : 0 + } + if (thisStart === undefined) { + thisStart = 0 + } + if (thisEnd === undefined) { + thisEnd = this.length + } + + if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) { + throw new RangeError('out of range index') + } + + if (thisStart >= thisEnd && start >= end) { + return 0 + } + if (thisStart >= thisEnd) { + return -1 + } + if (start >= end) { + return 1 + } + + start >>>= 0 + end >>>= 0 + thisStart >>>= 0 + thisEnd >>>= 0 + + if (this === target) return 0 + + let x = thisEnd - thisStart + let y = end - start + const len = Math.min(x, y) + + const thisCopy = this.slice(thisStart, thisEnd) + const targetCopy = target.slice(start, end) + + for (let i = 0; i < len; ++i) { + if (thisCopy[i] !== targetCopy[i]) { + x = thisCopy[i] + y = targetCopy[i] + break + } + } + + if (x < y) return -1 + if (y < x) return 1 + return 0 +} + +// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`, +// OR the last index of `val` in `buffer` at offset <= `byteOffset`. 
+// +// Arguments: +// - buffer - a Buffer to search +// - val - a string, Buffer, or number +// - byteOffset - an index into `buffer`; will be clamped to an int32 +// - encoding - an optional encoding, relevant is val is a string +// - dir - true for indexOf, false for lastIndexOf +function bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) { + // Empty buffer means no match + if (buffer.length === 0) return -1 + + // Normalize byteOffset + if (typeof byteOffset === 'string') { + encoding = byteOffset + byteOffset = 0 + } else if (byteOffset > 0x7fffffff) { + byteOffset = 0x7fffffff + } else if (byteOffset < -0x80000000) { + byteOffset = -0x80000000 + } + byteOffset = +byteOffset // Coerce to Number. + if (numberIsNaN(byteOffset)) { + // byteOffset: it it's undefined, null, NaN, "foo", etc, search whole buffer + byteOffset = dir ? 0 : (buffer.length - 1) + } + + // Normalize byteOffset: negative offsets start from the end of the buffer + if (byteOffset < 0) byteOffset = buffer.length + byteOffset + if (byteOffset >= buffer.length) { + if (dir) return -1 + else byteOffset = buffer.length - 1 + } else if (byteOffset < 0) { + if (dir) byteOffset = 0 + else return -1 + } + + // Normalize val + if (typeof val === 'string') { + val = Buffer.from(val, encoding) + } + + // Finally, search either indexOf (if dir is true) or lastIndexOf + if (Buffer.isBuffer(val)) { + // Special case: looking for empty string/buffer always fails + if (val.length === 0) { + return -1 + } + return arrayIndexOf(buffer, val, byteOffset, encoding, dir) + } else if (typeof val === 'number') { + val = val & 0xFF // Search for a byte value [0-255] + if (typeof Uint8Array.prototype.indexOf === 'function') { + if (dir) { + return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset) + } else { + return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset) + } + } + return arrayIndexOf(buffer, [val], byteOffset, encoding, dir) + } + + throw new TypeError('val must be string, 
number or Buffer') +} + +function arrayIndexOf (arr, val, byteOffset, encoding, dir) { + let indexSize = 1 + let arrLength = arr.length + let valLength = val.length + + if (encoding !== undefined) { + encoding = String(encoding).toLowerCase() + if (encoding === 'ucs2' || encoding === 'ucs-2' || + encoding === 'utf16le' || encoding === 'utf-16le') { + if (arr.length < 2 || val.length < 2) { + return -1 + } + indexSize = 2 + arrLength /= 2 + valLength /= 2 + byteOffset /= 2 + } + } + + function read (buf, i) { + if (indexSize === 1) { + return buf[i] + } else { + return buf.readUInt16BE(i * indexSize) + } + } + + let i + if (dir) { + let foundIndex = -1 + for (i = byteOffset; i < arrLength; i++) { + if (read(arr, i) === read(val, foundIndex === -1 ? 0 : i - foundIndex)) { + if (foundIndex === -1) foundIndex = i + if (i - foundIndex + 1 === valLength) return foundIndex * indexSize + } else { + if (foundIndex !== -1) i -= i - foundIndex + foundIndex = -1 + } + } + } else { + if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength + for (i = byteOffset; i >= 0; i--) { + let found = true + for (let j = 0; j < valLength; j++) { + if (read(arr, i + j) !== read(val, j)) { + found = false + break + } + } + if (found) return i + } + } + + return -1 +} + +Buffer.prototype.includes = function includes (val, byteOffset, encoding) { + return this.indexOf(val, byteOffset, encoding) !== -1 +} + +Buffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) { + return bidirectionalIndexOf(this, val, byteOffset, encoding, true) +} + +Buffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) { + return bidirectionalIndexOf(this, val, byteOffset, encoding, false) +} + +function hexWrite (buf, string, offset, length) { + offset = Number(offset) || 0 + const remaining = buf.length - offset + if (!length) { + length = remaining + } else { + length = Number(length) + if (length > remaining) { + length = remaining + } + } + + const strLen = 
string.length + + if (length > strLen / 2) { + length = strLen / 2 + } + let i + for (i = 0; i < length; ++i) { + const parsed = parseInt(string.substr(i * 2, 2), 16) + if (numberIsNaN(parsed)) return i + buf[offset + i] = parsed + } + return i +} + +function utf8Write (buf, string, offset, length) { + return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length) +} + +function asciiWrite (buf, string, offset, length) { + return blitBuffer(asciiToBytes(string), buf, offset, length) +} + +function base64Write (buf, string, offset, length) { + return blitBuffer(base64ToBytes(string), buf, offset, length) +} + +function ucs2Write (buf, string, offset, length) { + return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length) +} + +Buffer.prototype.write = function write (string, offset, length, encoding) { + // Buffer#write(string) + if (offset === undefined) { + encoding = 'utf8' + length = this.length + offset = 0 + // Buffer#write(string, encoding) + } else if (length === undefined && typeof offset === 'string') { + encoding = offset + length = this.length + offset = 0 + // Buffer#write(string, offset[, length][, encoding]) + } else if (isFinite(offset)) { + offset = offset >>> 0 + if (isFinite(length)) { + length = length >>> 0 + if (encoding === undefined) encoding = 'utf8' + } else { + encoding = length + length = undefined + } + } else { + throw new Error( + 'Buffer.write(string, encoding, offset[, length]) is no longer supported' + ) + } + + const remaining = this.length - offset + if (length === undefined || length > remaining) length = remaining + + if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) { + throw new RangeError('Attempt to write outside buffer bounds') + } + + if (!encoding) encoding = 'utf8' + + let loweredCase = false + for (;;) { + switch (encoding) { + case 'hex': + return hexWrite(this, string, offset, length) + + case 'utf8': + case 'utf-8': + return utf8Write(this, 
string, offset, length) + + case 'ascii': + case 'latin1': + case 'binary': + return asciiWrite(this, string, offset, length) + + case 'base64': + // Warning: maxLength not taken into account in base64Write + return base64Write(this, string, offset, length) + + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return ucs2Write(this, string, offset, length) + + default: + if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding) + encoding = ('' + encoding).toLowerCase() + loweredCase = true + } + } +} + +Buffer.prototype.toJSON = function toJSON () { + return { + type: 'Buffer', + data: Array.prototype.slice.call(this._arr || this, 0) + } +} + +function base64Slice (buf, start, end) { + if (start === 0 && end === buf.length) { + return base64.fromByteArray(buf) + } else { + return base64.fromByteArray(buf.slice(start, end)) + } +} + +function utf8Slice (buf, start, end) { + end = Math.min(buf.length, end) + const res = [] + + let i = start + while (i < end) { + const firstByte = buf[i] + let codePoint = null + let bytesPerSequence = (firstByte > 0xEF) + ? 4 + : (firstByte > 0xDF) + ? 3 + : (firstByte > 0xBF) + ? 
2 + : 1 + + if (i + bytesPerSequence <= end) { + let secondByte, thirdByte, fourthByte, tempCodePoint + + switch (bytesPerSequence) { + case 1: + if (firstByte < 0x80) { + codePoint = firstByte + } + break + case 2: + secondByte = buf[i + 1] + if ((secondByte & 0xC0) === 0x80) { + tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F) + if (tempCodePoint > 0x7F) { + codePoint = tempCodePoint + } + } + break + case 3: + secondByte = buf[i + 1] + thirdByte = buf[i + 2] + if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) { + tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F) + if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) { + codePoint = tempCodePoint + } + } + break + case 4: + secondByte = buf[i + 1] + thirdByte = buf[i + 2] + fourthByte = buf[i + 3] + if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) { + tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F) + if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) { + codePoint = tempCodePoint + } + } + } + } + + if (codePoint === null) { + // we did not generate a valid codePoint so insert a + // replacement char (U+FFFD) and advance only 1 byte + codePoint = 0xFFFD + bytesPerSequence = 1 + } else if (codePoint > 0xFFFF) { + // encode to utf16 (surrogate pair dance) + codePoint -= 0x10000 + res.push(codePoint >>> 10 & 0x3FF | 0xD800) + codePoint = 0xDC00 | codePoint & 0x3FF + } + + res.push(codePoint) + i += bytesPerSequence + } + + return decodeCodePointsArray(res) +} + +// Based on http://stackoverflow.com/a/22747272/680742, the browser with +// the lowest limit is Chrome, with 0x10000 args. 
+// We go 1 magnitude less, for safety +const MAX_ARGUMENTS_LENGTH = 0x1000 + +function decodeCodePointsArray (codePoints) { + const len = codePoints.length + if (len <= MAX_ARGUMENTS_LENGTH) { + return String.fromCharCode.apply(String, codePoints) // avoid extra slice() + } + + // Decode in chunks to avoid "call stack size exceeded". + let res = '' + let i = 0 + while (i < len) { + res += String.fromCharCode.apply( + String, + codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH) + ) + } + return res +} + +function asciiSlice (buf, start, end) { + let ret = '' + end = Math.min(buf.length, end) + + for (let i = start; i < end; ++i) { + ret += String.fromCharCode(buf[i] & 0x7F) + } + return ret +} + +function latin1Slice (buf, start, end) { + let ret = '' + end = Math.min(buf.length, end) + + for (let i = start; i < end; ++i) { + ret += String.fromCharCode(buf[i]) + } + return ret +} + +function hexSlice (buf, start, end) { + const len = buf.length + + if (!start || start < 0) start = 0 + if (!end || end < 0 || end > len) end = len + + let out = '' + for (let i = start; i < end; ++i) { + out += hexSliceLookupTable[buf[i]] + } + return out +} + +function utf16leSlice (buf, start, end) { + const bytes = buf.slice(start, end) + let res = '' + // If bytes.length is odd, the last 8 bits must be ignored (same as node.js) + for (let i = 0; i < bytes.length - 1; i += 2) { + res += String.fromCharCode(bytes[i] + (bytes[i + 1] * 256)) + } + return res +} + +Buffer.prototype.slice = function slice (start, end) { + const len = this.length + start = ~~start + end = end === undefined ? 
len : ~~end + + if (start < 0) { + start += len + if (start < 0) start = 0 + } else if (start > len) { + start = len + } + + if (end < 0) { + end += len + if (end < 0) end = 0 + } else if (end > len) { + end = len + } + + if (end < start) end = start + + const newBuf = this.subarray(start, end) + // Return an augmented `Uint8Array` instance + Object.setPrototypeOf(newBuf, Buffer.prototype) + + return newBuf +} + +/* + * Need to make sure that buffer isn't trying to write out of bounds. + */ +function checkOffset (offset, ext, length) { + if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint') + if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length') +} + +Buffer.prototype.readUintLE = +Buffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) { + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) checkOffset(offset, byteLength, this.length) + + let val = this[offset] + let mul = 1 + let i = 0 + while (++i < byteLength && (mul *= 0x100)) { + val += this[offset + i] * mul + } + + return val +} + +Buffer.prototype.readUintBE = +Buffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) { + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) { + checkOffset(offset, byteLength, this.length) + } + + let val = this[offset + --byteLength] + let mul = 1 + while (byteLength > 0 && (mul *= 0x100)) { + val += this[offset + --byteLength] * mul + } + + return val +} + +Buffer.prototype.readUint8 = +Buffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 1, this.length) + return this[offset] +} + +Buffer.prototype.readUint16LE = +Buffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 2, this.length) + return this[offset] | (this[offset + 1] << 8) +} + +Buffer.prototype.readUint16BE = 
+Buffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 2, this.length) + return (this[offset] << 8) | this[offset + 1] +} + +Buffer.prototype.readUint32LE = +Buffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + + return ((this[offset]) | + (this[offset + 1] << 8) | + (this[offset + 2] << 16)) + + (this[offset + 3] * 0x1000000) +} + +Buffer.prototype.readUint32BE = +Buffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + + return (this[offset] * 0x1000000) + + ((this[offset + 1] << 16) | + (this[offset + 2] << 8) | + this[offset + 3]) +} + +Buffer.prototype.readBigUInt64LE = defineBigIntMethod(function readBigUInt64LE (offset) { + offset = offset >>> 0 + validateNumber(offset, 'offset') + const first = this[offset] + const last = this[offset + 7] + if (first === undefined || last === undefined) { + boundsError(offset, this.length - 8) + } + + const lo = first + + this[++offset] * 2 ** 8 + + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 24 + + const hi = this[++offset] + + this[++offset] * 2 ** 8 + + this[++offset] * 2 ** 16 + + last * 2 ** 24 + + return BigInt(lo) + (BigInt(hi) << BigInt(32)) +}) + +Buffer.prototype.readBigUInt64BE = defineBigIntMethod(function readBigUInt64BE (offset) { + offset = offset >>> 0 + validateNumber(offset, 'offset') + const first = this[offset] + const last = this[offset + 7] + if (first === undefined || last === undefined) { + boundsError(offset, this.length - 8) + } + + const hi = first * 2 ** 24 + + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 8 + + this[++offset] + + const lo = this[++offset] * 2 ** 24 + + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 8 + + last + + return (BigInt(hi) << BigInt(32)) + BigInt(lo) +}) + 
+Buffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) { + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) checkOffset(offset, byteLength, this.length) + + let val = this[offset] + let mul = 1 + let i = 0 + while (++i < byteLength && (mul *= 0x100)) { + val += this[offset + i] * mul + } + mul *= 0x80 + + if (val >= mul) val -= Math.pow(2, 8 * byteLength) + + return val +} + +Buffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) { + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) checkOffset(offset, byteLength, this.length) + + let i = byteLength + let mul = 1 + let val = this[offset + --i] + while (i > 0 && (mul *= 0x100)) { + val += this[offset + --i] * mul + } + mul *= 0x80 + + if (val >= mul) val -= Math.pow(2, 8 * byteLength) + + return val +} + +Buffer.prototype.readInt8 = function readInt8 (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 1, this.length) + if (!(this[offset] & 0x80)) return (this[offset]) + return ((0xff - this[offset] + 1) * -1) +} + +Buffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 2, this.length) + const val = this[offset] | (this[offset + 1] << 8) + return (val & 0x8000) ? val | 0xFFFF0000 : val +} + +Buffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 2, this.length) + const val = this[offset + 1] | (this[offset] << 8) + return (val & 0x8000) ? 
val | 0xFFFF0000 : val +} + +Buffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + + return (this[offset]) | + (this[offset + 1] << 8) | + (this[offset + 2] << 16) | + (this[offset + 3] << 24) +} + +Buffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + + return (this[offset] << 24) | + (this[offset + 1] << 16) | + (this[offset + 2] << 8) | + (this[offset + 3]) +} + +Buffer.prototype.readBigInt64LE = defineBigIntMethod(function readBigInt64LE (offset) { + offset = offset >>> 0 + validateNumber(offset, 'offset') + const first = this[offset] + const last = this[offset + 7] + if (first === undefined || last === undefined) { + boundsError(offset, this.length - 8) + } + + const val = this[offset + 4] + + this[offset + 5] * 2 ** 8 + + this[offset + 6] * 2 ** 16 + + (last << 24) // Overflow + + return (BigInt(val) << BigInt(32)) + + BigInt(first + + this[++offset] * 2 ** 8 + + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 24) +}) + +Buffer.prototype.readBigInt64BE = defineBigIntMethod(function readBigInt64BE (offset) { + offset = offset >>> 0 + validateNumber(offset, 'offset') + const first = this[offset] + const last = this[offset + 7] + if (first === undefined || last === undefined) { + boundsError(offset, this.length - 8) + } + + const val = (first << 24) + // Overflow + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 8 + + this[++offset] + + return (BigInt(val) << BigInt(32)) + + BigInt(this[++offset] * 2 ** 24 + + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 8 + + last) +}) + +Buffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + return ieee754.read(this, offset, true, 23, 4) +} + +Buffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) { + offset = 
offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + return ieee754.read(this, offset, false, 23, 4) +} + +Buffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 8, this.length) + return ieee754.read(this, offset, true, 52, 8) +} + +Buffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 8, this.length) + return ieee754.read(this, offset, false, 52, 8) +} + +function checkInt (buf, value, offset, ext, max, min) { + if (!Buffer.isBuffer(buf)) throw new TypeError('"buffer" argument must be a Buffer instance') + if (value > max || value < min) throw new RangeError('"value" argument is out of bounds') + if (offset + ext > buf.length) throw new RangeError('Index out of range') +} + +Buffer.prototype.writeUintLE = +Buffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) { + value = +value + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) { + const maxBytes = Math.pow(2, 8 * byteLength) - 1 + checkInt(this, value, offset, byteLength, maxBytes, 0) + } + + let mul = 1 + let i = 0 + this[offset] = value & 0xFF + while (++i < byteLength && (mul *= 0x100)) { + this[offset + i] = (value / mul) & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeUintBE = +Buffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) { + value = +value + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) { + const maxBytes = Math.pow(2, 8 * byteLength) - 1 + checkInt(this, value, offset, byteLength, maxBytes, 0) + } + + let i = byteLength - 1 + let mul = 1 + this[offset + i] = value & 0xFF + while (--i >= 0 && (mul *= 0x100)) { + this[offset + i] = (value / mul) & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeUint8 = +Buffer.prototype.writeUInt8 = function writeUInt8 (value, 
offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0) + this[offset] = (value & 0xff) + return offset + 1 +} + +Buffer.prototype.writeUint16LE = +Buffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0) + this[offset] = (value & 0xff) + this[offset + 1] = (value >>> 8) + return offset + 2 +} + +Buffer.prototype.writeUint16BE = +Buffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0) + this[offset] = (value >>> 8) + this[offset + 1] = (value & 0xff) + return offset + 2 +} + +Buffer.prototype.writeUint32LE = +Buffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0) + this[offset + 3] = (value >>> 24) + this[offset + 2] = (value >>> 16) + this[offset + 1] = (value >>> 8) + this[offset] = (value & 0xff) + return offset + 4 +} + +Buffer.prototype.writeUint32BE = +Buffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0) + this[offset] = (value >>> 24) + this[offset + 1] = (value >>> 16) + this[offset + 2] = (value >>> 8) + this[offset + 3] = (value & 0xff) + return offset + 4 +} + +function wrtBigUInt64LE (buf, value, offset, min, max) { + checkIntBI(value, min, max, buf, offset, 7) + + let lo = Number(value & BigInt(0xffffffff)) + buf[offset++] = lo + lo = lo >> 8 + buf[offset++] = lo + lo = lo >> 8 + buf[offset++] = lo + lo = lo >> 8 + buf[offset++] = lo + let hi = Number(value >> BigInt(32) & BigInt(0xffffffff)) + buf[offset++] = hi + hi = hi >> 8 + buf[offset++] = hi + hi = hi >> 8 + 
buf[offset++] = hi + hi = hi >> 8 + buf[offset++] = hi + return offset +} + +function wrtBigUInt64BE (buf, value, offset, min, max) { + checkIntBI(value, min, max, buf, offset, 7) + + let lo = Number(value & BigInt(0xffffffff)) + buf[offset + 7] = lo + lo = lo >> 8 + buf[offset + 6] = lo + lo = lo >> 8 + buf[offset + 5] = lo + lo = lo >> 8 + buf[offset + 4] = lo + let hi = Number(value >> BigInt(32) & BigInt(0xffffffff)) + buf[offset + 3] = hi + hi = hi >> 8 + buf[offset + 2] = hi + hi = hi >> 8 + buf[offset + 1] = hi + hi = hi >> 8 + buf[offset] = hi + return offset + 8 +} + +Buffer.prototype.writeBigUInt64LE = defineBigIntMethod(function writeBigUInt64LE (value, offset = 0) { + return wrtBigUInt64LE(this, value, offset, BigInt(0), BigInt('0xffffffffffffffff')) +}) + +Buffer.prototype.writeBigUInt64BE = defineBigIntMethod(function writeBigUInt64BE (value, offset = 0) { + return wrtBigUInt64BE(this, value, offset, BigInt(0), BigInt('0xffffffffffffffff')) +}) + +Buffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) { + const limit = Math.pow(2, (8 * byteLength) - 1) + + checkInt(this, value, offset, byteLength, limit - 1, -limit) + } + + let i = 0 + let mul = 1 + let sub = 0 + this[offset] = value & 0xFF + while (++i < byteLength && (mul *= 0x100)) { + if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) { + sub = 1 + } + this[offset + i] = ((value / mul) >> 0) - sub & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) { + const limit = Math.pow(2, (8 * byteLength) - 1) + + checkInt(this, value, offset, byteLength, limit - 1, -limit) + } + + let i = byteLength - 1 + let mul = 1 + let sub = 0 + this[offset + i] = value & 0xFF + while (--i >= 0 && (mul *= 0x100)) { + if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) { + 
sub = 1 + } + this[offset + i] = ((value / mul) >> 0) - sub & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80) + if (value < 0) value = 0xff + value + 1 + this[offset] = (value & 0xff) + return offset + 1 +} + +Buffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000) + this[offset] = (value & 0xff) + this[offset + 1] = (value >>> 8) + return offset + 2 +} + +Buffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000) + this[offset] = (value >>> 8) + this[offset + 1] = (value & 0xff) + return offset + 2 +} + +Buffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000) + this[offset] = (value & 0xff) + this[offset + 1] = (value >>> 8) + this[offset + 2] = (value >>> 16) + this[offset + 3] = (value >>> 24) + return offset + 4 +} + +Buffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000) + if (value < 0) value = 0xffffffff + value + 1 + this[offset] = (value >>> 24) + this[offset + 1] = (value >>> 16) + this[offset + 2] = (value >>> 8) + this[offset + 3] = (value & 0xff) + return offset + 4 +} + +Buffer.prototype.writeBigInt64LE = defineBigIntMethod(function writeBigInt64LE (value, offset = 0) { + return wrtBigUInt64LE(this, value, offset, -BigInt('0x8000000000000000'), BigInt('0x7fffffffffffffff')) +}) + +Buffer.prototype.writeBigInt64BE = 
defineBigIntMethod(function writeBigInt64BE (value, offset = 0) { + return wrtBigUInt64BE(this, value, offset, -BigInt('0x8000000000000000'), BigInt('0x7fffffffffffffff')) +}) + +function checkIEEE754 (buf, value, offset, ext, max, min) { + if (offset + ext > buf.length) throw new RangeError('Index out of range') + if (offset < 0) throw new RangeError('Index out of range') +} + +function writeFloat (buf, value, offset, littleEndian, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) { + checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38) + } + ieee754.write(buf, value, offset, littleEndian, 23, 4) + return offset + 4 +} + +Buffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) { + return writeFloat(this, value, offset, true, noAssert) +} + +Buffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) { + return writeFloat(this, value, offset, false, noAssert) +} + +function writeDouble (buf, value, offset, littleEndian, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) { + checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308) + } + ieee754.write(buf, value, offset, littleEndian, 52, 8) + return offset + 8 +} + +Buffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) { + return writeDouble(this, value, offset, true, noAssert) +} + +Buffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) { + return writeDouble(this, value, offset, false, noAssert) +} + +// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length) +Buffer.prototype.copy = function copy (target, targetStart, start, end) { + if (!Buffer.isBuffer(target)) throw new TypeError('argument should be a Buffer') + if (!start) start = 0 + if (!end && end !== 0) end = this.length + if (targetStart >= target.length) targetStart = target.length + if (!targetStart) targetStart = 0 + if (end > 0 && end < 
start) end = start + + // Copy 0 bytes; we're done + if (end === start) return 0 + if (target.length === 0 || this.length === 0) return 0 + + // Fatal error conditions + if (targetStart < 0) { + throw new RangeError('targetStart out of bounds') + } + if (start < 0 || start >= this.length) throw new RangeError('Index out of range') + if (end < 0) throw new RangeError('sourceEnd out of bounds') + + // Are we oob? + if (end > this.length) end = this.length + if (target.length - targetStart < end - start) { + end = target.length - targetStart + start + } + + const len = end - start + + if (this === target && typeof Uint8Array.prototype.copyWithin === 'function') { + // Use built-in when available, missing from IE11 + this.copyWithin(targetStart, start, end) + } else { + Uint8Array.prototype.set.call( + target, + this.subarray(start, end), + targetStart + ) + } + + return len +} + +// Usage: +// buffer.fill(number[, offset[, end]]) +// buffer.fill(buffer[, offset[, end]]) +// buffer.fill(string[, offset[, end]][, encoding]) +Buffer.prototype.fill = function fill (val, start, end, encoding) { + // Handle string cases: + if (typeof val === 'string') { + if (typeof start === 'string') { + encoding = start + start = 0 + end = this.length + } else if (typeof end === 'string') { + encoding = end + end = this.length + } + if (encoding !== undefined && typeof encoding !== 'string') { + throw new TypeError('encoding must be a string') + } + if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) { + throw new TypeError('Unknown encoding: ' + encoding) + } + if (val.length === 1) { + const code = val.charCodeAt(0) + if ((encoding === 'utf8' && code < 128) || + encoding === 'latin1') { + // Fast path: If `val` fits into a single byte, use that numeric value. 
+ val = code + } + } + } else if (typeof val === 'number') { + val = val & 255 + } else if (typeof val === 'boolean') { + val = Number(val) + } + + // Invalid ranges are not set to a default, so can range check early. + if (start < 0 || this.length < start || this.length < end) { + throw new RangeError('Out of range index') + } + + if (end <= start) { + return this + } + + start = start >>> 0 + end = end === undefined ? this.length : end >>> 0 + + if (!val) val = 0 + + let i + if (typeof val === 'number') { + for (i = start; i < end; ++i) { + this[i] = val + } + } else { + const bytes = Buffer.isBuffer(val) + ? val + : Buffer.from(val, encoding) + const len = bytes.length + if (len === 0) { + throw new TypeError('The value "' + val + + '" is invalid for argument "value"') + } + for (i = 0; i < end - start; ++i) { + this[i + start] = bytes[i % len] + } + } + + return this +} + +// CUSTOM ERRORS +// ============= + +// Simplified versions from Node, changed for Buffer-only usage +const errors = {} +function E (sym, getMessage, Base) { + errors[sym] = class NodeError extends Base { + constructor () { + super() + + Object.defineProperty(this, 'message', { + value: getMessage.apply(this, arguments), + writable: true, + configurable: true + }) + + // Add the error code to the name to include it in the stack trace. + this.name = `${this.name} [${sym}]` + // Access the stack to generate the error message including the error code + // from the name. + this.stack // eslint-disable-line no-unused-expressions + // Reset the name to the actual name. 
+ delete this.name + } + + get code () { + return sym + } + + set code (value) { + Object.defineProperty(this, 'code', { + configurable: true, + enumerable: true, + value, + writable: true + }) + } + + toString () { + return `${this.name} [${sym}]: ${this.message}` + } + } +} + +E('ERR_BUFFER_OUT_OF_BOUNDS', + function (name) { + if (name) { + return `${name} is outside of buffer bounds` + } + + return 'Attempt to access memory outside buffer bounds' + }, RangeError) +E('ERR_INVALID_ARG_TYPE', + function (name, actual) { + return `The "${name}" argument must be of type number. Received type ${typeof actual}` + }, TypeError) +E('ERR_OUT_OF_RANGE', + function (str, range, input) { + let msg = `The value of "${str}" is out of range.` + let received = input + if (Number.isInteger(input) && Math.abs(input) > 2 ** 32) { + received = addNumericalSeparator(String(input)) + } else if (typeof input === 'bigint') { + received = String(input) + if (input > BigInt(2) ** BigInt(32) || input < -(BigInt(2) ** BigInt(32))) { + received = addNumericalSeparator(received) + } + received += 'n' + } + msg += ` It must be ${range}. Received ${received}` + return msg + }, RangeError) + +function addNumericalSeparator (val) { + let res = '' + let i = val.length + const start = val[0] === '-' ? 1 : 0 + for (; i >= start + 4; i -= 3) { + res = `_${val.slice(i - 3, i)}${res}` + } + return `${val.slice(0, i)}${res}` +} + +// CHECK FUNCTIONS +// =============== + +function checkBounds (buf, offset, byteLength) { + validateNumber(offset, 'offset') + if (buf[offset] === undefined || buf[offset + byteLength] === undefined) { + boundsError(offset, buf.length - (byteLength + 1)) + } +} + +function checkIntBI (value, min, max, buf, offset, byteLength) { + if (value > max || value < min) { + const n = typeof min === 'bigint' ? 
'n' : '' + let range + if (byteLength > 3) { + if (min === 0 || min === BigInt(0)) { + range = `>= 0${n} and < 2${n} ** ${(byteLength + 1) * 8}${n}` + } else { + range = `>= -(2${n} ** ${(byteLength + 1) * 8 - 1}${n}) and < 2 ** ` + + `${(byteLength + 1) * 8 - 1}${n}` + } + } else { + range = `>= ${min}${n} and <= ${max}${n}` + } + throw new errors.ERR_OUT_OF_RANGE('value', range, value) + } + checkBounds(buf, offset, byteLength) +} + +function validateNumber (value, name) { + if (typeof value !== 'number') { + throw new errors.ERR_INVALID_ARG_TYPE(name, 'number', value) + } +} + +function boundsError (value, length, type) { + if (Math.floor(value) !== value) { + validateNumber(value, type) + throw new errors.ERR_OUT_OF_RANGE(type || 'offset', 'an integer', value) + } + + if (length < 0) { + throw new errors.ERR_BUFFER_OUT_OF_BOUNDS() + } + + throw new errors.ERR_OUT_OF_RANGE(type || 'offset', + `>= ${type ? 1 : 0} and <= ${length}`, + value) +} + +// HELPER FUNCTIONS +// ================ + +const INVALID_BASE64_RE = /[^+/0-9A-Za-z-_]/g + +function base64clean (str) { + // Node takes equal signs as end of the Base64 encoding + str = str.split('=')[0] + // Node strips out invalid characters like \n and \t from the string, base64-js does not + str = str.trim().replace(INVALID_BASE64_RE, '') + // Node converts strings with length < 2 to '' + if (str.length < 2) return '' + // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not + while (str.length % 4 !== 0) { + str = str + '=' + } + return str +} + +function utf8ToBytes (string, units) { + units = units || Infinity + let codePoint + const length = string.length + let leadSurrogate = null + const bytes = [] + + for (let i = 0; i < length; ++i) { + codePoint = string.charCodeAt(i) + + // is surrogate component + if (codePoint > 0xD7FF && codePoint < 0xE000) { + // last char was a lead + if (!leadSurrogate) { + // no lead yet + if (codePoint > 0xDBFF) { + // unexpected trail + if ((units 
-= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + continue + } else if (i + 1 === length) { + // unpaired lead + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + continue + } + + // valid lead + leadSurrogate = codePoint + + continue + } + + // 2 leads in a row + if (codePoint < 0xDC00) { + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + leadSurrogate = codePoint + continue + } + + // valid surrogate pair + codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000 + } else if (leadSurrogate) { + // valid bmp char, but last char was a lead + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + } + + leadSurrogate = null + + // encode utf8 + if (codePoint < 0x80) { + if ((units -= 1) < 0) break + bytes.push(codePoint) + } else if (codePoint < 0x800) { + if ((units -= 2) < 0) break + bytes.push( + codePoint >> 0x6 | 0xC0, + codePoint & 0x3F | 0x80 + ) + } else if (codePoint < 0x10000) { + if ((units -= 3) < 0) break + bytes.push( + codePoint >> 0xC | 0xE0, + codePoint >> 0x6 & 0x3F | 0x80, + codePoint & 0x3F | 0x80 + ) + } else if (codePoint < 0x110000) { + if ((units -= 4) < 0) break + bytes.push( + codePoint >> 0x12 | 0xF0, + codePoint >> 0xC & 0x3F | 0x80, + codePoint >> 0x6 & 0x3F | 0x80, + codePoint & 0x3F | 0x80 + ) + } else { + throw new Error('Invalid code point') + } + } + + return bytes +} + +function asciiToBytes (str) { + const byteArray = [] + for (let i = 0; i < str.length; ++i) { + // Node's code seems to be doing this and not & 0x7F.. 
+ byteArray.push(str.charCodeAt(i) & 0xFF) + } + return byteArray +} + +function utf16leToBytes (str, units) { + let c, hi, lo + const byteArray = [] + for (let i = 0; i < str.length; ++i) { + if ((units -= 2) < 0) break + + c = str.charCodeAt(i) + hi = c >> 8 + lo = c % 256 + byteArray.push(lo) + byteArray.push(hi) + } + + return byteArray +} + +function base64ToBytes (str) { + return base64.toByteArray(base64clean(str)) +} + +function blitBuffer (src, dst, offset, length) { + let i + for (i = 0; i < length; ++i) { + if ((i + offset >= dst.length) || (i >= src.length)) break + dst[i + offset] = src[i] + } + return i +} + +// ArrayBuffer or Uint8Array objects from other contexts (i.e. iframes) do not pass +// the `instanceof` check but they should be treated as of that type. +// See: https://github.com/feross/buffer/issues/166 +function isInstance (obj, type) { + return obj instanceof type || + (obj != null && obj.constructor != null && obj.constructor.name != null && + obj.constructor.name === type.name) +} +function numberIsNaN (obj) { + // For IE11 support + return obj !== obj // eslint-disable-line no-self-compare +} + +// Create lookup table for `toString('hex')` +// See: https://github.com/feross/buffer/issues/219 +const hexSliceLookupTable = (function () { + const alphabet = '0123456789abcdef' + const table = new Array(256) + for (let i = 0; i < 16; ++i) { + const i16 = i * 16 + for (let j = 0; j < 16; ++j) { + table[i16 + j] = alphabet[i] + alphabet[j] + } + } + return table +})() + +// Return not function with Error if BigInt not supported +function defineBigIntMethod (fn) { + return typeof BigInt === 'undefined' ? 
BufferBigIntNotDefined : fn +} + +function BufferBigIntNotDefined () { + throw new Error('BigInt not supported') +} diff --git a/node_modules/buffer/package.json b/node_modules/buffer/package.json new file mode 100644 index 0000000000..ca1ad9a707 --- /dev/null +++ b/node_modules/buffer/package.json @@ -0,0 +1,93 @@ +{ + "name": "buffer", + "description": "Node.js Buffer API, for the browser", + "version": "6.0.3", + "author": { + "name": "Feross Aboukhadijeh", + "email": "feross@feross.org", + "url": "https://feross.org" + }, + "bugs": { + "url": "https://github.com/feross/buffer/issues" + }, + "contributors": [ + "Romain Beauxis ", + "James Halliday " + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + }, + "devDependencies": { + "airtap": "^3.0.0", + "benchmark": "^2.1.4", + "browserify": "^17.0.0", + "concat-stream": "^2.0.0", + "hyperquest": "^2.1.3", + "is-buffer": "^2.0.5", + "is-nan": "^1.3.0", + "split": "^1.0.1", + "standard": "*", + "tape": "^5.0.1", + "through2": "^4.0.2", + "uglify-js": "^3.11.5" + }, + "homepage": "https://github.com/feross/buffer", + "jspm": { + "map": { + "./index.js": { + "node": "@node/buffer" + } + } + }, + "keywords": [ + "arraybuffer", + "browser", + "browserify", + "buffer", + "compatible", + "dataview", + "uint8array" + ], + "license": "MIT", + "main": "index.js", + "types": "index.d.ts", + "repository": { + "type": "git", + "url": "git://github.com/feross/buffer.git" + }, + "scripts": { + "perf": "browserify --debug perf/bracket-notation.js > perf/bundle.js && open perf/index.html", + "perf-node": "node perf/bracket-notation.js && node perf/concat.js && node perf/copy-big.js && node perf/copy.js && node perf/new-big.js && node perf/new.js && node perf/readDoubleBE.js && node perf/readFloatBE.js && node perf/readUInt32LE.js && node perf/slice.js && node perf/writeFloatBE.js", + "size": "browserify -r ./ | uglifyjs -c -m | gzip | wc -c", + "test": "standard && node ./bin/test.js", + "test-browser-old": 
"airtap -- test/*.js", + "test-browser-old-local": "airtap --local -- test/*.js", + "test-browser-new": "airtap -- test/*.js test/node/*.js", + "test-browser-new-local": "airtap --local -- test/*.js test/node/*.js", + "test-node": "tape test/*.js test/node/*.js", + "update-authors": "./bin/update-authors.sh" + }, + "standard": { + "ignore": [ + "test/node/**/*.js", + "test/common.js", + "test/_polyfill.js", + "perf/**/*.js" + ] + }, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] +} diff --git a/node_modules/ieee754/LICENSE b/node_modules/ieee754/LICENSE new file mode 100644 index 0000000000..5aac82c78c --- /dev/null +++ b/node_modules/ieee754/LICENSE @@ -0,0 +1,11 @@ +Copyright 2008 Fair Oaks Labs, Inc. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/node_modules/ieee754/README.md b/node_modules/ieee754/README.md new file mode 100644 index 0000000000..cb7527b3ce --- /dev/null +++ b/node_modules/ieee754/README.md @@ -0,0 +1,51 @@ +# ieee754 [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url] [![javascript style guide][standard-image]][standard-url] + +[travis-image]: https://img.shields.io/travis/feross/ieee754/master.svg +[travis-url]: https://travis-ci.org/feross/ieee754 +[npm-image]: https://img.shields.io/npm/v/ieee754.svg +[npm-url]: https://npmjs.org/package/ieee754 +[downloads-image]: https://img.shields.io/npm/dm/ieee754.svg +[downloads-url]: https://npmjs.org/package/ieee754 +[standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg +[standard-url]: https://standardjs.com + +[![saucelabs][saucelabs-image]][saucelabs-url] + +[saucelabs-image]: https://saucelabs.com/browser-matrix/ieee754.svg +[saucelabs-url]: https://saucelabs.com/u/ieee754 + +### Read/write IEEE754 floating point numbers from/to a Buffer or array-like object. 
+ +## install + +``` +npm install ieee754 +``` + +## methods + +`var ieee754 = require('ieee754')` + +The `ieee754` object has the following functions: + +``` +ieee754.read = function (buffer, offset, isLE, mLen, nBytes) +ieee754.write = function (buffer, value, offset, isLE, mLen, nBytes) +``` + +The arguments mean the following: + +- buffer = the buffer +- offset = offset into the buffer +- value = value to set (only for `write`) +- isLe = is little endian? +- mLen = mantissa length +- nBytes = number of bytes + +## what is ieee754? + +The IEEE Standard for Floating-Point Arithmetic (IEEE 754) is a technical standard for floating-point computation. [Read more](http://en.wikipedia.org/wiki/IEEE_floating_point). + +## license + +BSD 3 Clause. Copyright (c) 2008, Fair Oaks Labs, Inc. diff --git a/node_modules/ieee754/index.d.ts b/node_modules/ieee754/index.d.ts new file mode 100644 index 0000000000..f1e435487f --- /dev/null +++ b/node_modules/ieee754/index.d.ts @@ -0,0 +1,10 @@ +declare namespace ieee754 { + export function read( + buffer: Uint8Array, offset: number, isLE: boolean, mLen: number, + nBytes: number): number; + export function write( + buffer: Uint8Array, value: number, offset: number, isLE: boolean, + mLen: number, nBytes: number): void; + } + + export = ieee754; \ No newline at end of file diff --git a/node_modules/ieee754/index.js b/node_modules/ieee754/index.js new file mode 100644 index 0000000000..81d26c343c --- /dev/null +++ b/node_modules/ieee754/index.js @@ -0,0 +1,85 @@ +/*! ieee754. BSD-3-Clause License. Feross Aboukhadijeh */ +exports.read = function (buffer, offset, isLE, mLen, nBytes) { + var e, m + var eLen = (nBytes * 8) - mLen - 1 + var eMax = (1 << eLen) - 1 + var eBias = eMax >> 1 + var nBits = -7 + var i = isLE ? (nBytes - 1) : 0 + var d = isLE ? 
-1 : 1 + var s = buffer[offset + i] + + i += d + + e = s & ((1 << (-nBits)) - 1) + s >>= (-nBits) + nBits += eLen + for (; nBits > 0; e = (e * 256) + buffer[offset + i], i += d, nBits -= 8) {} + + m = e & ((1 << (-nBits)) - 1) + e >>= (-nBits) + nBits += mLen + for (; nBits > 0; m = (m * 256) + buffer[offset + i], i += d, nBits -= 8) {} + + if (e === 0) { + e = 1 - eBias + } else if (e === eMax) { + return m ? NaN : ((s ? -1 : 1) * Infinity) + } else { + m = m + Math.pow(2, mLen) + e = e - eBias + } + return (s ? -1 : 1) * m * Math.pow(2, e - mLen) +} + +exports.write = function (buffer, value, offset, isLE, mLen, nBytes) { + var e, m, c + var eLen = (nBytes * 8) - mLen - 1 + var eMax = (1 << eLen) - 1 + var eBias = eMax >> 1 + var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0) + var i = isLE ? 0 : (nBytes - 1) + var d = isLE ? 1 : -1 + var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0 + + value = Math.abs(value) + + if (isNaN(value) || value === Infinity) { + m = isNaN(value) ? 
1 : 0 + e = eMax + } else { + e = Math.floor(Math.log(value) / Math.LN2) + if (value * (c = Math.pow(2, -e)) < 1) { + e-- + c *= 2 + } + if (e + eBias >= 1) { + value += rt / c + } else { + value += rt * Math.pow(2, 1 - eBias) + } + if (value * c >= 2) { + e++ + c /= 2 + } + + if (e + eBias >= eMax) { + m = 0 + e = eMax + } else if (e + eBias >= 1) { + m = ((value * c) - 1) * Math.pow(2, mLen) + e = e + eBias + } else { + m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen) + e = 0 + } + } + + for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {} + + e = (e << mLen) | m + eLen += mLen + for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {} + + buffer[offset + i - d] |= s * 128 +} diff --git a/node_modules/ieee754/package.json b/node_modules/ieee754/package.json new file mode 100644 index 0000000000..7b23851384 --- /dev/null +++ b/node_modules/ieee754/package.json @@ -0,0 +1,52 @@ +{ + "name": "ieee754", + "description": "Read/write IEEE754 floating point numbers from/to a Buffer or array-like object", + "version": "1.2.1", + "author": { + "name": "Feross Aboukhadijeh", + "email": "feross@feross.org", + "url": "https://feross.org" + }, + "contributors": [ + "Romain Beauxis " + ], + "devDependencies": { + "airtap": "^3.0.0", + "standard": "*", + "tape": "^5.0.1" + }, + "keywords": [ + "IEEE 754", + "buffer", + "convert", + "floating point", + "ieee754" + ], + "license": "BSD-3-Clause", + "main": "index.js", + "types": "index.d.ts", + "repository": { + "type": "git", + "url": "git://github.com/feross/ieee754.git" + }, + "scripts": { + "test": "standard && npm run test-node && npm run test-browser", + "test-browser": "airtap -- test/*.js", + "test-browser-local": "airtap --local -- test/*.js", + "test-node": "tape test/*.js" + }, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": 
"consulting", + "url": "https://feross.org/support" + } + ] +} diff --git a/node_modules/inherits/LICENSE b/node_modules/inherits/LICENSE new file mode 100644 index 0000000000..dea3013d67 --- /dev/null +++ b/node_modules/inherits/LICENSE @@ -0,0 +1,16 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. + diff --git a/node_modules/inherits/README.md b/node_modules/inherits/README.md new file mode 100644 index 0000000000..b1c5665855 --- /dev/null +++ b/node_modules/inherits/README.md @@ -0,0 +1,42 @@ +Browser-friendly inheritance fully compatible with standard node.js +[inherits](http://nodejs.org/api/util.html#util_util_inherits_constructor_superconstructor). + +This package exports standard `inherits` from node.js `util` module in +node environment, but also provides alternative browser-friendly +implementation through [browser +field](https://gist.github.com/shtylman/4339901). Alternative +implementation is a literal copy of standard one located in standalone +module to avoid requiring of `util`. It also has a shim for old +browsers with no `Object.create` support. 
+ +While keeping you sure you are using standard `inherits` +implementation in node.js environment, it allows bundlers such as +[browserify](https://github.com/substack/node-browserify) to not +include full `util` package to your client code if all you need is +just `inherits` function. It worth, because browser shim for `util` +package is large and `inherits` is often the single function you need +from it. + +It's recommended to use this package instead of +`require('util').inherits` for any code that has chances to be used +not only in node.js but in browser too. + +## usage + +```js +var inherits = require('inherits'); +// then use exactly as the standard one +``` + +## note on version ~1.0 + +Version ~1.0 had completely different motivation and is not compatible +neither with 2.0 nor with standard node.js `inherits`. + +If you are using version ~1.0 and planning to switch to ~2.0, be +careful: + +* new version uses `super_` instead of `super` for referencing + superclass +* new version overwrites current prototype while old one preserves any + existing fields on it diff --git a/node_modules/inherits/inherits.js b/node_modules/inherits/inherits.js new file mode 100644 index 0000000000..f71f2d9329 --- /dev/null +++ b/node_modules/inherits/inherits.js @@ -0,0 +1,9 @@ +try { + var util = require('util'); + /* istanbul ignore next */ + if (typeof util.inherits !== 'function') throw ''; + module.exports = util.inherits; +} catch (e) { + /* istanbul ignore next */ + module.exports = require('./inherits_browser.js'); +} diff --git a/node_modules/inherits/inherits_browser.js b/node_modules/inherits/inherits_browser.js new file mode 100644 index 0000000000..86bbb3dc29 --- /dev/null +++ b/node_modules/inherits/inherits_browser.js @@ -0,0 +1,27 @@ +if (typeof Object.create === 'function') { + // implementation from standard node.js 'util' module + module.exports = function inherits(ctor, superCtor) { + if (superCtor) { + ctor.super_ = superCtor + ctor.prototype = 
Object.create(superCtor.prototype, { + constructor: { + value: ctor, + enumerable: false, + writable: true, + configurable: true + } + }) + } + }; +} else { + // old school shim for old browsers + module.exports = function inherits(ctor, superCtor) { + if (superCtor) { + ctor.super_ = superCtor + var TempCtor = function () {} + TempCtor.prototype = superCtor.prototype + ctor.prototype = new TempCtor() + ctor.prototype.constructor = ctor + } + } +} diff --git a/node_modules/inherits/package.json b/node_modules/inherits/package.json new file mode 100644 index 0000000000..37b4366b83 --- /dev/null +++ b/node_modules/inherits/package.json @@ -0,0 +1,29 @@ +{ + "name": "inherits", + "description": "Browser-friendly inheritance fully compatible with standard node.js inherits()", + "version": "2.0.4", + "keywords": [ + "inheritance", + "class", + "klass", + "oop", + "object-oriented", + "inherits", + "browser", + "browserify" + ], + "main": "./inherits.js", + "browser": "./inherits_browser.js", + "repository": "git://github.com/isaacs/inherits", + "license": "ISC", + "scripts": { + "test": "tap" + }, + "devDependencies": { + "tap": "^14.2.4" + }, + "files": [ + "inherits.js", + "inherits_browser.js" + ] +} diff --git a/node_modules/msgpack5/.github/workflows/ci.yml b/node_modules/msgpack5/.github/workflows/ci.yml new file mode 100644 index 0000000000..b9109a0c3b --- /dev/null +++ b/node_modules/msgpack5/.github/workflows/ci.yml @@ -0,0 +1,30 @@ +name: CI +on: [push, pull_request] +jobs: + test: + name: ${{ matrix.node-version }} ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [macOS-latest, windows-latest, ubuntu-latest] + node-version: [10, 12, 14, 16] + steps: + - uses: actions/checkout@v2 + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v1 + with: + node-version: ${{ matrix.node-version }} + - name: Install + run: npm i + - name: Tests + run: npm test + + automerge: + needs: test + runs-on: 
ubuntu-latest + steps: + - uses: fastify/github-action-merge-dependabot@v1 + if: ${{ github.actor == 'dependabot[bot]' && github.event_name == 'pull_request' }} + with: + github-token: ${{secrets.github_token}} diff --git a/node_modules/msgpack5/.jshintrc b/node_modules/msgpack5/.jshintrc new file mode 100644 index 0000000000..8863fcbaf5 --- /dev/null +++ b/node_modules/msgpack5/.jshintrc @@ -0,0 +1,7 @@ +{ + "node": true, + "laxcomma": true, + "undef": true, + "unused": true, + "asi": true +} diff --git a/node_modules/msgpack5/CONTRIBUTING.md b/node_modules/msgpack5/CONTRIBUTING.md new file mode 100644 index 0000000000..57210d6d52 --- /dev/null +++ b/node_modules/msgpack5/CONTRIBUTING.md @@ -0,0 +1,41 @@ +Contributing +============ + +The main development is on GitHub at http://github.com/mcollina/msgpack5. +In order to contribute, fork the repo on github and send a pull requests with topic branches. +Do not forget to provide tests for your contribution. + +Contact the lead dev +-------------------- + +You can reach [@matteocollina](http://twitter.com/matteocollina) on +twitter of via email at hello@matteocollina.com. + +Running the tests +------------- + +* Fork and clone the repository +* Run `npm install` +* Run `npm test` + + +Coding guidelines +---------------- + +Adopt the prevailing code style in the repository. +This project use [JSHint](http://www.jshint.com/) to validate the +source code formatting with a pre commit hook: please respect that. + + +Contribution License Agreement +---------------- + +Project license: MIT + +* You will only Submit Contributions where You have authored 100% of + the content. +* You will only Submit Contributions to which You have the necessary + rights. This means that if You are employed You have received the + necessary permissions from Your employer to make the Contributions. +* Whatever content You Contribute will be provided under the Project + License. 
diff --git a/node_modules/msgpack5/LICENSE b/node_modules/msgpack5/LICENSE new file mode 100644 index 0000000000..63ac963572 --- /dev/null +++ b/node_modules/msgpack5/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Matteo Collina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/node_modules/msgpack5/README.md b/node_modules/msgpack5/README.md new file mode 100644 index 0000000000..e330218c1c --- /dev/null +++ b/node_modules/msgpack5/README.md @@ -0,0 +1,250 @@ +msgpack5  [![CI](https://github.com/mcollina/msgpack5/workflows/CI/badge.svg)](https://github.com/mcollina/msgpack5/actions?query=workflow%3ACI) +======== + +A msgpack v5 implementation for node.js and the browser, with extension point support. 
+ +Install +------- + +```bash +npm install msgpack5 --save +``` + + +Usage +----- + +```js +var msgpack = require('msgpack5')() // namespace our extensions + , a = new MyType(2, 'a') + , encode = msgpack.encode + , decode = msgpack.decode + +msgpack.register(0x42, MyType, mytipeEncode, mytipeDecode) + +console.log(encode({ 'hello': 'world' }).toString('hex')) +// 81a568656c6c6fa5776f726c64 +console.log(decode(encode({ 'hello': 'world' }))) +// { hello: 'world' } +console.log(encode(a).toString('hex')) +// d5426161 +console.log(decode(encode(a)) instanceof MyType) +// true +console.log(decode(encode(a))) +// { value: 'a', size: 2 } + +function MyType(size, value) { + this.value = value + this.size = size +} + +function mytipeEncode(obj) { + var buf = new Buffer(obj.size) + buf.fill(obj.value) + return buf +} + +function mytipeDecode(data) { + var result = new MyType(data.length, data.toString('utf8', 0, 1)) + , i + + for (i = 0; i < data.length; i++) { + if (data.readUInt8(0) != data.readUInt8(i)) { + throw new Error('should all be the same') + } + } + + return result +} +``` + +In the Browser +----------- + +This library is compatible with [Browserify](http://npm.im/browserify). + +If you want to use standalone, grab the file in the `dist` folder of +this repo, and use in your own HTML page, the module will expose a +`msgpack5` global. + + +``` + +``` + +### To build + +``` + npm run build +``` + +API +--- + + + +## API + + * msgpack() + * msgpack().encode() + * msgpack().decode() + * msgpack().registerEncoder() + * msgpack().registerDecoder() + * msgpack().register() + * msgpack().encoder() + * msgpack().decoder() + +------------------------------------------------------- + + +### msgpack(options(obj)) + +Creates a new instance on which you can register new types for being +encoded. + +options: + +- `forceFloat64`, a boolean to that forces all floats to be encoded as 64-bits floats. Defaults to false. 
+- `sortKeys`, a boolean to force a determinate keys order +- `compatibilityMode`, a boolean that enables "compatibility mode" which doesn't use bin format family and str 8 format. Defaults to false. +- `disableTimestampEncoding`, a boolean that when set disables the encoding of Dates into the [timestamp extension type](https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type). Defaults to false. +- `preferMap`, a boolean that forces all maps to be decoded to `Map`s rather than plain objects. This ensures that `decode(encode(new Map())) instanceof Map` and that iteration order is preserved. Defaults to false. +- `protoAction`, a string which can be `error|ignore|remove` that determines what happens when decoding a plain object with a `__proto__` property which would cause prototype poisoning. `error` (default) throws an error, `remove` removes the property, `ignore` (not recommended) allows the property, thereby causing prototype poisoning on the decoded object. + +------------------------------------------------------- + + +### encode(object) + +Encodes `object` in msgpack, returns a [bl](http://npm.im/bl). + +------------------------------------------------------- + + +### decode(buf) + +Decodes buf from in msgpack. `buf` can be a `Buffer` or a [bl](http://npm.im/bl) instance. + +In order to support a stream interface, a user must pass in a [bl](http://npm.im/bl) instance. + +------------------------------------------------------- + + +### registerEncoder(check(obj), encode(obj)) + +Register a new custom object type for being automatically encoded. +The arguments are: + +- `check`, a function that will be called to check if the passed + object should be encoded with the `encode` function +- `encode`, a function that will be called to encode an object in binary + form; this function __must__ return a `Buffer` which include the same type + for [registerDecoder](#registerDecoder). 
+ +------------------------------------------------------- + + +### registerDecoder(type, decode(buf)) + +Register a new custom object type for being automatically decoded. +The arguments are: + +- `type`, is a greater than zero integer identificating the type once serialized +- `decode`, a function that will be called to decode the object from + the passed `Buffer` + + +------------------------------------------------------- + + +### register(type, constructor, encode(obj), decode(buf)) + +Register a new custom object type for being automatically encoded and +decoded. The arguments are: + +- `type`, is a greater than zero integer identificating the type once serialized +- `constructor`, the function that will be used to match the objects + with `instanceof` +- `encode`, a function that will be called to encode an object in binary + form; this function __must__ return a `Buffer` that can be + deserialized by the `decode` function +- `decode`, a function that will be called to decode the object from + the passed `Buffer` + +This is just a commodity that calls +[`registerEncoder`](#registerEncoder) and +[`registerDecoder`](#registerDecoder) internally. + +------------------------------------------------------- + + +### encoder(options) + +Builds a stream in object mode that encodes msgpack. + +Supported options: +* `wrap`, objects should be passed to encoder in wrapped object {value: data}. Wrap option should be used if you need to pass null to encoder. + + +------------------------------------------------------- + + +### decoder(options) + +Builds a stream in object mode that decodes msgpack. + +Supported options: +* `wrap`, decoded objects returned in wrapped object {value: data}. Wrap option should be used if stream contains msgpack nil. 
+ + +LevelUp Support +--------------- + +__msgpack5__ can be used as a LevelUp +[`valueEncoding`](https://github.com/rvagg/node-levelup#leveluplocation-options-callback) straight away: + +```js +var level = require('level') + , pack = msgpack() + , db = level('foo', { + valueEncoding: pack + }) + , obj = { my: 'obj' } + +db.put('hello', obj, function(err) { + db.get('hello', function(err, result) { + console.log(result) + db.close() + }) +}) + +``` + +Related projects +---------------- + +- [msgpack5rpc](http://npmjs.com/package/msgpack5rpc): An implementation of the + [msgpack-rpc spec](https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md) + on top of this library. + +Disclaimer +---------- + +This library is built fully on JS and on [bl](http://npm.im/bl) to +simplify the code. Every improvement that keeps the same API is welcome. + +Acknowledgements +---------------- + +This project was kindly sponsored by [nearForm](http://nearform.com). + + +This library was originally built as the data format for +[JSChan](http://npm.im/jschan). 
+ +License +------- + +MIT diff --git a/node_modules/msgpack5/benchmarks/encodedecode.js b/node_modules/msgpack5/benchmarks/encodedecode.js new file mode 100644 index 0000000000..384b8c1f51 --- /dev/null +++ b/node_modules/msgpack5/benchmarks/encodedecode.js @@ -0,0 +1,21 @@ +const msgpack = require('../')() +const msg = { hello: 'world' } +const encode = msgpack.encode +const decode = msgpack.decode +const max = 100000 +let i + +function run () { + for (i = 0; i < max; i++) { + decode(encode(msg)) + } +} + +// preheat +run() + +const start = Date.now() +run() +const stop = Date.now() +console.log('time', stop - start) +console.log('decode/s', max / (stop - start) * 1000) diff --git a/node_modules/msgpack5/benchmarks/parseshortmap.js b/node_modules/msgpack5/benchmarks/parseshortmap.js new file mode 100644 index 0000000000..d9fded4412 --- /dev/null +++ b/node_modules/msgpack5/benchmarks/parseshortmap.js @@ -0,0 +1,21 @@ +const msgpack = require('../')() +const bl = require('bl') +const msg = bl(msgpack.encode({ hello: 'world' })) +const decode = msgpack.decode +const max = 1000000 +let i + +function run () { + for (i = 0; i < max; i++) { + decode(msg.duplicate()) + } +} + +// preheat +run() + +const start = Date.now() +run() +const stop = Date.now() +console.log('time', stop - start) +console.log('decode/s', max / (stop - start) * 1000) diff --git a/node_modules/msgpack5/example.js b/node_modules/msgpack5/example.js new file mode 100644 index 0000000000..048d23290b --- /dev/null +++ b/node_modules/msgpack5/example.js @@ -0,0 +1,44 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const msgpack = require('./')() // namespace our extensions +const a = new MyType(2, 'a') +const encode = msgpack.encode +const decode = msgpack.decode + +msgpack.register(0x42, MyType, mytipeEncode, mytipeDecode) + +console.log(encode({ hello: 'world' }).toString('hex')) +// 81a568656c6c6fa5776f726c64 +console.log(decode(encode({ hello: 'world' }))) +// { hello: 'world' } 
+console.log(encode(a).toString('hex')) +// d5426161 +console.log(decode(encode(a)) instanceof MyType) +// true +console.log(decode(encode(a))) +// { value: 'a', size: 2 } + +function MyType (size, value) { + this.value = value + this.size = size +} + +function mytipeEncode (obj) { + const buf = Buffer.allocUnsafe(obj.size) + buf.fill(obj.value) + return buf +} + +function mytipeDecode (data) { + const result = new MyType(data.length, data.toString('utf8', 0, 1)) + let i + + for (i = 0; i < data.length; i++) { + if (data.readUInt8(0) != data.readUInt8(i)) { // eslint-disable-line + throw new Error('should all be the same') + } + } + + return result +} diff --git a/node_modules/msgpack5/index.js b/node_modules/msgpack5/index.js new file mode 100644 index 0000000000..b12620dfd2 --- /dev/null +++ b/node_modules/msgpack5/index.js @@ -0,0 +1,91 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const assert = require('assert') +const bl = require('bl') +const streams = require('./lib/streams') +const buildDecode = require('./lib/decoder') +const buildEncode = require('./lib/encoder') +const IncompleteBufferError = require('./lib/helpers.js').IncompleteBufferError +const DateCodec = require('./lib/codecs/DateCodec') + +function msgpack (options) { + const encodingTypes = [] + const decodingTypes = new Map() + + options = options || { + forceFloat64: false, + compatibilityMode: false, + // if true, skips encoding Dates using the msgpack + // timestamp ext format (-1) + disableTimestampEncoding: false, + preferMap: false, + // options.protoAction: 'error' (default) / 'remove' / 'ignore' + protoAction: 'error' + } + + decodingTypes.set(DateCodec.type, DateCodec.decode) + if (!options.disableTimestampEncoding) { + encodingTypes.push(DateCodec) + } + + function registerEncoder (check, encode) { + assert(check, 'must have an encode function') + assert(encode, 'must have an encode function') + + encodingTypes.push({ check, encode }) + + return this + } + + 
function registerDecoder (type, decode) { + assert(type >= 0, 'must have a non-negative type') + assert(decode, 'must have a decode function') + decodingTypes.set(type, decode) + return this + } + + function register (type, constructor, encode, decode) { + assert(constructor, 'must have a constructor') + assert(encode, 'must have an encode function') + assert(type >= 0, 'must have a non-negative type') + assert(decode, 'must have a decode function') + + function check (obj) { + return (obj instanceof constructor) + } + + function reEncode (obj) { + const buf = bl() + const header = Buffer.allocUnsafe(1) + + header.writeInt8(type, 0) + + buf.append(header) + buf.append(encode(obj)) + + return buf + } + + this.registerEncoder(check, reEncode) + this.registerDecoder(type, decode) + + return this + } + + return { + encode: buildEncode(encodingTypes, options), + decode: buildDecode(decodingTypes, options), + register, + registerEncoder, + registerDecoder, + encoder: streams.encoder, + decoder: streams.decoder, + // needed for levelup support + buffer: true, + type: 'msgpack5', + IncompleteBufferError + } +} + +module.exports = msgpack diff --git a/node_modules/msgpack5/lib/codecs/DateCodec.js b/node_modules/msgpack5/lib/codecs/DateCodec.js new file mode 100644 index 0000000000..a073d7c560 --- /dev/null +++ b/node_modules/msgpack5/lib/codecs/DateCodec.js @@ -0,0 +1,131 @@ +const type = -1 + +function encode (dt) { + if (dt === null) { + return + } + + const millis = dt * 1 + const seconds = Math.floor(millis / 1000) + const nanos = (millis - seconds * 1000) * 1e6 + + if (seconds < 0 || seconds > 0x400000000) { + // Timestamp96 + const encoded = Buffer.allocUnsafe(13) + encoded[0] = -1 + + encoded.writeUInt32BE(nanos, 1) + + let hex = '' + if (seconds >= 0) { + const padhex = '0000000000000000' + hex = seconds.toString(16) + // add some padding + hex = padhex.slice(0, hex.length * -1) + hex + } else { + // encode seconds in 2's Complement 64Bit + // reverse sign + // keep 
all bits 0 and first 1 from right + // reverse all other bits + let bin = (seconds * -1).toString(2) + let i = bin.length - 1 + while (bin[i] === '0') { + i-- + } + bin = bin.slice(0, i).split('').map(function (bit) { return bit === '1' ? 0 : 1 }).join('') + bin.slice(i, bin.length) + // add some padding + const pad64 = '1111111111111111111111111111111111111111111111111111111111111111' + bin = pad64.slice(0, bin.length * -1) + bin + // convert to hex + bin.match(/.{1,8}/g).forEach(function (byte) { + byte = parseInt(byte, 2).toString(16) + if (byte.length === 1) { + byte = '0' + byte + } + hex += byte + }) + } + encoded.write(hex, 5, 'hex') + return encoded + } else if (nanos || seconds > 0xffffffff) { + // Timestamp64 + const encoded = Buffer.allocUnsafe(9) + encoded[0] = -1 + + const upperNanos = nanos * 4 + const upperSeconds = seconds / Math.pow(2, 32) + const upper = (upperNanos + upperSeconds) & 0xffffffff + const lower = seconds & 0xffffffff + + encoded.writeInt32BE(upper, 1) + encoded.writeInt32BE(lower, 5) + return encoded + } else { + // Timestamp32 + const encoded = Buffer.allocUnsafe(5) + encoded[0] = -1 + encoded.writeUInt32BE(Math.floor(millis / 1000), 1) + return encoded + } +} + +function check (obj) { + return typeof obj.getDate === 'function' +} + +function decode (buf) { + let seconds + let nanoseconds = 0 + let upper + let lower + let hex + + switch (buf.length) { + case 4: + // timestamp 32 stores the number of seconds that have elapsed since 1970-01-01 00:00:00 UTC in an 32-bit unsigned integer + seconds = buf.readUInt32BE(0) + break + + case 8: + // Timestamp 64 stores the number of seconds and nanoseconds that have elapsed + // since 1970-01-01 00:00:00 UTC in 32-bit unsigned integers, split 30/34 bits + upper = buf.readUInt32BE(0) + lower = buf.readUInt32BE(4) + nanoseconds = upper / 4 + seconds = ((upper & 0x03) * Math.pow(2, 32)) + lower // If we use bitwise operators, we get truncated to 32bits + break + + case 12: + // timestamp 96 
stores the number of seconds and nanoseconds that have elapsed + // since 1970-01-01 00:00:00 UTC in 64-bit signed integer and 32-bit unsigned integer + + // get seconds in hex + hex = buf.toString('hex', 4, 12) + // check if seconds is a negative number + if (parseInt(buf.toString('hex', 4, 6), 16) & 0x80) { + // convert to binary + let bin = '' + const pad8 = '00000000' + hex.match(/.{1,2}/g).forEach(function (byte) { + byte = parseInt(byte, 16).toString(2) + byte = pad8.slice(0, byte.length * -1) + byte + bin += byte + }) + // decode seconds from 2's Complement 64Bit + // reverse all bits + // reverse sign + // remove one + seconds = -1 * parseInt(bin.split('').map(function (bit) { return bit === '1' ? 0 : 1 }).join(''), 2) - 1 + } else { + seconds = parseInt(hex, 16) + } + + nanoseconds = buf.readUInt32BE(0) + } + + const millis = (seconds * 1000) + Math.round(nanoseconds / 1E6) + + return new Date(millis) +} + +module.exports = { check, type, encode, decode } diff --git a/node_modules/msgpack5/lib/decoder.js b/node_modules/msgpack5/lib/decoder.js new file mode 100644 index 0000000000..9296d75731 --- /dev/null +++ b/node_modules/msgpack5/lib/decoder.js @@ -0,0 +1,268 @@ +'use strict' + +const bl = require('bl') +const IncompleteBufferError = require('./helpers.js').IncompleteBufferError + +const SIZES = { + 0xc4: 2, + 0xc5: 3, + 0xc6: 5, + 0xc7: 3, + 0xc8: 4, + 0xc9: 6, + 0xca: 5, + 0xcb: 9, + 0xcc: 2, + 0xcd: 3, + 0xce: 5, + 0xcf: 9, + 0xd0: 2, + 0xd1: 3, + 0xd2: 5, + 0xd3: 9, + 0xd4: 3, + 0xd5: 4, + 0xd6: 6, + 0xd7: 10, + 0xd8: 18, + 0xd9: 2, + 0xda: 3, + 0xdb: 5, + 0xde: 3, + 0xdc: 3, + 0xdd: 5 +} + +function isValidDataSize (dataLength, bufLength, headerLength) { + return bufLength >= headerLength + dataLength +} + +module.exports = function buildDecode (decodingTypes, options) { + const context = { decodingTypes, options, decode } + return decode + + function decode (buf) { + if (!bl.isBufferList(buf)) { + buf = bl(buf) + } + + const result = 
tryDecode(buf, 0, context) + // Handle worst case ASAP and keep code flat + if (!result) throw new IncompleteBufferError() + + buf.consume(result[1]) + return result[0] + } +} + +function decodeArray (buf, initialOffset, length, headerLength, context) { + let offset = initialOffset + const result = [] + let i = 0 + + while (i++ < length) { + const decodeResult = tryDecode(buf, offset, context) + if (!decodeResult) return null + + result.push(decodeResult[0]) + offset += decodeResult[1] + } + return [result, headerLength + offset - initialOffset] +} + +function decodeMap (buf, offset, length, headerLength, context) { + const _temp = decodeArray(buf, offset, 2 * length, headerLength, context) + if (!_temp) return null + const [result, consumedBytes] = _temp + + let isPlainObject = !context.options.preferMap + + if (isPlainObject) { + for (let i = 0; i < 2 * length; i += 2) { + if (typeof result[i] !== 'string') { + isPlainObject = false + break + } + } + } + + if (isPlainObject) { + const object = {} + for (let i = 0; i < 2 * length; i += 2) { + const key = result[i] + const val = result[i + 1] + + if (key === '__proto__') { + if (context.options.protoAction === 'error') { + throw new SyntaxError('Object contains forbidden prototype property') + } + + if (context.options.protoAction === 'remove') { + continue + } + } + + object[key] = val + } + return [object, consumedBytes] + } else { + const mapping = new Map() + for (let i = 0; i < 2 * length; i += 2) { + const key = result[i] + const val = result[i + 1] + mapping.set(key, val) + } + return [mapping, consumedBytes] + } +} + +function tryDecode (buf, initialOffset, context) { + if (buf.length <= initialOffset) return null + + const bufLength = buf.length - initialOffset + let offset = initialOffset + + const first = buf.readUInt8(offset) + offset += 1 + + const size = SIZES[first] || -1 + if (bufLength < size) return null + + if (first < 0x80) return [first, 1] // 7-bits positive ints + if ((first & 0xf0) === 0x80) 
{ + const length = first & 0x0f + const headerSize = offset - initialOffset + // we have a map with less than 15 elements + return decodeMap(buf, offset, length, headerSize, context) + } + if ((first & 0xf0) === 0x90) { + const length = first & 0x0f + const headerSize = offset - initialOffset + // we have an array with less than 15 elements + return decodeArray(buf, offset, length, headerSize, context) + } + + if ((first & 0xe0) === 0xa0) { + // fixstr up to 31 bytes + const length = first & 0x1f + if (!isValidDataSize(length, bufLength, 1)) return null + const result = buf.toString('utf8', offset, offset + length) + return [result, length + 1] + } + if (first >= 0xc0 && first <= 0xc3) return decodeConstants(first) + if (first >= 0xc4 && first <= 0xc6) { + const length = buf.readUIntBE(offset, size - 1) + offset += size - 1 + + if (!isValidDataSize(length, bufLength, size)) return null + const result = buf.slice(offset, offset + length) + return [result, size + length] + } + if (first >= 0xc7 && first <= 0xc9) { + const length = buf.readUIntBE(offset, size - 2) + offset += size - 2 + + const type = buf.readInt8(offset) + offset += 1 + + if (!isValidDataSize(length, bufLength, size)) return null + return decodeExt(buf, offset, type, length, size, context) + } + if (first >= 0xca && first <= 0xcb) return decodeFloat(buf, offset, size - 1) + if (first >= 0xcc && first <= 0xcf) return decodeUnsignedInt(buf, offset, size - 1) + if (first >= 0xd0 && first <= 0xd3) return decodeSigned(buf, offset, size - 1) + if (first >= 0xd4 && first <= 0xd8) { + const type = buf.readInt8(offset) // Signed + offset += 1 + return decodeExt(buf, offset, type, size - 2, 2, context) + } + + if (first >= 0xd9 && first <= 0xdb) { + const length = buf.readUIntBE(offset, size - 1) + offset += size - 1 + + if (!isValidDataSize(length, bufLength, size)) return null + const result = buf.toString('utf8', offset, offset + length) + return [result, size + length] + } + if (first >= 0xdc && first <= 
0xdd) { + const length = buf.readUIntBE(offset, size - 1) + offset += size - 1 + return decodeArray(buf, offset, length, size, context) + } + if (first >= 0xde && first <= 0xdf) { + let length + switch (first) { + case 0xde: + // maps up to 2^16 elements - 2 bytes + length = buf.readUInt16BE(offset) + offset += 2 + // console.log(offset - initialOffset) + return decodeMap(buf, offset, length, 3, context) + + case 0xdf: + length = buf.readUInt32BE(offset) + offset += 4 + return decodeMap(buf, offset, length, 5, context) + } + } + if (first >= 0xe0) return [first - 0x100, 1] // 5 bits negative ints + + throw new Error('not implemented yet') +} + +function decodeSigned (buf, offset, size) { + let result + if (size === 1) result = buf.readInt8(offset) + if (size === 2) result = buf.readInt16BE(offset) + if (size === 4) result = buf.readInt32BE(offset) + if (size === 8) result = readInt64BE(buf.slice(offset, offset + 8), 0) + return [result, size + 1] +} + +function decodeExt (buf, offset, type, size, headerSize, context) { + const toDecode = buf.slice(offset, offset + size) + + const decode = context.decodingTypes.get(type) + if (!decode) throw new Error('unable to find ext type ' + type) + + const value = decode(toDecode) + return [value, headerSize + size] +} + +function decodeUnsignedInt (buf, offset, size) { + const maxOffset = offset + size + let result = 0 + while (offset < maxOffset) { result += buf.readUInt8(offset++) * Math.pow(256, maxOffset - offset) } + return [result, size + 1] +} + +function decodeConstants (first) { + if (first === 0xc0) return [null, 1] + if (first === 0xc2) return [false, 1] + if (first === 0xc3) return [true, 1] +} + +function decodeFloat (buf, offset, size) { + let result + if (size === 4) result = buf.readFloatBE(offset) + if (size === 8) result = buf.readDoubleBE(offset) + return [result, size + 1] +} + +function readInt64BE (buf, offset) { + var negate = (buf[offset] & 0x80) == 0x80; // eslint-disable-line + + if (negate) { + let 
carry = 1 + for (let i = offset + 7; i >= offset; i--) { + const v = (buf[i] ^ 0xff) + carry + buf[i] = v & 0xff + carry = v >> 8 + } + } + + const hi = buf.readUInt32BE(offset + 0) + const lo = buf.readUInt32BE(offset + 4) + return (hi * 4294967296 + lo) * (negate ? -1 : +1) +} diff --git a/node_modules/msgpack5/lib/encoder.js b/node_modules/msgpack5/lib/encoder.js new file mode 100644 index 0000000000..0ed736062b --- /dev/null +++ b/node_modules/msgpack5/lib/encoder.js @@ -0,0 +1,298 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const bl = require('bl') +const isFloat = require('./helpers.js').isFloat + +module.exports = function buildEncode (encodingTypes, options) { + function encode (obj) { + if (obj === undefined) throw new Error('undefined is not encodable in msgpack!') + + if (obj === null) return Buffer.from([0xc0]) + if (obj === true) return Buffer.from([0xc3]) + if (obj === false) return Buffer.from([0xc2]) + + if (obj instanceof Map) return encodeMap(obj, options, encode) + + if (typeof obj === 'string') return encodeString(obj, options) + + if (obj && (obj.readUInt32LE || obj instanceof Uint8Array)) { + if (obj instanceof Uint8Array) { + obj = Buffer.from(obj) + } + // weird hack to support Buffer + // and Buffer-like objects + const _getBufferHeader = options.compatibilityMode ? 
getCompatibleBufferHeader : getBufferHeader + return bl([_getBufferHeader(obj.length), obj]) + } + if (Array.isArray(obj)) return encodeArray(obj, encode) + if (typeof obj === 'object') return encodeExt(obj, encodingTypes) || encodeObject(obj, options, encode) + if (typeof obj === 'number') return encodeNumber(obj, options) + + throw new Error('not implemented yet') + } + + return function (obj) { + return encode(obj).slice() + } +} + +// +// +// === MENTAL SEPARATOR === +// +// + +function encodeArray (array, encode) { + const acc = [getHeader(array.length, 0x90, 0xdc)] + + // This has to be forEach; Array.prototype.map preserves missing values and + // Array.prototype.values yields them as undefined + array.forEach(item => { + acc.push(encode(item)) + }) + + if (acc.length !== array.length + 1) { + throw new Error('Sparse arrays are not encodable in msgpack') + } + + return bl(acc) +} + +function encodeMap (map, options, encode) { + const acc = [getHeader(map.size, 0x80, 0xde)] + const keys = [...map.keys()] + + if (!options.preferMap) { + if (keys.every(item => typeof item === 'string')) { + console.warn('Map with string only keys will be deserialized as an object!') + } + } + + keys.forEach(key => { + acc.push(encode(key), encode(map.get(key))) + }) + return bl(acc) +} + +function encodeObject (obj, options, encode) { + const keys = [] + + for (const key in obj) { + if (Object.prototype.hasOwnProperty.call(obj, key) && + obj[key] !== undefined && + typeof obj[key] !== 'function') { + keys.push(key) + } + } + + const acc = [getHeader(keys.length, 0x80, 0xde)] + + if (options.sortKeys) keys.sort() + + keys.forEach(key => { + acc.push(encode(key), encode(obj[key])) + }) + + return bl(acc) +} + +function write64BitUint (buf, offset, num) { + const lo = num % 4294967296 + const hi = Math.floor(num / 4294967296) + + buf.writeUInt32BE(hi, offset + 0) + buf.writeUInt32BE(lo, offset + 4) +} + +function write64BitInt (buf, offset, num) { + const negate = num < 0 + num = 
Math.abs(num) + write64BitUint(buf, offset, num) + if (negate) negate64BitInt(buf, offset) +} + +function negate64BitInt (buf, offset) { + let i = offset + 8 + + // Optimization based on the fact that: + // buf[i] == 0x00 => (buf[i] ^ 0xff) + 1 = 0x100 = 0x00 + 1 curry + + while (i-- > offset) { + if (buf[i] === 0x00) continue + buf[i] = (buf[i] ^ 0xff) + 1 + break + } + + while (i-- > offset) { + buf[i] = buf[i] ^ 0xff + } +} + +const fround = Math.fround + +function encodeFloat (obj, forceFloat64) { + let buf + + if (forceFloat64 || !fround || !Object.is(fround(obj), obj)) { + buf = Buffer.allocUnsafe(9) + buf[0] = 0xcb + buf.writeDoubleBE(obj, 1) + } else { + buf = Buffer.allocUnsafe(5) + buf[0] = 0xca + buf.writeFloatBE(obj, 1) + } + + return buf +} + +function encodeExt (obj, encodingTypes) { + const codec = encodingTypes.find(codec => codec.check(obj)) + if (!codec) return null + const encoded = codec.encode(obj) + if (!encoded) return null + + return bl([getExtHeader(encoded.length - 1), encoded]) +} + +function getExtHeader (length) { + if (length === 1) return Buffer.from([0xd4]) + if (length === 2) return Buffer.from([0xd5]) + if (length === 4) return Buffer.from([0xd6]) + if (length === 8) return Buffer.from([0xd7]) + if (length === 16) return Buffer.from([0xd8]) + + if (length < 256) return Buffer.from([0xc7, length]) + if (length < 0x10000) return Buffer.from([0xc8, length >> 8, length & 0x00ff]) + return Buffer.from([0xc9, length >> 24, (length >> 16) & 0x000000ff, (length >> 8) & 0x000000ff, length & 0x000000ff]) +} + +function getHeader (length, tag1, tag2) { + if (length < 16) return Buffer.from([tag1 | length]) + const size = length < 0x10000 ? 2 : 4 + const buf = Buffer.allocUnsafe(1 + size) + buf[0] = length < 0x10000 ? 
tag2 : tag2 + 1 + buf.writeUIntBE(length, 1, size) + + return buf +} + +function encodeString (obj, options) { + const len = Buffer.byteLength(obj) + let buf + if (len < 32) { + buf = Buffer.allocUnsafe(1 + len) + buf[0] = 0xa0 | len + if (len > 0) { + buf.write(obj, 1) + } + } else if (len <= 0xff && !options.compatibilityMode) { + // str8, but only when not in compatibility mode + buf = Buffer.allocUnsafe(2 + len) + buf[0] = 0xd9 + buf[1] = len + buf.write(obj, 2) + } else if (len <= 0xffff) { + buf = Buffer.allocUnsafe(3 + len) + buf[0] = 0xda + buf.writeUInt16BE(len, 1) + buf.write(obj, 3) + } else { + buf = Buffer.allocUnsafe(5 + len) + buf[0] = 0xdb + buf.writeUInt32BE(len, 1) + buf.write(obj, 5) + } + return buf +} + +function getBufferHeader (length) { + let header + if (length <= 0xff) { + header = Buffer.allocUnsafe(2) + header[0] = 0xc4 + header[1] = length + } else if (length <= 0xffff) { + header = Buffer.allocUnsafe(3) + header[0] = 0xc5 + header.writeUInt16BE(length, 1) + } else { + header = Buffer.allocUnsafe(5) + header[0] = 0xc6 + header.writeUInt32BE(length, 1) + } + + return header +} + +function getCompatibleBufferHeader (length) { + let header + if (length <= 0x1f) { + // fix raw header: 101XXXXX + header = Buffer.allocUnsafe(1) + header[0] = 0xa0 | length + } else if (length <= 0xffff) { + // raw 16 header: 0xda, XXXXXXXX, XXXXXXXX + header = Buffer.allocUnsafe(3) + header[0] = 0xda + header.writeUInt16BE(length, 1) + } else { + // raw 32 header: 0xdb, XXXXXXXX, XXXXXXXX, XXXXXXXX, XXXXXXXX + header = Buffer.allocUnsafe(5) + header[0] = 0xdb + header.writeUInt32BE(length, 1) + } + return header +} + +function encodeNumber (obj, options) { + let buf + if (isFloat(obj)) return encodeFloat(obj, options.forceFloat64) + if (Math.abs(obj) > 9007199254740991) { + return encodeFloat(obj, true) + } + + if (obj >= 0) { + if (obj < 128) { + return Buffer.from([obj]) + } else if (obj < 256) { + return Buffer.from([0xcc, obj]) + } else if (obj < 65536) { 
+ return Buffer.from([0xcd, 0xff & (obj >> 8), 0xff & (obj)]) + } else if (obj <= 0xffffffff) { + return Buffer.from([0xce, 0xff & (obj >> 24), 0xff & (obj >> 16), 0xff & (obj >> 8), 0xff & (obj)]) + } else if (obj <= 9007199254740991) { + buf = Buffer.allocUnsafe(9) + buf[0] = 0xcf + write64BitUint(buf, 1, obj) + } + } else { + if (obj >= -32) { + buf = Buffer.allocUnsafe(1) + buf[0] = 0x100 + obj + } else if (obj >= -128) { + buf = Buffer.allocUnsafe(2) + buf[0] = 0xd0 + buf.writeInt8(obj, 1) + } else if (obj >= -32768) { + buf = Buffer.allocUnsafe(3) + buf[0] = 0xd1 + buf.writeInt16BE(obj, 1) + } else if (obj > -214748365) { + buf = Buffer.allocUnsafe(5) + buf[0] = 0xd2 + buf.writeInt32BE(obj, 1) + } else if (obj >= -9007199254740991) { + buf = Buffer.allocUnsafe(9) + buf[0] = 0xd3 + write64BitInt(buf, 1, obj) + } + } + return buf +} + +// function order(num, n = 1, step = 2) { +// while (num = num >> step) n++; +// return n +// } diff --git a/node_modules/msgpack5/lib/helpers.js b/node_modules/msgpack5/lib/helpers.js new file mode 100644 index 0000000000..26fdf21ea4 --- /dev/null +++ b/node_modules/msgpack5/lib/helpers.js @@ -0,0 +1,20 @@ +'use strict' + +const util = require('util') + +exports.IncompleteBufferError = IncompleteBufferError + +function IncompleteBufferError (message) { + Error.call(this) // super constructor + if (Error.captureStackTrace) { + Error.captureStackTrace(this, this.constructor) // super helper method to include stack trace in error object + } + this.name = this.constructor.name + this.message = message || 'unable to decode' +} + +util.inherits(IncompleteBufferError, Error) + +exports.isFloat = function isFloat (n) { + return n % 1 !== 0 +} diff --git a/node_modules/msgpack5/lib/streams.js b/node_modules/msgpack5/lib/streams.js new file mode 100644 index 0000000000..714f798c7d --- /dev/null +++ b/node_modules/msgpack5/lib/streams.js @@ -0,0 +1,90 @@ +'use strict' + +const Transform = require('readable-stream').Transform +const 
inherits = require('inherits') +const bl = require('bl') + +function Base (opts) { + opts = opts || {} + + opts.objectMode = true + opts.highWaterMark = 16 + + Transform.call(this, opts) + + this._msgpack = opts.msgpack +} + +inherits(Base, Transform) + +function Encoder (opts) { + if (!(this instanceof Encoder)) { + opts = opts || {} + opts.msgpack = this + return new Encoder(opts) + } + + Base.call(this, opts) + this._wrap = ('wrap' in opts) && opts.wrap +} + +inherits(Encoder, Base) + +Encoder.prototype._transform = function (obj, enc, done) { + let buf = null + + try { + buf = this._msgpack.encode(this._wrap ? obj.value : obj).slice(0) + } catch (err) { + this.emit('error', err) + return done() + } + + this.push(buf) + done() +} + +function Decoder (opts) { + if (!(this instanceof Decoder)) { + opts = opts || {} + opts.msgpack = this + return new Decoder(opts) + } + + Base.call(this, opts) + + this._chunks = bl() + this._wrap = ('wrap' in opts) && opts.wrap +} + +inherits(Decoder, Base) + +Decoder.prototype._transform = function (buf, enc, done) { + if (buf) { + this._chunks.append(buf) + } + + try { + let result = this._msgpack.decode(this._chunks) + if (this._wrap) { + result = { value: result } + } + this.push(result) + } catch (err) { + if (err instanceof this._msgpack.IncompleteBufferError) { + done() + } else { + this.emit('error', err) + } + return + } + + if (this._chunks.length > 0) { + this._transform(null, enc, done) + } else { + done() + } +} + +module.exports.decoder = Decoder +module.exports.encoder = Encoder diff --git a/node_modules/msgpack5/package.json b/node_modules/msgpack5/package.json new file mode 100644 index 0000000000..0f5136439c --- /dev/null +++ b/node_modules/msgpack5/package.json @@ -0,0 +1,52 @@ +{ + "name": "msgpack5", + "version": "6.0.2", + "description": "A msgpack v5 implementation for node.js and the browser, with extension points", + "main": "index.js", + "scripts": { + "test": "standard && tape test/* | tap-mocha-reporter 
dot", + "build": "npm run browserify && npm run dist", + "browserify": "browserify index.js -o dist/msgpack5.js -s msgpack5", + "dist": "uglifyjs dist/msgpack5.js -o dist/msgpack5.min.js" + }, + "pre-commit": [ + "test" + ], + "repository": { + "type": "git", + "url": "git://github.com/mcollina/msgpack5.git" + }, + "keywords": [ + "msgpack", + "extension", + "v5", + "MessagePack", + "ext" + ], + "author": "Matteo collina ", + "license": "MIT", + "bugs": { + "url": "https://github.com/mcollina/msgpack5/issues" + }, + "homepage": "https://github.com/mcollina/msgpack5", + "devDependencies": { + "browserify": "^17.0.0", + "memdb": "^1.3.1", + "pre-commit": "^1.2.2", + "standard": "^16.0.0", + "tap-mocha-reporter": "^5.0.0", + "tape": "^5.0.0", + "uglify-js": "^3.4.9" + }, + "standard": { + "ignore": [ + "dist/" + ] + }, + "dependencies": { + "bl": "^5.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.0.0", + "safe-buffer": "^5.1.2" + } +} diff --git a/node_modules/msgpack5/spec.html b/node_modules/msgpack5/spec.html new file mode 100644 index 0000000000..61d925e330 --- /dev/null +++ b/node_modules/msgpack5/spec.html @@ -0,0 +1,459 @@ +

MessagePack specification

+

MessagePack is an object serialization specification like JSON.

+

MessagePack has two concepts: type system and formats.

+

Serialization is conversion from application objects into MessagePack formats via MessagePack type system.

+

Deserialization is conversion from MessagePack formats into application objects via MessagePack type system.

+
Serialization:
+    Application objects
+    -->  MessagePack type system
+    -->  MessagePack formats (byte array)
+
+Deserialization:
+    MessagePack formats (byte array)
+    -->  MessagePack type system
+    -->  Application objects
+

This document describes the MessagePack type system, MesagePack formats and conversion of them.

+

Table of contents

+ +

+

Type system

+
    +
  • Types
      +
    • Integer represents an integer
    • +
    • Nil represents nil
    • +
    • Boolean represents true or false
    • +
    • Float represents a floating point number
    • +
    • Raw
        +
      • String extending Raw type represents a UTF-8 string
      • +
      • Binary extending Raw type represents a byte array
      • +
      +
    • +
    • Array represents a sequence of objects
    • +
    • Map represents key-value pairs of objects
    • +
    • Extended implements Extension interface: represents a tuple of type information and a byte array where type information is an integer whose meaning is defined by applications
    • +
    +
  • +
  • Interfaces
      +
    • Extension represents a tuple of an integer and a byte array where the integer represents type information and the byte array represents data. The format of the data is defined by concrete types
    • +
    +
  • +
+

+

Limitation

+
    +
  • a value of an Integer object is limited from -(2^63) upto (2^64)-1
  • +
  • a value of a Float object is IEEE 754 single or double precision floating-point number
  • +
  • maximum length of a Binary object is (2^32)-1
  • +
  • maximum byte size of a String object is (2^32)-1
  • +
  • String objects may contain invalid byte sequence and the behavior of a deserializer depends on the actual implementation when it received invalid byte sequence
      +
    • Deserializers should provide functionality to get the original byte array so that applications can decide how to handle the object
    • +
    +
  • +
  • maximum number of elements of an Array object is (2^32)-1
  • +
  • maximum number of key-value associations of a Map object is (2^32)-1
  • +
+

+

Extension type

+

MessagePack allows applications to define application-specific types using the Extended type. +Extended type consists of an integer and a byte array where the integer represents a kind of types and the byte array represents data.

+

Applications can assign 0 to 127 to store application-specific type information.

+

MessagePack reserves -1 to -128 for future extension to add predefined types which will be described in separated documents.

+
[0, 127]: application-specific types
+[-1, -128]: reserved for predefined types
+

+

Formats

+

+

Overview

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
format namefirst byte (in binary)first byte (in hex)
positive fixint0xxxxxxx0x00 - 0x7f
fixmap1000xxxx0x80 - 0x8f
fixarray1001xxxx0x90 - 0x9f
fixstr101xxxxx0xa0 - 0xbf
nil110000000xc0
(never used)110000010xc1
false110000100xc2
true110000110xc3
bin 8110001000xc4
bin 16110001010xc5
bin 32110001100xc6
ext 8110001110xc7
ext 16110010000xc8
ext 32110010010xc9
float 32110010100xca
float 64110010110xcb
uint 8110011000xcc
uint 16110011010xcd
uint 32110011100xce
uint 64110011110xcf
int 8110100000xd0
int 16110100010xd1
int 32110100100xd2
int 64110100110xd3
fixext 1110101000xd4
fixext 2110101010xd5
fixext 4110101100xd6
fixext 8110101110xd7
fixext 16110110000xd8
str 8110110010xd9
str 16110110100xda
str 32110110110xdb
array 16110111000xdc
array 32110111010xdd
map 16110111100xde
map 32110111110xdf
negative fixint111xxxxx0xe0 - 0xff
+ + +

+

Notation in diagrams

+
one byte:
++--------+
+|        |
++--------+
+
+a variable number of bytes:
++========+
+|        |
++========+
+
+variable number of objects stored in MessagePack format:
++~~~~~~~~~~~~~~~~~+
+|                 |
++~~~~~~~~~~~~~~~~~+
+

X, Y, Z and A are the symbols that will be replaced by an actual bit.

+

+

nil format

+

Nil format stores nil in 1 byte.

+
nil:
++--------+
+|  0xc0  |
++--------+
+

+

bool format family

+

Bool format family stores false or true in 1 byte.

+
false:
++--------+
+|  0xc2  |
++--------+
+
+true:
++--------+
+|  0xc3  |
++--------+
+

+

int format family

+

Int format family stores an integer in 1, 2, 3, 5, or 9 bytes.

+
positive fixnum stores 7-bit positive integer
++--------+
+|0XXXXXXX|
++--------+
+
+negative fixnum stores 5-bit negative integer
++--------+
+|111YYYYY|
++--------+
+
+* 0XXXXXXX is 8-bit unsigned integer
+* 111YYYYY is 8-bit signed integer
+
+uint 8 stores a 8-bit unsigned integer
++--------+--------+
+|  0xcc  |ZZZZZZZZ|
++--------+--------+
+
+uint 16 stores a 16-bit big-endian unsigned integer
++--------+--------+--------+
+|  0xcd  |ZZZZZZZZ|ZZZZZZZZ|
++--------+--------+--------+
+
+uint 32 stores a 32-bit big-endian unsigned integer
++--------+--------+--------+--------+--------+
+|  0xce  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ
++--------+--------+--------+--------+--------+
+
+uint 64 stores a 64-bit big-endian unsigned integer
++--------+--------+--------+--------+--------+--------+--------+--------+--------+
+|  0xcf  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|
++--------+--------+--------+--------+--------+--------+--------+--------+--------+
+
+int 8 stores a 8-bit signed integer
++--------+--------+
+|  0xd0  |ZZZZZZZZ|
++--------+--------+
+
+int 16 stores a 16-bit big-endian signed integer
++--------+--------+--------+
+|  0xd1  |ZZZZZZZZ|ZZZZZZZZ|
++--------+--------+--------+
+
+int 32 stores a 32-bit big-endian signed integer
++--------+--------+--------+--------+--------+
+|  0xd2  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|
++--------+--------+--------+--------+--------+
+
+int 64 stores a 64-bit big-endian signed integer
++--------+--------+--------+--------+--------+--------+--------+--------+--------+
+|  0xd3  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|
++--------+--------+--------+--------+--------+--------+--------+--------+--------+
+

+

float format family

+

Float format family stores an floating point number in 5 bytes or 9 bytes.

+
float 32 stores a floating point number in IEEE 754 single precision floating point number format:
++--------+--------+--------+--------+--------+
+|  0xca  |XXXXXXXX|XXXXXXXX|XXXXXXXX|XXXXXXXX
++--------+--------+--------+--------+--------+
+
+float 64 stores a floating point number in IEEE 754 double precision floating point number format:
++--------+--------+--------+--------+--------+--------+--------+--------+--------+
+|  0xcb  |YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|
++--------+--------+--------+--------+--------+--------+--------+--------+--------+
+
+where
+* XXXXXXXX_XXXXXXXX_XXXXXXXX_XXXXXXXX is a big-endian IEEE 754 single precision floating point number
+* YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY is a big-endian
+  IEEE 754 double precision floating point number
+

+

str format family

+

Str format family stores an byte array in 1, 2, 3, or 5 bytes of extra bytes in addition to the size of the byte array.

+
fixstr stores a byte array whose length is upto 31 bytes:
++--------+========+
+|101XXXXX|  data  |
++--------+========+
+
+str 8 stores a byte array whose length is upto (2^8)-1 bytes:
++--------+--------+========+
+|  0xd9  |YYYYYYYY|  data  |
++--------+--------+========+
+
+str 16 stores a byte array whose length is upto (2^16)-1 bytes:
++--------+--------+--------+========+
+|  0xda  |ZZZZZZZZ|ZZZZZZZZ|  data  |
++--------+--------+--------+========+
+
+str 32 stores a byte array whose length is upto (2^32)-1 bytes:
++--------+--------+--------+--------+--------+========+
+|  0xdb  |AAAAAAAA|AAAAAAAA|AAAAAAAA|AAAAAAAA|  data  |
++--------+--------+--------+--------+--------+========+
+
+where
+* XXXXX is a 5-bit unsigned integer which represents N
+* YYYYYYYY is a 8-bit unsigned integer which represents N
+* ZZZZZZZZ_ZZZZZZZZ is a 16-bit big-endian unsigned integer which represents N
+* AAAAAAAA_AAAAAAAA_AAAAAAAA_AAAAAAAA is a 32-bit big-endian unsigned integer which represents N
+* N is the length of data
+

+

bin format family

+

Bin format family stores a byte array in 2, 3, or 5 bytes of extra bytes in addition to the size of the byte array.

+
bin 8 stores a byte array whose length is upto (2^8)-1 bytes:
++--------+--------+========+
+|  0xc4  |XXXXXXXX|  data  |
++--------+--------+========+
+
+bin 16 stores a byte array whose length is upto (2^16)-1 bytes:
++--------+--------+--------+========+
+|  0xc5  |YYYYYYYY|YYYYYYYY|  data  |
++--------+--------+--------+========+
+
+bin 32 stores a byte array whose length is upto (2^32)-1 bytes:
++--------+--------+--------+--------+--------+========+
+|  0xc6  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|  data  |
++--------+--------+--------+--------+--------+========+
+
+where
+* XXXXXXXX is a 8-bit unsigned integer which represents N
+* YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N
+* ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N
+* N is the length of data
+

+

array format family

+

Array format family stores a sequence of elements in 1, 3, or 5 bytes of extra bytes in addition to the elements.

+
fixarray stores an array whose length is upto 15 elements:
++--------+~~~~~~~~~~~~~~~~~+
+|1001XXXX|    N objects    |
++--------+~~~~~~~~~~~~~~~~~+
+
+array 16 stores an array whose length is upto (2^16)-1 elements:
++--------+--------+--------+~~~~~~~~~~~~~~~~~+
+|  0xdc  |YYYYYYYY|YYYYYYYY|    N objects    |
++--------+--------+--------+~~~~~~~~~~~~~~~~~+
+
+array 32 stores an array whose length is upto (2^32)-1 elements:
++--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+
+|  0xdd  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|    N objects    |
++--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+
+
+where
+* XXXX is a 4-bit unsigned integer which represents N
+* YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N
+* ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N
+* N is the size of an array
+

+

map format family

+

Map format family stores a sequence of key-value pairs in 1, 3, or 5 bytes of extra bytes in addition to the key-value pairs.

+
fixmap stores a map whose length is upto 15 elements
++--------+~~~~~~~~~~~~~~~~~+
+|1000XXXX|   N*2 objects   |
++--------+~~~~~~~~~~~~~~~~~+
+
+map 16 stores a map whose length is upto (2^16)-1 elements
++--------+--------+--------+~~~~~~~~~~~~~~~~~+
+|  0xde  |YYYYYYYY|YYYYYYYY|   N*2 objects   |
++--------+--------+--------+~~~~~~~~~~~~~~~~~+
+
+map 32 stores a map whose length is upto (2^32)-1 elements
++--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+
+|  0xdf  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|   N*2 objects   |
++--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+
+
+where
+* XXXX is a 4-bit unsigned integer which represents N
+* YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N
+* ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N
+* N is the size of a map
+* odd elements in objects are keys of a map
+* the next element of a key is its associated value
+

+

ext format family

+

Ext format family stores a tuple of an integer and a byte array.

+
fixext 1 stores an integer and a byte array whose length is 1 byte
++--------+--------+--------+
+|  0xd4  |  type  |  data  |
++--------+--------+--------+
+
+fixext 2 stores an integer and a byte array whose length is 2 bytes
++--------+--------+--------+--------+
+|  0xd5  |  type  |       data      |
++--------+--------+--------+--------+
+
+fixext 4 stores an integer and a byte array whose length is 4 bytes
++--------+--------+--------+--------+--------+--------+
+|  0xd6  |  type  |                data               |
++--------+--------+--------+--------+--------+--------+
+
+fixext 8 stores an integer and a byte array whose length is 8 bytes
++--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
+|  0xd7  |  type  |                                  data                                 |
++--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
+
+fixext 16 stores an integer and a byte array whose length is 16 bytes
++--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
+|  0xd8  |  type  |                                  data                                  
++--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
++--------+--------+--------+--------+--------+--------+--------+--------+
+                              data (cont.)                              |
++--------+--------+--------+--------+--------+--------+--------+--------+
+
+ext 8 stores an integer and a byte array whose length is upto (2^8)-1 bytes:
++--------+--------+--------+========+
+|  0xc7  |XXXXXXXX|  type  |  data  |
++--------+--------+--------+========+
+
+ext 16 stores an integer and a byte array whose length is upto (2^16)-1 bytes:
++--------+--------+--------+--------+========+
+|  0xc8  |YYYYYYYY|YYYYYYYY|  type  |  data  |
++--------+--------+--------+--------+========+
+
+ext 32 stores an integer and a byte array whose length is upto (2^32)-1 bytes:
++--------+--------+--------+--------+--------+--------+========+
+|  0xc9  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|  type  |  data  |
++--------+--------+--------+--------+--------+--------+========+
+
+where
+* XXXXXXXX is a 8-bit unsigned integer which represents N
+* YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N
+* ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a big-endian 32-bit unsigned integer which represents N
+* N is a length of data
+* type is a signed 8-bit signed integer
+* type < 0 is reserved for future extension including 2-byte type information
+

+

Serialization: type to format conversion

+

MessagePack serializers convert MessagePack types into formats as following:

+ + + + + + + + + + + +
source typesoutput format
Integerint format family (positive fixint, negative fixint, int 8/16/32/64 or uint 8/16/32/64)
Nilnil
Booleanbool format family (false or true)
Floatfloat format family (float 32/64)
Stringstr format family (fixstr or str 8/16/32)
Binarybin format family (bin 8/16/32)
Arrayarray format family (fixarray or array 16/32)
Mapmap format family (fixmap or map 16/32)
Extendedext format family (fixext or ext 8/16/32)
+ +

If an object can be represented in multiple possible output formats, serializers SHOULD use the format which represents the data in the smallest number of bytes.

+

+

Deserialization: format to type conversion

+

MessagePack deserializers convert MessagePack formats into types as following:

+ + + + + + + + + + + +
source formatsoutput type
positive fixint, negative fixint, int 8/16/32/64 and uint 8/16/32/64Integer
nilNil
false and trueBoolean
float 32/64Float
fixstr and str 8/16/32String
bin 8/16/32Binary
fixarray and array 16/32Array
fixmap and map 16/32Map
fixext and ext 8/16/32Extended
+ +

+

Future discussion

+

+

Profile

+

Profile is an idea that Applications restrict the semantics of MessagePack while sharing the same syntax to adapt MessagePack for certain use cases.

+

For example, applications may remove Binary type, restrict keys of map objects to be String type, and put some restrictions to make the semantics compatible with JSON. Applications which use schema may remove String and Binary types and deal with byte arrays as Raw type. Applications which use hash (digest) of serialized data may sort keys of maps to make the serialized data deterministic.

+

+

implementation guidelines

+

+

Upgrading MessagePack specification

+

MessagePack specification is changed at this time. +Here is a guideline to upgrade existent MessagePack implementations:

+
    +
  • In a minor release, deserializers support the bin format family and str 8 format. The type of deserialized objects should be same with raw 16 (== str 16) or raw 32 (== str 32)
  • +
  • In a major release, serializers distinguish Binary type and String type using bin format family and str format family
      +
    • At the same time, serializers should offer "compatibility mode" which doesn't use bin format family and str 8 format
    • +
    +
  • +
+
+
MessagePack specification
+Last modified at 2013-04-21 21:52:33 -0700
+Sadayuki Furuhashi © 2013-04-21 21:52:33 -0700
+
diff --git a/node_modules/msgpack5/spec.md b/node_modules/msgpack5/spec.md new file mode 100644 index 0000000000..0523896a58 --- /dev/null +++ b/node_modules/msgpack5/spec.md @@ -0,0 +1,499 @@ +# MessagePack specification + +MessagePack is an object serialization specification like JSON. + +MessagePack has two concepts: **type system** and **formats**. + +Serialization is conversion from application objects into MessagePack formats via MessagePack type system. + +Deserialization is conversion from MessagePack formats into application objects via MessagePack type system. + + Serialization: + Application objects + --> MessagePack type system + --> MessagePack formats (byte array) + + Deserialization: + MessagePack formats (byte array) + --> MessagePack type system + --> Application objects + +This document describes the MessagePack type system, MesagePack formats and conversion of them. + +## Table of contents + +* MessagePack specification + * [Type system](#types) + * [Limitation](#types-limitation) + * [Extension type](#types-extension-type) + * [Formats](#formats) + * [Overview](#formats-overview) + * [Notation in diagrams](#formats-notation) + * [nil format family](#formats-nil) + * [bool format family](#formats-bool) + * [int format family](#formats-int) + * [float format family](#formats-float) + * [str format family](#formats-str) + * [bin format family](#formats-bin) + * [array format family](#formats-array) + * [map format family](#formats-map) + * [ext format family](#formats-ext) + * [Serialization: type to format conversion](#serialization) + * [Deserialization: format to type conversion](#deserialization) + * [Future discussion](#future) + * [Profile](#future-profiles) + * [Implementation guidelines](#impl) + * [Upgrade MessagePack specification](#impl-upgrade) + +
+## Type system + +* Types + * **Integer** represents an integer + * **Nil** represents nil + * **Boolean** represents true or false + * **Float** represents a floating point number + * **Raw** + * **String** extending Raw type represents a UTF-8 string + * **Binary** extending Raw type represents a byte array + * **Array** represents a sequence of objects + * **Map** represents key-value pairs of objects + * **Extended** implements Extension interface: represents a tuple of type information and a byte array where type information is an integer whose meaning is defined by applications +* Interfaces + * **Extension** represents a tuple of an integer and a byte array where the integer represents type information and the byte array represents data. The format of the data is defined by concrete types + + +### Limitation + +* a value of an Integer object is limited from `-(2^63)` upto `(2^64)-1` +* a value of a Float object is IEEE 754 single or double precision floating-point number +* maximum length of a Binary object is `(2^32)-1` +* maximum byte size of a String object is `(2^32)-1` +* String objects may contain invalid byte sequence and the behavior of a deserializer depends on the actual implementation when it received invalid byte sequence + * Deserializers should provide functionality to get the original byte array so that applications can decide how to handle the object +* maximum number of elements of an Array object is `(2^32)-1` +* maximum number of key-value associations of a Map object is `(2^32)-1` + + +### Extension type + +MessagePack allows applications to define application-specific types using the Extended type. +Extended type consists of an integer and a byte array where the integer represents a kind of types and the byte array represents data. + +Applications can assign `0` to `127` to store application-specific type information. 
+ +MessagePack reserves `-1` to `-128` for future extension to add predefined types which will be described in separated documents. + + [0, 127]: application-specific types + [-1, -128]: reserved for predefined types + + + +## Formats + + +### Overview + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
format namefirst byte (in binary)first byte (in hex)
positive fixint0xxxxxxx0x00 - 0x7f
fixmap1000xxxx0x80 - 0x8f
fixarray1001xxxx0x90 - 0x9f
fixstr101xxxxx0xa0 - 0xbf
nil110000000xc0
(never used)110000010xc1
false110000100xc2
true110000110xc3
bin 8110001000xc4
bin 16110001010xc5
bin 32110001100xc6
ext 8110001110xc7
ext 16110010000xc8
ext 32110010010xc9
float 32110010100xca
float 64110010110xcb
uint 8110011000xcc
uint 16110011010xcd
uint 32110011100xce
uint 64110011110xcf
int 8110100000xd0
int 16110100010xd1
int 32110100100xd2
int 64110100110xd3
fixext 1110101000xd4
fixext 2110101010xd5
fixext 4110101100xd6
fixext 8110101110xd7
fixext 16110110000xd8
str 8110110010xd9
str 16110110100xda
str 32110110110xdb
array 16110111000xdc
array 32110111010xdd
map 16110111100xde
map 32110111110xdf
negative fixint111xxxxx0xe0 - 0xff
+ + +
+### Notation in diagrams + + one byte: + +--------+ + | | + +--------+ + + a variable number of bytes: + +========+ + | | + +========+ + + variable number of objects stored in MessagePack format: + +~~~~~~~~~~~~~~~~~+ + | | + +~~~~~~~~~~~~~~~~~+ + +`X`, `Y`, `Z` and `A` are the symbols that will be replaced by an actual bit. + + +### nil format + +Nil format stores nil in 1 byte. + + nil: + +--------+ + | 0xc0 | + +--------+ + + +### bool format family + +Bool format family stores false or true in 1 byte. + + false: + +--------+ + | 0xc2 | + +--------+ + + true: + +--------+ + | 0xc3 | + +--------+ + + +### int format family + +Int format family stores an integer in 1, 2, 3, 5, or 9 bytes. + + positive fixnum stores 7-bit positive integer + +--------+ + |0XXXXXXX| + +--------+ + + negative fixnum stores 5-bit negative integer + +--------+ + |111YYYYY| + +--------+ + + * 0XXXXXXX is 8-bit unsigned integer + * 111YYYYY is 8-bit signed integer + + uint 8 stores a 8-bit unsigned integer + +--------+--------+ + | 0xcc |ZZZZZZZZ| + +--------+--------+ + + uint 16 stores a 16-bit big-endian unsigned integer + +--------+--------+--------+ + | 0xcd |ZZZZZZZZ|ZZZZZZZZ| + +--------+--------+--------+ + + uint 32 stores a 32-bit big-endian unsigned integer + +--------+--------+--------+--------+--------+ + | 0xce |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ + +--------+--------+--------+--------+--------+ + + uint 64 stores a 64-bit big-endian unsigned integer + +--------+--------+--------+--------+--------+--------+--------+--------+--------+ + | 0xcf |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| + +--------+--------+--------+--------+--------+--------+--------+--------+--------+ + + int 8 stores a 8-bit signed integer + +--------+--------+ + | 0xd0 |ZZZZZZZZ| + +--------+--------+ + + int 16 stores a 16-bit big-endian signed integer + +--------+--------+--------+ + | 0xd1 |ZZZZZZZZ|ZZZZZZZZ| + +--------+--------+--------+ + + int 32 stores a 32-bit 
big-endian signed integer + +--------+--------+--------+--------+--------+ + | 0xd2 |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| + +--------+--------+--------+--------+--------+ + + int 64 stores a 64-bit big-endian signed integer + +--------+--------+--------+--------+--------+--------+--------+--------+--------+ + | 0xd3 |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| + +--------+--------+--------+--------+--------+--------+--------+--------+--------+ + + +### float format family + +Float format family stores an floating point number in 5 bytes or 9 bytes. + + float 32 stores a floating point number in IEEE 754 single precision floating point number format: + +--------+--------+--------+--------+--------+ + | 0xca |XXXXXXXX|XXXXXXXX|XXXXXXXX|XXXXXXXX + +--------+--------+--------+--------+--------+ + + float 64 stores a floating point number in IEEE 754 double precision floating point number format: + +--------+--------+--------+--------+--------+--------+--------+--------+--------+ + | 0xcb |YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY| + +--------+--------+--------+--------+--------+--------+--------+--------+--------+ + + where + * XXXXXXXX_XXXXXXXX_XXXXXXXX_XXXXXXXX is a big-endian IEEE 754 single precision floating point number + * YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY is a big-endian + IEEE 754 double precision floating point number + + + +### str format family + +Str format family stores an byte array in 1, 2, 3, or 5 bytes of extra bytes in addition to the size of the byte array. 
+ + fixstr stores a byte array whose length is upto 31 bytes: + +--------+========+ + |101XXXXX| data | + +--------+========+ + + str 8 stores a byte array whose length is upto (2^8)-1 bytes: + +--------+--------+========+ + | 0xd9 |YYYYYYYY| data | + +--------+--------+========+ + + str 16 stores a byte array whose length is upto (2^16)-1 bytes: + +--------+--------+--------+========+ + | 0xda |ZZZZZZZZ|ZZZZZZZZ| data | + +--------+--------+--------+========+ + + str 32 stores a byte array whose length is upto (2^32)-1 bytes: + +--------+--------+--------+--------+--------+========+ + | 0xdb |AAAAAAAA|AAAAAAAA|AAAAAAAA|AAAAAAAA| data | + +--------+--------+--------+--------+--------+========+ + + where + * XXXXX is a 5-bit unsigned integer which represents N + * YYYYYYYY is a 8-bit unsigned integer which represents N + * ZZZZZZZZ_ZZZZZZZZ is a 16-bit big-endian unsigned integer which represents N + * AAAAAAAA_AAAAAAAA_AAAAAAAA_AAAAAAAA is a 32-bit big-endian unsigned integer which represents N + * N is the length of data + + +### bin format family + +Bin format family stores an byte array in 2, 3, or 5 bytes of extra bytes in addition to the size of the byte array. 
+ + bin 8 stores a byte array whose length is upto (2^8)-1 bytes: + +--------+--------+========+ + | 0xc4 |XXXXXXXX| data | + +--------+--------+========+ + + bin 16 stores a byte array whose length is upto (2^16)-1 bytes: + +--------+--------+--------+========+ + | 0xc5 |YYYYYYYY|YYYYYYYY| data | + +--------+--------+--------+========+ + + bin 32 stores a byte array whose length is upto (2^32)-1 bytes: + +--------+--------+--------+--------+--------+========+ + | 0xc6 |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| data | + +--------+--------+--------+--------+--------+========+ + + where + * XXXXXXXX is a 8-bit unsigned integer which represents N + * YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N + * ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N + * N is the length of data + + +### array format family + +Array format family stores a sequence of elements in 1, 3, or 5 bytes of extra bytes in addition to the elements. + + fixarray stores an array whose length is upto 15 elements: + +--------+~~~~~~~~~~~~~~~~~+ + |1001XXXX| N objects | + +--------+~~~~~~~~~~~~~~~~~+ + + array 16 stores an array whose length is upto (2^16)-1 elements: + +--------+--------+--------+~~~~~~~~~~~~~~~~~+ + | 0xdc |YYYYYYYY|YYYYYYYY| N objects | + +--------+--------+--------+~~~~~~~~~~~~~~~~~+ + + array 32 stores an array whose length is upto (2^32)-1 elements: + +--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+ + | 0xdd |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| N objects | + +--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+ + + where + * XXXX is a 4-bit unsigned integer which represents N + * YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N + * ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N + N is the size of a array + + +### map format family + +Map format family stores a sequence of key-value pairs in 1, 3, or 5 bytes 
of extra bytes in addition to the key-value pairs. + + fixmap stores a map whose length is upto 15 elements + +--------+~~~~~~~~~~~~~~~~~+ + |1000XXXX| N*2 objects | + +--------+~~~~~~~~~~~~~~~~~+ + + map 16 stores a map whose length is upto (2^16)-1 elements + +--------+--------+--------+~~~~~~~~~~~~~~~~~+ + | 0xde |YYYYYYYY|YYYYYYYY| N*2 objects | + +--------+--------+--------+~~~~~~~~~~~~~~~~~+ + + map 32 stores a map whose length is upto (2^32)-1 elements + +--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+ + | 0xdf |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| N*2 objects | + +--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+ + + where + * XXXX is a 4-bit unsigned integer which represents N + * YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N + * ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N + * N is the size of a map + * odd elements in objects are keys of a map + * the next element of a key is its associated value + + +### ext format family + +Ext format family stores a tuple of an integer and a byte array. 
+ + fixext 1 stores an integer and a byte array whose length is 1 byte + +--------+--------+--------+ + | 0xd4 | type | data | + +--------+--------+--------+ + + fixext 2 stores an integer and a byte array whose length is 2 bytes + +--------+--------+--------+--------+ + | 0xd5 | type | data | + +--------+--------+--------+--------+ + + fixext 4 stores an integer and a byte array whose length is 4 bytes + +--------+--------+--------+--------+--------+--------+ + | 0xd6 | type | data | + +--------+--------+--------+--------+--------+--------+ + + fixext 8 stores an integer and a byte array whose length is 8 bytes + +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ + | 0xd7 | type | data | + +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ + + fixext 16 stores an integer and a byte array whose length is 16 bytes + +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ + | 0xd8 | type | data + +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ + +--------+--------+--------+--------+--------+--------+--------+--------+ + data (cont.) 
| + +--------+--------+--------+--------+--------+--------+--------+--------+ + + ext 8 stores an integer and a byte array whose length is upto (2^8)-1 bytes: + +--------+--------+--------+========+ + | 0xc7 |XXXXXXXX| type | data | + +--------+--------+--------+========+ + + ext 16 stores an integer and a byte array whose length is upto (2^16)-1 bytes: + +--------+--------+--------+--------+========+ + | 0xc8 |YYYYYYYY|YYYYYYYY| type | data | + +--------+--------+--------+--------+========+ + + ext 32 stores an integer and a byte array whose length is upto (2^32)-1 bytes: + +--------+--------+--------+--------+--------+--------+========+ + | 0xc9 |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| type | data | + +--------+--------+--------+--------+--------+--------+========+ + + where + * XXXXXXXX is a 8-bit unsigned integer which represents N + * YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N + * ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a big-endian 32-bit unsigned integer which represents N + * N is a length of data + * type is a signed 8-bit signed integer + * type < 0 is reserved for future extension including 2-byte type information + + + +## Serialization: type to format conversion + +MessagePack serializers convert MessagePack types into formats as following: + + + + + + + + + + + + +
source typesoutput format
Integerint format family (positive fixint, negative fixint, int 8/16/32/64 or uint 8/16/32/64)
Nilnil
Booleanbool format family (false or true)
Floatfloat format family (float 32/64)
Stringstr format family (fixstr or str 8/16/32)
Binarybin format family (bin 8/16/32)
Arrayarray format family (fixarray or array 16/32)
Mapmap format family (fixmap or map 16/32)
Extendedext format family (fixext or ext 8/16/32)
+ +If an object can be represented in multiple possible output formats, serializers SHOULD use the format which represents the data in the smallest number of bytes. + + +
+## Deserialization: format to type conversion + +MessagePack deserializers convert convert MessagePack formats into types as following: + + + + + + + + + + + + +
source formatsoutput type
positive fixint, negative fixint, int 8/16/32/64 and uint 8/16/32/64Integer
nilNil
false and trueBoolean
float 32/64Float
fixstr and str 8/16/32String
bin 8/16/32Binary
fixarray and array 16/32Array
fixmap map 16/32Map
fixext and ext 8/16/32Extended
+ +
+## Future discussion + + +### Profile + +Profile is an idea that Applications restrict the semantics of MessagePack while sharing the same syntax to adapt MessagePack for certain use cases. + +For example, applications may remove Binary type, restrict keys of map objects to be String type, and put some restrictions to make the semantics compatible with JSON. Applications which use schema may remove String and Binary types and deal with byte arrays as Raw type. Applications which use hash (digest) of serialized data may sort keys of maps to make the serialized data deterministic. + + +## implementation guidelines + + +### Upgrading MessagePack specification + +MessagePack specification is changed at this time. +Here is a guideline to upgrade existent MessagePack implementations: + +* In a minor release, deserializers support the bin format family and str 8 format. The type of deserialized objects should be same with raw 16 (== str 16) or raw 32 (== str 32) +* In a major release, serializers distinguish Binary type and String type using bin format family and str format family + * At the same time, serializers should offer "compatibility mode" which doesn't use bin format family and str 8 format + + +___ + + MessagePack specification + Last modified at 2013-04-21 21:52:33 -0700 + Sadayuki Furuhashi © 2013-04-21 21:52:33 -0700 diff --git a/node_modules/msgpack5/test/1-byte-length-buffers.js b/node_modules/msgpack5/test/1-byte-length-buffers.js new file mode 100644 index 0000000000..701b88d23c --- /dev/null +++ b/node_modules/msgpack5/test/1-byte-length-buffers.js @@ -0,0 +1,79 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +function build (size) { + const buf = Buffer.allocUnsafe(size) + buf.fill('a') + + return buf +} + +test('encode/decode 2^8-1 bytes buffers', function (t) { + const encoder = msgpack() + const all = [] + + all.push(build(Math.pow(2, 8) - 
1)) + all.push(build(Math.pow(2, 6) + 1)) + all.push(build(1)) + all.push(Buffer.allocUnsafe(0)) + + all.forEach(function (orig) { + t.test('encoding a buffer of length ' + orig.length, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 2 + orig.length, 'must have the right length') + t.equal(buf.readUInt8(0), 0xc4, 'must have the proper header') + t.equal(buf.readUInt8(1), orig.length, 'must include the buf length') + t.equal(buf.toString('utf8', 2), orig.toString('utf8'), 'must decode correctly') + t.end() + }) + + t.test('decoding a buffer of length ' + orig.length, function (t) { + const buf = Buffer.allocUnsafe(2 + orig.length) + buf[0] = 0xc4 + buf[1] = orig.length + orig.copy(buf, 2) + t.equal(encoder.decode(buf).toString('utf8'), orig.toString('utf8'), 'must decode correctly') + t.end() + }) + + t.test('mirror test a buffer of length ' + orig.length, function (t) { + t.equal(encoder.decode(encoder.encode(orig)).toString(), orig.toString(), 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding a chopped 2^8-1 bytes buffer', function (t) { + const encoder = msgpack() + const orig = build(Math.pow(2, 6)) + let buf = Buffer.allocUnsafe(2 + orig.length) + buf[0] = 0xc4 + buf[1] = Math.pow(2, 8) - 1 // set bigger size + orig.copy(buf, 2) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) + +test('decoding an incomplete header of 2^8-1 bytes buffer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(1) + buf[0] = 0xc4 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git 
a/node_modules/msgpack5/test/1-byte-length-exts.js b/node_modules/msgpack5/test/1-byte-length-exts.js new file mode 100644 index 0000000000..6066bbcbbd --- /dev/null +++ b/node_modules/msgpack5/test/1-byte-length-exts.js @@ -0,0 +1,102 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encode/decode variable ext data up to 0xff', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (size, value) { + this.value = value + this.size = size + } + + function mytipeEncode (obj) { + const buf = Buffer.allocUnsafe(obj.size) + buf.fill(obj.value) + return buf + } + + function mytipeDecode (data) { + const result = new MyType(data.length, data.toString('utf8', 0, 1)) + + for (let i = 0; i < data.length; i++) { + if (data.readUInt8(0) !== data.readUInt8(i)) { + throw new Error('should all be the same') + } + } + + return result + } + + encoder.register(0x42, MyType, mytipeEncode, mytipeDecode) + + // no 1 as it's a fixext + // no 2 as it's a fixext + all.push(new MyType(3, 'a')) + // no 4 as it's a fixext + all.push(new MyType(5, 'a')) + all.push(new MyType(6, 'a')) + all.push(new MyType(7, 'a')) + // no 8 as it's a fixext + all.push(new MyType(9, 'a')) + all.push(new MyType(10, 'a')) + all.push(new MyType(11, 'a')) + all.push(new MyType(12, 'a')) + all.push(new MyType(13, 'a')) + all.push(new MyType(14, 'a')) + all.push(new MyType(15, 'a')) + // no 16 as it's a fixext + all.push(new MyType(17, 'a')) + + all.push(new MyType(255, 'a')) + + all.forEach(function (orig) { + t.test('encoding a custom obj of length ' + orig.size, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 3 + orig.size, 'must have the right length') + t.equal(buf.readUInt8(0), 0xc7, 'must have the ext header') + t.equal(buf.readUInt8(1), orig.size, 'must include the data length') + t.equal(buf.readUInt8(2), 0x42, 'must include the custom type 
id') + t.equal(buf.toString('utf8', 3, 4), orig.value, 'must decode correctly') + t.end() + }) + + t.test('mirror test with a custom obj of length ' + orig.size, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.test('decoding an incomplete variable ext data up to 0xff', function (t) { + const obj = encoder.encode(new MyType(250, 'a')) + let buf = Buffer.allocUnsafe(obj.length) + buf[0] = 0xc7 + buf.writeUInt8(obj.length + 2, 1) // set bigger size + obj.copy(buf, 2, 2, obj.length) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.test('decoding an incomplete header of variable ext data up to 0xff', function (t) { + let buf = Buffer.allocUnsafe(2) + buf[0] = 0xc7 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.end() +}) diff --git a/node_modules/msgpack5/test/1-byte-length-strings.js b/node_modules/msgpack5/test/1-byte-length-strings.js new file mode 100644 index 0000000000..d5efbe4ebb --- /dev/null +++ b/node_modules/msgpack5/test/1-byte-length-strings.js @@ -0,0 +1,80 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encode/decode 32 <-> (2^8-1) bytes strings', function (t) { + const encoder = msgpack() + const all = [] + let i + + // build base + for (i = 'a'; i.length < 32;) { + i += 'a' + } + + for (; i.length < Math.pow(2, 8); i += 'aaaaa') { + all.push(i) + } + + all.forEach(function (str) { + t.test('encoding a string of length ' + str.length, function (t) { 
+ const buf = encoder.encode(str) + t.equal(buf.length, 2 + Buffer.byteLength(str), 'must be the proper length') + t.equal(buf.readUInt8(0), 0xd9, 'must have the proper header') + t.equal(buf.readUInt8(1), Buffer.byteLength(str), 'must include the str length') + t.equal(buf.toString('utf8', 2, Buffer.byteLength(str) + 2), str, 'must decode correctly') + t.end() + }) + + t.test('decoding a string of length ' + str.length, function (t) { + const buf = Buffer.allocUnsafe(2 + Buffer.byteLength(str)) + buf[0] = 0xd9 + buf[1] = Buffer.byteLength(str) + buf.write(str, 2) + t.equal(encoder.decode(buf), str, 'must decode correctly') + t.end() + }) + + t.test('mirror test a string of length ' + str.length, function (t) { + t.equal(encoder.decode(encoder.encode(str)), str, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding a chopped string', function (t) { + const encoder = msgpack() + let str + for (str = 'a'; str.length < 40;) { + str += 'a' + } + let buf = Buffer.allocUnsafe(2 + Buffer.byteLength(str)) + buf[0] = 0xd9 + buf[1] = Buffer.byteLength(str) + 10 // set bigger size + buf.write(str, 2) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) + +test('decoding an incomplete header of a string', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(1) + buf[0] = 0xd9 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/1-byte-length-uint8arrays.js b/node_modules/msgpack5/test/1-byte-length-uint8arrays.js new file mode 100644 index 0000000000..3b6253925d --- /dev/null +++ 
b/node_modules/msgpack5/test/1-byte-length-uint8arrays.js @@ -0,0 +1,43 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +function build (size) { + const array = [] + let i + + for (i = 0; i < size; i++) { + array.push(42) + } + + return new Uint8Array(array) +} + +test('encode/decode 2^8-1 Uint8Arrays', function (t) { + const encoder = msgpack() + const all = [] + + all.push(build(Math.pow(2, 8) - 1)) + all.push(build(Math.pow(2, 6) + 1)) + all.push(build(1)) + all.push(new Uint8Array(0)) + + all.forEach(function (array) { + t.test('encoding Uint8Array of length ' + array.byteLength + ' bytes', function (t) { + const buf = encoder.encode(array) + t.equal(buf.length, 2 + array.byteLength, 'must have the right length') + t.equal(buf.readUInt8(0), 0xc4, 'must have the proper header') + t.equal(buf.readUInt8(1), array.byteLength, 'must include the buf length') + t.end() + }) + + t.test('mirror test for an Uint8Array of length ' + array.byteLength + ' bytes', function (t) { + t.deepEqual(encoder.decode(encoder.encode(array)), Buffer.from(array), 'must stay the same') + t.end() + }) + }) + + t.end() +}) diff --git a/node_modules/msgpack5/test/15-elements-arrays.js b/node_modules/msgpack5/test/15-elements-arrays.js new file mode 100644 index 0000000000..bf76904907 --- /dev/null +++ b/node_modules/msgpack5/test/15-elements-arrays.js @@ -0,0 +1,84 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +function build (size, obj) { + const array = [] + let i + + for (i = 0; i < size; i++) { + array.push(obj) + } + + return array +} + +function computeLength (array) { + let length = 1 // the header + let multi = 1 + + if (array[0] && typeof array[0] === 'string') { + multi += array[0].length + } + + length += array.length * multi + + return length +} + +test('encode/decode arrays up to 15 
elements', function (t) { + const encoder = msgpack() + const all = [] + let i + + for (i = 0; i < 16; i++) { + all.push(build(i, 42)) + } + + for (i = 0; i < 16; i++) { + all.push(build(i, 'aaa')) + } + + all.forEach(function (array) { + t.test('encoding an array with ' + array.length + ' elements of ' + array[0], function (t) { + const buf = encoder.encode(array) + // the array is full of 1-byte integers + t.equal(buf.length, computeLength(array), 'must have the right length') + t.equal(buf.readUInt8(0) & 0xf0, 0x90, 'must have the proper header') + t.equal(buf.readUInt8(0) & 0x0f, array.length, 'must include the array length') + t.end() + }) + + t.test('mirror test for an array of length ' + array.length + ' with ' + array[0], function (t) { + t.deepEqual(encoder.decode(encoder.encode(array)), array, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete array', function (t) { + const encoder = msgpack() + + const array = ['a', 'b', 'c'] + const size = computeLength(array) + let buf = Buffer.allocUnsafe(size) + buf[0] = 0x90 | array.length + 2 // set bigger size + let pos = 1 + for (let i = 0; i < array.length; i++) { + const obj = encoder.encode(array[i], true) + obj.copy(buf, pos) + pos += obj.length + } + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(origLength, buf.length, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/15-elements-maps.js b/node_modules/msgpack5/test/15-elements-maps.js new file mode 100644 index 0000000000..8506c0fb4f --- /dev/null +++ b/node_modules/msgpack5/test/15-elements-maps.js @@ -0,0 +1,119 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +function build (size, value) { + const map = {} + let i + + for (i = 0; i < 
size; i++) { + map[i + 100 + ''] = value + } + + return map +} + +function computeLength (map) { + let length = 1 // the header + let multi = 5 // we have 4 bytes for each key, plus 1 byte for the value + + if (map[100] && typeof map[100] === 'string') { + multi += map[100].length + } + + length += Object.keys(map).length * multi + + return length +} + +test('encode/decode maps up to 15 elements', function (t) { + const encoder = msgpack() + const all = [] + let i + + for (i = 0; i < 16; i++) { + all.push(build(i, 42)) + } + + for (i = 0; i < 16; i++) { + all.push(build(i, 'aaa')) + } + + all.forEach(function (map) { + const length = Object.keys(map).length + t.test('encoding a map with ' + length + ' elements of ' + map[100], function (t) { + const buf = encoder.encode(map) + t.equal(buf.length, computeLength(map), 'must have the right length') + t.equal(buf.readUInt8(0) & 0xf0, 0x80, 'must have the proper header') + t.equal(buf.readUInt8(0) & 0x0f, length, 'must include the map length') + t.end() + }) + + t.test('mirror test for a map of length ' + length + ' with ' + map[100], function (t) { + t.deepEqual(encoder.decode(encoder.encode(map)), map, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('do not encode undefined in a map', function (t) { + const instance = msgpack() + const expected = { hello: 'world' } + const toEncode = { a: undefined, hello: 'world' } + const buf = instance.encode(toEncode) + + t.deepEqual(expected, instance.decode(buf), 'must ignore undefined') + t.end() +}) + +test('encode NaN in a map', function (t) { + const instance = msgpack() + const toEncode = { a: NaN, hello: 'world' } + + const buf = instance.encode(toEncode) + + t.assert(Object.is(instance.decode(buf).a, NaN)) + + const expected = { ...toEncode } + delete toEncode.a + const actual = instance.decode(buf) + delete buf.a + + t.deepEqual(actual, expected) + + t.end() +}) + +test('encode/decode map with buf, ints and strings', function (t) { + const map = { + 
topic: 'hello', + qos: 1, + payload: Buffer.from('world'), + messageId: '42', + ttl: 1416309270167 + } + const pack = msgpack() + + t.deepEqual(pack.decode(pack.encode(map)), map) + t.end() +}) + +test('decoding a chopped map', function (t) { + const encoder = msgpack() + const map = encoder.encode({ a: 'b', c: 'd', e: 'f' }) + let buf = Buffer.allocUnsafe(map.length) + buf[0] = 0x80 | 5 // set bigger size + map.copy(buf, 1, 1, map.length) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/16-bits-signed-integers.js b/node_modules/msgpack5/test/16-bits-signed-integers.js new file mode 100644 index 0000000000..0b2a545186 --- /dev/null +++ b/node_modules/msgpack5/test/16-bits-signed-integers.js @@ -0,0 +1,56 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encoding/decoding 16-bits big-endian signed integers', function (t) { + const encoder = msgpack() + const allNum = [] + let i + + for (i = 129; i < 32768; i += 1423) { + allNum.push(-i) + } + + allNum.push(-32768) + + allNum.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 3, 'must have 3 bytes') + t.equal(buf[0], 0xd1, 'must have the proper header') + t.equal(buf.readInt16BE(1), num, 'must decode correctly') + t.end() + }) + + t.test('decoding ' + num, function (t) { + const buf = Buffer.allocUnsafe(3) + buf[0] = 0xd1 + buf.writeInt16BE(num, 1) + t.equal(encoder.decode(buf), num, 'must decode correctly') + t.end() + }) + + t.test('mirror test ' + num, function (t) { + t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + 
+test('decoding an incomplete 16-bits big-endian integer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(2) + buf[0] = 0xd1 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/16-bits-unsigned-integers.js b/node_modules/msgpack5/test/16-bits-unsigned-integers.js new file mode 100644 index 0000000000..78d59089fe --- /dev/null +++ b/node_modules/msgpack5/test/16-bits-unsigned-integers.js @@ -0,0 +1,56 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encoding/decoding 16-bits big-endian unsigned integers', function (t) { + const encoder = msgpack() + const allNum = [] + let i + + for (i = 256; i < 65536; i += 1423) { + allNum.push(i) + } + + allNum.push(65535) + + allNum.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 3, 'must have 3 bytes') + t.equal(buf[0], 0xcd, 'must have the proper header') + t.equal(buf.readUInt16BE(1), num, 'must decode correctly') + t.end() + }) + + t.test('decoding ' + num, function (t) { + const buf = Buffer.allocUnsafe(3) + buf[0] = 0xcd + buf.writeUInt16BE(num, 1) + t.equal(encoder.decode(buf), num, 'must decode correctly') + t.end() + }) + + t.test('mirror test ' + num, function (t) { + t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete 16-bits big-endian unsigned integer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(2) + buf[0] = 0xcd + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, 
encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/2-bytes-length-arrays.js b/node_modules/msgpack5/test/2-bytes-length-arrays.js new file mode 100644 index 0000000000..5ff658b32c --- /dev/null +++ b/node_modules/msgpack5/test/2-bytes-length-arrays.js @@ -0,0 +1,84 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +function build (size) { + const array = [] + let i + + for (i = 0; i < size; i++) { + array.push(42) + } + + return array +} + +test('encode/decode arrays up to 0xffff elements', function (t) { + const encoder = msgpack() + const all = [] + let i + + for (i = 16; i < 0xffff; i += 4242) { + all.push(build(i)) + } + + all.push(build(0xff)) + all.push(build(0xffff)) + + all.forEach(function (array) { + t.test('encoding an array with ' + array.length + ' elements', function (t) { + const buf = encoder.encode(array) + // the array is full of 1-byte integers + t.equal(buf.length, 3 + array.length, 'must have the right length') + t.equal(buf.readUInt8(0), 0xdc, 'must have the proper header') + t.equal(buf.readUInt16BE(1), array.length, 'must include the array length') + t.end() + }) + + t.test('mirror test for an array of length ' + array.length, function (t) { + t.deepEqual(encoder.decode(encoder.encode(array)), array, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete array', function (t) { + const encoder = msgpack() + + const array = build(0xffff / 2) + let buf = Buffer.allocUnsafe(3 + array.length) + buf[0] = 0xdc + buf.writeUInt16BE(array.length + 10, 1) // set bigger size + let pos = 3 + for (let i = 0; i < array.length; i++) { + const obj = encoder.encode(array[i], true) + obj.copy(buf, pos) + pos += obj.length + } + buf = bl().append(buf) + const origLength = buf.length + 
t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(origLength, buf.length, 'must not consume any byte') + t.end() +}) + +test('decoding an incomplete header', function (t) { + const encoder = msgpack() + + let buf = Buffer.allocUnsafe(2) + buf[0] = 0xdc + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/2-bytes-length-buffers.js b/node_modules/msgpack5/test/2-bytes-length-buffers.js new file mode 100644 index 0000000000..5fdc3ee594 --- /dev/null +++ b/node_modules/msgpack5/test/2-bytes-length-buffers.js @@ -0,0 +1,79 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +function build (size) { + const buf = Buffer.allocUnsafe(size) + buf.fill('a') + + return buf +} + +test('encode/decode 2^16-1 bytes buffers', function (t) { + const encoder = msgpack() + const all = [] + + all.push(build(Math.pow(2, 8))) + all.push(build(Math.pow(2, 8) + 1)) + all.push(build(Math.pow(2, 12) + 1)) + all.push(build(Math.pow(2, 16) - 1)) + + all.forEach(function (orig) { + t.test('encoding a buffer of length ' + orig.length, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 3 + orig.length, 'must have the right length') + t.equal(buf.readUInt8(0), 0xc5, 'must have the proper header') + t.equal(buf.readUInt16BE(1), orig.length, 'must include the buf length') + t.equal(buf.toString('utf8', 3), orig.toString('utf8'), 'must decode correctly') + t.end() + }) + + t.test('decoding a buffer of length ' + orig.length, function (t) { + const buf = Buffer.allocUnsafe(3 + orig.length) + buf[0] = 0xc5 + buf.writeUInt16BE(orig.length, 1) + orig.copy(buf, 3) 
+ t.equal(encoder.decode(buf).toString('utf8'), orig.toString('utf8'), 'must decode correctly') + t.end() + }) + + t.test('mirror test a buffer of length ' + orig.length, function (t) { + t.equal(encoder.decode(encoder.encode(orig)).toString(), orig.toString(), 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding a chopped 2^16-1 bytes buffer', function (t) { + const encoder = msgpack() + const orig = build(Math.pow(2, 12)) + let buf = Buffer.allocUnsafe(3 + orig.length) + buf[0] = 0xc5 + buf[1] = Math.pow(2, 16) - 1 // set bigger size + orig.copy(buf, 3) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) + +test('decoding an incomplete header of 2^16-1 bytes buffer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(2) + buf[0] = 0xc5 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/2-bytes-length-exts.js b/node_modules/msgpack5/test/2-bytes-length-exts.js new file mode 100644 index 0000000000..f5aefb633a --- /dev/null +++ b/node_modules/msgpack5/test/2-bytes-length-exts.js @@ -0,0 +1,86 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encode/decode variable ext data up between 0x0100 and 0xffff', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (size, value) { + this.value = value + this.size = size + } + + function mytipeEncode (obj) { + const buf = Buffer.allocUnsafe(obj.size) + buf.fill(obj.value) + return buf + } + + function 
mytipeDecode (data) { + const result = new MyType(data.length, data.toString('utf8', 0, 1)) + + for (let i = 0; i < data.length; i++) { + if (data.readUInt8(0) !== data.readUInt8(i)) { + throw new Error('should all be the same') + } + } + + return result + } + + encoder.register(0x42, MyType, mytipeEncode, mytipeDecode) + + all.push(new MyType(0x0100, 'a')) + all.push(new MyType(0x0101, 'a')) + all.push(new MyType(0xffff, 'a')) + + all.forEach(function (orig) { + t.test('encoding a custom obj of length ' + orig.size, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 4 + orig.size, 'must have the right length') + t.equal(buf.readUInt8(0), 0xc8, 'must have the ext header') + t.equal(buf.readUInt16BE(1), orig.size, 'must include the data length') + t.equal(buf.readUInt8(3), 0x42, 'must include the custom type id') + t.equal(buf.toString('utf8', 4, 5), orig.value, 'must decode correctly') + t.end() + }) + + t.test('mirror test with a custom obj of length ' + orig.size, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.test('decoding an incomplete variable ext data up between 0x0100 and 0xffff', function (t) { + const obj = encoder.encode(new MyType(0xfff0, 'a')) + let buf = Buffer.allocUnsafe(obj.length) + buf[0] = 0xc8 + buf.writeUInt16BE(obj.length + 2, 1) // set bigger size + obj.copy(buf, 3, 3, obj.length) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.test('decoding an incomplete header of variable ext data up between 0x0100 and 0xffff', function (t) { + let buf = Buffer.allocUnsafe(3) + buf[0] = 0xc8 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw 
IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.end() +}) diff --git a/node_modules/msgpack5/test/2-bytes-length-maps.js b/node_modules/msgpack5/test/2-bytes-length-maps.js new file mode 100644 index 0000000000..0df6321ec3 --- /dev/null +++ b/node_modules/msgpack5/test/2-bytes-length-maps.js @@ -0,0 +1,85 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') +const base = 100000 + +function build (size, value) { + const map = {} + + for (let i = 0; i < size; i++) { + map[i + base] = value + } + + return map +} + +function computeLength (mapLength) { + let length = 3 // the header + const multi = ('' + base).length + 1 + 1 // we have bytes for each key, plus 1 byte for the value + + length += mapLength * multi + + return length +} + +test('encode/decode maps up to 2^16-1 elements', function (t) { + const encoder = msgpack() + + function doTest (length) { + const map = build(length, 42) + const buf = encoder.encode(map) + + t.test('encoding a map with ' + length + ' elements of ' + map[base], function (t) { + // the map is full of 1-byte integers + t.equal(buf.length, computeLength(length), 'must have the right length') + t.equal(buf.readUInt8(0), 0xde, 'must have the proper header') + t.equal(buf.readUInt16BE(1), length, 'must include the map length') + t.end() + }) + + t.test('mirror test for a map of length ' + length + ' with ' + map[base], function (t) { + t.deepEqual(encoder.decode(buf), map, 'must stay the same') + t.end() + }) + } + + doTest(Math.pow(2, 8)) + doTest(Math.pow(2, 8) + 1) + doTest(Math.pow(2, 12) + 1) + // too slow + // doTest(Math.pow(2, 16) - 1) + + t.end() +}) + +test('decoding a chopped map', function (t) { + const encoder = msgpack() + const map = encoder.encode(build(Math.pow(2, 12) + 1, 42)) + let buf = Buffer.allocUnsafe(map.length) + buf[0] = 0xde + 
buf.writeUInt16BE(Math.pow(2, 16) - 1, 1) // set bigger size + map.copy(buf, 3, 3, map.length) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) + +test('decoding an incomplete header of a map', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(2) + buf[0] = 0xde + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/2-bytes-length-strings.js b/node_modules/msgpack5/test/2-bytes-length-strings.js new file mode 100644 index 0000000000..ee9f8c1b1d --- /dev/null +++ b/node_modules/msgpack5/test/2-bytes-length-strings.js @@ -0,0 +1,87 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encode/decode 2^8 <-> (2^16-1) bytes strings', function (t) { + const encoder = msgpack() + const all = [] + let str + + str = Buffer.allocUnsafe(Math.pow(2, 8)) + str.fill('a') + all.push(str.toString()) + + str = Buffer.allocUnsafe(Math.pow(2, 8) + 1) + str.fill('a') + all.push(str.toString()) + + str = Buffer.allocUnsafe(Math.pow(2, 14)) + str.fill('a') + all.push(str.toString()) + + str = Buffer.allocUnsafe(Math.pow(2, 16) - 1) + str.fill('a') + all.push(str.toString()) + + all.forEach(function (str) { + t.test('encoding a string of length ' + str.length, function (t) { + const buf = encoder.encode(str) + t.equal(buf.length, 3 + Buffer.byteLength(str), 'must be the proper length') + t.equal(buf[0], 0xda, 'must have the proper header') + t.equal(buf.readUInt16BE(1), Buffer.byteLength(str), 'must include the str 
length') + t.equal(buf.toString('utf8', 3, Buffer.byteLength(str) + 3), str, 'must decode correctly') + t.end() + }) + + t.test('decoding a string of length ' + str.length, function (t) { + const buf = Buffer.allocUnsafe(3 + Buffer.byteLength(str)) + buf[0] = 0xda + buf.writeUInt16BE(Buffer.byteLength(str), 1) + buf.write(str, 3) + t.equal(encoder.decode(buf), str, 'must decode correctly') + t.end() + }) + + t.test('mirror test a string of length ' + str.length, function (t) { + t.equal(encoder.decode(encoder.encode(str)), str, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding a chopped string', function (t) { + const encoder = msgpack() + let str + for (str = 'a'; str.length < 0xff + 100;) { + str += 'a' + } + let buf = Buffer.allocUnsafe(3 + Buffer.byteLength(str)) + buf[0] = 0xda + buf.writeUInt16BE(Buffer.byteLength(str) + 10, 1) // set bigger size + buf.write(str, 3) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) + +test('decoding an incomplete header of a string', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(2) + buf[0] = 0xda + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/2-bytes-length-uint8arrays.js b/node_modules/msgpack5/test/2-bytes-length-uint8arrays.js new file mode 100644 index 0000000000..c824a30998 --- /dev/null +++ b/node_modules/msgpack5/test/2-bytes-length-uint8arrays.js @@ -0,0 +1,43 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +function build (size) { + 
const array = [] + let i + + for (i = 0; i < size; i++) { + array.push(42) + } + + return new Uint8Array(array) +} + +test('encode/decode 2^8-1 Uint8Arrays', function (t) { + const encoder = msgpack() + const all = [] + + all.push(build(Math.pow(2, 8))) + all.push(build(Math.pow(2, 8) + 1)) + all.push(build(Math.pow(2, 12) + 1)) + all.push(build(Math.pow(2, 16) - 1)) + + all.forEach(function (array) { + t.test('encoding Uint8Array of length ' + array.byteLength + ' bytes', function (t) { + const buf = encoder.encode(array) + t.equal(buf.length, 3 + array.byteLength, 'must have the right length') + t.equal(buf.readUInt8(0), 0xc5, 'must have the proper header') + t.equal(buf.readUInt16BE(1), array.byteLength, 'must include the buf length') + t.end() + }) + + t.test('mirror test for an Uint8Array of length ' + array.byteLength + ' bytes', function (t) { + t.deepEqual(encoder.decode(encoder.encode(array)), Buffer.from(array), 'must stay the same') + t.end() + }) + }) + + t.end() +}) diff --git a/node_modules/msgpack5/test/31-chars-strings.js b/node_modules/msgpack5/test/31-chars-strings.js new file mode 100644 index 0000000000..239aeb6105 --- /dev/null +++ b/node_modules/msgpack5/test/31-chars-strings.js @@ -0,0 +1,59 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encode/decode strings with max 31 of length', function (t) { + const encoder = msgpack() + const all = [] + + // build base + for (let i = ''; i.length < 32; i += 'a') { + all.push(i) + } + + all.forEach(function (str) { + t.test('encoding a string of length ' + str.length, function (t) { + const buf = encoder.encode(str) + t.equal(buf.length, 1 + Buffer.byteLength(str), 'must be the proper length') + t.equal(buf.readUInt8(0) & 0xe0, 0xa0, 'must have the proper header') + t.equal(buf.readUInt8(0) & 0x1f, Buffer.byteLength(str), 'must include the str length') + t.equal(buf.toString('utf8', 1, 
Buffer.byteLength(str) + 2), str, 'must decode correctly') + t.end() + }) + + t.test('decoding a string of length ' + str.length, function (t) { + const buf = Buffer.allocUnsafe(1 + Buffer.byteLength(str)) + buf[0] = 0xa0 | Buffer.byteLength(str) + if (str.length > 0) { + buf.write(str, 1) + } + t.equal(encoder.decode(buf), str, 'must decode correctly') + t.end() + }) + + t.test('mirror test a string of length ' + str.length, function (t) { + t.equal(encoder.decode(encoder.encode(str)), str, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding a chopped string', function (t) { + const encoder = msgpack() + const str = 'aaa' + let buf = Buffer.allocUnsafe(1 + Buffer.byteLength(str)) + buf[0] = 0xa0 | Buffer.byteLength(str) + 2 // set bigger size + buf.write(str, 1) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/32-bits-signed-integers.js b/node_modules/msgpack5/test/32-bits-signed-integers.js new file mode 100644 index 0000000000..1bb9c4ad4b --- /dev/null +++ b/node_modules/msgpack5/test/32-bits-signed-integers.js @@ -0,0 +1,55 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encoding/decoding 32-bits big-endian signed integers', function (t) { + const encoder = msgpack() + const allNum = [] + + for (let i = 32769; i < 214748364; i += 10235023) { + allNum.push(-i) + } + + allNum.push(-214748364) + + allNum.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 5, 'must have 5 bytes') + t.equal(buf[0], 0xd2, 'must have the proper header') + t.equal(buf.readInt32BE(1), num, 'must decode correctly') + t.end() + }) + + 
t.test('decoding ' + num, function (t) { + const buf = Buffer.allocUnsafe(5) + buf[0] = 0xd2 + buf.writeInt32BE(num, 1) + t.equal(encoder.decode(buf), num, 'must decode correctly') + t.end() + }) + + t.test('mirror test ' + num, function (t) { + t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete 32-bits big-endian integer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(4) + buf[0] = 0xd2 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/32-bits-unsigned-integers.js b/node_modules/msgpack5/test/32-bits-unsigned-integers.js new file mode 100644 index 0000000000..4d73505f20 --- /dev/null +++ b/node_modules/msgpack5/test/32-bits-unsigned-integers.js @@ -0,0 +1,56 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encoding/decoding 32-bits big-endian unsigned integers', function (t) { + const encoder = msgpack() + const allNum = [] + + for (let i = 65536; i < 0xffffffff; i += 102350237) { + allNum.push(i) + } + + allNum.push(0xfffffffe) + allNum.push(0xffffffff) + + allNum.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 5, 'must have 5 bytes') + t.equal(buf[0], 0xce, 'must have the proper header') + t.equal(buf.readUInt32BE(1), num, 'must decode correctly') + t.end() + }) + + t.test('decoding ' + num, function (t) { + const buf = Buffer.allocUnsafe(5) + buf[0] = 0xce + buf.writeUInt32BE(num, 1) + t.equal(encoder.decode(buf), num, 'must decode correctly') + t.end() + }) + + t.test('mirror test ' + num, function (t) { + 
t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete 32-bits big-endian unsigned integer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(4) + buf[0] = 0xce + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/32-bytes-strings.js b/node_modules/msgpack5/test/32-bytes-strings.js new file mode 100644 index 0000000000..b9c2d0eb9d --- /dev/null +++ b/node_modules/msgpack5/test/32-bytes-strings.js @@ -0,0 +1,39 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +test('encode/decode up to 31 bytes strings', function (t) { + const encoder = msgpack() + const all = [] + + for (let i = 'a'; i.length < 32; i += 'a') { + all.push(i) + } + + all.forEach(function (str) { + t.test('encoding a string of length ' + str.length, function (t) { + const buf = encoder.encode(str) + t.equal(buf.length, 1 + Buffer.byteLength(str), 'must have 2 bytes') + t.equal(buf[0] & 0xe0, 0xa0, 'must have the proper header') + t.equal(buf.toString('utf8', 1, Buffer.byteLength(str) + 1), str, 'must decode correctly') + t.end() + }) + + t.test('decoding a string of length ' + str.length, function (t) { + const buf = Buffer.allocUnsafe(1 + Buffer.byteLength(str)) + buf[0] = 0xa0 | Buffer.byteLength(str) + buf.write(str, 1) + t.equal(encoder.decode(buf), str, 'must decode correctly') + t.end() + }) + + t.test('mirror test a string of length ' + str.length, function (t) { + t.equal(encoder.decode(encoder.encode(str)), str, 'must stay the same') + t.end() + }) + }) + + t.end() +}) diff --git a/node_modules/msgpack5/test/4-bytes-length-arrays.js 
b/node_modules/msgpack5/test/4-bytes-length-arrays.js new file mode 100644 index 0000000000..46e92cccbd --- /dev/null +++ b/node_modules/msgpack5/test/4-bytes-length-arrays.js @@ -0,0 +1,78 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +function build (size) { + const array = [] + + for (let i = 0; i < size; i++) { + array.push(42) + } + + return array +} + +test('encode/decode arrays up to 0xffffffff elements', function (t) { + const encoder = msgpack() + + function doTest (array) { + t.test('encoding an array with ' + array.length + ' elements', function (t) { + const buf = encoder.encode(array) + // the array is full of 1-byte integers + t.equal(buf.length, 5 + array.length, 'must have the right length') + t.equal(buf.readUInt8(0), 0xdd, 'must have the proper header') + t.equal(buf.readUInt32BE(1), array.length, 'must include the array length') + t.end() + }) + + t.test('mirror test for an array of length ' + array.length, function (t) { + t.deepEqual(encoder.decode(encoder.encode(array)), array, 'must stay the same') + t.end() + }) + } + + doTest(build(0xffff + 1)) + doTest(build(0xffff + 42)) + // unable to test bigger arrays do to out of memory errors + + t.end() +}) + +test('decoding an incomplete array', function (t) { + const encoder = msgpack() + + const array = build(0xffff + 42) + let buf = Buffer.allocUnsafe(5 + array.length) + buf[0] = 0xdd + buf.writeUInt32BE(array.length + 10, 1) // set bigger size + let pos = 5 + for (let i = 0; i < array.length; i++) { + const obj = encoder.encode(array[i], true) + obj.copy(buf, pos) + pos += obj.length + } + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) + +test('decoding an incomplete header', 
function (t) { + const encoder = msgpack() + + let buf = Buffer.allocUnsafe(4) + buf[0] = 0xdd + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/4-bytes-length-buffers.js b/node_modules/msgpack5/test/4-bytes-length-buffers.js new file mode 100644 index 0000000000..27749da5ab --- /dev/null +++ b/node_modules/msgpack5/test/4-bytes-length-buffers.js @@ -0,0 +1,78 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +function build (size) { + const buf = Buffer.allocUnsafe(size) + buf.fill('a') + + return buf +} + +test('encode/decode 2^32-1 bytes buffers', function (t) { + const encoder = msgpack() + const all = [] + + all.push(build(Math.pow(2, 16))) + all.push(build(Math.pow(2, 16) + 1)) + all.push(build(Math.pow(2, 18) + 1)) + + all.forEach(function (orig) { + t.test('encoding a buffer of length ' + orig.length, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 5 + orig.length, 'must have the right length') + t.equal(buf.readUInt8(0), 0xc6, 'must have the proper header') + t.equal(buf.readUInt32BE(1), orig.length, 'must include the buf length') + t.equal(buf.toString('utf8', 5), orig.toString('utf8'), 'must decode correctly') + t.end() + }) + + t.test('decoding a buffer of length ' + orig.length, function (t) { + const buf = Buffer.allocUnsafe(5 + orig.length) + buf[0] = 0xc6 + buf.writeUInt32BE(orig.length, 1) + orig.copy(buf, 5) + t.equal(encoder.decode(buf).toString('utf8'), orig.toString('utf8'), 'must decode correctly') + t.end() + }) + + t.test('mirror test a buffer of length ' + orig.length, function (t) { + t.equal(encoder.decode(encoder.encode(orig)).toString(), orig.toString(), 'must stay 
the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding a chopped 2^32-1 bytes buffer', function (t) { + const encoder = msgpack() + const orig = build(Math.pow(2, 18)) + let buf = Buffer.allocUnsafe(5 + orig.length) + buf[0] = 0xc6 + buf[1] = Math.pow(2, 32) - 1 // set bigger size + orig.copy(buf, 5) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) + +test('decoding an incomplete header of 2^32-1 bytes buffer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(4) + buf[0] = 0xc6 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/4-bytes-length-exts.js b/node_modules/msgpack5/test/4-bytes-length-exts.js new file mode 100644 index 0000000000..e1e539f84e --- /dev/null +++ b/node_modules/msgpack5/test/4-bytes-length-exts.js @@ -0,0 +1,86 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encode/decode variable ext data up between 0x10000 and 0xffffffff', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (size, value) { + this.value = value + this.size = size + } + + function mytipeEncode (obj) { + const buf = Buffer.allocUnsafe(obj.size) + buf.fill(obj.value) + return buf + } + + function mytipeDecode (data) { + const result = new MyType(data.length, data.toString('utf8', 0, 1)) + + for (let i = 0; i < data.length; i++) { + if (data.readUInt8(0) !== data.readUInt8(i)) { + throw new Error('should all be the same') + } + } + + return result + } + + 
encoder.register(0x52, MyType, mytipeEncode, mytipeDecode) + + all.push(new MyType(0x10000, 'a')) + all.push(new MyType(0x10001, 'a')) + all.push(new MyType(0xffffff, 'a')) + + all.forEach(function (orig) { + t.test('encoding a custom obj of length ' + orig.size, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 6 + orig.size, 'must have the right length') + t.equal(buf.readUInt8(0), 0xc9, 'must have the ext header') + t.equal(buf.readUInt32BE(1), orig.size, 'must include the data length') + t.equal(buf.readUInt8(5), 0x52, 'must include the custom type id') + t.equal(buf.toString('utf8', 6, 7), orig.value, 'must decode correctly') + t.end() + }) + + t.test('mirror test with a custom obj of length ' + orig.size, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.test('decoding an incomplete variable ext data up between 0x10000 and 0xffffffff', function (t) { + const obj = encoder.encode(new MyType(0xffffff, 'a')) + let buf = Buffer.allocUnsafe(obj.length) + buf[0] = 0xc9 + buf.writeUInt32BE(obj.length + 2, 1) // set bigger size + obj.copy(buf, 5, 5, obj.length) + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.test('decoding an incomplete header of variable ext data up between 0x10000 and 0xffffffff', function (t) { + let buf = Buffer.allocUnsafe(5) + buf[0] = 0xc9 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.end() +}) diff --git a/node_modules/msgpack5/test/4-bytes-length-strings.js b/node_modules/msgpack5/test/4-bytes-length-strings.js new file mode 
100644 index 0000000000..7670151a6e --- /dev/null +++ b/node_modules/msgpack5/test/4-bytes-length-strings.js @@ -0,0 +1,83 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encode/decode 2^16 <-> (2^32 - 1) bytes strings', function (t) { + const encoder = msgpack() + const all = [] + let str + + str = Buffer.allocUnsafe(Math.pow(2, 16)) + str.fill('a') + all.push(str.toString()) + + str = Buffer.allocUnsafe(Math.pow(2, 16) + 1) + str.fill('a') + all.push(str.toString()) + + str = Buffer.allocUnsafe(Math.pow(2, 20)) + str.fill('a') + all.push(str.toString()) + + all.forEach(function (str) { + t.test('encoding a string of length ' + str.length, function (t) { + const buf = encoder.encode(str) + t.equal(buf.length, 5 + Buffer.byteLength(str), 'must be the proper length') + t.equal(buf[0], 0xdb, 'must have the proper header') + t.equal(buf.readUInt32BE(1), Buffer.byteLength(str), 'must include the str length') + t.equal(buf.toString('utf8', 5, Buffer.byteLength(str) + 5), str, 'must decode correctly') + t.end() + }) + + t.test('decoding a string of length ' + str.length, function (t) { + const buf = Buffer.allocUnsafe(5 + Buffer.byteLength(str)) + buf[0] = 0xdb + buf.writeUInt32BE(Buffer.byteLength(str), 1) + buf.write(str, 5) + t.equal(encoder.decode(buf), str, 'must decode correctly') + t.end() + }) + + t.test('mirror test a string of length ' + str.length, function (t) { + t.equal(encoder.decode(encoder.encode(str)), str, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding a chopped string', function (t) { + const encoder = msgpack() + let str + for (str = 'a'; str.length < 0xffff + 100;) { + str += 'a' + } + let buf = Buffer.allocUnsafe(5 + Buffer.byteLength(str)) + buf[0] = 0xdb + buf.writeUInt32BE(Buffer.byteLength(str) + 10, 1) // set bigger size + buf.write(str, 5) + buf = bl().append(buf) + const origLength = buf.length + 
t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) + +test('decoding an incomplete header of a string', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(4) + buf[0] = 0xdb + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/4-bytes-length-uint8arrays.js b/node_modules/msgpack5/test/4-bytes-length-uint8arrays.js new file mode 100644 index 0000000000..46147d9ad9 --- /dev/null +++ b/node_modules/msgpack5/test/4-bytes-length-uint8arrays.js @@ -0,0 +1,42 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +function build (size) { + const array = [] + let i + + for (i = 0; i < size; i++) { + array.push(42) + } + + return new Uint8Array(array) +} + +test('encode/decode 2^8-1 Uint8Arrays', function (t) { + const encoder = msgpack() + const all = [] + + all.push(build(Math.pow(2, 16))) + all.push(build(Math.pow(2, 16) + 1)) + all.push(build(Math.pow(2, 18) + 1)) + + all.forEach(function (array) { + t.test('encoding Uint8Array of length ' + array.byteLength + ' bytes', function (t) { + const buf = encoder.encode(array) + t.equal(buf.length, 5 + array.byteLength, 'must have the right length') + t.equal(buf.readUInt8(0), 0xc6, 'must have the proper header') + t.equal(buf.readUInt32BE(1), array.byteLength, 'must include the buf length') + t.end() + }) + + t.test('mirror test for an Uint8Array of length ' + array.byteLength + ' bytes', function (t) { + t.deepEqual(encoder.decode(encoder.encode(array)), Buffer.from(array), 'must stay the same') + t.end() + }) + }) + + t.end() +}) diff --git 
a/node_modules/msgpack5/test/5-bits-negative-integers.js b/node_modules/msgpack5/test/5-bits-negative-integers.js new file mode 100644 index 0000000000..76d44368dc --- /dev/null +++ b/node_modules/msgpack5/test/5-bits-negative-integers.js @@ -0,0 +1,36 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +test('encoding/decoding 5-bits negative ints', function (t) { + const encoder = msgpack() + const allNum = [] + + for (let i = 1; i <= 32; i++) { + allNum.push(-i) + } + + allNum.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 1, 'must have 1 byte') + t.equal(buf[0], num + 0x100, 'must encode correctly') + t.end() + }) + + t.test('decoding' + num, function (t) { + const buf = Buffer.from([num + 0x100]) + t.equal(encoder.decode(buf), num, 'must decode correctly') + t.end() + }) + + t.test('mirror test' + num, function (t) { + t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') + t.end() + }) + }) + + t.end() +}) diff --git a/node_modules/msgpack5/test/64-bits-signed-integers.js b/node_modules/msgpack5/test/64-bits-signed-integers.js new file mode 100644 index 0000000000..86b8342540 --- /dev/null +++ b/node_modules/msgpack5/test/64-bits-signed-integers.js @@ -0,0 +1,48 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encoding/decoding 64-bits big-endian signed integers', function (t) { + const encoder = msgpack() + const table = [ + { num: -9007199254740991, hi: 0xffe00000, lo: 0x00000001 }, + { num: -4294967297, hi: 0xfffffffe, lo: 0xffffffff }, + { num: -4294967296, hi: 0xffffffff, lo: 0x00000000 }, + { num: -4294967295, hi: 0xffffffff, lo: 0x00000001 }, + { num: -214748365, hi: 0xffffffff, lo: 0xf3333333 } + ] + + table.forEach(function (testCase) { + t.test('encoding ' + 
testCase.num, function (t) { + const buf = encoder.encode(testCase.num) + t.equal(buf.length, 9, 'must have 9 bytes') + t.equal(buf[0], 0xd3, 'must have the proper header') + t.equal(buf.readUInt32BE(1), testCase.hi, 'hi word must be properly written') + t.equal(buf.readUInt32BE(5), testCase.lo, 'lo word must be properly written') + t.end() + }) + + t.test('mirror test ' + testCase.num, function (t) { + t.equal(encoder.decode(encoder.encode(testCase.num)), testCase.num, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete 64-bits big-endian signed integer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(8) + buf[0] = 0xd3 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/64-bits-unsigned-integers.js b/node_modules/msgpack5/test/64-bits-unsigned-integers.js new file mode 100644 index 0000000000..f5d628155b --- /dev/null +++ b/node_modules/msgpack5/test/64-bits-unsigned-integers.js @@ -0,0 +1,48 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encoding/decoding 64-bits big-endian unsigned integers', function (t) { + const encoder = msgpack() + const allNum = [] + + allNum.push(0x0000000100000000) + allNum.push(0xffffffffeeeee) + + allNum.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 9, 'must have 9 bytes') + t.equal(buf[0], 0xcf, 'must have the proper header') + let result = 0 + for (let k = 7; k >= 0; k--) { + result += (buf.readUInt8(k + 1) * Math.pow(2, (8 * (7 - k)))) + } + t.equal(result, num, 'must decode correctly') + t.end() + }) + + t.test('mirror test ' + num, 
function (t) { + t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete 64-bits big-endian unsigned integer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(8) + buf[0] = 0xcf + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/7-bits-positive-integers.js b/node_modules/msgpack5/test/7-bits-positive-integers.js new file mode 100644 index 0000000000..16685ea48f --- /dev/null +++ b/node_modules/msgpack5/test/7-bits-positive-integers.js @@ -0,0 +1,36 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +test('encoding/decoding 7-bits positive ints', function (t) { + const encoder = msgpack() + const allNum = [] + + for (let i = 0; i < 126; i++) { + allNum.push(i) + } + + allNum.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 1, 'must have 1 byte') + t.equal(buf[0], num, 'must decode correctly') + t.end() + }) + + t.test('decoding ' + num, function (t) { + const buf = Buffer.from([num]) + t.equal(encoder.decode(buf), num, 'must decode correctly') + t.end() + }) + + t.test('mirror test' + num, function (t) { + t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') + t.end() + }) + }) + + t.end() +}) diff --git a/node_modules/msgpack5/test/8-bits-positive-integers.js b/node_modules/msgpack5/test/8-bits-positive-integers.js new file mode 100644 index 0000000000..b30fb2cfed --- /dev/null +++ b/node_modules/msgpack5/test/8-bits-positive-integers.js @@ -0,0 +1,51 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = 
require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encoding/decoding 8-bits integers', function (t) { + const encoder = msgpack() + const allNum = [] + + for (let i = 128; i < 256; i++) { + allNum.push(i) + } + + allNum.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 2, 'must have 2 bytes') + t.equal(buf[0], 0xcc, 'must have the proper header') + t.equal(buf[1], num, 'must decode correctly') + t.end() + }) + + t.test('decoding ' + num, function (t) { + const buf = Buffer.from([0xcc, num]) + t.equal(encoder.decode(buf), num, 'must decode correctly') + t.end() + }) + + t.test('mirror test ' + num, function (t) { + t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete 8-bits unsigned integer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(1) + buf[0] = 0xcc + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/8-bits-signed-integers.js b/node_modules/msgpack5/test/8-bits-signed-integers.js new file mode 100644 index 0000000000..e074d3563f --- /dev/null +++ b/node_modules/msgpack5/test/8-bits-signed-integers.js @@ -0,0 +1,53 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encoding/decoding 8-bits big-endian signed integers', function (t) { + const encoder = msgpack() + const allNum = [] + + for (let i = 33; i <= 128; i++) { + allNum.push(-i) + } + + allNum.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 2, 'must have 2 
bytes') + t.equal(buf[0], 0xd0, 'must have the proper header') + t.equal(buf.readInt8(1), num, 'must decode correctly') + t.end() + }) + + t.test('decoding ' + num, function (t) { + const buf = Buffer.allocUnsafe(3) + buf[0] = 0xd0 + buf.writeInt8(num, 1) + t.equal(encoder.decode(buf), num, 'must decode correctly') + t.end() + }) + + t.test('mirror test ' + num, function (t) { + t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete 8-bits big-endian signed integer', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(1) + buf[0] = 0xd0 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/NaN.js b/node_modules/msgpack5/test/NaN.js new file mode 100644 index 0000000000..bf4c1b2539 --- /dev/null +++ b/node_modules/msgpack5/test/NaN.js @@ -0,0 +1,52 @@ +'use strict' + +const test = require('tape').test +const msgpack = require('../') + +test('encode NaN as 32-bit float', function (t) { + const encoder = msgpack() + + const buf = encoder.encode(NaN) + t.equal(buf[0], 0xca) + t.equal(buf.byteLength, 5) + + t.end() +}) + +test('encode NaN as 64-bit float with forceFloat64', function (t) { + const encoder = msgpack({ forceFloat64: true }) + + const buf = encoder.encode(NaN) + + t.equal(buf[0], 0xcb) + t.equal(buf.byteLength, 9) + + t.end() +}) + +test('round-trip 32-bit NaN', function (t) { + const encoder = msgpack() + + t.assert(Object.is(encoder.decode(encoder.encode(NaN)), NaN)) + + t.end() +}) + +test('round-trip 64-bit NaN with forceFloat64', function (t) { + const encoder = msgpack({ forceFloat64: true }) + + t.assert(Object.is(encoder.decode(encoder.encode(NaN)), NaN)) + + t.end() +}) + +test('decode 64-bit NaN', function 
(t) { + const encoder = msgpack() + const buf = Buffer.alloc(9) + buf.writeUInt8(0xcb, 0) + buf.writeDoubleBE(NaN, 1) + + t.assert(Object.is(encoder.decode(buf), NaN)) + + t.end() +}) diff --git a/node_modules/msgpack5/test/booleans.js b/node_modules/msgpack5/test/booleans.js new file mode 100644 index 0000000000..02fb5be37f --- /dev/null +++ b/node_modules/msgpack5/test/booleans.js @@ -0,0 +1,21 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +test('encode/decode booleans', function (t) { + const encoder = msgpack() + + t.equal(encoder.encode(true)[0], 0xc3, 'encode true as 0xc3') + t.equal(encoder.encode(true).length, 1, 'encode true as a buffer of length 1') + t.equal(encoder.decode(Buffer.from([0xc3])), true, 'decode 0xc3 as true') + t.equal(encoder.decode(encoder.encode(true)), true, 'mirror test true') + + t.equal(encoder.encode(false)[0], 0xc2, 'encode false as 0xc2') + t.equal(encoder.encode(false).length, 1, 'encode false as a buffer of length 1') + t.equal(encoder.decode(Buffer.from([0xc2])), false, 'decode 0xc2 as false') + t.equal(encoder.decode(encoder.encode(false)), false, 'mirror test false') + + t.end() +}) diff --git a/node_modules/msgpack5/test/compatibility-mode.js b/node_modules/msgpack5/test/compatibility-mode.js new file mode 100644 index 0000000000..185a6ad5e8 --- /dev/null +++ b/node_modules/msgpack5/test/compatibility-mode.js @@ -0,0 +1,73 @@ +'use strict' + +const test = require('tape').test +const msgpack = require('../') + +function buildBuffer (size) { + const buf = Buffer.allocUnsafe(size) + buf.fill('a') + + return buf +} + +test('encode/compatibility mode', function (t) { + const compatEncoder = msgpack({ + compatibilityMode: true + }) + const defaultEncoder = msgpack({ + compatibilityMode: false + }) + + const oneBytesStr = Array(31 + 2).join('x') + const twoBytesStr = Array(255 + 2).join('x') + + t.test('default encoding a string of length ' + 
oneBytesStr.length, function (t) { + // Default: use 1 byte length string (str8) + const buf = defaultEncoder.encode(oneBytesStr) + t.equal(buf[0], 0xd9, 'must have the proper header (str8)') + t.equal(buf.toString('utf8', 2, Buffer.byteLength(oneBytesStr) + 2), oneBytesStr, 'must decode correctly') + t.end() + }) + + t.test('compat. encoding a string of length ' + oneBytesStr.length, function (t) { + // Compat. mode: use 2 byte length string (str16) + const buf = compatEncoder.encode(oneBytesStr) + t.equal(buf[0], 0xda, 'must have the proper header (str16)') + t.equal(buf.toString('utf8', 3, Buffer.byteLength(oneBytesStr) + 3), oneBytesStr, 'must decode correctly') + t.end() + }) + + t.test('encoding for a string of length ' + twoBytesStr.length, function (t) { + // Two byte strings: compat. mode should make no difference + const buf1 = defaultEncoder.encode(twoBytesStr) + const buf2 = compatEncoder.encode(twoBytesStr) + t.deepEqual(buf1, buf2, 'must be equal for two byte strings') + t.end() + }) + + const fixRawBuffer = buildBuffer(1) + const raw16Buffer = buildBuffer(Math.pow(2, 16) - 1) + const raw32Buffer = buildBuffer(Math.pow(2, 16) + 1) + + t.test('compat. encoding a Buffer of length ' + fixRawBuffer.length, function (t) { + // fix raw header: 0xa0 | 1 = 0xa1 + const buf = compatEncoder.encode(fixRawBuffer) + t.equal(buf[0], 0xa1, 'must have the proper header (fix raw)') + t.equal(buf.toString('utf8', 1, Buffer.byteLength(fixRawBuffer) + 1), fixRawBuffer.toString('utf8'), 'must decode correctly') + t.end() + }) + + t.test('compat. encoding a Buffer of length ' + raw16Buffer.length, function (t) { + const buf = compatEncoder.encode(raw16Buffer) + t.equal(buf[0], 0xda, 'must have the proper header (raw 16)') + t.equal(buf.toString('utf8', 3, Buffer.byteLength(raw16Buffer) + 3), raw16Buffer.toString('utf8'), 'must decode correctly') + t.end() + }) + + t.test('compat. 
encoding a Buffer of length ' + raw32Buffer.length, function (t) { + const buf = compatEncoder.encode(raw32Buffer) + t.equal(buf[0], 0xdb, 'must have the proper header (raw 32)') + t.equal(buf.toString('utf8', 5, Buffer.byteLength(raw32Buffer) + 5), raw32Buffer.toString('utf8'), 'must decode correctly') + t.end() + }) +}) diff --git a/node_modules/msgpack5/test/datenull.js b/node_modules/msgpack5/test/datenull.js new file mode 100644 index 0000000000..f1933372c2 --- /dev/null +++ b/node_modules/msgpack5/test/datenull.js @@ -0,0 +1,13 @@ +'use strict' +const test = require('tape').test +const msgpack = require('../') + +test('encode date is null ', function (t) { + const encoder = msgpack({ + disableTimestampEncoding: true + }) + + t.equal(encoder.encode(null)[0], 0xc0, 'encode null as null') + + t.end() +}) diff --git a/node_modules/msgpack5/test/doubles.js b/node_modules/msgpack5/test/doubles.js new file mode 100644 index 0000000000..5fe8002d06 --- /dev/null +++ b/node_modules/msgpack5/test/doubles.js @@ -0,0 +1,57 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encoding/decoding 64-bits float numbers', function (t) { + const encoder = msgpack() + const allNum = [] + + allNum.push(748365544534.2) + allNum.push(-222111111000004.2) + allNum.push(9007199254740992) + allNum.push(-9007199254740992) + + allNum.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + const dec = buf.readDoubleBE(1) + t.equal(buf.length, 9, 'must have 9 bytes') + t.equal(buf[0], 0xcb, 'must have the proper header') + t.true(Math.abs(dec - num) < 0.1, 'must decode correctly') + t.end() + }) + + t.test('decoding ' + num, function (t) { + const buf = Buffer.allocUnsafe(9) + buf[0] = 0xcb + buf.writeDoubleBE(num, 1) + const dec = encoder.decode(buf) + t.true(Math.abs(dec - num) < 0.1, 'must decode correctly') + t.end() + }) + + 
t.test('mirror test ' + num, function (t) { + const dec = encoder.decode(encoder.encode(num)) + t.true(Math.abs(dec - num) < 0.1, 'must decode correctly') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete 64-bits float numbers', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(8) + buf[0] = 0xcb + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/ext-custom-encode-check.js b/node_modules/msgpack5/test/ext-custom-encode-check.js new file mode 100644 index 0000000000..22f591f946 --- /dev/null +++ b/node_modules/msgpack5/test/ext-custom-encode-check.js @@ -0,0 +1,64 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +test('encode/decode ext with a custom object check', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (data) { + this.data = data + } + + function checkForMyType (obj) { + return obj instanceof MyType + } + + function mytypeEncode (obj) { + const buf = Buffer.allocUnsafe(2) + buf.writeUInt8(0x42, 0) + buf.writeUInt8(obj.data, 1) + return buf + } + + function mytypeDecode (data) { + return new MyType(data.readUInt8(0)) + } + + encoder.registerEncoder(checkForMyType, mytypeEncode) + encoder.registerDecoder(0x42, mytypeDecode) + + all.push(new MyType(0)) + all.push(new MyType(1)) + all.push(new MyType(42)) + + all.forEach(function (orig) { + t.test('encoding a custom obj encoded as ' + orig.data, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 3, 'must have the right length') + t.equal(buf.readUInt8(0), 0xd4, 'must have the fixext header') + t.equal(buf.readUInt8(1), 0x42, 'must include the custom type id') + t.equal(buf.readUInt8(2), 
orig.data, 'must decode correctly') + t.end() + }) + + t.test('decoding a custom obj encoded as ' + orig.data, function (t) { + const buf = Buffer.allocUnsafe(3) + buf[0] = 0xd4 + buf[1] = 0x42 + buf.writeUInt8(orig.data, 2) + t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') + t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') + t.end() + }) + + t.test('mirror test with a custom obj containing ' + orig.data, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.end() +}) diff --git a/node_modules/msgpack5/test/fixexts.js b/node_modules/msgpack5/test/fixexts.js new file mode 100644 index 0000000000..c045984432 --- /dev/null +++ b/node_modules/msgpack5/test/fixexts.js @@ -0,0 +1,497 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encode/decode 1 byte fixext data', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (data) { + this.data = data + } + + function mytypeEncode (obj) { + const buf = Buffer.allocUnsafe(1) + buf.writeUInt8(obj.data, 0) + return buf + } + + function mytypeDecode (data) { + return new MyType(data.readUInt8(0)) + } + + encoder.register(0x42, MyType, mytypeEncode, mytypeDecode) + + all.push(new MyType(0)) + all.push(new MyType(1)) + all.push(new MyType(42)) + + all.forEach(function (orig) { + t.test('encoding a custom obj encoded as ' + orig.data, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 3, 'must have the right length') + t.equal(buf.readUInt8(0), 0xd4, 'must have the fixext header') + t.equal(buf.readUInt8(1), 0x42, 'must include the custom type id') + t.equal(buf.readUInt8(2), orig.data, 'must decode correctly') + t.end() + }) + + t.test('decoding a custom obj encoded as ' + orig.data, function (t) { + const buf = Buffer.allocUnsafe(3) + buf[0] = 0xd4 + buf[1] 
= 0x42 + buf.writeUInt8(orig.data, 2) + t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') + t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') + t.end() + }) + + t.test('mirror test with a custom obj containing ' + orig.data, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('encode/decode 2 bytes fixext data', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (data) { + this.data = data + } + + function mytypeEncode (obj) { + const buf = Buffer.allocUnsafe(2) + buf.writeUInt16BE(obj.data, 0) + return buf + } + + function mytypeDecode (data) { + return new MyType(data.readUInt16BE(0)) + } + + encoder.register(0x42, MyType, mytypeEncode, mytypeDecode) + + all.push(new MyType(0)) + all.push(new MyType(1)) + all.push(new MyType(42)) + + all.forEach(function (orig) { + t.test('encoding a custom obj encoded as ' + orig.data, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 4, 'must have the right length') + t.equal(buf.readUInt8(0), 0xd5, 'must have the fixext header') + t.equal(buf.readUInt8(1), 0x42, 'must include the custom type id') + t.equal(buf.readUInt16BE(2), orig.data, 'must decode correctly') + t.end() + }) + + t.test('decoding a custom obj encoded as ' + orig.data, function (t) { + const buf = Buffer.allocUnsafe(4) + buf[0] = 0xd5 + buf[1] = 0x42 + buf.writeUInt16BE(orig.data, 2) + t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') + t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') + t.end() + }) + + t.test('mirror test with a custom obj containing ' + orig.data, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('encode/decode 4 bytes fixext data', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (data) { + this.data = 
data + } + + function mytypeEncode (obj) { + const buf = Buffer.allocUnsafe(4) + buf.writeUInt32BE(obj.data, 0) + return buf + } + + function mytypeDecode (data) { + return new MyType(data.readUInt32BE(0)) + } + + encoder.register(0x44, MyType, mytypeEncode, mytypeDecode) + + all.push(new MyType(0)) + all.push(new MyType(1)) + all.push(new MyType(42)) + + all.forEach(function (orig) { + t.test('encoding a custom obj encoded as ' + orig.data, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 6, 'must have the right length') + t.equal(buf.readUInt8(0), 0xd6, 'must have the fixext header') + t.equal(buf.readUInt8(1), 0x44, 'must include the custom type id') + t.equal(buf.readUInt32BE(2), orig.data, 'must decode correctly') + t.end() + }) + + t.test('decoding a custom obj encoded as ' + orig.data, function (t) { + const buf = Buffer.allocUnsafe(6) + buf[0] = 0xd6 + buf[1] = 0x44 + buf.writeUInt32BE(orig.data, 2) + t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') + t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') + t.end() + }) + + t.test('mirror test with a custom obj containing ' + orig.data, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('encode/decode 8 bytes fixext data', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (data) { + this.data = data + } + + function mytypeEncode (obj) { + const buf = Buffer.allocUnsafe(8) + buf.writeUInt32BE(obj.data / 2, 0) + buf.writeUInt32BE(obj.data / 2, 4) + return buf + } + + function mytypeDecode (data) { + return new MyType(data.readUInt32BE(0) + data.readUInt32BE(4)) + } + + encoder.register(0x44, MyType, mytypeEncode, mytypeDecode) + + all.push(new MyType(2)) + all.push(new MyType(4)) + all.push(new MyType(42)) + + all.forEach(function (orig) { + t.test('encoding a custom obj encoded as ' + orig.data, function (t) { + const buf = 
encoder.encode(orig) + t.equal(buf.length, 10, 'must have the right length') + t.equal(buf.readUInt8(0), 0xd7, 'must have the fixext header') + t.equal(buf.readUInt8(1), 0x44, 'must include the custom type id') + t.equal(buf.readUInt32BE(2) + buf.readUInt32BE(6), orig.data, 'must decode correctly') + t.end() + }) + + t.test('decoding a custom obj encoded as ' + orig.data, function (t) { + const buf = Buffer.allocUnsafe(10) + buf[0] = 0xd7 + buf[1] = 0x44 + buf.writeUInt32BE(orig.data / 2, 2) + buf.writeUInt32BE(orig.data / 2, 6) + t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') + t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') + t.end() + }) + + t.test('mirror test with a custom obj containing ' + orig.data, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('encode/decode 16 bytes fixext data', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (data) { + this.data = data + } + + function mytypeEncode (obj) { + const buf = Buffer.allocUnsafe(16) + buf.writeUInt32BE(obj.data / 4, 0) + buf.writeUInt32BE(obj.data / 4, 4) + buf.writeUInt32BE(obj.data / 4, 8) + buf.writeUInt32BE(obj.data / 4, 12) + return buf + } + + function mytypeDecode (data) { + return new MyType(data.readUInt32BE(0) + data.readUInt32BE(4) + data.readUInt32BE(8) + data.readUInt32BE(12)) + } + + encoder.register(0x46, MyType, mytypeEncode, mytypeDecode) + + all.push(new MyType(4)) + all.push(new MyType(8)) + all.push(new MyType(44)) + + all.forEach(function (orig) { + t.test('encoding a custom obj encoded as ' + orig.data, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 18, 'must have the right length') + t.equal(buf.readUInt8(0), 0xd8, 'must have the fixext header') + t.equal(buf.readUInt8(1), 0x46, 'must include the custom type id') + t.equal(buf.readUInt32BE(2) + buf.readUInt32BE(6) + buf.readUInt32BE(10) + 
buf.readUInt32BE(14), orig.data, 'must decode correctly') + t.end() + }) + + t.test('decoding a custom obj encoded as ' + orig.data, function (t) { + const buf = Buffer.allocUnsafe(18) + buf[0] = 0xd8 + buf[1] = 0x46 + buf.writeUInt32BE(orig.data / 4, 2) + buf.writeUInt32BE(orig.data / 4, 6) + buf.writeUInt32BE(orig.data / 4, 10) + buf.writeUInt32BE(orig.data / 4, 14) + t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') + t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') + t.end() + }) + + t.test('mirror test with a custom obj containing ' + orig.data, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('encode/decode fixext inside a map', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (data) { + this.data = data + } + + function mytypeEncode (obj) { + const buf = Buffer.allocUnsafe(4) + buf.writeUInt32BE(obj.data, 0) + return buf + } + + function mytypeDecode (data) { + return new MyType(data.readUInt32BE(0)) + } + + encoder.register(0x42, MyType, mytypeEncode, mytypeDecode) + + all.push({ ret: new MyType(42) }) + all.push({ a: new MyType(42), b: new MyType(43) }) + + all.push([1, 2, 3, 4, 5, 6].reduce(function (acc, key) { + acc[key] = new MyType(key) + return acc + }, {})) + + all.forEach(function (orig) { + t.test('mirror test with a custom obj inside a map', function (t) { + const encoded = encoder.encode(orig) + t.deepEqual(encoder.decode(encoded), orig, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('encode/decode 8 bytes fixext data', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (data) { + this.data = data + } + + function mytypeEncode (obj) { + const buf = Buffer.allocUnsafe(8) + buf.writeUInt32BE(obj.data / 2, 0) + buf.writeUInt32BE(obj.data / 2, 4) + return buf + } + + function mytypeDecode (data) { + return new MyType(data.readUInt32BE(0) + 
data.readUInt32BE(4)) + } + + encoder.register(0x44, MyType, mytypeEncode, mytypeDecode) + + all.push(new MyType(2)) + all.push(new MyType(4)) + all.push(new MyType(42)) + + all.forEach(function (orig) { + t.test('encoding a custom obj encoded as ' + orig.data, function (t) { + const buf = encoder.encode(orig) + t.equal(buf.length, 10, 'must have the right length') + t.equal(buf.readUInt8(0), 0xd7, 'must have the fixext header') + t.equal(buf.readUInt8(1), 0x44, 'must include the custom type id') + t.equal(buf.readUInt32BE(2) + buf.readUInt32BE(6), orig.data, 'must decode correctly') + t.end() + }) + + t.test('decoding a custom obj encoded as ' + orig.data, function (t) { + const buf = Buffer.allocUnsafe(10) + buf[0] = 0xd7 + buf[1] = 0x44 + buf.writeUInt32BE(orig.data / 2, 2) + buf.writeUInt32BE(orig.data / 2, 6) + t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') + t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') + t.end() + }) + + t.test('mirror test with a custom obj containing ' + orig.data, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('encode/decode 16 bytes fixext data', function (t) { + const encoder = msgpack() + const all = [] + + function MyType (data) { + this.data = data + } + + function mytypeEncode (obj) { + const buf = Buffer.allocUnsafe(16) + buf.writeUInt32BE(obj.data / 4, 0) + buf.writeUInt32BE(obj.data / 4, 4) + buf.writeUInt32BE(obj.data / 4, 8) + buf.writeUInt32BE(obj.data / 4, 12) + return buf + } + + function mytypeDecode (data) { + return new MyType(data.readUInt32BE(0) + data.readUInt32BE(4) + data.readUInt32BE(8) + data.readUInt32BE(12)) + } + + encoder.register(0x46, MyType, mytypeEncode, mytypeDecode) + + all.push(new MyType(4)) + all.push(new MyType(8)) + all.push(new MyType(44)) + + all.forEach(function (orig) { + t.test('encoding a custom obj encoded as ' + orig.data, function (t) { + const buf = 
encoder.encode(orig) + t.equal(buf.length, 18, 'must have the right length') + t.equal(buf.readUInt8(0), 0xd8, 'must have the fixext header') + t.equal(buf.readUInt8(1), 0x46, 'must include the custom type id') + t.equal(buf.readUInt32BE(2) + buf.readUInt32BE(6) + buf.readUInt32BE(10) + buf.readUInt32BE(14), orig.data, 'must decode correctly') + t.end() + }) + + t.test('decoding a custom obj encoded as ' + orig.data, function (t) { + const buf = Buffer.allocUnsafe(18) + buf[0] = 0xd8 + buf[1] = 0x46 + buf.writeUInt32BE(orig.data / 4, 2) + buf.writeUInt32BE(orig.data / 4, 6) + buf.writeUInt32BE(orig.data / 4, 10) + buf.writeUInt32BE(orig.data / 4, 14) + t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') + t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') + t.end() + }) + + t.test('mirror test with a custom obj containing ' + orig.data, function (t) { + t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') + t.end() + }) + }) + + t.test('decoding an incomplete 1 byte fixext data', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(2) + buf[0] = 0xd4 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.test('decoding an incomplete 2 byte fixext data', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(3) + buf[0] = 0xd5 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.test('decoding an incomplete 4 byte fixext data', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(5) + buf[0] = 0xd6 + buf = bl().append(buf) + const 
origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.test('decoding an incomplete 8 byte fixext data', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(9) + buf[0] = 0xd7 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.test('decoding an incomplete 16 byte fixext data', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(17) + buf[0] = 0xd8 + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() + }) + + t.end() +}) diff --git a/node_modules/msgpack5/test/floats.js b/node_modules/msgpack5/test/floats.js new file mode 100644 index 0000000000..540a3fb735 --- /dev/null +++ b/node_modules/msgpack5/test/floats.js @@ -0,0 +1,117 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +test('encoding/decoding 32-bits float numbers', function (t) { + const encoder = msgpack() + const float32 = [ + 1.5, + 0.15625, + -2.5 + ] + + const float64 = [ + Math.pow(2, 150), + 1.337, + 2.2 + ] + + float64.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 9, 'must have 5 bytes') + t.equal(buf[0], 0xcb, 'must have the proper header') + + const dec = buf.readDoubleBE(1) + t.equal(dec, num, 'must decode correctly') + t.end() + }) + + t.test('decoding ' + num, function (t) { + const buf = 
Buffer.allocUnsafe(9) + buf[0] = 0xcb + buf.writeDoubleBE(num, 1) + + const dec = encoder.decode(buf) + t.equal(dec, num, 'must decode correctly') + t.end() + }) + + t.test('mirror test ' + num, function (t) { + const dec = encoder.decode(encoder.encode(num)) + t.equal(dec, num, 'must decode correctly') + t.end() + }) + }) + + float32.forEach(function (num) { + t.test('encoding ' + num, function (t) { + const buf = encoder.encode(num) + t.equal(buf.length, 5, 'must have 5 bytes') + t.equal(buf[0], 0xca, 'must have the proper header') + + const dec = buf.readFloatBE(1) + t.equal(dec, num, 'must decode correctly') + t.end() + }) + + t.test('forceFloat64 encoding ' + num, function (t) { + const enc = msgpack({ forceFloat64: true }) + const buf = enc.encode(num) + + t.equal(buf.length, 9, 'must have 9 bytes') + t.equal(buf[0], 0xcb, 'must have the proper header') + + const dec = buf.readDoubleBE(1) + t.equal(dec, num, 'must decode correctly') + t.end() + }) + + t.test('decoding ' + num, function (t) { + const buf = Buffer.allocUnsafe(5) + buf[0] = 0xca + buf.writeFloatBE(num, 1) + + const dec = encoder.decode(buf) + t.equal(dec, num, 'must decode correctly') + t.end() + }) + + t.test('mirror test ' + num, function (t) { + const dec = encoder.decode(encoder.encode(num)) + t.equal(dec, num, 'must decode correctly') + t.end() + }) + }) + + t.end() +}) + +test('decoding an incomplete 32-bits float numbers', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(4) + buf[0] = 0xca + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) + +test('decoding an incomplete 64-bits float numbers', function (t) { + const encoder = msgpack() + let buf = Buffer.allocUnsafe(8) + buf[0] = 0xcb + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () 
{ + encoder.decode(buf) + }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/functions.js b/node_modules/msgpack5/test/functions.js new file mode 100644 index 0000000000..4a50103777 --- /dev/null +++ b/node_modules/msgpack5/test/functions.js @@ -0,0 +1,19 @@ +'use strict' + +const test = require('tape').test +const msgpack = require('../') +const noop = function () {} + +test('encode a function inside a map', function (t) { + const encoder = msgpack() + const expected = { + hello: 'world' + } + const toEncode = { + hello: 'world', + func: noop + } + + t.deepEqual(encoder.decode(encoder.encode(toEncode)), expected, 'remove the function from the map') + t.end() +}) diff --git a/node_modules/msgpack5/test/levelup-encoding.js b/node_modules/msgpack5/test/levelup-encoding.js new file mode 100644 index 0000000000..6bcab775c6 --- /dev/null +++ b/node_modules/msgpack5/test/levelup-encoding.js @@ -0,0 +1,69 @@ +'use strict' + +const test = require('tape').test +const level = require('memdb') +const msgpack = require('../') + +test('msgpack level encoding put', function (t) { + t.plan(4) + + const pack = msgpack() + const db = level({ + valueEncoding: pack + }) + const obj = { my: 'obj' } + + db.put('hello', obj, function (err) { + t.error(err, 'put has no errors') + db.get('hello', { valueEncoding: 'binary' }, function (err, buf) { + t.error(err, 'get has no error') + t.deepEqual(pack.decode(buf), obj) + db.close(function () { + t.pass('db closed') + }) + }) + }) +}) + +test('msgpack level encoding get', function (t) { + t.plan(4) + + const pack = msgpack() + const db = level({ + valueEncoding: pack + }) + const obj = { my: 'obj' } + const buf = pack.encode(obj) + + db.put('hello', buf, { valueEncoding: 'binary' }, function (err) { + t.error(err, 'putting has no errors') + db.get('hello', function (err, result) { + t.error(err, 'get has no 
error') + t.deepEqual(result, obj) + db.close(function () { + t.pass('db closed') + }) + }) + }) +}) + +test('msgpack level encoding mirror', function (t) { + t.plan(4) + + const pack = msgpack() + const db = level({ + valueEncoding: pack + }) + const obj = { my: 'obj' } + + db.put('hello', obj, function (err) { + t.error(err, 'putting has no errors') + db.get('hello', function (err, result) { + t.error(err, 'get has no error') + t.deepEqual(result, obj) + db.close(function () { + t.pass('db closed') + }) + }) + }) +}) diff --git a/node_modules/msgpack5/test/map-with-object-key.js b/node_modules/msgpack5/test/map-with-object-key.js new file mode 100644 index 0000000000..ad7c48eda8 --- /dev/null +++ b/node_modules/msgpack5/test/map-with-object-key.js @@ -0,0 +1,25 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +test('encode/decode map with multiple short buffers as both keys and values', function (t) { + const first = Buffer.from('first') + const second = Buffer.from('second') + const third = Buffer.from('third') + + const mapping = new Map().set(first, second) + .set(second, third) + .set(third, first) + + const pack = msgpack() + + const newMapping = pack.decode(pack.encode(mapping)) + + t.equals(newMapping.size, mapping.size) + t.deepEqual([...newMapping.keys()], [...mapping.keys()]) + t.deepEqual([...newMapping.values()], [...mapping.values()]) + + t.end() +}) diff --git a/node_modules/msgpack5/test/nested-containers.js b/node_modules/msgpack5/test/nested-containers.js new file mode 100644 index 0000000000..0ffcbd3674 --- /dev/null +++ b/node_modules/msgpack5/test/nested-containers.js @@ -0,0 +1,44 @@ +'use strict' + +const test = require('tape').test +const msgpack = require('../') + +test('encode/decode nested containers (map/array)', function (t) { + const encoder = msgpack() + + function doEncodeDecode (value) { + return encoder.decode(encoder.encode(value)) + } + + 
function preserveTest (A, message = 'works') { + const B = doEncodeDecode(A) + t.deepEqual(A, B, message) + } + + preserveTest({ + hello: 'world', + digit: 111, + array: [1, 2, 3, 4, 'string', { hello: 'world' }] + }) + + preserveTest([ + [ + { + hello: 'world', + array: [1, 2, 3, 4, 'string', { hello: 'world' }] + }, + { + digit: 111 + } + ], + [ + { + hello: 'world', + digit: 111, + array: [1, 2, 3, 4, 'string', { hello: 'world' }] + } + ] + ]) + + t.end() +}) diff --git a/node_modules/msgpack5/test/null.js b/node_modules/msgpack5/test/null.js new file mode 100644 index 0000000000..4e4de06596 --- /dev/null +++ b/node_modules/msgpack5/test/null.js @@ -0,0 +1,16 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +test('encode/decode null', function (t) { + const encoder = msgpack() + + t.equal(encoder.encode(null)[0], 0xc0, 'encode null as 0xc0') + t.equal(encoder.encode(null).length, 1, 'encode a buffer of length 1') + t.equal(encoder.decode(Buffer.from([0xc0])), null, 'decode 0xc0 as null') + t.equal(encoder.decode(encoder.encode(null)), null, 'mirror test null') + + t.end() +}) diff --git a/node_modules/msgpack5/test/numerictypeasserts.js b/node_modules/msgpack5/test/numerictypeasserts.js new file mode 100644 index 0000000000..098e0d1e20 --- /dev/null +++ b/node_modules/msgpack5/test/numerictypeasserts.js @@ -0,0 +1,49 @@ +'use strict' + +const test = require('tape').test +const msgpack = require('../') + +test('custom type registeration assertions', function (t) { + const encoder = msgpack() + + function Type0 (value) { + this.value = value + } + + function type0Encode (value) { + return new Type0(value) + } + + function type0Decode (type0) { + return type0.value + } + + function TypeNeg (value) { + this.value = value + } + + function typeNegEncode (value) { + return new TypeNeg(value) + } + + function typeNegDecode (typeneg) { + return typeneg.value + } + + 
t.doesNotThrow(function () { + encoder.register(0, Type0, type0Decode, type0Encode) + }, undefined, 'A type registered at 0 should not throw.') + t.throws(function () { + encoder.register(-1, TypeNeg, typeNegEncode, typeNegDecode) + }, undefined, 'A type registered as a negative value should throw') + + const encoded = encoder.encode(new Type0('hi')) + let decoded + t.equal(encoded.readUInt8(1), 0x0, 'must use the custom type assigned') + t.doesNotThrow(function () { + decoded = encoder.decode(encoded) + }, undefined, 'decoding custom 0 type should not throw') + t.equal(decoded instanceof Type0, true, 'must decode to custom type instance') + + t.end() +}) diff --git a/node_modules/msgpack5/test/object-prototype-poisoning.js b/node_modules/msgpack5/test/object-prototype-poisoning.js new file mode 100644 index 0000000000..641b25f81a --- /dev/null +++ b/node_modules/msgpack5/test/object-prototype-poisoning.js @@ -0,0 +1,49 @@ +'use strict' + +const test = require('tape').test +const msgpack = require('../') + +test('decode throws when object has forbidden __proto__ property', function (t) { + const encoder = msgpack() + + const payload = { hello: 'world' } + Object.defineProperty(payload, '__proto__', { + value: { polluted: true }, + enumerable: true + }) + + const encoded = encoder.encode(payload) + + t.throws(() => encoder.decode(encoded), /Object contains forbidden prototype property/) + t.end() +}) + +test('decode ignores forbidden __proto__ property if protoAction is "ignore"', function (t) { + const encoder = msgpack({ protoAction: 'ignore' }) + + const payload = { hello: 'world' } + Object.defineProperty(payload, '__proto__', { + value: { polluted: true }, + enumerable: true + }) + + const decoded = encoder.decode(encoder.encode(payload)) + + t.equal(decoded.polluted, true) + t.end() +}) + +test('decode removes forbidden __proto__ property if protoAction is "remove"', function (t) { + const encoder = msgpack({ protoAction: 'remove' }) + + const payload = { 
hello: 'world' } + Object.defineProperty(payload, '__proto__', { + value: { polluted: true }, + enumerable: true + }) + + const decoded = encoder.decode(encoder.encode(payload)) + + t.equal(decoded.polluted, undefined) + t.end() +}) diff --git a/node_modules/msgpack5/test/object-with-arrays.js b/node_modules/msgpack5/test/object-with-arrays.js new file mode 100644 index 0000000000..d2aa429e82 --- /dev/null +++ b/node_modules/msgpack5/test/object-with-arrays.js @@ -0,0 +1,69 @@ +'use strict' + +const test = require('tape').test +const msgpack = require('../') +const bl = require('bl') + +function build (size) { + const array = [] + let i + + for (i = 0; i < size; i++) { + array.push(42) + } + + return array +} + +test('decoding a map with multiple big arrays', function (t) { + const map = { + first: build(0xffff + 42), + second: build(0xffff + 42) + } + const pack = msgpack() + + t.deepEqual(pack.decode(pack.encode(map)), map) + t.end() +}) + +test('decoding a map with multiple big arrays. First one is incomplete', function (t) { + const array = build(0xffff + 42) + const map = { + first: array, + second: build(0xffff + 42) + } + const pack = msgpack() + + let buf = pack.encode(map) + // 1 (fixmap's header 0x82) + first key's length + 1 (first array's 0xdd) + const sizePosOfFirstArray = 1 + pack.encode('first').length + 1 + buf.writeUInt32BE(array.length + 10, sizePosOfFirstArray) // set first array's size bigger than its actual size + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + pack.decode(buf) + }, pack.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) + +test('decoding a map with multiple big arrays. 
Second one is incomplete', function (t) { + const array = build(0xffff + 42) + const map = { + first: array, + second: build(0xffff + 42) + } + const pack = msgpack() + + let buf = pack.encode(map) + // 1 (fixmap's header 0x82) + first key-value pair's length + second key's length + 1 (second array's 0xdd) + const sizePosOfSecondArray = 1 + pack.encode('first').length + pack.encode(array).length + pack.encode('second').length + 1 + buf.writeUInt32BE(array.length + 10, sizePosOfSecondArray) // set second array's size bigger than its actual size + buf = bl().append(buf) + const origLength = buf.length + t.throws(function () { + pack.decode(buf) + }, pack.IncompleteBufferError, 'must throw IncompleteBufferError') + t.equals(buf.length, origLength, 'must not consume any byte') + t.end() +}) diff --git a/node_modules/msgpack5/test/object-with-buffers.js b/node_modules/msgpack5/test/object-with-buffers.js new file mode 100644 index 0000000000..d1a3a6088b --- /dev/null +++ b/node_modules/msgpack5/test/object-with-buffers.js @@ -0,0 +1,33 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const fs = require('fs') +const p = require('path') +const msgpack = require('../') + +test('encode/decode map with multiple short buffers', function (t) { + const map = { + first: Buffer.from('first'), + second: Buffer.from('second'), + third: Buffer.from('third') + } + const pack = msgpack() + + t.deepEqual(pack.decode(pack.encode(map)), map) + t.end() +}) + +if (process.title !== 'browser') { + test('encode/decode map with all files in this directory', function (t) { + const files = fs.readdirSync(__dirname) + const map = files.reduce(function (acc, file) { + acc[file] = fs.readFileSync(p.join(__dirname, file)) + return acc + }, {}) + const pack = msgpack() + + t.deepEqual(pack.decode(pack.encode(map)), map) + t.end() + }) +} diff --git a/node_modules/msgpack5/test/object-with-many-keys.js 
b/node_modules/msgpack5/test/object-with-many-keys.js new file mode 100644 index 0000000000..d7dc05fc38 --- /dev/null +++ b/node_modules/msgpack5/test/object-with-many-keys.js @@ -0,0 +1,71 @@ +'use strict' + +const test = require('tape').test +const msgpack = require('../') + +test('encode/decode map with 10 keys', function (t) { + const map = {} + + for (let i = 0; i < 10; i++) { + map[i] = i + } + + const pack = msgpack() + + const encoded = pack.encode(map) + + // map16 byte + t.equal(encoded[0], 0x8A) + + t.deepEqual(pack.decode(encoded), map) + t.end() +}) + +test('encode/decode map with 10000 keys', function (t) { + const map = {} + + for (let i = 0; i < 10000; i++) { + map[i] = i + } + + const pack = msgpack() + + const encoded = pack.encode(map) + + // map16 byte + t.equal(encoded[0], 0xde) + + t.deepEqual(pack.decode(encoded), map) + t.end() +}) + +test('encode/decode map with 100000 keys', function (t) { + const map = {} + + for (let i = 0; i < 100000; i++) { + map[i] = i + } + + const pack = msgpack() + + const encoded = pack.encode(map) + + // map32 byte + t.equal(encoded[0], 0xdf) + + t.deepEqual(pack.decode(encoded), map) + t.end() +}) + +test('encode/decode map with 1000000 keys', function (t) { + const map = {} + + for (let i = 0; i < 1000000; i++) { + map[i] = i + } + + const pack = msgpack() + + t.deepEqual(pack.decode(pack.encode(map)), map) + t.end() +}) diff --git a/node_modules/msgpack5/test/object-with-strings.js b/node_modules/msgpack5/test/object-with-strings.js new file mode 100644 index 0000000000..e17f3fdbd0 --- /dev/null +++ b/node_modules/msgpack5/test/object-with-strings.js @@ -0,0 +1,32 @@ +'use strict' + +const test = require('tape').test +const fs = require('fs') +const p = require('path') +const msgpack = require('../') + +test('encode/decode map with multiple short buffers', function (t) { + const map = { + first: 'first', + second: 'second', + third: 'third' + } + const pack = msgpack() + + 
t.deepEqual(pack.decode(pack.encode(map)), map) + t.end() +}) + +if (process.title !== 'browser') { + test('encode/decode map with all files in this directory', function (t) { + const files = fs.readdirSync(__dirname) + const map = files.reduce(function (acc, file) { + acc[file] = fs.readFileSync(p.join(__dirname, file)).toString('utf8') + return acc + }, {}) + const pack = msgpack() + + t.deepEqual(pack.decode(pack.encode(map)), map) + t.end() + }) +} diff --git a/node_modules/msgpack5/test/prefer-map.js b/node_modules/msgpack5/test/prefer-map.js new file mode 100644 index 0000000000..d975687981 --- /dev/null +++ b/node_modules/msgpack5/test/prefer-map.js @@ -0,0 +1,71 @@ +const test = require('tape').test +const msgpack = require('../') + +const map = new Map() + .set('a', 1) + .set('1', 'hello') + .set('world', 2) + .set('0', 'again') + .set('01', null) + +test('round-trip string-keyed Maps', function (t) { + const encoder = msgpack({ preferMap: true }) + + for (const input of [new Map(), map]) { + const result = encoder.decode(encoder.encode(input)) + t.assert(result instanceof Map) + t.deepEqual(result, input) + } + + t.end() +}) + +test('preserve iteration order of string-keyed Maps', function (t) { + const encoder = msgpack({ preferMap: true }) + const decoded = encoder.decode(encoder.encode(map)) + + t.deepEqual([...decoded.keys()], [...map.keys()]) + + t.end() +}) + +test('user can still encode objects as ext maps', function (t) { + const encoder = msgpack({ preferMap: true }) + const tag = 0x42 + + // Polyfill Object.fromEntries for node 10 + const fromEntries = Object.fromEntries || (iterable => { + const object = {} + for (const [property, value] of iterable) { + object[property] = value + } + return object + }) + + encoder.register( + tag, + Object, + obj => encoder.encode(new Map(Object.entries(obj))), + data => fromEntries(encoder.decode(data)) + ) + + const inputs = [ + {}, + new Map(), + { foo: 'bar' }, + new Map().set('foo', 'bar'), + new 
Map().set(null, null), + { 0: 'baz' }, + ['baz'] + ] + + for (const input of inputs) { + const buf = encoder.encode(input) + const result = encoder.decode(buf) + + t.deepEqual(result, input) + t.equal(Object.getPrototypeOf(result), Object.getPrototypeOf(input)) + } + + t.end() +}) diff --git a/node_modules/msgpack5/test/sparse-arrays.js b/node_modules/msgpack5/test/sparse-arrays.js new file mode 100644 index 0000000000..c0c1d1425c --- /dev/null +++ b/node_modules/msgpack5/test/sparse-arrays.js @@ -0,0 +1,18 @@ +'use strict' + +const test = require('tape').test +const msgpack = require('../') + +test('throws when encoding sparse arrays', function (t) { + const encoder = msgpack() + + t.deepEqual(encoder.decode(encoder.encode(new Array(0))), []) + t.throws(() => encoder.encode(new Array(1)), /Sparse arrays/) + t.throws(() => encoder.encode(new Array(100)), /Sparse arrays/) + + const sparse = [1, 2, 3, 4] + delete sparse[3] + t.throws(() => encoder.encode(sparse), /Sparse arrays/) + + t.end() +}) diff --git a/node_modules/msgpack5/test/streams.js b/node_modules/msgpack5/test/streams.js new file mode 100644 index 0000000000..f22c9bceb1 --- /dev/null +++ b/node_modules/msgpack5/test/streams.js @@ -0,0 +1,261 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') +const BufferList = require('bl') + +test('must send an object through', function (t) { + t.plan(1) + + const pack = msgpack() + const encoder = pack.encoder() + const decoder = pack.decoder() + const data = { hello: 'world' } + + encoder.pipe(decoder) + + decoder.on('data', function (chunk) { + t.deepEqual(chunk, data) + }) + + encoder.end(data) +}) + +test('must send three objects through', function (t) { + const pack = msgpack() + const encoder = pack.encoder() + const decoder = pack.decoder() + const data = [ + { hello: 1 }, + { hello: 2 }, + { hello: 3 } + ] + + t.plan(data.length) + + decoder.on('data', function (chunk) { + 
t.deepEqual(chunk, data.shift()) + }) + + data.forEach(encoder.write.bind(encoder)) + + encoder.pipe(decoder) + + encoder.end() +}) + +test('end-to-end', function (t) { + const pack = msgpack() + const encoder = pack.encoder() + const decoder = pack.decoder() + const data = [ + { hello: 1 }, + { hello: 2 }, + { hello: 3 } + ] + + t.plan(data.length) + + decoder.on('data', function (chunk) { + t.deepEqual(chunk, data.shift()) + }) + + data.forEach(encoder.write.bind(encoder)) + + encoder.end() + + encoder.pipe(decoder) +}) + +test('encoding error wrapped', function (t) { + t.plan(1) + + const pack = msgpack() + const encoder = pack.encoder() + const data = new MyType() + + function MyType () { + } + + function mytypeEncode () { + throw new Error('muahha') + } + + function mytypeDecode () { + } + + pack.register(0x42, MyType, mytypeEncode, mytypeDecode) + + encoder.on('error', function (err) { + t.equal(err.message, 'muahha') + }) + + encoder.end(data) +}) + +test('decoding error wrapped', function (t) { + t.plan(1) + + const pack = msgpack() + const encoder = pack.encoder() + const decoder = pack.decoder() + const data = new MyType() + + function MyType () { + } + + function mytypeEncode () { + return Buffer.allocUnsafe(0) + } + + function mytypeDecode () { + throw new Error('muahha') + } + + pack.register(0x42, MyType, mytypeEncode, mytypeDecode) + + decoder.on('error', function (err) { + t.equal(err.message, 'muahha') + }) + + encoder.end(data) + + encoder.pipe(decoder) +}) + +test('decoding error wrapped', function (t) { + t.plan(1) + + const pack = msgpack() + const encoder = pack.encoder({ header: false }) + const decoder = pack.decoder({ header: false }) + const data = new MyType() + + function MyType () { + } + + function mytypeEncode () { + return Buffer.allocUnsafe(0) + } + + function mytypeDecode () { + throw new Error('muahha') + } + + pack.register(0x42, MyType, mytypeEncode, mytypeDecode) + + decoder.on('error', function (err) { + t.equal(err.message, 
'muahha') + }) + + encoder.end(data) + + encoder.pipe(decoder) +}) + +test('concatenated buffers work', function (t) { + const pack = msgpack() + const encoder = pack.encoder() + const decoder = pack.decoder() + const data = [ + { hello: 1 }, + { hello: 2 }, + { hello: 3 } + ] + + t.plan(data.length) + + const bl = new BufferList() + encoder.on('data', bl.append.bind(bl)) + + data.forEach(encoder.write.bind(encoder)) + + decoder.on('data', function (d) { + t.deepEqual(d, data.shift()) + }) + + encoder.once('finish', function () { + const buf = bl.slice() + decoder.write(buf) + }) + + encoder.end() +}) + +test('nil processing works', function (t) { + t.plan(3) + + const pack = msgpack() + const decoder = pack.decoder({ wrap: true }) + let decodedItemIndex = 0 + + decoder.on('data', function (chunk) { + decodedItemIndex++ + t.deepEqual(chunk.value, decodedItemIndex === 1 ? null : false) + }) + + decoder.on('end', function () { + t.equal(decodedItemIndex, 2) + }) + + decoder.write(Buffer.from([0xc0, 0xc2])) + decoder.end() +}) + +test('encoder wrap mode works', function (t) { + t.plan(1) + + const pack = msgpack() + const encoder = pack.encoder({ wrap: true }) + const decoder = pack.decoder() + const data = { hello: 'world' } + const wrappedData = { value: data } + + encoder.pipe(decoder) + + decoder.on('data', function (chunk) { + t.deepEqual(chunk, data) + }) + + encoder.end(wrappedData) +}) + +test('encoder/decoder wrap mode must send an object through', function (t) { + t.plan(1) + + const pack = msgpack() + const encoder = pack.encoder({ wrap: true }) + const decoder = pack.decoder({ wrap: true }) + const data = { value: { hello: 'world' } } + + encoder.pipe(decoder) + + decoder.on('data', function (chunk) { + t.deepEqual(chunk, data) + }) + + encoder.end(data) +}) + +test('encoder pack null', function (t) { + t.plan(2) + const pack = msgpack() + const encoder = pack.encoder({ wrap: true }) + const decoder = pack.decoder({ wrap: true }) + + encoder.pipe(decoder) 
+ + let decodedItemIndex = 0 + decoder.on('data', function (chunk) { + decodedItemIndex++ + t.deepEqual(chunk.value, null) + }) + + decoder.on('end', function () { + t.equal(decodedItemIndex, 1) + }) + + encoder.write({ value: null }) + encoder.end() +}) diff --git a/node_modules/msgpack5/test/timestamps.js b/node_modules/msgpack5/test/timestamps.js new file mode 100644 index 0000000000..aae5b5d366 --- /dev/null +++ b/node_modules/msgpack5/test/timestamps.js @@ -0,0 +1,116 @@ +'use strict' + +const Buffer = require('safe-buffer').Buffer +const test = require('tape').test +const msgpack = require('../') + +test('timestamp disabling', function (t) { + const encoder = msgpack({ disableTimestampEncoding: true }) + const timestamps = [ + [new Date('2018-01-02T03:04:05.000000000Z'), [0x80]] + ] + + timestamps.forEach(function (testcase) { + const item = testcase[0] + const expected = testcase[1] + + t.test('encoding ' + item.toString(), function (t) { + const buf = encoder.encode(item).slice() + t.equal(buf.length, expected.length, 'must have ' + expected.length + ' bytes') + t.equal(buf[0], expected[0], 'Should return 0x80 ({}) by default') + t.end() + }) + }) + + t.end() +}) +test('encoding/decoding timestamp 64', function (t) { + const encoder = msgpack() + const timestamps = [ + [new Date('2018-01-02T03:04:05.000000000Z'), [0xd6, 0xff, 0x5a, 0x4a, 0xf6, 0xa5]], + [new Date('2038-01-19T03:14:08.000000000Z'), [0xd6, 0xff, 0x80, 0x00, 0x00, 0x00]], + [new Date('2038-01-19T03:14:07.999000000Z'), [0xd7, 0xff, 0xee, 0x2E, 0x1F, 0x00, 0x7f, 0xff, 0xff, 0xff]], + [new Date('2106-02-07T06:28:16.000000000Z'), [0xd7, 0xff, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00]], + [new Date('2018-01-02T03:04:05.678000000Z'), [0xd7, 0xff, 0xa1, 0xa5, 0xd6, 0x00, 0x5a, 0x4a, 0xf6, 0xa5]] + ] + + timestamps.forEach(function (testcase) { + const item = testcase[0] + const expected = testcase[1] + + t.test('encoding ' + item.toString(), function (t) { + const buf = 
encoder.encode(item).slice() + t.equal(buf.length, expected.length, 'must have ' + expected.length + ' bytes') + switch (expected.length) { + case 6: + t.equal(buf[0], 0xd6, 'must have the correct header') + break + case 10: + t.equal(buf[0], 0xd7, 'must have the correct header') + break + } + t.equal(buf.readInt8(1), -1, 'must have the correct type') // Signed + for (let j = 2; j < buf.length; j++) { + t.equal(buf[j], expected[j], 'byte ' + (j - 2) + ' match') + } + t.end() + }) + + t.test('decoding ' + item, function (t) { + const buf = Buffer.from(expected) + const dt = encoder.decode(buf) + t.equal(dt.toString(), item.toString(), 'must decode correctly\nDecoded:\t' + dt * 1 + '\nExp:\t' + item * 1) + t.end() + }) + + t.test('mirror test ' + item, function (t) { + t.equal(encoder.decode(encoder.encode(item)) * 1, item * 1, 'must stay the same') + t.end() + }) + }) + + t.end() +}) + +test('encoding/decoding timestamp 96', function (t) { + const encoder = msgpack() + const timestamps = [ + [new Date('0001-01-02T03:04:05.000000000Z'), [0xc7, 0x0c, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xf1, 0x88, 0x6f, 0x85, 0xa5]], + [new Date('1251-01-19T03:14:08.000000000Z'), [0xc7, 0x0c, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xfa, 0xb7, 0xb2, 0xdf, 0x00]], + [new Date('1526-01-19T03:14:07.999000000Z'), [0xc7, 0x0c, 0xff, 0x3b, 0x8b, 0x87, 0xc0, 0xff, 0xff, 0xff, 0xfc, 0xbc, 0xf4, 0x34, 0x7f]], + [new Date('1920-02-07T06:28:16.000000000Z'), [0xc7, 0x0c, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xa2, 0x23, 0xf0, 0x00]], + [new Date('1969-01-02T03:04:05.678000000Z'), [0xc7, 0x0c, 0xff, 0x28, 0x69, 0x75, 0x80, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x20, 0x49, 0x25]], + [new Date('2514-05-30T02:04:05.678000000Z'), [0xc7, 0x0c, 0xff, 0x28, 0x69, 0x75, 0x80, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x02, 0x95]] + ] + + timestamps.forEach(function (testcase) { + const item = testcase[0] + const expected = testcase[1] + + t.test('encoding ' + item.toString(), 
function (t) { + const buf = encoder.encode(item).slice() + t.equal(buf.length, expected.length, 'must have ' + expected.length + ' bytes') + t.equal(buf[0], 0xc7, 'must have the correct header') + t.equal(buf.readInt8(1), 12, 'must have the correct size') + t.equal(buf.readInt8(2), -1, 'must have the correct type') // Signed + for (let j = 3; j < buf.length; j++) { + t.equal(buf[j], expected[j], 'byte ' + (j - 3) + ' match') + } + t.end() + }) + + t.test('decoding ' + item, function (t) { + const buf = Buffer.from(expected) + const dt = encoder.decode(buf) + t.equal(dt.toString(), item.toString(), 'must decode correctly\nDecoded:\t' + dt * 1 + '\nExp:\t' + item * 1) + t.end() + }) + + t.test('mirror test ' + item, function (t) { + t.equal(encoder.decode(encoder.encode(item)) * 1, item * 1, 'must stay the same') + t.end() + }) + }) + + t.end() +}) diff --git a/node_modules/readable-stream/CONTRIBUTING.md b/node_modules/readable-stream/CONTRIBUTING.md new file mode 100644 index 0000000000..f478d58dca --- /dev/null +++ b/node_modules/readable-stream/CONTRIBUTING.md @@ -0,0 +1,38 @@ +# Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +* (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +* (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +* (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +* (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. + +## Moderation Policy + +The [Node.js Moderation Policy] applies to this WG. + +## Code of Conduct + +The [Node.js Code of Conduct][] applies to this WG. + +[Node.js Code of Conduct]: +https://github.com/nodejs/node/blob/master/CODE_OF_CONDUCT.md +[Node.js Moderation Policy]: +https://github.com/nodejs/TSC/blob/master/Moderation-Policy.md diff --git a/node_modules/readable-stream/GOVERNANCE.md b/node_modules/readable-stream/GOVERNANCE.md new file mode 100644 index 0000000000..16ffb93f24 --- /dev/null +++ b/node_modules/readable-stream/GOVERNANCE.md @@ -0,0 +1,136 @@ +### Streams Working Group + +The Node.js Streams is jointly governed by a Working Group +(WG) +that is responsible for high-level guidance of the project. + +The WG has final authority over this project including: + +* Technical direction +* Project governance and process (including this policy) +* Contribution policy +* GitHub repository hosting +* Conduct guidelines +* Maintaining the list of additional Collaborators + +For the current list of WG members, see the project +[README.md](./README.md#current-project-team-members). + +### Collaborators + +The readable-stream GitHub repository is +maintained by the WG and additional Collaborators who are added by the +WG on an ongoing basis. + +Individuals making significant and valuable contributions are made +Collaborators and given commit-access to the project. These +individuals are identified by the WG and their addition as +Collaborators is discussed during the WG meeting. 
+ +_Note:_ If you make a significant contribution and are not considered +for commit-access log an issue or contact a WG member directly and it +will be brought up in the next WG meeting. + +Modifications of the contents of the readable-stream repository are +made on +a collaborative basis. Anybody with a GitHub account may propose a +modification via pull request and it will be considered by the project +Collaborators. All pull requests must be reviewed and accepted by a +Collaborator with sufficient expertise who is able to take full +responsibility for the change. In the case of pull requests proposed +by an existing Collaborator, an additional Collaborator is required +for sign-off. Consensus should be sought if additional Collaborators +participate and there is disagreement around a particular +modification. See _Consensus Seeking Process_ below for further detail +on the consensus model used for governance. + +Collaborators may opt to elevate significant or controversial +modifications, or modifications that have not found consensus to the +WG for discussion by assigning the ***WG-agenda*** tag to a pull +request or issue. The WG should serve as the final arbiter where +required. + +For the current list of Collaborators, see the project +[README.md](./README.md#members). + +### WG Membership + +WG seats are not time-limited. There is no fixed size of the WG. +However, the expected target is between 6 and 12, to ensure adequate +coverage of important areas of expertise, balanced with the ability to +make decisions efficiently. + +There is no specific set of requirements or qualifications for WG +membership beyond these rules. + +The WG may add additional members to the WG by unanimous consensus. + +A WG member may be removed from the WG by voluntary resignation, or by +unanimous consensus of all other WG members. + +Changes to WG membership should be posted in the agenda, and may be +suggested as any other agenda item (see "WG Meetings" below). 
+ +If an addition or removal is proposed during a meeting, and the full +WG is not in attendance to participate, then the addition or removal +is added to the agenda for the subsequent meeting. This is to ensure +that all members are given the opportunity to participate in all +membership decisions. If a WG member is unable to attend a meeting +where a planned membership decision is being made, then their consent +is assumed. + +No more than 1/3 of the WG members may be affiliated with the same +employer. If removal or resignation of a WG member, or a change of +employment by a WG member, creates a situation where more than 1/3 of +the WG membership shares an employer, then the situation must be +immediately remedied by the resignation or removal of one or more WG +members affiliated with the over-represented employer(s). + +### WG Meetings + +The WG meets occasionally on a Google Hangout On Air. A designated moderator +approved by the WG runs the meeting. Each meeting should be +published to YouTube. + +Items are added to the WG agenda that are considered contentious or +are modifications of governance, contribution policy, WG membership, +or release process. + +The intention of the agenda is not to approve or review all patches; +that should happen continuously on GitHub and be handled by the larger +group of Collaborators. + +Any community member or contributor can ask that something be added to +the next meeting's agenda by logging a GitHub Issue. Any Collaborator, +WG member or the moderator can add the item to the agenda by adding +the ***WG-agenda*** tag to the issue. + +Prior to each WG meeting the moderator will share the Agenda with +members of the WG. WG members can add any items they like to the +agenda at the beginning of each meeting. The moderator and the WG +cannot veto or remove items. + +The WG may invite persons or representatives from certain projects to +participate in a non-voting capacity. 
+ +The moderator is responsible for summarizing the discussion of each +agenda item and sends it as a pull request after the meeting. + +### Consensus Seeking Process + +The WG follows a +[Consensus +Seeking](http://en.wikipedia.org/wiki/Consensus-seeking_decision-making) +decision-making model. + +When an agenda item has appeared to reach a consensus the moderator +will ask "Does anyone object?" as a final call for dissent from the +consensus. + +If an agenda item cannot reach a consensus a WG member can call for +either a closing vote or a vote to table the issue to the next +meeting. The call for a vote must be seconded by a majority of the WG +or else the discussion will continue. Simple majority wins. + +Note that changes to WG membership require a majority consensus. See +"WG Membership" above. diff --git a/node_modules/readable-stream/LICENSE b/node_modules/readable-stream/LICENSE new file mode 100644 index 0000000000..2873b3b2e5 --- /dev/null +++ b/node_modules/readable-stream/LICENSE @@ -0,0 +1,47 @@ +Node.js is licensed for use as follows: + +""" +Copyright Node.js contributors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. +""" + +This license applies to parts of Node.js originating from the +https://github.com/joyent/node repository: + +""" +Copyright Joyent, Inc. and other Node contributors. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. 
+""" diff --git a/node_modules/readable-stream/README.md b/node_modules/readable-stream/README.md new file mode 100644 index 0000000000..19117c1a05 --- /dev/null +++ b/node_modules/readable-stream/README.md @@ -0,0 +1,106 @@ +# readable-stream + +***Node.js core streams for userland*** [![Build Status](https://travis-ci.com/nodejs/readable-stream.svg?branch=master)](https://travis-ci.com/nodejs/readable-stream) + + +[![NPM](https://nodei.co/npm/readable-stream.png?downloads=true&downloadRank=true)](https://nodei.co/npm/readable-stream/) +[![NPM](https://nodei.co/npm-dl/readable-stream.png?&months=6&height=3)](https://nodei.co/npm/readable-stream/) + + +[![Sauce Test Status](https://saucelabs.com/browser-matrix/readabe-stream.svg)](https://saucelabs.com/u/readabe-stream) + +```bash +npm install --save readable-stream +``` + +This package is a mirror of the streams implementations in Node.js. + +Full documentation may be found on the [Node.js website](https://nodejs.org/dist/v10.18.1/docs/api/stream.html). + +If you want to guarantee a stable streams base, regardless of what version of +Node you, or the users of your libraries are using, use **readable-stream** *only* and avoid the *"stream"* module in Node-core, for background see [this blogpost](http://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html). + +As of version 2.0.0 **readable-stream** uses semantic versioning. + +## Version 3.x.x + +v3.x.x of `readable-stream` is a cut from Node 10. This version supports Node 6, 8, and 10, as well as evergreen browsers, IE 11 and latest Safari. The breaking changes introduced by v3 are composed by the combined breaking changes in [Node v9](https://nodejs.org/en/blog/release/v9.0.0/) and [Node v10](https://nodejs.org/en/blog/release/v10.0.0/), as follows: + +1. 
Error codes: https://github.com/nodejs/node/pull/13310, + https://github.com/nodejs/node/pull/13291, + https://github.com/nodejs/node/pull/16589, + https://github.com/nodejs/node/pull/15042, + https://github.com/nodejs/node/pull/15665, + https://github.com/nodejs/readable-stream/pull/344 +2. 'readable' have precedence over flowing + https://github.com/nodejs/node/pull/18994 +3. make virtual methods errors consistent + https://github.com/nodejs/node/pull/18813 +4. updated streams error handling + https://github.com/nodejs/node/pull/18438 +5. writable.end should return this. + https://github.com/nodejs/node/pull/18780 +6. readable continues to read when push('') + https://github.com/nodejs/node/pull/18211 +7. add custom inspect to BufferList + https://github.com/nodejs/node/pull/17907 +8. always defer 'readable' with nextTick + https://github.com/nodejs/node/pull/17979 + +## Version 2.x.x +v2.x.x of `readable-stream` is a cut of the stream module from Node 8 (there have been no semver-major changes from Node 4 to 8). This version supports all Node.js versions from 0.8, as well as evergreen browsers and IE 10 & 11. + +### Big Thanks + +Cross-browser Testing Platform and Open Source <3 Provided by [Sauce Labs][sauce] + +# Usage + +You can swap your `require('stream')` with `require('readable-stream')` +without any changes, if you are just using one of the main classes and +functions. + +```js +const { + Readable, + Writable, + Transform, + Duplex, + pipeline, + finished +} = require('readable-stream') +```` + +Note that `require('stream')` will return `Stream`, while +`require('readable-stream')` will return `Readable`. We discourage using +whatever is exported directly, but rather use one of the properties as +shown in the example above. + +# Streams Working Group + +`readable-stream` is maintained by the Streams Working Group, which +oversees the development and maintenance of the Streams API within +Node.js. 
The responsibilities of the Streams Working Group include: + +* Addressing stream issues on the Node.js issue tracker. +* Authoring and editing stream documentation within the Node.js project. +* Reviewing changes to stream subclasses within the Node.js project. +* Redirecting changes to streams from the Node.js project to this + project. +* Assisting in the implementation of stream providers within Node.js. +* Recommending versions of `readable-stream` to be included in Node.js. +* Messaging about the future of streams to give the community advance + notice of changes. + + +## Team Members + +* **Calvin Metcalf** ([@calvinmetcalf](https://github.com/calvinmetcalf)) &lt;calvin.metcalf@gmail.com&gt; + - Release GPG key: F3EF5F62A87FC27A22E643F714CE4FF5015AA242 +* **Mathias Buus** ([@mafintosh](https://github.com/mafintosh)) &lt;mathiasbuus@gmail.com&gt; +* **Matteo Collina** ([@mcollina](https://github.com/mcollina)) &lt;matteo.collina@gmail.com&gt; + - Release GPG key: 3ABC01543F22DD2239285CDD818674489FBC127E +* **Irina Shestak** ([@lrlna](https://github.com/lrlna)) &lt;shestak.irina@gmail.com&gt; +* **Yoshua Wuyts** ([@yoshuawuyts](https://github.com/yoshuawuyts)) &lt;yoshuawuyts@gmail.com&gt; + +[sauce]: https://saucelabs.com diff --git a/node_modules/readable-stream/errors-browser.js b/node_modules/readable-stream/errors-browser.js new file mode 100644 index 0000000000..fb8e73e189 --- /dev/null +++ b/node_modules/readable-stream/errors-browser.js @@ -0,0 +1,127 @@ +'use strict'; + +function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } + +var codes = {}; + +function createErrorType(code, message, Base) { + if (!Base) { + Base = Error; + } + + function getMessage(arg1, arg2, arg3) { + if (typeof message === 'string') { + return message; + } else { + return message(arg1, arg2, arg3); + } + } + + var NodeError = + /*#__PURE__*/ + function (_Base) { 
_inheritsLoose(NodeError, _Base); + + function NodeError(arg1, arg2, arg3) { + return _Base.call(this, getMessage(arg1, arg2, arg3)) || this; + } + + return NodeError; + }(Base); + + NodeError.prototype.name = Base.name; + NodeError.prototype.code = code; + codes[code] = NodeError; +} // https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js + + +function oneOf(expected, thing) { + if (Array.isArray(expected)) { + var len = expected.length; + expected = expected.map(function (i) { + return String(i); + }); + + if (len > 2) { + return "one of ".concat(thing, " ").concat(expected.slice(0, len - 1).join(', '), ", or ") + expected[len - 1]; + } else if (len === 2) { + return "one of ".concat(thing, " ").concat(expected[0], " or ").concat(expected[1]); + } else { + return "of ".concat(thing, " ").concat(expected[0]); + } + } else { + return "of ".concat(thing, " ").concat(String(expected)); + } +} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith + + +function startsWith(str, search, pos) { + return str.substr(!pos || pos < 0 ? 
0 : +pos, search.length) === search; +} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith + + +function endsWith(str, search, this_len) { + if (this_len === undefined || this_len > str.length) { + this_len = str.length; + } + + return str.substring(this_len - search.length, this_len) === search; +} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes + + +function includes(str, search, start) { + if (typeof start !== 'number') { + start = 0; + } + + if (start + search.length > str.length) { + return false; + } else { + return str.indexOf(search, start) !== -1; + } +} + +createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) { + return 'The value "' + value + '" is invalid for option "' + name + '"'; +}, TypeError); +createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) { + // determiner: 'must be' or 'must not be' + var determiner; + + if (typeof expected === 'string' && startsWith(expected, 'not ')) { + determiner = 'must not be'; + expected = expected.replace(/^not /, ''); + } else { + determiner = 'must be'; + } + + var msg; + + if (endsWith(name, ' argument')) { + // For cases like 'first argument' + msg = "The ".concat(name, " ").concat(determiner, " ").concat(oneOf(expected, 'type')); + } else { + var type = includes(name, '.') ? 'property' : 'argument'; + msg = "The \"".concat(name, "\" ").concat(type, " ").concat(determiner, " ").concat(oneOf(expected, 'type')); + } + + msg += ". 
Received type ".concat(typeof actual); + return msg; +}, TypeError); +createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF'); +createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) { + return 'The ' + name + ' method is not implemented'; +}); +createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close'); +createErrorType('ERR_STREAM_DESTROYED', function (name) { + return 'Cannot call ' + name + ' after a stream was destroyed'; +}); +createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times'); +createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable'); +createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end'); +createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError); +createErrorType('ERR_UNKNOWN_ENCODING', function (arg) { + return 'Unknown encoding: ' + arg; +}, TypeError); +createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event'); +module.exports.codes = codes; diff --git a/node_modules/readable-stream/errors.js b/node_modules/readable-stream/errors.js new file mode 100644 index 0000000000..8471526d6e --- /dev/null +++ b/node_modules/readable-stream/errors.js @@ -0,0 +1,116 @@ +'use strict'; + +const codes = {}; + +function createErrorType(code, message, Base) { + if (!Base) { + Base = Error + } + + function getMessage (arg1, arg2, arg3) { + if (typeof message === 'string') { + return message + } else { + return message(arg1, arg2, arg3) + } + } + + class NodeError extends Base { + constructor (arg1, arg2, arg3) { + super(getMessage(arg1, arg2, arg3)); + } + } + + NodeError.prototype.name = Base.name; + NodeError.prototype.code = code; + + codes[code] = NodeError; +} + +// https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js +function oneOf(expected, thing) { + if (Array.isArray(expected)) { + const len = expected.length; + expected = expected.map((i) => String(i)); + if (len > 2) { + return `one of ${thing} 
${expected.slice(0, len - 1).join(', ')}, or ` + + expected[len - 1]; + } else if (len === 2) { + return `one of ${thing} ${expected[0]} or ${expected[1]}`; + } else { + return `of ${thing} ${expected[0]}`; + } + } else { + return `of ${thing} ${String(expected)}`; + } +} + +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith +function startsWith(str, search, pos) { + return str.substr(!pos || pos < 0 ? 0 : +pos, search.length) === search; +} + +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith +function endsWith(str, search, this_len) { + if (this_len === undefined || this_len > str.length) { + this_len = str.length; + } + return str.substring(this_len - search.length, this_len) === search; +} + +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes +function includes(str, search, start) { + if (typeof start !== 'number') { + start = 0; + } + + if (start + search.length > str.length) { + return false; + } else { + return str.indexOf(search, start) !== -1; + } +} + +createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) { + return 'The value "' + value + '" is invalid for option "' + name + '"' +}, TypeError); +createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) { + // determiner: 'must be' or 'must not be' + let determiner; + if (typeof expected === 'string' && startsWith(expected, 'not ')) { + determiner = 'must not be'; + expected = expected.replace(/^not /, ''); + } else { + determiner = 'must be'; + } + + let msg; + if (endsWith(name, ' argument')) { + // For cases like 'first argument' + msg = `The ${name} ${determiner} ${oneOf(expected, 'type')}`; + } else { + const type = includes(name, '.') ? 'property' : 'argument'; + msg = `The "${name}" ${type} ${determiner} ${oneOf(expected, 'type')}`; + } + + msg += `. 
Received type ${typeof actual}`; + return msg; +}, TypeError); +createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF'); +createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) { + return 'The ' + name + ' method is not implemented' +}); +createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close'); +createErrorType('ERR_STREAM_DESTROYED', function (name) { + return 'Cannot call ' + name + ' after a stream was destroyed'; +}); +createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times'); +createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable'); +createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end'); +createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError); +createErrorType('ERR_UNKNOWN_ENCODING', function (arg) { + return 'Unknown encoding: ' + arg +}, TypeError); +createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event'); + +module.exports.codes = codes; diff --git a/node_modules/readable-stream/experimentalWarning.js b/node_modules/readable-stream/experimentalWarning.js new file mode 100644 index 0000000000..78e841495b --- /dev/null +++ b/node_modules/readable-stream/experimentalWarning.js @@ -0,0 +1,17 @@ +'use strict' + +var experimentalWarnings = new Set(); + +function emitExperimentalWarning(feature) { + if (experimentalWarnings.has(feature)) return; + var msg = feature + ' is an experimental feature. This feature could ' + + 'change at any time'; + experimentalWarnings.add(feature); + process.emitWarning(msg, 'ExperimentalWarning'); +} + +function noop() {} + +module.exports.emitExperimentalWarning = process.emitWarning + ? 
emitExperimentalWarning + : noop; diff --git a/node_modules/readable-stream/lib/_stream_duplex.js b/node_modules/readable-stream/lib/_stream_duplex.js new file mode 100644 index 0000000000..19abfa604d --- /dev/null +++ b/node_modules/readable-stream/lib/_stream_duplex.js @@ -0,0 +1,126 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// a duplex stream is just a stream that is both readable and writable. +// Since JS doesn't have multiple prototypal inheritance, this class +// prototypally inherits from Readable, and then parasitically from +// Writable. 
+ +'use strict'; + +/**/ +var objectKeys = Object.keys || function (obj) { + var keys = []; + for (var key in obj) keys.push(key); + return keys; +}; +/**/ + +module.exports = Duplex; +var Readable = require('./_stream_readable'); +var Writable = require('./_stream_writable'); +require('inherits')(Duplex, Readable); +{ + // Allow the keys array to be GC'ed. + var keys = objectKeys(Writable.prototype); + for (var v = 0; v < keys.length; v++) { + var method = keys[v]; + if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method]; + } +} +function Duplex(options) { + if (!(this instanceof Duplex)) return new Duplex(options); + Readable.call(this, options); + Writable.call(this, options); + this.allowHalfOpen = true; + if (options) { + if (options.readable === false) this.readable = false; + if (options.writable === false) this.writable = false; + if (options.allowHalfOpen === false) { + this.allowHalfOpen = false; + this.once('end', onend); + } + } +} +Object.defineProperty(Duplex.prototype, 'writableHighWaterMark', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState.highWaterMark; + } +}); +Object.defineProperty(Duplex.prototype, 'writableBuffer', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState && this._writableState.getBuffer(); + } +}); +Object.defineProperty(Duplex.prototype, 'writableLength', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState.length; + } +}); + +// the no-half-open enforcer +function onend() { + // If the writable side ended, then we're ok. 
+ if (this._writableState.ended) return; + + // no more data can be written. + // But allow more writes to happen in this tick. + process.nextTick(onEndNT, this); +} +function onEndNT(self) { + self.end(); +} +Object.defineProperty(Duplex.prototype, 'destroyed', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + if (this._readableState === undefined || this._writableState === undefined) { + return false; + } + return this._readableState.destroyed && this._writableState.destroyed; + }, + set: function set(value) { + // we ignore the value if the stream + // has not been initialized yet + if (this._readableState === undefined || this._writableState === undefined) { + return; + } + + // backward compatibility, the user is explicitly + // managing destroyed + this._readableState.destroyed = value; + this._writableState.destroyed = value; + } +}); \ No newline at end of file diff --git a/node_modules/readable-stream/lib/_stream_passthrough.js b/node_modules/readable-stream/lib/_stream_passthrough.js new file mode 100644 index 0000000000..24a6bdde29 --- /dev/null +++ b/node_modules/readable-stream/lib/_stream_passthrough.js @@ -0,0 +1,37 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// a passthrough stream. +// basically just the most minimal sort of Transform stream. +// Every written chunk gets output as-is. + +'use strict'; + +module.exports = PassThrough; +var Transform = require('./_stream_transform'); +require('inherits')(PassThrough, Transform); +function PassThrough(options) { + if (!(this instanceof PassThrough)) return new PassThrough(options); + Transform.call(this, options); +} +PassThrough.prototype._transform = function (chunk, encoding, cb) { + cb(null, chunk); +}; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/_stream_readable.js b/node_modules/readable-stream/lib/_stream_readable.js new file mode 100644 index 0000000000..df1f608d53 --- /dev/null +++ b/node_modules/readable-stream/lib/_stream_readable.js @@ -0,0 +1,1027 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +'use strict'; + +module.exports = Readable; + +/**/ +var Duplex; +/**/ + +Readable.ReadableState = ReadableState; + +/**/ +var EE = require('events').EventEmitter; +var EElistenerCount = function EElistenerCount(emitter, type) { + return emitter.listeners(type).length; +}; +/**/ + +/**/ +var Stream = require('./internal/streams/stream'); +/**/ + +var Buffer = require('buffer').Buffer; +var OurUint8Array = (typeof global !== 'undefined' ? global : typeof window !== 'undefined' ? window : typeof self !== 'undefined' ? 
self : {}).Uint8Array || function () {}; +function _uint8ArrayToBuffer(chunk) { + return Buffer.from(chunk); +} +function _isUint8Array(obj) { + return Buffer.isBuffer(obj) || obj instanceof OurUint8Array; +} + +/**/ +var debugUtil = require('util'); +var debug; +if (debugUtil && debugUtil.debuglog) { + debug = debugUtil.debuglog('stream'); +} else { + debug = function debug() {}; +} +/**/ + +var BufferList = require('./internal/streams/buffer_list'); +var destroyImpl = require('./internal/streams/destroy'); +var _require = require('./internal/streams/state'), + getHighWaterMark = _require.getHighWaterMark; +var _require$codes = require('../errors').codes, + ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE, + ERR_STREAM_PUSH_AFTER_EOF = _require$codes.ERR_STREAM_PUSH_AFTER_EOF, + ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED, + ERR_STREAM_UNSHIFT_AFTER_END_EVENT = _require$codes.ERR_STREAM_UNSHIFT_AFTER_END_EVENT; + +// Lazy loaded to improve the startup performance. +var StringDecoder; +var createReadableStreamAsyncIterator; +var from; +require('inherits')(Readable, Stream); +var errorOrDestroy = destroyImpl.errorOrDestroy; +var kProxyEvents = ['error', 'close', 'destroy', 'pause', 'resume']; +function prependListener(emitter, event, fn) { + // Sadly this is not cacheable as some libraries bundle their own + // event emitter implementation with them. + if (typeof emitter.prependListener === 'function') return emitter.prependListener(event, fn); + + // This is a hack to make sure that our error handler is attached before any + // userland ones. NEVER DO THIS. This is here only because this code needs + // to continue to work with older versions of Node.js that do not include + // the prependListener() method. The goal is to eventually remove this hack. 
+ if (!emitter._events || !emitter._events[event]) emitter.on(event, fn);else if (Array.isArray(emitter._events[event])) emitter._events[event].unshift(fn);else emitter._events[event] = [fn, emitter._events[event]]; +} +function ReadableState(options, stream, isDuplex) { + Duplex = Duplex || require('./_stream_duplex'); + options = options || {}; + + // Duplex streams are both readable and writable, but share + // the same options object. + // However, some cases require setting options to different + // values for the readable and the writable sides of the duplex stream. + // These options can be provided separately as readableXXX and writableXXX. + if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; + + // object stream flag. Used to make read(n) ignore n and to + // make all the buffer merging and length checks go away + this.objectMode = !!options.objectMode; + if (isDuplex) this.objectMode = this.objectMode || !!options.readableObjectMode; + + // the point at which it stops calling _read() to fill the buffer + // Note: 0 is a valid value, means "don't call _read preemptively ever" + this.highWaterMark = getHighWaterMark(this, options, 'readableHighWaterMark', isDuplex); + + // A linked list is used to store data chunks instead of an array because the + // linked list can remove elements from the beginning faster than + // array.shift() + this.buffer = new BufferList(); + this.length = 0; + this.pipes = null; + this.pipesCount = 0; + this.flowing = null; + this.ended = false; + this.endEmitted = false; + this.reading = false; + + // a flag to be able to tell if the event 'readable'/'data' is emitted + // immediately, or on a later tick. We set this to true at first, because + // any actions that shouldn't happen until "later" should generally also + // not happen before the first read call. + this.sync = true; + + // whenever we return null, then we set a flag to say + // that we're awaiting a 'readable' event emission. 
+ this.needReadable = false; + this.emittedReadable = false; + this.readableListening = false; + this.resumeScheduled = false; + this.paused = true; + + // Should close be emitted on destroy. Defaults to true. + this.emitClose = options.emitClose !== false; + + // Should .destroy() be called after 'end' (and potentially 'finish') + this.autoDestroy = !!options.autoDestroy; + + // has it been destroyed + this.destroyed = false; + + // Crypto is kind of old and crusty. Historically, its default string + // encoding is 'binary' so we have to make this configurable. + // Everything else in the universe uses 'utf8', though. + this.defaultEncoding = options.defaultEncoding || 'utf8'; + + // the number of writers that are awaiting a drain event in .pipe()s + this.awaitDrain = 0; + + // if true, a maybeReadMore has been scheduled + this.readingMore = false; + this.decoder = null; + this.encoding = null; + if (options.encoding) { + if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder; + this.decoder = new StringDecoder(options.encoding); + this.encoding = options.encoding; + } +} +function Readable(options) { + Duplex = Duplex || require('./_stream_duplex'); + if (!(this instanceof Readable)) return new Readable(options); + + // Checking for a Stream.Duplex instance is faster here instead of inside + // the ReadableState constructor, at least with V8 6.5 + var isDuplex = this instanceof Duplex; + this._readableState = new ReadableState(options, this, isDuplex); + + // legacy + this.readable = true; + if (options) { + if (typeof options.read === 'function') this._read = options.read; + if (typeof options.destroy === 'function') this._destroy = options.destroy; + } + Stream.call(this); +} +Object.defineProperty(Readable.prototype, 'destroyed', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + if (this._readableState === 
undefined) { + return false; + } + return this._readableState.destroyed; + }, + set: function set(value) { + // we ignore the value if the stream + // has not been initialized yet + if (!this._readableState) { + return; + } + + // backward compatibility, the user is explicitly + // managing destroyed + this._readableState.destroyed = value; + } +}); +Readable.prototype.destroy = destroyImpl.destroy; +Readable.prototype._undestroy = destroyImpl.undestroy; +Readable.prototype._destroy = function (err, cb) { + cb(err); +}; + +// Manually shove something into the read() buffer. +// This returns true if the highWaterMark has not been hit yet, +// similar to how Writable.write() returns true if you should +// write() some more. +Readable.prototype.push = function (chunk, encoding) { + var state = this._readableState; + var skipChunkCheck; + if (!state.objectMode) { + if (typeof chunk === 'string') { + encoding = encoding || state.defaultEncoding; + if (encoding !== state.encoding) { + chunk = Buffer.from(chunk, encoding); + encoding = ''; + } + skipChunkCheck = true; + } + } else { + skipChunkCheck = true; + } + return readableAddChunk(this, chunk, encoding, false, skipChunkCheck); +}; + +// Unshift should *always* be something directly out of read() +Readable.prototype.unshift = function (chunk) { + return readableAddChunk(this, chunk, null, true, false); +}; +function readableAddChunk(stream, chunk, encoding, addToFront, skipChunkCheck) { + debug('readableAddChunk', chunk); + var state = stream._readableState; + if (chunk === null) { + state.reading = false; + onEofChunk(stream, state); + } else { + var er; + if (!skipChunkCheck) er = chunkInvalid(state, chunk); + if (er) { + errorOrDestroy(stream, er); + } else if (state.objectMode || chunk && chunk.length > 0) { + if (typeof chunk !== 'string' && !state.objectMode && Object.getPrototypeOf(chunk) !== Buffer.prototype) { + chunk = _uint8ArrayToBuffer(chunk); + } + if (addToFront) { + if (state.endEmitted) 
errorOrDestroy(stream, new ERR_STREAM_UNSHIFT_AFTER_END_EVENT());else addChunk(stream, state, chunk, true); + } else if (state.ended) { + errorOrDestroy(stream, new ERR_STREAM_PUSH_AFTER_EOF()); + } else if (state.destroyed) { + return false; + } else { + state.reading = false; + if (state.decoder && !encoding) { + chunk = state.decoder.write(chunk); + if (state.objectMode || chunk.length !== 0) addChunk(stream, state, chunk, false);else maybeReadMore(stream, state); + } else { + addChunk(stream, state, chunk, false); + } + } + } else if (!addToFront) { + state.reading = false; + maybeReadMore(stream, state); + } + } + + // We can push more data if we are below the highWaterMark. + // Also, if we have no data yet, we can stand some more bytes. + // This is to work around cases where hwm=0, such as the repl. + return !state.ended && (state.length < state.highWaterMark || state.length === 0); +} +function addChunk(stream, state, chunk, addToFront) { + if (state.flowing && state.length === 0 && !state.sync) { + state.awaitDrain = 0; + stream.emit('data', chunk); + } else { + // update the buffer info. + state.length += state.objectMode ? 1 : chunk.length; + if (addToFront) state.buffer.unshift(chunk);else state.buffer.push(chunk); + if (state.needReadable) emitReadable(stream); + } + maybeReadMore(stream, state); +} +function chunkInvalid(state, chunk) { + var er; + if (!_isUint8Array(chunk) && typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) { + er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer', 'Uint8Array'], chunk); + } + return er; +} +Readable.prototype.isPaused = function () { + return this._readableState.flowing === false; +}; + +// backwards compatibility. 
+Readable.prototype.setEncoding = function (enc) { + if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder; + var decoder = new StringDecoder(enc); + this._readableState.decoder = decoder; + // If setEncoding(null), decoder.encoding equals utf8 + this._readableState.encoding = this._readableState.decoder.encoding; + + // Iterate over current buffer to convert already stored Buffers: + var p = this._readableState.buffer.head; + var content = ''; + while (p !== null) { + content += decoder.write(p.data); + p = p.next; + } + this._readableState.buffer.clear(); + if (content !== '') this._readableState.buffer.push(content); + this._readableState.length = content.length; + return this; +}; + +// Don't raise the hwm > 1GB +var MAX_HWM = 0x40000000; +function computeNewHighWaterMark(n) { + if (n >= MAX_HWM) { + // TODO(ronag): Throw ERR_VALUE_OUT_OF_RANGE. + n = MAX_HWM; + } else { + // Get the next highest power of 2 to prevent increasing hwm excessively in + // tiny amounts + n--; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + n++; + } + return n; +} + +// This function is designed to be inlinable, so please take care when making +// changes to the function body. +function howMuchToRead(n, state) { + if (n <= 0 || state.length === 0 && state.ended) return 0; + if (state.objectMode) return 1; + if (n !== n) { + // Only flow one buffer at a time + if (state.flowing && state.length) return state.buffer.head.data.length;else return state.length; + } + // If we're asking for more than the current hwm, then raise the hwm. + if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n); + if (n <= state.length) return n; + // Don't have enough + if (!state.ended) { + state.needReadable = true; + return 0; + } + return state.length; +} + +// you can override either this method, or the async _read(n) below. 
+Readable.prototype.read = function (n) { + debug('read', n); + n = parseInt(n, 10); + var state = this._readableState; + var nOrig = n; + if (n !== 0) state.emittedReadable = false; + + // if we're doing read(0) to trigger a readable event, but we + // already have a bunch of data in the buffer, then just trigger + // the 'readable' event and move on. + if (n === 0 && state.needReadable && ((state.highWaterMark !== 0 ? state.length >= state.highWaterMark : state.length > 0) || state.ended)) { + debug('read: emitReadable', state.length, state.ended); + if (state.length === 0 && state.ended) endReadable(this);else emitReadable(this); + return null; + } + n = howMuchToRead(n, state); + + // if we've ended, and we're now clear, then finish it up. + if (n === 0 && state.ended) { + if (state.length === 0) endReadable(this); + return null; + } + + // All the actual chunk generation logic needs to be + // *below* the call to _read. The reason is that in certain + // synthetic stream cases, such as passthrough streams, _read + // may be a completely synchronous operation which may change + // the state of the read buffer, providing enough data when + // before there was *not* enough. + // + // So, the steps are: + // 1. Figure out what the state of things will be after we do + // a read from the buffer. + // + // 2. If that resulting state will trigger a _read, then call _read. + // Note that this may be asynchronous, or synchronous. Yes, it is + // deeply ugly to write APIs this way, but that still doesn't mean + // that the Readable class should behave improperly, as streams are + // designed to be sync/async agnostic. + // Take note if the _read call is sync or async (ie, if the read call + // has returned yet), so that we know whether or not it's safe to emit + // 'readable' etc. + // + // 3. Actually pull the requested chunks out of the buffer and return. + + // if we need a readable event, then we need to do some reading. 
+ var doRead = state.needReadable; + debug('need readable', doRead); + + // if we currently have less than the highWaterMark, then also read some + if (state.length === 0 || state.length - n < state.highWaterMark) { + doRead = true; + debug('length less than watermark', doRead); + } + + // however, if we've ended, then there's no point, and if we're already + // reading, then it's unnecessary. + if (state.ended || state.reading) { + doRead = false; + debug('reading or ended', doRead); + } else if (doRead) { + debug('do read'); + state.reading = true; + state.sync = true; + // if the length is currently zero, then we *need* a readable event. + if (state.length === 0) state.needReadable = true; + // call internal read method + this._read(state.highWaterMark); + state.sync = false; + // If _read pushed data synchronously, then `reading` will be false, + // and we need to re-evaluate how much data we can return to the user. + if (!state.reading) n = howMuchToRead(nOrig, state); + } + var ret; + if (n > 0) ret = fromList(n, state);else ret = null; + if (ret === null) { + state.needReadable = state.length <= state.highWaterMark; + n = 0; + } else { + state.length -= n; + state.awaitDrain = 0; + } + if (state.length === 0) { + // If we have nothing in the buffer, then we want to know + // as soon as we *do* get something into the buffer. + if (!state.ended) state.needReadable = true; + + // If we tried to read() past the EOF, then emit end on the next tick. + if (nOrig !== n && state.ended) endReadable(this); + } + if (ret !== null) this.emit('data', ret); + return ret; +}; +function onEofChunk(stream, state) { + debug('onEofChunk'); + if (state.ended) return; + if (state.decoder) { + var chunk = state.decoder.end(); + if (chunk && chunk.length) { + state.buffer.push(chunk); + state.length += state.objectMode ? 1 : chunk.length; + } + } + state.ended = true; + if (state.sync) { + // if we are sync, wait until next tick to emit the data. 
+ // Otherwise we risk emitting data in the flow() + // the readable code triggers during a read() call + emitReadable(stream); + } else { + // emit 'readable' now to make sure it gets picked up. + state.needReadable = false; + if (!state.emittedReadable) { + state.emittedReadable = true; + emitReadable_(stream); + } + } +} + +// Don't emit readable right away in sync mode, because this can trigger +// another read() call => stack overflow. This way, it might trigger +// a nextTick recursion warning, but that's not so bad. +function emitReadable(stream) { + var state = stream._readableState; + debug('emitReadable', state.needReadable, state.emittedReadable); + state.needReadable = false; + if (!state.emittedReadable) { + debug('emitReadable', state.flowing); + state.emittedReadable = true; + process.nextTick(emitReadable_, stream); + } +} +function emitReadable_(stream) { + var state = stream._readableState; + debug('emitReadable_', state.destroyed, state.length, state.ended); + if (!state.destroyed && (state.length || state.ended)) { + stream.emit('readable'); + state.emittedReadable = false; + } + + // The stream needs another readable event if + // 1. It is not flowing, as the flow mechanism will take + // care of it. + // 2. It is not ended. + // 3. It is below the highWaterMark, so we can schedule + // another readable later. + state.needReadable = !state.flowing && !state.ended && state.length <= state.highWaterMark; + flow(stream); +} + +// at this point, the user has presumably seen the 'readable' event, +// and called read() to consume some data. that may have triggered +// in turn another _read(n) call, in which case reading = true if +// it's in progress. +// However, if we're not ended, or reading, and the length < hwm, +// then go ahead and try to read some more preemptively. 
+function maybeReadMore(stream, state) {
+  if (!state.readingMore) {
+    state.readingMore = true;
+    process.nextTick(maybeReadMore_, stream, state);
+  }
+}
+function maybeReadMore_(stream, state) {
+  // Attempt to read more data if we should.
+  //
+  // The conditions for reading more data are (one of):
+  // - Not enough data buffered (state.length < state.highWaterMark). The loop
+  //   is responsible for filling the buffer with enough data if such data
+  //   is available. If highWaterMark is 0 and we are not in the flowing mode
+  //   we should _not_ attempt to buffer any extra data. We'll get more data
+  //   when the stream consumer calls read() instead.
+  // - No data in the buffer, and the stream is in flowing mode. In this mode
+  //   the loop below is responsible for ensuring read() is called. Failing to
+  //   call read here would abort the flow and there's no other mechanism for
+  //   continuing the flow if the stream consumer has just subscribed to the
+  //   'data' event.
+  //
+  // In addition to the above conditions to keep reading data, the following
+  // conditions prevent the data from being read:
+  // - The stream has ended (state.ended).
+  // - There is already a pending 'read' operation (state.reading). This is a
+  //   case where the stream has called the implementation defined _read()
+  //   method, but they are processing the call asynchronously and have _not_
+  //   called push() with new data. In this case we skip performing more
+  //   read()s. The execution ends in this method again after the _read() ends
+  //   up calling push() with more data.
+  while (!state.reading && !state.ended && (state.length < state.highWaterMark || state.flowing && state.length === 0)) {
+    var len = state.length;
+    debug('maybeReadMore read 0');
+    stream.read(0);
+    if (len === state.length)
+      // didn't get any data, stop spinning.
+      break;
+  }
+  state.readingMore = false;
+}
+
+// abstract method. to be overridden in specific implementation classes.
+// call cb(er, data) where data is <= n in length. +// for virtual (non-string, non-buffer) streams, "length" is somewhat +// arbitrary, and perhaps not very meaningful. +Readable.prototype._read = function (n) { + errorOrDestroy(this, new ERR_METHOD_NOT_IMPLEMENTED('_read()')); +}; +Readable.prototype.pipe = function (dest, pipeOpts) { + var src = this; + var state = this._readableState; + switch (state.pipesCount) { + case 0: + state.pipes = dest; + break; + case 1: + state.pipes = [state.pipes, dest]; + break; + default: + state.pipes.push(dest); + break; + } + state.pipesCount += 1; + debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts); + var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr; + var endFn = doEnd ? onend : unpipe; + if (state.endEmitted) process.nextTick(endFn);else src.once('end', endFn); + dest.on('unpipe', onunpipe); + function onunpipe(readable, unpipeInfo) { + debug('onunpipe'); + if (readable === src) { + if (unpipeInfo && unpipeInfo.hasUnpiped === false) { + unpipeInfo.hasUnpiped = true; + cleanup(); + } + } + } + function onend() { + debug('onend'); + dest.end(); + } + + // when the dest drains, it reduces the awaitDrain counter + // on the source. This would be more elegant with a .once() + // handler in flow(), but adding and removing repeatedly is + // too slow. 
+ var ondrain = pipeOnDrain(src); + dest.on('drain', ondrain); + var cleanedUp = false; + function cleanup() { + debug('cleanup'); + // cleanup event handlers once the pipe is broken + dest.removeListener('close', onclose); + dest.removeListener('finish', onfinish); + dest.removeListener('drain', ondrain); + dest.removeListener('error', onerror); + dest.removeListener('unpipe', onunpipe); + src.removeListener('end', onend); + src.removeListener('end', unpipe); + src.removeListener('data', ondata); + cleanedUp = true; + + // if the reader is waiting for a drain event from this + // specific writer, then it would cause it to never start + // flowing again. + // So, if this is awaiting a drain, then we just call it now. + // If we don't know, then assume that we are waiting for one. + if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain(); + } + src.on('data', ondata); + function ondata(chunk) { + debug('ondata'); + var ret = dest.write(chunk); + debug('dest.write', ret); + if (ret === false) { + // If the user unpiped during `dest.write()`, it is possible + // to get stuck in a permanently paused state if that write + // also returned false. + // => Check whether `dest` is still a piping destination. + if ((state.pipesCount === 1 && state.pipes === dest || state.pipesCount > 1 && indexOf(state.pipes, dest) !== -1) && !cleanedUp) { + debug('false write response, pause', state.awaitDrain); + state.awaitDrain++; + } + src.pause(); + } + } + + // if the dest has an error, then stop piping into it. + // however, don't suppress the throwing behavior for this. + function onerror(er) { + debug('onerror', er); + unpipe(); + dest.removeListener('error', onerror); + if (EElistenerCount(dest, 'error') === 0) errorOrDestroy(dest, er); + } + + // Make sure our error handler is attached before userland ones. + prependListener(dest, 'error', onerror); + + // Both close and finish should trigger unpipe, but only once. 
+ function onclose() { + dest.removeListener('finish', onfinish); + unpipe(); + } + dest.once('close', onclose); + function onfinish() { + debug('onfinish'); + dest.removeListener('close', onclose); + unpipe(); + } + dest.once('finish', onfinish); + function unpipe() { + debug('unpipe'); + src.unpipe(dest); + } + + // tell the dest that it's being piped to + dest.emit('pipe', src); + + // start the flow if it hasn't been started already. + if (!state.flowing) { + debug('pipe resume'); + src.resume(); + } + return dest; +}; +function pipeOnDrain(src) { + return function pipeOnDrainFunctionResult() { + var state = src._readableState; + debug('pipeOnDrain', state.awaitDrain); + if (state.awaitDrain) state.awaitDrain--; + if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) { + state.flowing = true; + flow(src); + } + }; +} +Readable.prototype.unpipe = function (dest) { + var state = this._readableState; + var unpipeInfo = { + hasUnpiped: false + }; + + // if we're not piping anywhere, then do nothing. + if (state.pipesCount === 0) return this; + + // just one destination. most common case. + if (state.pipesCount === 1) { + // passed in one, but it's not the right one. + if (dest && dest !== state.pipes) return this; + if (!dest) dest = state.pipes; + + // got a match. + state.pipes = null; + state.pipesCount = 0; + state.flowing = false; + if (dest) dest.emit('unpipe', this, unpipeInfo); + return this; + } + + // slow case. multiple pipe destinations. + + if (!dest) { + // remove all. + var dests = state.pipes; + var len = state.pipesCount; + state.pipes = null; + state.pipesCount = 0; + state.flowing = false; + for (var i = 0; i < len; i++) dests[i].emit('unpipe', this, { + hasUnpiped: false + }); + return this; + } + + // try to find the right one. 
+ var index = indexOf(state.pipes, dest); + if (index === -1) return this; + state.pipes.splice(index, 1); + state.pipesCount -= 1; + if (state.pipesCount === 1) state.pipes = state.pipes[0]; + dest.emit('unpipe', this, unpipeInfo); + return this; +}; + +// set up data events if they are asked for +// Ensure readable listeners eventually get something +Readable.prototype.on = function (ev, fn) { + var res = Stream.prototype.on.call(this, ev, fn); + var state = this._readableState; + if (ev === 'data') { + // update readableListening so that resume() may be a no-op + // a few lines down. This is needed to support once('readable'). + state.readableListening = this.listenerCount('readable') > 0; + + // Try start flowing on next tick if stream isn't explicitly paused + if (state.flowing !== false) this.resume(); + } else if (ev === 'readable') { + if (!state.endEmitted && !state.readableListening) { + state.readableListening = state.needReadable = true; + state.flowing = false; + state.emittedReadable = false; + debug('on readable', state.length, state.reading); + if (state.length) { + emitReadable(this); + } else if (!state.reading) { + process.nextTick(nReadingNextTick, this); + } + } + } + return res; +}; +Readable.prototype.addListener = Readable.prototype.on; +Readable.prototype.removeListener = function (ev, fn) { + var res = Stream.prototype.removeListener.call(this, ev, fn); + if (ev === 'readable') { + // We need to check if there is someone still listening to + // readable and reset the state. However this needs to happen + // after readable has been emitted but before I/O (nextTick) to + // support once('readable', fn) cycles. This means that calling + // resume within the same tick will have no + // effect. 
+ process.nextTick(updateReadableListening, this); + } + return res; +}; +Readable.prototype.removeAllListeners = function (ev) { + var res = Stream.prototype.removeAllListeners.apply(this, arguments); + if (ev === 'readable' || ev === undefined) { + // We need to check if there is someone still listening to + // readable and reset the state. However this needs to happen + // after readable has been emitted but before I/O (nextTick) to + // support once('readable', fn) cycles. This means that calling + // resume within the same tick will have no + // effect. + process.nextTick(updateReadableListening, this); + } + return res; +}; +function updateReadableListening(self) { + var state = self._readableState; + state.readableListening = self.listenerCount('readable') > 0; + if (state.resumeScheduled && !state.paused) { + // flowing needs to be set to true now, otherwise + // the upcoming resume will not flow. + state.flowing = true; + + // crude way to check if we should resume + } else if (self.listenerCount('data') > 0) { + self.resume(); + } +} +function nReadingNextTick(self) { + debug('readable nexttick read 0'); + self.read(0); +} + +// pause() and resume() are remnants of the legacy readable stream API +// If the user uses them, then switch into old mode. 
+Readable.prototype.resume = function () { + var state = this._readableState; + if (!state.flowing) { + debug('resume'); + // we flow only if there is no one listening + // for readable, but we still have to call + // resume() + state.flowing = !state.readableListening; + resume(this, state); + } + state.paused = false; + return this; +}; +function resume(stream, state) { + if (!state.resumeScheduled) { + state.resumeScheduled = true; + process.nextTick(resume_, stream, state); + } +} +function resume_(stream, state) { + debug('resume', state.reading); + if (!state.reading) { + stream.read(0); + } + state.resumeScheduled = false; + stream.emit('resume'); + flow(stream); + if (state.flowing && !state.reading) stream.read(0); +} +Readable.prototype.pause = function () { + debug('call pause flowing=%j', this._readableState.flowing); + if (this._readableState.flowing !== false) { + debug('pause'); + this._readableState.flowing = false; + this.emit('pause'); + } + this._readableState.paused = true; + return this; +}; +function flow(stream) { + var state = stream._readableState; + debug('flow', state.flowing); + while (state.flowing && stream.read() !== null); +} + +// wrap an old-style stream as the async data source. +// This is *not* part of the readable stream interface. +// It is an ugly unfortunate mess of history. 
+Readable.prototype.wrap = function (stream) { + var _this = this; + var state = this._readableState; + var paused = false; + stream.on('end', function () { + debug('wrapped end'); + if (state.decoder && !state.ended) { + var chunk = state.decoder.end(); + if (chunk && chunk.length) _this.push(chunk); + } + _this.push(null); + }); + stream.on('data', function (chunk) { + debug('wrapped data'); + if (state.decoder) chunk = state.decoder.write(chunk); + + // don't skip over falsy values in objectMode + if (state.objectMode && (chunk === null || chunk === undefined)) return;else if (!state.objectMode && (!chunk || !chunk.length)) return; + var ret = _this.push(chunk); + if (!ret) { + paused = true; + stream.pause(); + } + }); + + // proxy all the other methods. + // important when wrapping filters and duplexes. + for (var i in stream) { + if (this[i] === undefined && typeof stream[i] === 'function') { + this[i] = function methodWrap(method) { + return function methodWrapReturnFunction() { + return stream[method].apply(stream, arguments); + }; + }(i); + } + } + + // proxy certain important events. + for (var n = 0; n < kProxyEvents.length; n++) { + stream.on(kProxyEvents[n], this.emit.bind(this, kProxyEvents[n])); + } + + // when we try to consume some more bytes, simply unpause the + // underlying stream. 
+ this._read = function (n) { + debug('wrapped _read', n); + if (paused) { + paused = false; + stream.resume(); + } + }; + return this; +}; +if (typeof Symbol === 'function') { + Readable.prototype[Symbol.asyncIterator] = function () { + if (createReadableStreamAsyncIterator === undefined) { + createReadableStreamAsyncIterator = require('./internal/streams/async_iterator'); + } + return createReadableStreamAsyncIterator(this); + }; +} +Object.defineProperty(Readable.prototype, 'readableHighWaterMark', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._readableState.highWaterMark; + } +}); +Object.defineProperty(Readable.prototype, 'readableBuffer', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._readableState && this._readableState.buffer; + } +}); +Object.defineProperty(Readable.prototype, 'readableFlowing', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._readableState.flowing; + }, + set: function set(state) { + if (this._readableState) { + this._readableState.flowing = state; + } + } +}); + +// exposed for testing purposes only. +Readable._fromList = fromList; +Object.defineProperty(Readable.prototype, 'readableLength', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._readableState.length; + } +}); + +// Pluck off n bytes from an array of buffers. +// Length is the combined lengths of all the buffers in the list. 
+// This function is designed to be inlinable, so please take care when making +// changes to the function body. +function fromList(n, state) { + // nothing buffered + if (state.length === 0) return null; + var ret; + if (state.objectMode) ret = state.buffer.shift();else if (!n || n >= state.length) { + // read it all, truncate the list + if (state.decoder) ret = state.buffer.join('');else if (state.buffer.length === 1) ret = state.buffer.first();else ret = state.buffer.concat(state.length); + state.buffer.clear(); + } else { + // read part of list + ret = state.buffer.consume(n, state.decoder); + } + return ret; +} +function endReadable(stream) { + var state = stream._readableState; + debug('endReadable', state.endEmitted); + if (!state.endEmitted) { + state.ended = true; + process.nextTick(endReadableNT, state, stream); + } +} +function endReadableNT(state, stream) { + debug('endReadableNT', state.endEmitted, state.length); + + // Check that we didn't get one last unshift. + if (!state.endEmitted && state.length === 0) { + state.endEmitted = true; + stream.readable = false; + stream.emit('end'); + if (state.autoDestroy) { + // In case of duplex streams we need a way to detect + // if the writable side is ready for autoDestroy as well + var wState = stream._writableState; + if (!wState || wState.autoDestroy && wState.finished) { + stream.destroy(); + } + } + } +} +if (typeof Symbol === 'function') { + Readable.from = function (iterable, opts) { + if (from === undefined) { + from = require('./internal/streams/from'); + } + return from(Readable, iterable, opts); + }; +} +function indexOf(xs, x) { + for (var i = 0, l = xs.length; i < l; i++) { + if (xs[i] === x) return i; + } + return -1; +} \ No newline at end of file diff --git a/node_modules/readable-stream/lib/_stream_transform.js b/node_modules/readable-stream/lib/_stream_transform.js new file mode 100644 index 0000000000..1ccb7157be --- /dev/null +++ b/node_modules/readable-stream/lib/_stream_transform.js @@ 
-0,0 +1,190 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. 
When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. 
+ +'use strict'; + +module.exports = Transform; +var _require$codes = require('../errors').codes, + ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED, + ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK, + ERR_TRANSFORM_ALREADY_TRANSFORMING = _require$codes.ERR_TRANSFORM_ALREADY_TRANSFORMING, + ERR_TRANSFORM_WITH_LENGTH_0 = _require$codes.ERR_TRANSFORM_WITH_LENGTH_0; +var Duplex = require('./_stream_duplex'); +require('inherits')(Transform, Duplex); +function afterTransform(er, data) { + var ts = this._transformState; + ts.transforming = false; + var cb = ts.writecb; + if (cb === null) { + return this.emit('error', new ERR_MULTIPLE_CALLBACK()); + } + ts.writechunk = null; + ts.writecb = null; + if (data != null) + // single equals check for both `null` and `undefined` + this.push(data); + cb(er); + var rs = this._readableState; + rs.reading = false; + if (rs.needReadable || rs.length < rs.highWaterMark) { + this._read(rs.highWaterMark); + } +} +function Transform(options) { + if (!(this instanceof Transform)) return new Transform(options); + Duplex.call(this, options); + this._transformState = { + afterTransform: afterTransform.bind(this), + needTransform: false, + transforming: false, + writecb: null, + writechunk: null, + writeencoding: null + }; + + // start out asking for a readable event once data is transformed. + this._readableState.needReadable = true; + + // we have implemented the _read method, and done the other things + // that Readable wants before the first _read call, so unset the + // sync guard flag. + this._readableState.sync = false; + if (options) { + if (typeof options.transform === 'function') this._transform = options.transform; + if (typeof options.flush === 'function') this._flush = options.flush; + } + + // When the writable side finishes, then flush out anything remaining. 
+ this.on('prefinish', prefinish); +} +function prefinish() { + var _this = this; + if (typeof this._flush === 'function' && !this._readableState.destroyed) { + this._flush(function (er, data) { + done(_this, er, data); + }); + } else { + done(this, null, null); + } +} +Transform.prototype.push = function (chunk, encoding) { + this._transformState.needTransform = false; + return Duplex.prototype.push.call(this, chunk, encoding); +}; + +// This is the part where you do stuff! +// override this function in implementation classes. +// 'chunk' is an input chunk. +// +// Call `push(newChunk)` to pass along transformed output +// to the readable side. You may call 'push' zero or more times. +// +// Call `cb(err)` when you are done with this chunk. If you pass +// an error, then that'll put the hurt on the whole operation. If you +// never call cb(), then you'll never get another chunk. +Transform.prototype._transform = function (chunk, encoding, cb) { + cb(new ERR_METHOD_NOT_IMPLEMENTED('_transform()')); +}; +Transform.prototype._write = function (chunk, encoding, cb) { + var ts = this._transformState; + ts.writecb = cb; + ts.writechunk = chunk; + ts.writeencoding = encoding; + if (!ts.transforming) { + var rs = this._readableState; + if (ts.needTransform || rs.needReadable || rs.length < rs.highWaterMark) this._read(rs.highWaterMark); + } +}; + +// Doesn't matter what the args are here. +// _transform does all the work. +// That we got here means that the readable side wants more data. +Transform.prototype._read = function (n) { + var ts = this._transformState; + if (ts.writechunk !== null && !ts.transforming) { + ts.transforming = true; + this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform); + } else { + // mark that we need a transform, so that any data that comes in + // will get processed, now that we've asked for it. 
+ ts.needTransform = true; + } +}; +Transform.prototype._destroy = function (err, cb) { + Duplex.prototype._destroy.call(this, err, function (err2) { + cb(err2); + }); +}; +function done(stream, er, data) { + if (er) return stream.emit('error', er); + if (data != null) + // single equals check for both `null` and `undefined` + stream.push(data); + + // TODO(BridgeAR): Write a test for these two error cases + // if there's nothing in the write buffer, then that means + // that nothing more will ever be provided + if (stream._writableState.length) throw new ERR_TRANSFORM_WITH_LENGTH_0(); + if (stream._transformState.transforming) throw new ERR_TRANSFORM_ALREADY_TRANSFORMING(); + return stream.push(null); +} \ No newline at end of file diff --git a/node_modules/readable-stream/lib/_stream_writable.js b/node_modules/readable-stream/lib/_stream_writable.js new file mode 100644 index 0000000000..292415e23a --- /dev/null +++ b/node_modules/readable-stream/lib/_stream_writable.js @@ -0,0 +1,641 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + +'use strict'; + +module.exports = Writable; + +/* */ +function WriteReq(chunk, encoding, cb) { + this.chunk = chunk; + this.encoding = encoding; + this.callback = cb; + this.next = null; +} + +// It seems a linked list but it is not +// there will be only 2 of these for each stream +function CorkedRequest(state) { + var _this = this; + this.next = null; + this.entry = null; + this.finish = function () { + onCorkedFinish(_this, state); + }; +} +/* */ + +/**/ +var Duplex; +/**/ + +Writable.WritableState = WritableState; + +/**/ +var internalUtil = { + deprecate: require('util-deprecate') +}; +/**/ + +/**/ +var Stream = require('./internal/streams/stream'); +/**/ + +var Buffer = require('buffer').Buffer; +var OurUint8Array = (typeof global !== 'undefined' ? global : typeof window !== 'undefined' ? window : typeof self !== 'undefined' ? 
self : {}).Uint8Array || function () {}; +function _uint8ArrayToBuffer(chunk) { + return Buffer.from(chunk); +} +function _isUint8Array(obj) { + return Buffer.isBuffer(obj) || obj instanceof OurUint8Array; +} +var destroyImpl = require('./internal/streams/destroy'); +var _require = require('./internal/streams/state'), + getHighWaterMark = _require.getHighWaterMark; +var _require$codes = require('../errors').codes, + ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE, + ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED, + ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK, + ERR_STREAM_CANNOT_PIPE = _require$codes.ERR_STREAM_CANNOT_PIPE, + ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED, + ERR_STREAM_NULL_VALUES = _require$codes.ERR_STREAM_NULL_VALUES, + ERR_STREAM_WRITE_AFTER_END = _require$codes.ERR_STREAM_WRITE_AFTER_END, + ERR_UNKNOWN_ENCODING = _require$codes.ERR_UNKNOWN_ENCODING; +var errorOrDestroy = destroyImpl.errorOrDestroy; +require('inherits')(Writable, Stream); +function nop() {} +function WritableState(options, stream, isDuplex) { + Duplex = Duplex || require('./_stream_duplex'); + options = options || {}; + + // Duplex streams are both readable and writable, but share + // the same options object. + // However, some cases require setting options to different + // values for the readable and the writable sides of the duplex stream, + // e.g. options.readableObjectMode vs. options.writableObjectMode, etc. + if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; + + // object stream flag to indicate whether or not this stream + // contains buffers or objects. 
+ this.objectMode = !!options.objectMode; + if (isDuplex) this.objectMode = this.objectMode || !!options.writableObjectMode; + + // the point at which write() starts returning false + // Note: 0 is a valid value, means that we always return false if + // the entire buffer is not flushed immediately on write() + this.highWaterMark = getHighWaterMark(this, options, 'writableHighWaterMark', isDuplex); + + // if _final has been called + this.finalCalled = false; + + // drain event flag. + this.needDrain = false; + // at the start of calling end() + this.ending = false; + // when end() has been called, and returned + this.ended = false; + // when 'finish' is emitted + this.finished = false; + + // has it been destroyed + this.destroyed = false; + + // should we decode strings into buffers before passing to _write? + // this is here so that some node-core streams can optimize string + // handling at a lower level. + var noDecode = options.decodeStrings === false; + this.decodeStrings = !noDecode; + + // Crypto is kind of old and crusty. Historically, its default string + // encoding is 'binary' so we have to make this configurable. + // Everything else in the universe uses 'utf8', though. + this.defaultEncoding = options.defaultEncoding || 'utf8'; + + // not an actual buffer we keep track of, but a measurement + // of how much we're waiting to get pushed to some underlying + // socket or file. + this.length = 0; + + // a flag to see when we're in the middle of a write. + this.writing = false; + + // when true all writes will be buffered until .uncork() call + this.corked = 0; + + // a flag to be able to tell if the onwrite cb is called immediately, + // or on a later tick. We set this to true at first, because any + // actions that shouldn't happen until "later" should generally also + // not happen before the first write call. 
+ this.sync = true; + + // a flag to know if we're processing previously buffered items, which + // may call the _write() callback in the same tick, so that we don't + // end up in an overlapped onwrite situation. + this.bufferProcessing = false; + + // the callback that's passed to _write(chunk,cb) + this.onwrite = function (er) { + onwrite(stream, er); + }; + + // the callback that the user supplies to write(chunk,encoding,cb) + this.writecb = null; + + // the amount that is being written when _write is called. + this.writelen = 0; + this.bufferedRequest = null; + this.lastBufferedRequest = null; + + // number of pending user-supplied write callbacks + // this must be 0 before 'finish' can be emitted + this.pendingcb = 0; + + // emit prefinish if the only thing we're waiting for is _write cbs + // This is relevant for synchronous Transform streams + this.prefinished = false; + + // True if the error was already emitted and should not be thrown again + this.errorEmitted = false; + + // Should close be emitted on destroy. Defaults to true. + this.emitClose = options.emitClose !== false; + + // Should .destroy() be called after 'finish' (and potentially 'end') + this.autoDestroy = !!options.autoDestroy; + + // count buffered requests + this.bufferedRequestCount = 0; + + // allocate the first CorkedRequest, there is always + // one allocated and free to use, and we maintain at most two + this.corkedRequestsFree = new CorkedRequest(this); +} +WritableState.prototype.getBuffer = function getBuffer() { + var current = this.bufferedRequest; + var out = []; + while (current) { + out.push(current); + current = current.next; + } + return out; +}; +(function () { + try { + Object.defineProperty(WritableState.prototype, 'buffer', { + get: internalUtil.deprecate(function writableStateBufferGetter() { + return this.getBuffer(); + }, '_writableState.buffer is deprecated. 
Use _writableState.getBuffer ' + 'instead.', 'DEP0003') + }); + } catch (_) {} +})(); + +// Test _writableState for inheritance to account for Duplex streams, +// whose prototype chain only points to Readable. +var realHasInstance; +if (typeof Symbol === 'function' && Symbol.hasInstance && typeof Function.prototype[Symbol.hasInstance] === 'function') { + realHasInstance = Function.prototype[Symbol.hasInstance]; + Object.defineProperty(Writable, Symbol.hasInstance, { + value: function value(object) { + if (realHasInstance.call(this, object)) return true; + if (this !== Writable) return false; + return object && object._writableState instanceof WritableState; + } + }); +} else { + realHasInstance = function realHasInstance(object) { + return object instanceof this; + }; +} +function Writable(options) { + Duplex = Duplex || require('./_stream_duplex'); + + // Writable ctor is applied to Duplexes, too. + // `realHasInstance` is necessary because using plain `instanceof` + // would return false, as no `_writableState` property is attached. + + // Trying to use the custom `instanceof` for Writable here will also break the + // Node.js LazyTransform implementation, which has a non-trivial getter for + // `_writableState` that would lead to infinite recursion. + + // Checking for a Stream.Duplex instance is faster here instead of inside + // the WritableState constructor, at least with V8 6.5 + var isDuplex = this instanceof Duplex; + if (!isDuplex && !realHasInstance.call(Writable, this)) return new Writable(options); + this._writableState = new WritableState(options, this, isDuplex); + + // legacy. 
+ this.writable = true; + if (options) { + if (typeof options.write === 'function') this._write = options.write; + if (typeof options.writev === 'function') this._writev = options.writev; + if (typeof options.destroy === 'function') this._destroy = options.destroy; + if (typeof options.final === 'function') this._final = options.final; + } + Stream.call(this); +} + +// Otherwise people can pipe Writable streams, which is just wrong. +Writable.prototype.pipe = function () { + errorOrDestroy(this, new ERR_STREAM_CANNOT_PIPE()); +}; +function writeAfterEnd(stream, cb) { + var er = new ERR_STREAM_WRITE_AFTER_END(); + // TODO: defer error events consistently everywhere, not just the cb + errorOrDestroy(stream, er); + process.nextTick(cb, er); +} + +// Checks that a user-supplied chunk is valid, especially for the particular +// mode the stream is in. Currently this means that `null` is never accepted +// and undefined/non-string values are only allowed in object mode. +function validChunk(stream, state, chunk, cb) { + var er; + if (chunk === null) { + er = new ERR_STREAM_NULL_VALUES(); + } else if (typeof chunk !== 'string' && !state.objectMode) { + er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer'], chunk); + } + if (er) { + errorOrDestroy(stream, er); + process.nextTick(cb, er); + return false; + } + return true; +} +Writable.prototype.write = function (chunk, encoding, cb) { + var state = this._writableState; + var ret = false; + var isBuf = !state.objectMode && _isUint8Array(chunk); + if (isBuf && !Buffer.isBuffer(chunk)) { + chunk = _uint8ArrayToBuffer(chunk); + } + if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + if (isBuf) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding; + if (typeof cb !== 'function') cb = nop; + if (state.ending) writeAfterEnd(this, cb);else if (isBuf || validChunk(this, state, chunk, cb)) { + state.pendingcb++; + ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb); + } + 
return ret; +}; +Writable.prototype.cork = function () { + this._writableState.corked++; +}; +Writable.prototype.uncork = function () { + var state = this._writableState; + if (state.corked) { + state.corked--; + if (!state.writing && !state.corked && !state.bufferProcessing && state.bufferedRequest) clearBuffer(this, state); + } +}; +Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) { + // node::ParseEncoding() requires lower case. + if (typeof encoding === 'string') encoding = encoding.toLowerCase(); + if (!(['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'].indexOf((encoding + '').toLowerCase()) > -1)) throw new ERR_UNKNOWN_ENCODING(encoding); + this._writableState.defaultEncoding = encoding; + return this; +}; +Object.defineProperty(Writable.prototype, 'writableBuffer', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState && this._writableState.getBuffer(); + } +}); +function decodeChunk(state, chunk, encoding) { + if (!state.objectMode && state.decodeStrings !== false && typeof chunk === 'string') { + chunk = Buffer.from(chunk, encoding); + } + return chunk; +} +Object.defineProperty(Writable.prototype, 'writableHighWaterMark', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState.highWaterMark; + } +}); + +// if we're already writing something, then just put this +// in the queue, and wait our turn. Otherwise, call _write +// If we return false, then we need a drain event, so set that flag. 
+function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) { + if (!isBuf) { + var newChunk = decodeChunk(state, chunk, encoding); + if (chunk !== newChunk) { + isBuf = true; + encoding = 'buffer'; + chunk = newChunk; + } + } + var len = state.objectMode ? 1 : chunk.length; + state.length += len; + var ret = state.length < state.highWaterMark; + // we must ensure that previous needDrain will not be reset to false. + if (!ret) state.needDrain = true; + if (state.writing || state.corked) { + var last = state.lastBufferedRequest; + state.lastBufferedRequest = { + chunk: chunk, + encoding: encoding, + isBuf: isBuf, + callback: cb, + next: null + }; + if (last) { + last.next = state.lastBufferedRequest; + } else { + state.bufferedRequest = state.lastBufferedRequest; + } + state.bufferedRequestCount += 1; + } else { + doWrite(stream, state, false, len, chunk, encoding, cb); + } + return ret; +} +function doWrite(stream, state, writev, len, chunk, encoding, cb) { + state.writelen = len; + state.writecb = cb; + state.writing = true; + state.sync = true; + if (state.destroyed) state.onwrite(new ERR_STREAM_DESTROYED('write'));else if (writev) stream._writev(chunk, state.onwrite);else stream._write(chunk, encoding, state.onwrite); + state.sync = false; +} +function onwriteError(stream, state, sync, er, cb) { + --state.pendingcb; + if (sync) { + // defer the callback if we are being called synchronously + // to avoid piling up things on the stack + process.nextTick(cb, er); + // this can emit finish, and it will always happen + // after error + process.nextTick(finishMaybe, stream, state); + stream._writableState.errorEmitted = true; + errorOrDestroy(stream, er); + } else { + // the caller expect this to happen before if + // it is async + cb(er); + stream._writableState.errorEmitted = true; + errorOrDestroy(stream, er); + // this can emit finish, but finish must + // always follow error + finishMaybe(stream, state); + } +} +function onwriteStateUpdate(state) { + 
state.writing = false; + state.writecb = null; + state.length -= state.writelen; + state.writelen = 0; +} +function onwrite(stream, er) { + var state = stream._writableState; + var sync = state.sync; + var cb = state.writecb; + if (typeof cb !== 'function') throw new ERR_MULTIPLE_CALLBACK(); + onwriteStateUpdate(state); + if (er) onwriteError(stream, state, sync, er, cb);else { + // Check if we're actually ready to finish, but don't emit yet + var finished = needFinish(state) || stream.destroyed; + if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) { + clearBuffer(stream, state); + } + if (sync) { + process.nextTick(afterWrite, stream, state, finished, cb); + } else { + afterWrite(stream, state, finished, cb); + } + } +} +function afterWrite(stream, state, finished, cb) { + if (!finished) onwriteDrain(stream, state); + state.pendingcb--; + cb(); + finishMaybe(stream, state); +} + +// Must force callback to be called on nextTick, so that we don't +// emit 'drain' before the write() consumer gets the 'false' return +// value, and has a chance to attach a 'drain' listener. 
+function onwriteDrain(stream, state) { + if (state.length === 0 && state.needDrain) { + state.needDrain = false; + stream.emit('drain'); + } +} + +// if there's something in the buffer waiting, then process it +function clearBuffer(stream, state) { + state.bufferProcessing = true; + var entry = state.bufferedRequest; + if (stream._writev && entry && entry.next) { + // Fast case, write everything using _writev() + var l = state.bufferedRequestCount; + var buffer = new Array(l); + var holder = state.corkedRequestsFree; + holder.entry = entry; + var count = 0; + var allBuffers = true; + while (entry) { + buffer[count] = entry; + if (!entry.isBuf) allBuffers = false; + entry = entry.next; + count += 1; + } + buffer.allBuffers = allBuffers; + doWrite(stream, state, true, state.length, buffer, '', holder.finish); + + // doWrite is almost always async, defer these to save a bit of time + // as the hot path ends with doWrite + state.pendingcb++; + state.lastBufferedRequest = null; + if (holder.next) { + state.corkedRequestsFree = holder.next; + holder.next = null; + } else { + state.corkedRequestsFree = new CorkedRequest(state); + } + state.bufferedRequestCount = 0; + } else { + // Slow case, write chunks one-by-one + while (entry) { + var chunk = entry.chunk; + var encoding = entry.encoding; + var cb = entry.callback; + var len = state.objectMode ? 1 : chunk.length; + doWrite(stream, state, false, len, chunk, encoding, cb); + entry = entry.next; + state.bufferedRequestCount--; + // if we didn't call the onwrite immediately, then + // it means that we need to wait until it does. + // also, that means that the chunk and cb are currently + // being processed, so move the buffer counter past them. 
+ if (state.writing) { + break; + } + } + if (entry === null) state.lastBufferedRequest = null; + } + state.bufferedRequest = entry; + state.bufferProcessing = false; +} +Writable.prototype._write = function (chunk, encoding, cb) { + cb(new ERR_METHOD_NOT_IMPLEMENTED('_write()')); +}; +Writable.prototype._writev = null; +Writable.prototype.end = function (chunk, encoding, cb) { + var state = this._writableState; + if (typeof chunk === 'function') { + cb = chunk; + chunk = null; + encoding = null; + } else if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + if (chunk !== null && chunk !== undefined) this.write(chunk, encoding); + + // .end() fully uncorks + if (state.corked) { + state.corked = 1; + this.uncork(); + } + + // ignore unnecessary end() calls. + if (!state.ending) endWritable(this, state, cb); + return this; +}; +Object.defineProperty(Writable.prototype, 'writableLength', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState.length; + } +}); +function needFinish(state) { + return state.ending && state.length === 0 && state.bufferedRequest === null && !state.finished && !state.writing; +} +function callFinal(stream, state) { + stream._final(function (err) { + state.pendingcb--; + if (err) { + errorOrDestroy(stream, err); + } + state.prefinished = true; + stream.emit('prefinish'); + finishMaybe(stream, state); + }); +} +function prefinish(stream, state) { + if (!state.prefinished && !state.finalCalled) { + if (typeof stream._final === 'function' && !state.destroyed) { + state.pendingcb++; + state.finalCalled = true; + process.nextTick(callFinal, stream, state); + } else { + state.prefinished = true; + stream.emit('prefinish'); + } + } +} +function finishMaybe(stream, state) { + var need = needFinish(state); + if (need) { + prefinish(stream, state); + if (state.pendingcb === 
0) { + state.finished = true; + stream.emit('finish'); + if (state.autoDestroy) { + // In case of duplex streams we need a way to detect + // if the readable side is ready for autoDestroy as well + var rState = stream._readableState; + if (!rState || rState.autoDestroy && rState.endEmitted) { + stream.destroy(); + } + } + } + } + return need; +} +function endWritable(stream, state, cb) { + state.ending = true; + finishMaybe(stream, state); + if (cb) { + if (state.finished) process.nextTick(cb);else stream.once('finish', cb); + } + state.ended = true; + stream.writable = false; +} +function onCorkedFinish(corkReq, state, err) { + var entry = corkReq.entry; + corkReq.entry = null; + while (entry) { + var cb = entry.callback; + state.pendingcb--; + cb(err); + entry = entry.next; + } + + // reuse the free corkReq. + state.corkedRequestsFree.next = corkReq; +} +Object.defineProperty(Writable.prototype, 'destroyed', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + if (this._writableState === undefined) { + return false; + } + return this._writableState.destroyed; + }, + set: function set(value) { + // we ignore the value if the stream + // has not been initialized yet + if (!this._writableState) { + return; + } + + // backward compatibility, the user is explicitly + // managing destroyed + this._writableState.destroyed = value; + } +}); +Writable.prototype.destroy = destroyImpl.destroy; +Writable.prototype._undestroy = destroyImpl.undestroy; +Writable.prototype._destroy = function (err, cb) { + cb(err); +}; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/async_iterator.js b/node_modules/readable-stream/lib/internal/streams/async_iterator.js new file mode 100644 index 0000000000..742c5a4674 --- /dev/null +++ b/node_modules/readable-stream/lib/internal/streams/async_iterator.js @@ -0,0 +1,180 @@ 
+'use strict'; + +var _Object$setPrototypeO; +function _defineProperty(obj, key, value) { key = _toPropertyKey(key); if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } +function _toPropertyKey(arg) { var key = _toPrimitive(arg, "string"); return typeof key === "symbol" ? key : String(key); } +function _toPrimitive(input, hint) { if (typeof input !== "object" || input === null) return input; var prim = input[Symbol.toPrimitive]; if (prim !== undefined) { var res = prim.call(input, hint || "default"); if (typeof res !== "object") return res; throw new TypeError("@@toPrimitive must return a primitive value."); } return (hint === "string" ? String : Number)(input); } +var finished = require('./end-of-stream'); +var kLastResolve = Symbol('lastResolve'); +var kLastReject = Symbol('lastReject'); +var kError = Symbol('error'); +var kEnded = Symbol('ended'); +var kLastPromise = Symbol('lastPromise'); +var kHandlePromise = Symbol('handlePromise'); +var kStream = Symbol('stream'); +function createIterResult(value, done) { + return { + value: value, + done: done + }; +} +function readAndResolve(iter) { + var resolve = iter[kLastResolve]; + if (resolve !== null) { + var data = iter[kStream].read(); + // we defer if data is null + // we can be expecting either 'end' or + // 'error' + if (data !== null) { + iter[kLastPromise] = null; + iter[kLastResolve] = null; + iter[kLastReject] = null; + resolve(createIterResult(data, false)); + } + } +} +function onReadable(iter) { + // we wait for the next tick, because it might + // emit an error with process.nextTick + process.nextTick(readAndResolve, iter); +} +function wrapForNext(lastPromise, iter) { + return function (resolve, reject) { + lastPromise.then(function () { + if (iter[kEnded]) { + resolve(createIterResult(undefined, true)); + return; + } + iter[kHandlePromise](resolve, reject); + }, reject); + }; +} +var 
AsyncIteratorPrototype = Object.getPrototypeOf(function () {}); +var ReadableStreamAsyncIteratorPrototype = Object.setPrototypeOf((_Object$setPrototypeO = { + get stream() { + return this[kStream]; + }, + next: function next() { + var _this = this; + // if we have detected an error in the meanwhile + // reject straight away + var error = this[kError]; + if (error !== null) { + return Promise.reject(error); + } + if (this[kEnded]) { + return Promise.resolve(createIterResult(undefined, true)); + } + if (this[kStream].destroyed) { + // We need to defer via nextTick because if .destroy(err) is + // called, the error will be emitted via nextTick, and + // we cannot guarantee that there is no error lingering around + // waiting to be emitted. + return new Promise(function (resolve, reject) { + process.nextTick(function () { + if (_this[kError]) { + reject(_this[kError]); + } else { + resolve(createIterResult(undefined, true)); + } + }); + }); + } + + // if we have multiple next() calls + // we will wait for the previous Promise to finish + // this logic is optimized to support for await loops, + // where next() is only called once at a time + var lastPromise = this[kLastPromise]; + var promise; + if (lastPromise) { + promise = new Promise(wrapForNext(lastPromise, this)); + } else { + // fast path needed to support multiple this.push() + // without triggering the next() queue + var data = this[kStream].read(); + if (data !== null) { + return Promise.resolve(createIterResult(data, false)); + } + promise = new Promise(this[kHandlePromise]); + } + this[kLastPromise] = promise; + return promise; + } +}, _defineProperty(_Object$setPrototypeO, Symbol.asyncIterator, function () { + return this; +}), _defineProperty(_Object$setPrototypeO, "return", function _return() { + var _this2 = this; + // destroy(err, cb) is a private API + // we can guarantee we have that here, because we control the + // Readable class this is attached to + return new Promise(function (resolve, reject) { 
+ _this2[kStream].destroy(null, function (err) { + if (err) { + reject(err); + return; + } + resolve(createIterResult(undefined, true)); + }); + }); +}), _Object$setPrototypeO), AsyncIteratorPrototype); +var createReadableStreamAsyncIterator = function createReadableStreamAsyncIterator(stream) { + var _Object$create; + var iterator = Object.create(ReadableStreamAsyncIteratorPrototype, (_Object$create = {}, _defineProperty(_Object$create, kStream, { + value: stream, + writable: true + }), _defineProperty(_Object$create, kLastResolve, { + value: null, + writable: true + }), _defineProperty(_Object$create, kLastReject, { + value: null, + writable: true + }), _defineProperty(_Object$create, kError, { + value: null, + writable: true + }), _defineProperty(_Object$create, kEnded, { + value: stream._readableState.endEmitted, + writable: true + }), _defineProperty(_Object$create, kHandlePromise, { + value: function value(resolve, reject) { + var data = iterator[kStream].read(); + if (data) { + iterator[kLastPromise] = null; + iterator[kLastResolve] = null; + iterator[kLastReject] = null; + resolve(createIterResult(data, false)); + } else { + iterator[kLastResolve] = resolve; + iterator[kLastReject] = reject; + } + }, + writable: true + }), _Object$create)); + iterator[kLastPromise] = null; + finished(stream, function (err) { + if (err && err.code !== 'ERR_STREAM_PREMATURE_CLOSE') { + var reject = iterator[kLastReject]; + // reject if we are waiting for data in the Promise + // returned by next() and store the error + if (reject !== null) { + iterator[kLastPromise] = null; + iterator[kLastResolve] = null; + iterator[kLastReject] = null; + reject(err); + } + iterator[kError] = err; + return; + } + var resolve = iterator[kLastResolve]; + if (resolve !== null) { + iterator[kLastPromise] = null; + iterator[kLastResolve] = null; + iterator[kLastReject] = null; + resolve(createIterResult(undefined, true)); + } + iterator[kEnded] = true; + }); + stream.on('readable', 
onReadable.bind(null, iterator)); + return iterator; +}; +module.exports = createReadableStreamAsyncIterator; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/buffer_list.js b/node_modules/readable-stream/lib/internal/streams/buffer_list.js new file mode 100644 index 0000000000..69bda497d3 --- /dev/null +++ b/node_modules/readable-stream/lib/internal/streams/buffer_list.js @@ -0,0 +1,183 @@ +'use strict'; + +function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); enumerableOnly && (symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; })), keys.push.apply(keys, symbols); } return keys; } +function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = null != arguments[i] ? arguments[i] : {}; i % 2 ? ownKeys(Object(source), !0).forEach(function (key) { _defineProperty(target, key, source[key]); }) : Object.getOwnPropertyDescriptors ? 
Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } return target; } +function _defineProperty(obj, key, value) { key = _toPropertyKey(key); if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } +function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } +function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, _toPropertyKey(descriptor.key), descriptor); } } +function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); Object.defineProperty(Constructor, "prototype", { writable: false }); return Constructor; } +function _toPropertyKey(arg) { var key = _toPrimitive(arg, "string"); return typeof key === "symbol" ? key : String(key); } +function _toPrimitive(input, hint) { if (typeof input !== "object" || input === null) return input; var prim = input[Symbol.toPrimitive]; if (prim !== undefined) { var res = prim.call(input, hint || "default"); if (typeof res !== "object") return res; throw new TypeError("@@toPrimitive must return a primitive value."); } return (hint === "string" ? 
String : Number)(input); } +var _require = require('buffer'), + Buffer = _require.Buffer; +var _require2 = require('util'), + inspect = _require2.inspect; +var custom = inspect && inspect.custom || 'inspect'; +function copyBuffer(src, target, offset) { + Buffer.prototype.copy.call(src, target, offset); +} +module.exports = /*#__PURE__*/function () { + function BufferList() { + _classCallCheck(this, BufferList); + this.head = null; + this.tail = null; + this.length = 0; + } + _createClass(BufferList, [{ + key: "push", + value: function push(v) { + var entry = { + data: v, + next: null + }; + if (this.length > 0) this.tail.next = entry;else this.head = entry; + this.tail = entry; + ++this.length; + } + }, { + key: "unshift", + value: function unshift(v) { + var entry = { + data: v, + next: this.head + }; + if (this.length === 0) this.tail = entry; + this.head = entry; + ++this.length; + } + }, { + key: "shift", + value: function shift() { + if (this.length === 0) return; + var ret = this.head.data; + if (this.length === 1) this.head = this.tail = null;else this.head = this.head.next; + --this.length; + return ret; + } + }, { + key: "clear", + value: function clear() { + this.head = this.tail = null; + this.length = 0; + } + }, { + key: "join", + value: function join(s) { + if (this.length === 0) return ''; + var p = this.head; + var ret = '' + p.data; + while (p = p.next) ret += s + p.data; + return ret; + } + }, { + key: "concat", + value: function concat(n) { + if (this.length === 0) return Buffer.alloc(0); + var ret = Buffer.allocUnsafe(n >>> 0); + var p = this.head; + var i = 0; + while (p) { + copyBuffer(p.data, ret, i); + i += p.data.length; + p = p.next; + } + return ret; + } + + // Consumes a specified amount of bytes or characters from the buffered data. + }, { + key: "consume", + value: function consume(n, hasStrings) { + var ret; + if (n < this.head.data.length) { + // `slice` is the same for buffers and strings. 
+ ret = this.head.data.slice(0, n); + this.head.data = this.head.data.slice(n); + } else if (n === this.head.data.length) { + // First chunk is a perfect match. + ret = this.shift(); + } else { + // Result spans more than one buffer. + ret = hasStrings ? this._getString(n) : this._getBuffer(n); + } + return ret; + } + }, { + key: "first", + value: function first() { + return this.head.data; + } + + // Consumes a specified amount of characters from the buffered data. + }, { + key: "_getString", + value: function _getString(n) { + var p = this.head; + var c = 1; + var ret = p.data; + n -= ret.length; + while (p = p.next) { + var str = p.data; + var nb = n > str.length ? str.length : n; + if (nb === str.length) ret += str;else ret += str.slice(0, n); + n -= nb; + if (n === 0) { + if (nb === str.length) { + ++c; + if (p.next) this.head = p.next;else this.head = this.tail = null; + } else { + this.head = p; + p.data = str.slice(nb); + } + break; + } + ++c; + } + this.length -= c; + return ret; + } + + // Consumes a specified amount of bytes from the buffered data. + }, { + key: "_getBuffer", + value: function _getBuffer(n) { + var ret = Buffer.allocUnsafe(n); + var p = this.head; + var c = 1; + p.data.copy(ret); + n -= p.data.length; + while (p = p.next) { + var buf = p.data; + var nb = n > buf.length ? buf.length : n; + buf.copy(ret, ret.length - n, 0, nb); + n -= nb; + if (n === 0) { + if (nb === buf.length) { + ++c; + if (p.next) this.head = p.next;else this.head = this.tail = null; + } else { + this.head = p; + p.data = buf.slice(nb); + } + break; + } + ++c; + } + this.length -= c; + return ret; + } + + // Make sure the linked list only shows the minimal necessary information. + }, { + key: custom, + value: function value(_, options) { + return inspect(this, _objectSpread(_objectSpread({}, options), {}, { + // Only inspect one level. + depth: 0, + // It should not recurse. 
+ customInspect: false + })); + } + }]); + return BufferList; +}(); \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/destroy.js b/node_modules/readable-stream/lib/internal/streams/destroy.js new file mode 100644 index 0000000000..31a17c4dc4 --- /dev/null +++ b/node_modules/readable-stream/lib/internal/streams/destroy.js @@ -0,0 +1,96 @@ +'use strict'; + +// undocumented cb() API, needed for core, not for public API +function destroy(err, cb) { + var _this = this; + var readableDestroyed = this._readableState && this._readableState.destroyed; + var writableDestroyed = this._writableState && this._writableState.destroyed; + if (readableDestroyed || writableDestroyed) { + if (cb) { + cb(err); + } else if (err) { + if (!this._writableState) { + process.nextTick(emitErrorNT, this, err); + } else if (!this._writableState.errorEmitted) { + this._writableState.errorEmitted = true; + process.nextTick(emitErrorNT, this, err); + } + } + return this; + } + + // we set destroyed to true before firing error callbacks in order + // to make it re-entrance safe in case destroy() is called within callbacks + + if (this._readableState) { + this._readableState.destroyed = true; + } + + // if this is a duplex stream mark the writable part as destroyed as well + if (this._writableState) { + this._writableState.destroyed = true; + } + this._destroy(err || null, function (err) { + if (!cb && err) { + if (!_this._writableState) { + process.nextTick(emitErrorAndCloseNT, _this, err); + } else if (!_this._writableState.errorEmitted) { + _this._writableState.errorEmitted = true; + process.nextTick(emitErrorAndCloseNT, _this, err); + } else { + process.nextTick(emitCloseNT, _this); + } + } else if (cb) { + process.nextTick(emitCloseNT, _this); + cb(err); + } else { + process.nextTick(emitCloseNT, _this); + } + }); + return this; +} +function emitErrorAndCloseNT(self, err) { + emitErrorNT(self, err); + emitCloseNT(self); +} +function emitCloseNT(self) { 
+ if (self._writableState && !self._writableState.emitClose) return; + if (self._readableState && !self._readableState.emitClose) return; + self.emit('close'); +} +function undestroy() { + if (this._readableState) { + this._readableState.destroyed = false; + this._readableState.reading = false; + this._readableState.ended = false; + this._readableState.endEmitted = false; + } + if (this._writableState) { + this._writableState.destroyed = false; + this._writableState.ended = false; + this._writableState.ending = false; + this._writableState.finalCalled = false; + this._writableState.prefinished = false; + this._writableState.finished = false; + this._writableState.errorEmitted = false; + } +} +function emitErrorNT(self, err) { + self.emit('error', err); +} +function errorOrDestroy(stream, err) { + // We have tests that rely on errors being emitted + // in the same tick, so changing this is semver major. + // For now when you opt-in to autoDestroy we allow + // the error to be emitted nextTick. In a future + // semver major update we should change the default to this. + + var rState = stream._readableState; + var wState = stream._writableState; + if (rState && rState.autoDestroy || wState && wState.autoDestroy) stream.destroy(err);else stream.emit('error', err); +} +module.exports = { + destroy: destroy, + undestroy: undestroy, + errorOrDestroy: errorOrDestroy +}; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/end-of-stream.js b/node_modules/readable-stream/lib/internal/streams/end-of-stream.js new file mode 100644 index 0000000000..59c671b5af --- /dev/null +++ b/node_modules/readable-stream/lib/internal/streams/end-of-stream.js @@ -0,0 +1,86 @@ +// Ported from https://github.com/mafintosh/end-of-stream with +// permission from the author, Mathias Buus (@mafintosh). 
+ +'use strict'; + +var ERR_STREAM_PREMATURE_CLOSE = require('../../../errors').codes.ERR_STREAM_PREMATURE_CLOSE; +function once(callback) { + var called = false; + return function () { + if (called) return; + called = true; + for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { + args[_key] = arguments[_key]; + } + callback.apply(this, args); + }; +} +function noop() {} +function isRequest(stream) { + return stream.setHeader && typeof stream.abort === 'function'; +} +function eos(stream, opts, callback) { + if (typeof opts === 'function') return eos(stream, null, opts); + if (!opts) opts = {}; + callback = once(callback || noop); + var readable = opts.readable || opts.readable !== false && stream.readable; + var writable = opts.writable || opts.writable !== false && stream.writable; + var onlegacyfinish = function onlegacyfinish() { + if (!stream.writable) onfinish(); + }; + var writableEnded = stream._writableState && stream._writableState.finished; + var onfinish = function onfinish() { + writable = false; + writableEnded = true; + if (!readable) callback.call(stream); + }; + var readableEnded = stream._readableState && stream._readableState.endEmitted; + var onend = function onend() { + readable = false; + readableEnded = true; + if (!writable) callback.call(stream); + }; + var onerror = function onerror(err) { + callback.call(stream, err); + }; + var onclose = function onclose() { + var err; + if (readable && !readableEnded) { + if (!stream._readableState || !stream._readableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE(); + return callback.call(stream, err); + } + if (writable && !writableEnded) { + if (!stream._writableState || !stream._writableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE(); + return callback.call(stream, err); + } + }; + var onrequest = function onrequest() { + stream.req.on('finish', onfinish); + }; + if (isRequest(stream)) { + stream.on('complete', onfinish); + stream.on('abort', onclose); + 
if (stream.req) onrequest();else stream.on('request', onrequest); + } else if (writable && !stream._writableState) { + // legacy streams + stream.on('end', onlegacyfinish); + stream.on('close', onlegacyfinish); + } + stream.on('end', onend); + stream.on('finish', onfinish); + if (opts.error !== false) stream.on('error', onerror); + stream.on('close', onclose); + return function () { + stream.removeListener('complete', onfinish); + stream.removeListener('abort', onclose); + stream.removeListener('request', onrequest); + if (stream.req) stream.req.removeListener('finish', onfinish); + stream.removeListener('end', onlegacyfinish); + stream.removeListener('close', onlegacyfinish); + stream.removeListener('finish', onfinish); + stream.removeListener('end', onend); + stream.removeListener('error', onerror); + stream.removeListener('close', onclose); + }; +} +module.exports = eos; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/from-browser.js b/node_modules/readable-stream/lib/internal/streams/from-browser.js new file mode 100644 index 0000000000..a4ce56f3c9 --- /dev/null +++ b/node_modules/readable-stream/lib/internal/streams/from-browser.js @@ -0,0 +1,3 @@ +module.exports = function () { + throw new Error('Readable.from is not available in the browser') +}; diff --git a/node_modules/readable-stream/lib/internal/streams/from.js b/node_modules/readable-stream/lib/internal/streams/from.js new file mode 100644 index 0000000000..0a34ee92e3 --- /dev/null +++ b/node_modules/readable-stream/lib/internal/streams/from.js @@ -0,0 +1,52 @@ +'use strict'; + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function 
(resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } +function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); enumerableOnly && (symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; })), keys.push.apply(keys, symbols); } return keys; } +function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = null != arguments[i] ? arguments[i] : {}; i % 2 ? ownKeys(Object(source), !0).forEach(function (key) { _defineProperty(target, key, source[key]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } return target; } +function _defineProperty(obj, key, value) { key = _toPropertyKey(key); if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } +function _toPropertyKey(arg) { var key = _toPrimitive(arg, "string"); return typeof key === "symbol" ? key : String(key); } +function _toPrimitive(input, hint) { if (typeof input !== "object" || input === null) return input; var prim = input[Symbol.toPrimitive]; if (prim !== undefined) { var res = prim.call(input, hint || "default"); if (typeof res !== "object") return res; throw new TypeError("@@toPrimitive must return a primitive value."); } return (hint === "string" ? 
String : Number)(input); } +var ERR_INVALID_ARG_TYPE = require('../../../errors').codes.ERR_INVALID_ARG_TYPE; +function from(Readable, iterable, opts) { + var iterator; + if (iterable && typeof iterable.next === 'function') { + iterator = iterable; + } else if (iterable && iterable[Symbol.asyncIterator]) iterator = iterable[Symbol.asyncIterator]();else if (iterable && iterable[Symbol.iterator]) iterator = iterable[Symbol.iterator]();else throw new ERR_INVALID_ARG_TYPE('iterable', ['Iterable'], iterable); + var readable = new Readable(_objectSpread({ + objectMode: true + }, opts)); + // Reading boolean to protect against _read + // being called before last iteration completion. + var reading = false; + readable._read = function () { + if (!reading) { + reading = true; + next(); + } + }; + function next() { + return _next2.apply(this, arguments); + } + function _next2() { + _next2 = _asyncToGenerator(function* () { + try { + var _yield$iterator$next = yield iterator.next(), + value = _yield$iterator$next.value, + done = _yield$iterator$next.done; + if (done) { + readable.push(null); + } else if (readable.push(yield value)) { + next(); + } else { + reading = false; + } + } catch (err) { + readable.destroy(err); + } + }); + return _next2.apply(this, arguments); + } + return readable; +} +module.exports = from; diff --git a/node_modules/readable-stream/lib/internal/streams/pipeline.js b/node_modules/readable-stream/lib/internal/streams/pipeline.js new file mode 100644 index 0000000000..e6f39241f9 --- /dev/null +++ b/node_modules/readable-stream/lib/internal/streams/pipeline.js @@ -0,0 +1,86 @@ +// Ported from https://github.com/mafintosh/pump with +// permission from the author, Mathias Buus (@mafintosh). 
+ +'use strict'; + +var eos; +function once(callback) { + var called = false; + return function () { + if (called) return; + called = true; + callback.apply(void 0, arguments); + }; +} +var _require$codes = require('../../../errors').codes, + ERR_MISSING_ARGS = _require$codes.ERR_MISSING_ARGS, + ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED; +function noop(err) { + // Rethrow the error if it exists to avoid swallowing it + if (err) throw err; +} +function isRequest(stream) { + return stream.setHeader && typeof stream.abort === 'function'; +} +function destroyer(stream, reading, writing, callback) { + callback = once(callback); + var closed = false; + stream.on('close', function () { + closed = true; + }); + if (eos === undefined) eos = require('./end-of-stream'); + eos(stream, { + readable: reading, + writable: writing + }, function (err) { + if (err) return callback(err); + closed = true; + callback(); + }); + var destroyed = false; + return function (err) { + if (closed) return; + if (destroyed) return; + destroyed = true; + + // request.destroy just do .end - .abort is what we want + if (isRequest(stream)) return stream.abort(); + if (typeof stream.destroy === 'function') return stream.destroy(); + callback(err || new ERR_STREAM_DESTROYED('pipe')); + }; +} +function call(fn) { + fn(); +} +function pipe(from, to) { + return from.pipe(to); +} +function popCallback(streams) { + if (!streams.length) return noop; + if (typeof streams[streams.length - 1] !== 'function') return noop; + return streams.pop(); +} +function pipeline() { + for (var _len = arguments.length, streams = new Array(_len), _key = 0; _key < _len; _key++) { + streams[_key] = arguments[_key]; + } + var callback = popCallback(streams); + if (Array.isArray(streams[0])) streams = streams[0]; + if (streams.length < 2) { + throw new ERR_MISSING_ARGS('streams'); + } + var error; + var destroys = streams.map(function (stream, i) { + var reading = i < streams.length - 1; + var writing = i > 0; + 
return destroyer(stream, reading, writing, function (err) { + if (!error) error = err; + if (err) destroys.forEach(call); + if (reading) return; + destroys.forEach(call); + callback(error); + }); + }); + return streams.reduce(pipe); +} +module.exports = pipeline; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/state.js b/node_modules/readable-stream/lib/internal/streams/state.js new file mode 100644 index 0000000000..3fbf8927e0 --- /dev/null +++ b/node_modules/readable-stream/lib/internal/streams/state.js @@ -0,0 +1,22 @@ +'use strict'; + +var ERR_INVALID_OPT_VALUE = require('../../../errors').codes.ERR_INVALID_OPT_VALUE; +function highWaterMarkFrom(options, isDuplex, duplexKey) { + return options.highWaterMark != null ? options.highWaterMark : isDuplex ? options[duplexKey] : null; +} +function getHighWaterMark(state, options, duplexKey, isDuplex) { + var hwm = highWaterMarkFrom(options, isDuplex, duplexKey); + if (hwm != null) { + if (!(isFinite(hwm) && Math.floor(hwm) === hwm) || hwm < 0) { + var name = isDuplex ? duplexKey : 'highWaterMark'; + throw new ERR_INVALID_OPT_VALUE(name, hwm); + } + return Math.floor(hwm); + } + + // Default value + return state.objectMode ? 
16 : 16 * 1024; +} +module.exports = { + getHighWaterMark: getHighWaterMark +}; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/stream-browser.js b/node_modules/readable-stream/lib/internal/streams/stream-browser.js new file mode 100644 index 0000000000..9332a3fdae --- /dev/null +++ b/node_modules/readable-stream/lib/internal/streams/stream-browser.js @@ -0,0 +1 @@ +module.exports = require('events').EventEmitter; diff --git a/node_modules/readable-stream/lib/internal/streams/stream.js b/node_modules/readable-stream/lib/internal/streams/stream.js new file mode 100644 index 0000000000..ce2ad5b6ee --- /dev/null +++ b/node_modules/readable-stream/lib/internal/streams/stream.js @@ -0,0 +1 @@ +module.exports = require('stream'); diff --git a/node_modules/readable-stream/package.json b/node_modules/readable-stream/package.json new file mode 100644 index 0000000000..ade59e71aa --- /dev/null +++ b/node_modules/readable-stream/package.json @@ -0,0 +1,68 @@ +{ + "name": "readable-stream", + "version": "3.6.2", + "description": "Streams3, a user-land copy of the stream library from Node.js", + "main": "readable.js", + "engines": { + "node": ">= 6" + }, + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "devDependencies": { + "@babel/cli": "^7.2.0", + "@babel/core": "^7.2.0", + "@babel/polyfill": "^7.0.0", + "@babel/preset-env": "^7.2.0", + "airtap": "0.0.9", + "assert": "^1.4.0", + "bl": "^2.0.0", + "deep-strict-equal": "^0.2.0", + "events.once": "^2.0.2", + "glob": "^7.1.2", + "gunzip-maybe": "^1.4.1", + "hyperquest": "^2.1.3", + "lolex": "^2.6.0", + "nyc": "^11.0.0", + "pump": "^3.0.0", + "rimraf": "^2.6.2", + "tap": "^12.0.0", + "tape": "^4.9.0", + "tar-fs": "^1.16.2", + "util-promisify": "^2.1.0" + }, + "scripts": { + "test": "tap -J --no-esm test/parallel/*.js test/ours/*.js", + "ci": "TAP=1 tap --no-esm test/parallel/*.js test/ours/*.js | tee test.tap", + 
"test-browsers": "airtap --sauce-connect --loopback airtap.local -- test/browser.js", + "test-browser-local": "airtap --open --local -- test/browser.js", + "cover": "nyc npm test", + "report": "nyc report --reporter=lcov", + "update-browser-errors": "babel -o errors-browser.js errors.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/nodejs/readable-stream" + }, + "keywords": [ + "readable", + "stream", + "pipe" + ], + "browser": { + "util": false, + "worker_threads": false, + "./errors": "./errors-browser.js", + "./readable.js": "./readable-browser.js", + "./lib/internal/streams/from.js": "./lib/internal/streams/from-browser.js", + "./lib/internal/streams/stream.js": "./lib/internal/streams/stream-browser.js" + }, + "nyc": { + "include": [ + "lib/**.js" + ] + }, + "license": "MIT" +} diff --git a/node_modules/readable-stream/readable-browser.js b/node_modules/readable-stream/readable-browser.js new file mode 100644 index 0000000000..adbf60de83 --- /dev/null +++ b/node_modules/readable-stream/readable-browser.js @@ -0,0 +1,9 @@ +exports = module.exports = require('./lib/_stream_readable.js'); +exports.Stream = exports; +exports.Readable = exports; +exports.Writable = require('./lib/_stream_writable.js'); +exports.Duplex = require('./lib/_stream_duplex.js'); +exports.Transform = require('./lib/_stream_transform.js'); +exports.PassThrough = require('./lib/_stream_passthrough.js'); +exports.finished = require('./lib/internal/streams/end-of-stream.js'); +exports.pipeline = require('./lib/internal/streams/pipeline.js'); diff --git a/node_modules/readable-stream/readable.js b/node_modules/readable-stream/readable.js new file mode 100644 index 0000000000..9e0ca120de --- /dev/null +++ b/node_modules/readable-stream/readable.js @@ -0,0 +1,16 @@ +var Stream = require('stream'); +if (process.env.READABLE_STREAM === 'disable' && Stream) { + module.exports = Stream.Readable; + Object.assign(module.exports, Stream); + module.exports.Stream = Stream; +} else { 
+ exports = module.exports = require('./lib/_stream_readable.js'); + exports.Stream = Stream || exports; + exports.Readable = exports; + exports.Writable = require('./lib/_stream_writable.js'); + exports.Duplex = require('./lib/_stream_duplex.js'); + exports.Transform = require('./lib/_stream_transform.js'); + exports.PassThrough = require('./lib/_stream_passthrough.js'); + exports.finished = require('./lib/internal/streams/end-of-stream.js'); + exports.pipeline = require('./lib/internal/streams/pipeline.js'); +} diff --git a/node_modules/safe-buffer/LICENSE b/node_modules/safe-buffer/LICENSE new file mode 100644 index 0000000000..0c068ceecb --- /dev/null +++ b/node_modules/safe-buffer/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Feross Aboukhadijeh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/node_modules/safe-buffer/README.md b/node_modules/safe-buffer/README.md new file mode 100644 index 0000000000..e9a81afd04 --- /dev/null +++ b/node_modules/safe-buffer/README.md @@ -0,0 +1,584 @@ +# safe-buffer [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url] [![javascript style guide][standard-image]][standard-url] + +[travis-image]: https://img.shields.io/travis/feross/safe-buffer/master.svg +[travis-url]: https://travis-ci.org/feross/safe-buffer +[npm-image]: https://img.shields.io/npm/v/safe-buffer.svg +[npm-url]: https://npmjs.org/package/safe-buffer +[downloads-image]: https://img.shields.io/npm/dm/safe-buffer.svg +[downloads-url]: https://npmjs.org/package/safe-buffer +[standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg +[standard-url]: https://standardjs.com + +#### Safer Node.js Buffer API + +**Use the new Node.js Buffer APIs (`Buffer.from`, `Buffer.alloc`, +`Buffer.allocUnsafe`, `Buffer.allocUnsafeSlow`) in all versions of Node.js.** + +**Uses the built-in implementation when available.** + +## install + +``` +npm install safe-buffer +``` + +## usage + +The goal of this package is to provide a safe replacement for the node.js `Buffer`. + +It's a drop-in replacement for `Buffer`. 
You can use it by adding one `require` line to +the top of your node.js modules: + +```js +var Buffer = require('safe-buffer').Buffer + +// Existing buffer code will continue to work without issues: + +new Buffer('hey', 'utf8') +new Buffer([1, 2, 3], 'utf8') +new Buffer(obj) +new Buffer(16) // create an uninitialized buffer (potentially unsafe) + +// But you can use these new explicit APIs to make clear what you want: + +Buffer.from('hey', 'utf8') // convert from many types to a Buffer +Buffer.alloc(16) // create a zero-filled buffer (safe) +Buffer.allocUnsafe(16) // create an uninitialized buffer (potentially unsafe) +``` + +## api + +### Class Method: Buffer.from(array) + + +* `array` {Array} + +Allocates a new `Buffer` using an `array` of octets. + +```js +const buf = Buffer.from([0x62,0x75,0x66,0x66,0x65,0x72]); + // creates a new Buffer containing ASCII bytes + // ['b','u','f','f','e','r'] +``` + +A `TypeError` will be thrown if `array` is not an `Array`. + +### Class Method: Buffer.from(arrayBuffer[, byteOffset[, length]]) + + +* `arrayBuffer` {ArrayBuffer} The `.buffer` property of a `TypedArray` or + a `new ArrayBuffer()` +* `byteOffset` {Number} Default: `0` +* `length` {Number} Default: `arrayBuffer.length - byteOffset` + +When passed a reference to the `.buffer` property of a `TypedArray` instance, +the newly created `Buffer` will share the same allocated memory as the +TypedArray. + +```js +const arr = new Uint16Array(2); +arr[0] = 5000; +arr[1] = 4000; + +const buf = Buffer.from(arr.buffer); // shares the memory with arr; + +console.log(buf); + // Prints: + +// changing the TypedArray changes the Buffer also +arr[1] = 6000; + +console.log(buf); + // Prints: +``` + +The optional `byteOffset` and `length` arguments specify a memory range within +the `arrayBuffer` that will be shared by the `Buffer`. 
+ +```js +const ab = new ArrayBuffer(10); +const buf = Buffer.from(ab, 0, 2); +console.log(buf.length); + // Prints: 2 +``` + +A `TypeError` will be thrown if `arrayBuffer` is not an `ArrayBuffer`. + +### Class Method: Buffer.from(buffer) + + +* `buffer` {Buffer} + +Copies the passed `buffer` data onto a new `Buffer` instance. + +```js +const buf1 = Buffer.from('buffer'); +const buf2 = Buffer.from(buf1); + +buf1[0] = 0x61; +console.log(buf1.toString()); + // 'auffer' +console.log(buf2.toString()); + // 'buffer' (copy is not changed) +``` + +A `TypeError` will be thrown if `buffer` is not a `Buffer`. + +### Class Method: Buffer.from(str[, encoding]) + + +* `str` {String} String to encode. +* `encoding` {String} Encoding to use, Default: `'utf8'` + +Creates a new `Buffer` containing the given JavaScript string `str`. If +provided, the `encoding` parameter identifies the character encoding. +If not provided, `encoding` defaults to `'utf8'`. + +```js +const buf1 = Buffer.from('this is a tést'); +console.log(buf1.toString()); + // prints: this is a tést +console.log(buf1.toString('ascii')); + // prints: this is a tC)st + +const buf2 = Buffer.from('7468697320697320612074c3a97374', 'hex'); +console.log(buf2.toString()); + // prints: this is a tést +``` + +A `TypeError` will be thrown if `str` is not a string. + +### Class Method: Buffer.alloc(size[, fill[, encoding]]) + + +* `size` {Number} +* `fill` {Value} Default: `undefined` +* `encoding` {String} Default: `utf8` + +Allocates a new `Buffer` of `size` bytes. If `fill` is `undefined`, the +`Buffer` will be *zero-filled*. + +```js +const buf = Buffer.alloc(5); +console.log(buf); + // +``` + +The `size` must be less than or equal to the value of +`require('buffer').kMaxLength` (on 64-bit architectures, `kMaxLength` is +`(2^31)-1`). Otherwise, a [`RangeError`][] is thrown. A zero-length Buffer will +be created if a `size` less than or equal to 0 is specified. 
+ +If `fill` is specified, the allocated `Buffer` will be initialized by calling +`buf.fill(fill)`. See [`buf.fill()`][] for more information. + +```js +const buf = Buffer.alloc(5, 'a'); +console.log(buf); + // +``` + +If both `fill` and `encoding` are specified, the allocated `Buffer` will be +initialized by calling `buf.fill(fill, encoding)`. For example: + +```js +const buf = Buffer.alloc(11, 'aGVsbG8gd29ybGQ=', 'base64'); +console.log(buf); + // +``` + +Calling `Buffer.alloc(size)` can be significantly slower than the alternative +`Buffer.allocUnsafe(size)` but ensures that the newly created `Buffer` instance +contents will *never contain sensitive data*. + +A `TypeError` will be thrown if `size` is not a number. + +### Class Method: Buffer.allocUnsafe(size) + + +* `size` {Number} + +Allocates a new *non-zero-filled* `Buffer` of `size` bytes. The `size` must +be less than or equal to the value of `require('buffer').kMaxLength` (on 64-bit +architectures, `kMaxLength` is `(2^31)-1`). Otherwise, a [`RangeError`][] is +thrown. A zero-length Buffer will be created if a `size` less than or equal to +0 is specified. + +The underlying memory for `Buffer` instances created in this way is *not +initialized*. The contents of the newly created `Buffer` are unknown and +*may contain sensitive data*. Use [`buf.fill(0)`][] to initialize such +`Buffer` instances to zeroes. + +```js +const buf = Buffer.allocUnsafe(5); +console.log(buf); + // + // (octets will be different, every time) +buf.fill(0); +console.log(buf); + // +``` + +A `TypeError` will be thrown if `size` is not a number. + +Note that the `Buffer` module pre-allocates an internal `Buffer` instance of +size `Buffer.poolSize` that is used as a pool for the fast allocation of new +`Buffer` instances created using `Buffer.allocUnsafe(size)` (and the deprecated +`new Buffer(size)` constructor) only when `size` is less than or equal to +`Buffer.poolSize >> 1` (floor of `Buffer.poolSize` divided by two). 
The default +value of `Buffer.poolSize` is `8192` but can be modified. + +Use of this pre-allocated internal memory pool is a key difference between +calling `Buffer.alloc(size, fill)` vs. `Buffer.allocUnsafe(size).fill(fill)`. +Specifically, `Buffer.alloc(size, fill)` will *never* use the internal Buffer +pool, while `Buffer.allocUnsafe(size).fill(fill)` *will* use the internal +Buffer pool if `size` is less than or equal to half `Buffer.poolSize`. The +difference is subtle but can be important when an application requires the +additional performance that `Buffer.allocUnsafe(size)` provides. + +### Class Method: Buffer.allocUnsafeSlow(size) + + +* `size` {Number} + +Allocates a new *non-zero-filled* and non-pooled `Buffer` of `size` bytes. The +`size` must be less than or equal to the value of +`require('buffer').kMaxLength` (on 64-bit architectures, `kMaxLength` is +`(2^31)-1`). Otherwise, a [`RangeError`][] is thrown. A zero-length Buffer will +be created if a `size` less than or equal to 0 is specified. + +The underlying memory for `Buffer` instances created in this way is *not +initialized*. The contents of the newly created `Buffer` are unknown and +*may contain sensitive data*. Use [`buf.fill(0)`][] to initialize such +`Buffer` instances to zeroes. + +When using `Buffer.allocUnsafe()` to allocate new `Buffer` instances, +allocations under 4KB are, by default, sliced from a single pre-allocated +`Buffer`. This allows applications to avoid the garbage collection overhead of +creating many individually allocated Buffers. This approach improves both +performance and memory usage by eliminating the need to track and cleanup as +many `Persistent` objects. + +However, in the case where a developer may need to retain a small chunk of +memory from a pool for an indeterminate amount of time, it may be appropriate +to create an un-pooled Buffer instance using `Buffer.allocUnsafeSlow()` then +copy out the relevant bits. 
+ +```js +// need to keep around a few small chunks of memory +const store = []; + +socket.on('readable', () => { + const data = socket.read(); + // allocate for retained data + const sb = Buffer.allocUnsafeSlow(10); + // copy the data into the new allocation + data.copy(sb, 0, 0, 10); + store.push(sb); +}); +``` + +Use of `Buffer.allocUnsafeSlow()` should be used only as a last resort *after* +a developer has observed undue memory retention in their applications. + +A `TypeError` will be thrown if `size` is not a number. + +### All the Rest + +The rest of the `Buffer` API is exactly the same as in node.js. +[See the docs](https://nodejs.org/api/buffer.html). + + +## Related links + +- [Node.js issue: Buffer(number) is unsafe](https://github.com/nodejs/node/issues/4660) +- [Node.js Enhancement Proposal: Buffer.from/Buffer.alloc/Buffer.zalloc/Buffer() soft-deprecate](https://github.com/nodejs/node-eps/pull/4) + +## Why is `Buffer` unsafe? + +Today, the node.js `Buffer` constructor is overloaded to handle many different argument +types like `String`, `Array`, `Object`, `TypedArrayView` (`Uint8Array`, etc.), +`ArrayBuffer`, and also `Number`. + +The API is optimized for convenience: you can throw any type at it, and it will try to do +what you want. + +Because the Buffer constructor is so powerful, you often see code like this: + +```js +// Convert UTF-8 strings to hex +function toHex (str) { + return new Buffer(str).toString('hex') +} +``` + +***But what happens if `toHex` is called with a `Number` argument?*** + +### Remote Memory Disclosure + +If an attacker can make your program call the `Buffer` constructor with a `Number` +argument, then they can make it allocate uninitialized memory from the node.js process. +This could potentially disclose TLS private keys, user data, or database passwords. + +When the `Buffer` constructor is passed a `Number` argument, it returns an +**UNINITIALIZED** block of memory of the specified `size`. 
When you create a `Buffer` like +this, you **MUST** overwrite the contents before returning it to the user. + +From the [node.js docs](https://nodejs.org/api/buffer.html#buffer_new_buffer_size): + +> `new Buffer(size)` +> +> - `size` Number +> +> The underlying memory for `Buffer` instances created in this way is not initialized. +> **The contents of a newly created `Buffer` are unknown and could contain sensitive +> data.** Use `buf.fill(0)` to initialize a Buffer to zeroes. + +(Emphasis our own.) + +Whenever the programmer intended to create an uninitialized `Buffer` you often see code +like this: + +```js +var buf = new Buffer(16) + +// Immediately overwrite the uninitialized buffer with data from another buffer +for (var i = 0; i < buf.length; i++) { + buf[i] = otherBuf[i] +} +``` + + +### Would this ever be a problem in real code? + +Yes. It's surprisingly common to forget to check the type of your variables in a +dynamically-typed language like JavaScript. + +Usually the consequences of assuming the wrong type is that your program crashes with an +uncaught exception. But the failure mode for forgetting to check the type of arguments to +the `Buffer` constructor is more catastrophic. + +Here's an example of a vulnerable service that takes a JSON payload and converts it to +hex: + +```js +// Take a JSON payload {str: "some string"} and convert it to hex +var server = http.createServer(function (req, res) { + var data = '' + req.setEncoding('utf8') + req.on('data', function (chunk) { + data += chunk + }) + req.on('end', function () { + var body = JSON.parse(data) + res.end(new Buffer(body.str).toString('hex')) + }) +}) + +server.listen(8080) +``` + +In this example, an http client just has to send: + +```json +{ + "str": 1000 +} +``` + +and it will get back 1,000 bytes of uninitialized memory from the server. + +This is a very serious bug. 
It's similar in severity to the +[the Heartbleed bug](http://heartbleed.com/) that allowed disclosure of OpenSSL process +memory by remote attackers. + + +### Which real-world packages were vulnerable? + +#### [`bittorrent-dht`](https://www.npmjs.com/package/bittorrent-dht) + +[Mathias Buus](https://github.com/mafintosh) and I +([Feross Aboukhadijeh](http://feross.org/)) found this issue in one of our own packages, +[`bittorrent-dht`](https://www.npmjs.com/package/bittorrent-dht). The bug would allow +anyone on the internet to send a series of messages to a user of `bittorrent-dht` and get +them to reveal 20 bytes at a time of uninitialized memory from the node.js process. + +Here's +[the commit](https://github.com/feross/bittorrent-dht/commit/6c7da04025d5633699800a99ec3fbadf70ad35b8) +that fixed it. We released a new fixed version, created a +[Node Security Project disclosure](https://nodesecurity.io/advisories/68), and deprecated all +vulnerable versions on npm so users will get a warning to upgrade to a newer version. + +#### [`ws`](https://www.npmjs.com/package/ws) + +That got us wondering if there were other vulnerable packages. Sure enough, within a short +period of time, we found the same issue in [`ws`](https://www.npmjs.com/package/ws), the +most popular WebSocket implementation in node.js. + +If certain APIs were called with `Number` parameters instead of `String` or `Buffer` as +expected, then uninitialized server memory would be disclosed to the remote peer. + +These were the vulnerable methods: + +```js +socket.send(number) +socket.ping(number) +socket.pong(number) +``` + +Here's a vulnerable socket server with some echo functionality: + +```js +server.on('connection', function (socket) { + socket.on('message', function (message) { + message = JSON.parse(message) + if (message.type === 'echo') { + socket.send(message.data) // send back the user's message + } + }) +}) +``` + +`socket.send(number)` called on the server, will disclose server memory. 
+ +Here's [the release](https://github.com/websockets/ws/releases/tag/1.0.1) where the issue +was fixed, with a more detailed explanation. Props to +[Arnout Kazemier](https://github.com/3rd-Eden) for the quick fix. Here's the +[Node Security Project disclosure](https://nodesecurity.io/advisories/67). + + +### What's the solution? + +It's important that node.js offers a fast way to get memory otherwise performance-critical +applications would needlessly get a lot slower. + +But we need a better way to *signal our intent* as programmers. **When we want +uninitialized memory, we should request it explicitly.** + +Sensitive functionality should not be packed into a developer-friendly API that loosely +accepts many different types. This type of API encourages the lazy practice of passing +variables in without checking the type very carefully. + +#### A new API: `Buffer.allocUnsafe(number)` + +The functionality of creating buffers with uninitialized memory should be part of another +API. We propose `Buffer.allocUnsafe(number)`. This way, it's not part of an API that +frequently gets user input of all sorts of different types passed into it. + +```js +var buf = Buffer.allocUnsafe(16) // careful, uninitialized memory! + +// Immediately overwrite the uninitialized buffer with data from another buffer +for (var i = 0; i < buf.length; i++) { + buf[i] = otherBuf[i] +} +``` + + +### How do we fix node.js core? + +We sent [a PR to node.js core](https://github.com/nodejs/node/pull/4514) (merged as +`semver-major`) which defends against one case: + +```js +var str = 16 +new Buffer(str, 'utf8') +``` + +In this situation, it's implied that the programmer intended the first argument to be a +string, since they passed an encoding as a second argument. Today, node.js will allocate +uninitialized memory in the case of `new Buffer(number, encoding)`, which is probably not +what the programmer intended. 
+ +But this is only a partial solution, since if the programmer does `new Buffer(variable)` +(without an `encoding` parameter) there's no way to know what they intended. If `variable` +is sometimes a number, then uninitialized memory will sometimes be returned. + +### What's the real long-term fix? + +We could deprecate and remove `new Buffer(number)` and use `Buffer.allocUnsafe(number)` when +we need uninitialized memory. But that would break 1000s of packages. + +~~We believe the best solution is to:~~ + +~~1. Change `new Buffer(number)` to return safe, zeroed-out memory~~ + +~~2. Create a new API for creating uninitialized Buffers. We propose: `Buffer.allocUnsafe(number)`~~ + +#### Update + +We now support adding three new APIs: + +- `Buffer.from(value)` - convert from any type to a buffer +- `Buffer.alloc(size)` - create a zero-filled buffer +- `Buffer.allocUnsafe(size)` - create an uninitialized buffer with given size + +This solves the core problem that affected `ws` and `bittorrent-dht` which is +`Buffer(variable)` getting tricked into taking a number argument. + +This way, existing code continues working and the impact on the npm ecosystem will be +minimal. Over time, npm maintainers can migrate performance-critical code to use +`Buffer.allocUnsafe(number)` instead of `new Buffer(number)`. + + +### Conclusion + +We think there's a serious design issue with the `Buffer` API as it exists today. It +promotes insecure software by putting high-risk functionality into a convenient API +with friendly "developer ergonomics". + +This wasn't merely a theoretical exercise because we found the issue in some of the +most popular npm packages. + +Fortunately, there's an easy fix that can be applied today. Use `safe-buffer` in place of +`buffer`. + +```js +var Buffer = require('safe-buffer').Buffer +``` + +Eventually, we hope that node.js core can switch to this new, safer behavior. We believe +the impact on the ecosystem would be minimal since it's not a breaking change. 
+Well-maintained, popular packages would be updated to use `Buffer.alloc` quickly, while +older, insecure packages would magically become safe from this attack vector. + + +## links + +- [Node.js PR: buffer: throw if both length and enc are passed](https://github.com/nodejs/node/pull/4514) +- [Node Security Project disclosure for `ws`](https://nodesecurity.io/advisories/67) +- [Node Security Project disclosure for`bittorrent-dht`](https://nodesecurity.io/advisories/68) + + +## credit + +The original issues in `bittorrent-dht` +([disclosure](https://nodesecurity.io/advisories/68)) and +`ws` ([disclosure](https://nodesecurity.io/advisories/67)) were discovered by +[Mathias Buus](https://github.com/mafintosh) and +[Feross Aboukhadijeh](http://feross.org/). + +Thanks to [Adam Baldwin](https://github.com/evilpacket) for helping disclose these issues +and for his work running the [Node Security Project](https://nodesecurity.io/). + +Thanks to [John Hiesey](https://github.com/jhiesey) for proofreading this README and +auditing the code. + + +## license + +MIT. 
Copyright (C) [Feross Aboukhadijeh](http://feross.org) diff --git a/node_modules/safe-buffer/index.d.ts b/node_modules/safe-buffer/index.d.ts new file mode 100644 index 0000000000..e9fed809a5 --- /dev/null +++ b/node_modules/safe-buffer/index.d.ts @@ -0,0 +1,187 @@ +declare module "safe-buffer" { + export class Buffer { + length: number + write(string: string, offset?: number, length?: number, encoding?: string): number; + toString(encoding?: string, start?: number, end?: number): string; + toJSON(): { type: 'Buffer', data: any[] }; + equals(otherBuffer: Buffer): boolean; + compare(otherBuffer: Buffer, targetStart?: number, targetEnd?: number, sourceStart?: number, sourceEnd?: number): number; + copy(targetBuffer: Buffer, targetStart?: number, sourceStart?: number, sourceEnd?: number): number; + slice(start?: number, end?: number): Buffer; + writeUIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; + writeUIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; + writeIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; + writeIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; + readUIntLE(offset: number, byteLength: number, noAssert?: boolean): number; + readUIntBE(offset: number, byteLength: number, noAssert?: boolean): number; + readIntLE(offset: number, byteLength: number, noAssert?: boolean): number; + readIntBE(offset: number, byteLength: number, noAssert?: boolean): number; + readUInt8(offset: number, noAssert?: boolean): number; + readUInt16LE(offset: number, noAssert?: boolean): number; + readUInt16BE(offset: number, noAssert?: boolean): number; + readUInt32LE(offset: number, noAssert?: boolean): number; + readUInt32BE(offset: number, noAssert?: boolean): number; + readInt8(offset: number, noAssert?: boolean): number; + readInt16LE(offset: number, noAssert?: boolean): number; + readInt16BE(offset: number, noAssert?: boolean): 
number; + readInt32LE(offset: number, noAssert?: boolean): number; + readInt32BE(offset: number, noAssert?: boolean): number; + readFloatLE(offset: number, noAssert?: boolean): number; + readFloatBE(offset: number, noAssert?: boolean): number; + readDoubleLE(offset: number, noAssert?: boolean): number; + readDoubleBE(offset: number, noAssert?: boolean): number; + swap16(): Buffer; + swap32(): Buffer; + swap64(): Buffer; + writeUInt8(value: number, offset: number, noAssert?: boolean): number; + writeUInt16LE(value: number, offset: number, noAssert?: boolean): number; + writeUInt16BE(value: number, offset: number, noAssert?: boolean): number; + writeUInt32LE(value: number, offset: number, noAssert?: boolean): number; + writeUInt32BE(value: number, offset: number, noAssert?: boolean): number; + writeInt8(value: number, offset: number, noAssert?: boolean): number; + writeInt16LE(value: number, offset: number, noAssert?: boolean): number; + writeInt16BE(value: number, offset: number, noAssert?: boolean): number; + writeInt32LE(value: number, offset: number, noAssert?: boolean): number; + writeInt32BE(value: number, offset: number, noAssert?: boolean): number; + writeFloatLE(value: number, offset: number, noAssert?: boolean): number; + writeFloatBE(value: number, offset: number, noAssert?: boolean): number; + writeDoubleLE(value: number, offset: number, noAssert?: boolean): number; + writeDoubleBE(value: number, offset: number, noAssert?: boolean): number; + fill(value: any, offset?: number, end?: number): this; + indexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number; + lastIndexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number; + includes(value: string | number | Buffer, byteOffset?: number, encoding?: string): boolean; + + /** + * Allocates a new buffer containing the given {str}. + * + * @param str String to store in buffer. + * @param encoding encoding to use, optional. 
Default is 'utf8' + */ + constructor (str: string, encoding?: string); + /** + * Allocates a new buffer of {size} octets. + * + * @param size count of octets to allocate. + */ + constructor (size: number); + /** + * Allocates a new buffer containing the given {array} of octets. + * + * @param array The octets to store. + */ + constructor (array: Uint8Array); + /** + * Produces a Buffer backed by the same allocated memory as + * the given {ArrayBuffer}. + * + * + * @param arrayBuffer The ArrayBuffer with which to share memory. + */ + constructor (arrayBuffer: ArrayBuffer); + /** + * Allocates a new buffer containing the given {array} of octets. + * + * @param array The octets to store. + */ + constructor (array: any[]); + /** + * Copies the passed {buffer} data onto a new {Buffer} instance. + * + * @param buffer The buffer to copy. + */ + constructor (buffer: Buffer); + prototype: Buffer; + /** + * Allocates a new Buffer using an {array} of octets. + * + * @param array + */ + static from(array: any[]): Buffer; + /** + * When passed a reference to the .buffer property of a TypedArray instance, + * the newly created Buffer will share the same allocated memory as the TypedArray. + * The optional {byteOffset} and {length} arguments specify a memory range + * within the {arrayBuffer} that will be shared by the Buffer. + * + * @param arrayBuffer The .buffer property of a TypedArray or a new ArrayBuffer() + * @param byteOffset + * @param length + */ + static from(arrayBuffer: ArrayBuffer, byteOffset?: number, length?: number): Buffer; + /** + * Copies the passed {buffer} data onto a new Buffer instance. + * + * @param buffer + */ + static from(buffer: Buffer): Buffer; + /** + * Creates a new Buffer containing the given JavaScript string {str}. + * If provided, the {encoding} parameter identifies the character encoding. + * If not provided, {encoding} defaults to 'utf8'. 
+ * + * @param str + */ + static from(str: string, encoding?: string): Buffer; + /** + * Returns true if {obj} is a Buffer + * + * @param obj object to test. + */ + static isBuffer(obj: any): obj is Buffer; + /** + * Returns true if {encoding} is a valid encoding argument. + * Valid string encodings in Node 0.12: 'ascii'|'utf8'|'utf16le'|'ucs2'(alias of 'utf16le')|'base64'|'binary'(deprecated)|'hex' + * + * @param encoding string to test. + */ + static isEncoding(encoding: string): boolean; + /** + * Gives the actual byte length of a string. encoding defaults to 'utf8'. + * This is not the same as String.prototype.length since that returns the number of characters in a string. + * + * @param string string to test. + * @param encoding encoding used to evaluate (defaults to 'utf8') + */ + static byteLength(string: string, encoding?: string): number; + /** + * Returns a buffer which is the result of concatenating all the buffers in the list together. + * + * If the list has no items, or if the totalLength is 0, then it returns a zero-length buffer. + * If the list has exactly one item, then the first item of the list is returned. + * If the list has more than one item, then a new Buffer is created. + * + * @param list An array of Buffer objects to concatenate + * @param totalLength Total length of the buffers when concatenated. + * If totalLength is not provided, it is read from the buffers in the list. However, this adds an additional loop to the function, so it is faster to provide the length explicitly. + */ + static concat(list: Buffer[], totalLength?: number): Buffer; + /** + * The same as buf1.compare(buf2). + */ + static compare(buf1: Buffer, buf2: Buffer): number; + /** + * Allocates a new buffer of {size} octets. + * + * @param size count of octets to allocate. + * @param fill if specified, buffer will be initialized by calling buf.fill(fill). + * If parameter is omitted, buffer will be filled with zeros. 
+ * @param encoding encoding used for call to buf.fill while initalizing + */ + static alloc(size: number, fill?: string | Buffer | number, encoding?: string): Buffer; + /** + * Allocates a new buffer of {size} octets, leaving memory not initialized, so the contents + * of the newly created Buffer are unknown and may contain sensitive data. + * + * @param size count of octets to allocate + */ + static allocUnsafe(size: number): Buffer; + /** + * Allocates a new non-pooled buffer of {size} octets, leaving memory not initialized, so the contents + * of the newly created Buffer are unknown and may contain sensitive data. + * + * @param size count of octets to allocate + */ + static allocUnsafeSlow(size: number): Buffer; + } +} \ No newline at end of file diff --git a/node_modules/safe-buffer/index.js b/node_modules/safe-buffer/index.js new file mode 100644 index 0000000000..f8d3ec9885 --- /dev/null +++ b/node_modules/safe-buffer/index.js @@ -0,0 +1,65 @@ +/*! safe-buffer. MIT License. Feross Aboukhadijeh */ +/* eslint-disable node/no-deprecated-api */ +var buffer = require('buffer') +var Buffer = buffer.Buffer + +// alternative to using Object.keys for old browsers +function copyProps (src, dst) { + for (var key in src) { + dst[key] = src[key] + } +} +if (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) { + module.exports = buffer +} else { + // Copy properties from require('buffer') + copyProps(buffer, exports) + exports.Buffer = SafeBuffer +} + +function SafeBuffer (arg, encodingOrOffset, length) { + return Buffer(arg, encodingOrOffset, length) +} + +SafeBuffer.prototype = Object.create(Buffer.prototype) + +// Copy static methods from Buffer +copyProps(Buffer, SafeBuffer) + +SafeBuffer.from = function (arg, encodingOrOffset, length) { + if (typeof arg === 'number') { + throw new TypeError('Argument must not be a number') + } + return Buffer(arg, encodingOrOffset, length) +} + +SafeBuffer.alloc = function (size, fill, encoding) { + if 
(typeof size !== 'number') { + throw new TypeError('Argument must be a number') + } + var buf = Buffer(size) + if (fill !== undefined) { + if (typeof encoding === 'string') { + buf.fill(fill, encoding) + } else { + buf.fill(fill) + } + } else { + buf.fill(0) + } + return buf +} + +SafeBuffer.allocUnsafe = function (size) { + if (typeof size !== 'number') { + throw new TypeError('Argument must be a number') + } + return Buffer(size) +} + +SafeBuffer.allocUnsafeSlow = function (size) { + if (typeof size !== 'number') { + throw new TypeError('Argument must be a number') + } + return buffer.SlowBuffer(size) +} diff --git a/node_modules/safe-buffer/package.json b/node_modules/safe-buffer/package.json new file mode 100644 index 0000000000..f2869e2564 --- /dev/null +++ b/node_modules/safe-buffer/package.json @@ -0,0 +1,51 @@ +{ + "name": "safe-buffer", + "description": "Safer Node.js Buffer API", + "version": "5.2.1", + "author": { + "name": "Feross Aboukhadijeh", + "email": "feross@feross.org", + "url": "https://feross.org" + }, + "bugs": { + "url": "https://github.com/feross/safe-buffer/issues" + }, + "devDependencies": { + "standard": "*", + "tape": "^5.0.0" + }, + "homepage": "https://github.com/feross/safe-buffer", + "keywords": [ + "buffer", + "buffer allocate", + "node security", + "safe", + "safe-buffer", + "security", + "uninitialized" + ], + "license": "MIT", + "main": "index.js", + "types": "index.d.ts", + "repository": { + "type": "git", + "url": "git://github.com/feross/safe-buffer.git" + }, + "scripts": { + "test": "standard && tape test/*.js" + }, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] +} diff --git a/node_modules/string_decoder/LICENSE b/node_modules/string_decoder/LICENSE new file mode 100644 index 0000000000..778edb2073 --- /dev/null +++ 
b/node_modules/string_decoder/LICENSE @@ -0,0 +1,48 @@ +Node.js is licensed for use as follows: + +""" +Copyright Node.js contributors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. +""" + +This license applies to parts of Node.js originating from the +https://github.com/joyent/node repository: + +""" +Copyright Joyent, Inc. and other Node contributors. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. +""" + diff --git a/node_modules/string_decoder/README.md b/node_modules/string_decoder/README.md new file mode 100644 index 0000000000..5fd58315ed --- /dev/null +++ b/node_modules/string_decoder/README.md @@ -0,0 +1,47 @@ +# string_decoder + +***Node-core v8.9.4 string_decoder for userland*** + + +[![NPM](https://nodei.co/npm/string_decoder.png?downloads=true&downloadRank=true)](https://nodei.co/npm/string_decoder/) +[![NPM](https://nodei.co/npm-dl/string_decoder.png?&months=6&height=3)](https://nodei.co/npm/string_decoder/) + + +```bash +npm install --save string_decoder +``` + +***Node-core string_decoder for userland*** + +This package is a mirror of the string_decoder implementation in Node-core. + +Full documentation may be found on the [Node.js website](https://nodejs.org/dist/v8.9.4/docs/api/). + +As of version 1.0.0 **string_decoder** uses semantic versioning. + +## Previous versions + +Previous version numbers match the versions found in Node core, e.g. 0.10.24 matches Node 0.10.24, likewise 0.11.10 matches Node 0.11.10. + +## Update + +The *build/* directory contains a build script that will scrape the source from the [nodejs/node](https://github.com/nodejs/node) repo given a specific Node version. + +## Streams Working Group + +`string_decoder` is maintained by the Streams Working Group, which +oversees the development and maintenance of the Streams API within +Node.js. The responsibilities of the Streams Working Group include: + +* Addressing stream issues on the Node.js issue tracker. 
+* Authoring and editing stream documentation within the Node.js project. +* Reviewing changes to stream subclasses within the Node.js project. +* Redirecting changes to streams from the Node.js project to this + project. +* Assisting in the implementation of stream providers within Node.js. +* Recommending versions of `readable-stream` to be included in Node.js. +* Messaging about the future of streams to give the community advance + notice of changes. + +See [readable-stream](https://github.com/nodejs/readable-stream) for +more details. diff --git a/node_modules/string_decoder/lib/string_decoder.js b/node_modules/string_decoder/lib/string_decoder.js new file mode 100644 index 0000000000..2e89e63f79 --- /dev/null +++ b/node_modules/string_decoder/lib/string_decoder.js @@ -0,0 +1,296 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +'use strict'; + +/**/ + +var Buffer = require('safe-buffer').Buffer; +/**/ + +var isEncoding = Buffer.isEncoding || function (encoding) { + encoding = '' + encoding; + switch (encoding && encoding.toLowerCase()) { + case 'hex':case 'utf8':case 'utf-8':case 'ascii':case 'binary':case 'base64':case 'ucs2':case 'ucs-2':case 'utf16le':case 'utf-16le':case 'raw': + return true; + default: + return false; + } +}; + +function _normalizeEncoding(enc) { + if (!enc) return 'utf8'; + var retried; + while (true) { + switch (enc) { + case 'utf8': + case 'utf-8': + return 'utf8'; + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return 'utf16le'; + case 'latin1': + case 'binary': + return 'latin1'; + case 'base64': + case 'ascii': + case 'hex': + return enc; + default: + if (retried) return; // undefined + enc = ('' + enc).toLowerCase(); + retried = true; + } + } +}; + +// Do not cache `Buffer.isEncoding` when checking encoding names as some +// modules monkey-patch it to support additional encodings +function normalizeEncoding(enc) { + var nenc = _normalizeEncoding(enc); + if (typeof nenc !== 'string' && (Buffer.isEncoding === isEncoding || !isEncoding(enc))) throw new Error('Unknown encoding: ' + enc); + return nenc || enc; +} + +// StringDecoder provides an interface for efficiently splitting a series of +// buffers into a series of JS strings without breaking apart multi-byte +// characters. 
+exports.StringDecoder = StringDecoder; +function StringDecoder(encoding) { + this.encoding = normalizeEncoding(encoding); + var nb; + switch (this.encoding) { + case 'utf16le': + this.text = utf16Text; + this.end = utf16End; + nb = 4; + break; + case 'utf8': + this.fillLast = utf8FillLast; + nb = 4; + break; + case 'base64': + this.text = base64Text; + this.end = base64End; + nb = 3; + break; + default: + this.write = simpleWrite; + this.end = simpleEnd; + return; + } + this.lastNeed = 0; + this.lastTotal = 0; + this.lastChar = Buffer.allocUnsafe(nb); +} + +StringDecoder.prototype.write = function (buf) { + if (buf.length === 0) return ''; + var r; + var i; + if (this.lastNeed) { + r = this.fillLast(buf); + if (r === undefined) return ''; + i = this.lastNeed; + this.lastNeed = 0; + } else { + i = 0; + } + if (i < buf.length) return r ? r + this.text(buf, i) : this.text(buf, i); + return r || ''; +}; + +StringDecoder.prototype.end = utf8End; + +// Returns only complete characters in a Buffer +StringDecoder.prototype.text = utf8Text; + +// Attempts to complete a partial non-UTF-8 character using bytes from a Buffer +StringDecoder.prototype.fillLast = function (buf) { + if (this.lastNeed <= buf.length) { + buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, this.lastNeed); + return this.lastChar.toString(this.encoding, 0, this.lastTotal); + } + buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, buf.length); + this.lastNeed -= buf.length; +}; + +// Checks the type of a UTF-8 byte, whether it's ASCII, a leading byte, or a +// continuation byte. If an invalid byte is detected, -2 is returned. +function utf8CheckByte(byte) { + if (byte <= 0x7F) return 0;else if (byte >> 5 === 0x06) return 2;else if (byte >> 4 === 0x0E) return 3;else if (byte >> 3 === 0x1E) return 4; + return byte >> 6 === 0x02 ? -1 : -2; +} + +// Checks at most 3 bytes at the end of a Buffer in order to detect an +// incomplete multi-byte UTF-8 character. 
The total number of bytes (2, 3, or 4) +// needed to complete the UTF-8 character (if applicable) are returned. +function utf8CheckIncomplete(self, buf, i) { + var j = buf.length - 1; + if (j < i) return 0; + var nb = utf8CheckByte(buf[j]); + if (nb >= 0) { + if (nb > 0) self.lastNeed = nb - 1; + return nb; + } + if (--j < i || nb === -2) return 0; + nb = utf8CheckByte(buf[j]); + if (nb >= 0) { + if (nb > 0) self.lastNeed = nb - 2; + return nb; + } + if (--j < i || nb === -2) return 0; + nb = utf8CheckByte(buf[j]); + if (nb >= 0) { + if (nb > 0) { + if (nb === 2) nb = 0;else self.lastNeed = nb - 3; + } + return nb; + } + return 0; +} + +// Validates as many continuation bytes for a multi-byte UTF-8 character as +// needed or are available. If we see a non-continuation byte where we expect +// one, we "replace" the validated continuation bytes we've seen so far with +// a single UTF-8 replacement character ('\ufffd'), to match v8's UTF-8 decoding +// behavior. The continuation byte check is included three times in the case +// where all of the continuation bytes for a character exist in the same buffer. +// It is also done this way as a slight performance increase instead of using a +// loop. +function utf8CheckExtraBytes(self, buf, p) { + if ((buf[0] & 0xC0) !== 0x80) { + self.lastNeed = 0; + return '\ufffd'; + } + if (self.lastNeed > 1 && buf.length > 1) { + if ((buf[1] & 0xC0) !== 0x80) { + self.lastNeed = 1; + return '\ufffd'; + } + if (self.lastNeed > 2 && buf.length > 2) { + if ((buf[2] & 0xC0) !== 0x80) { + self.lastNeed = 2; + return '\ufffd'; + } + } + } +} + +// Attempts to complete a multi-byte UTF-8 character using bytes from a Buffer. 
+function utf8FillLast(buf) { + var p = this.lastTotal - this.lastNeed; + var r = utf8CheckExtraBytes(this, buf, p); + if (r !== undefined) return r; + if (this.lastNeed <= buf.length) { + buf.copy(this.lastChar, p, 0, this.lastNeed); + return this.lastChar.toString(this.encoding, 0, this.lastTotal); + } + buf.copy(this.lastChar, p, 0, buf.length); + this.lastNeed -= buf.length; +} + +// Returns all complete UTF-8 characters in a Buffer. If the Buffer ended on a +// partial character, the character's bytes are buffered until the required +// number of bytes are available. +function utf8Text(buf, i) { + var total = utf8CheckIncomplete(this, buf, i); + if (!this.lastNeed) return buf.toString('utf8', i); + this.lastTotal = total; + var end = buf.length - (total - this.lastNeed); + buf.copy(this.lastChar, 0, end); + return buf.toString('utf8', i, end); +} + +// For UTF-8, a replacement character is added when ending on a partial +// character. +function utf8End(buf) { + var r = buf && buf.length ? this.write(buf) : ''; + if (this.lastNeed) return r + '\ufffd'; + return r; +} + +// UTF-16LE typically needs two bytes per character, but even if we have an even +// number of bytes available, we need to check if we end on a leading/high +// surrogate. In that case, we need to wait for the next two bytes in order to +// decode the last character properly. 
+function utf16Text(buf, i) { + if ((buf.length - i) % 2 === 0) { + var r = buf.toString('utf16le', i); + if (r) { + var c = r.charCodeAt(r.length - 1); + if (c >= 0xD800 && c <= 0xDBFF) { + this.lastNeed = 2; + this.lastTotal = 4; + this.lastChar[0] = buf[buf.length - 2]; + this.lastChar[1] = buf[buf.length - 1]; + return r.slice(0, -1); + } + } + return r; + } + this.lastNeed = 1; + this.lastTotal = 2; + this.lastChar[0] = buf[buf.length - 1]; + return buf.toString('utf16le', i, buf.length - 1); +} + +// For UTF-16LE we do not explicitly append special replacement characters if we +// end on a partial character, we simply let v8 handle that. +function utf16End(buf) { + var r = buf && buf.length ? this.write(buf) : ''; + if (this.lastNeed) { + var end = this.lastTotal - this.lastNeed; + return r + this.lastChar.toString('utf16le', 0, end); + } + return r; +} + +function base64Text(buf, i) { + var n = (buf.length - i) % 3; + if (n === 0) return buf.toString('base64', i); + this.lastNeed = 3 - n; + this.lastTotal = 3; + if (n === 1) { + this.lastChar[0] = buf[buf.length - 1]; + } else { + this.lastChar[0] = buf[buf.length - 2]; + this.lastChar[1] = buf[buf.length - 1]; + } + return buf.toString('base64', i, buf.length - n); +} + +function base64End(buf) { + var r = buf && buf.length ? this.write(buf) : ''; + if (this.lastNeed) return r + this.lastChar.toString('base64', 0, 3 - this.lastNeed); + return r; +} + +// Pass bytes on through for single-byte encodings (e.g. ascii, latin1, hex) +function simpleWrite(buf) { + return buf.toString(this.encoding); +} + +function simpleEnd(buf) { + return buf && buf.length ? 
this.write(buf) : ''; +} \ No newline at end of file diff --git a/node_modules/string_decoder/package.json b/node_modules/string_decoder/package.json new file mode 100644 index 0000000000..b2bb141160 --- /dev/null +++ b/node_modules/string_decoder/package.json @@ -0,0 +1,34 @@ +{ + "name": "string_decoder", + "version": "1.3.0", + "description": "The string_decoder module from Node core", + "main": "lib/string_decoder.js", + "files": [ + "lib" + ], + "dependencies": { + "safe-buffer": "~5.2.0" + }, + "devDependencies": { + "babel-polyfill": "^6.23.0", + "core-util-is": "^1.0.2", + "inherits": "^2.0.3", + "tap": "~0.4.8" + }, + "scripts": { + "test": "tap test/parallel/*.js && node test/verify-dependencies", + "ci": "tap test/parallel/*.js test/ours/*.js --tap | tee test.tap && node test/verify-dependencies.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/nodejs/string_decoder.git" + }, + "homepage": "https://github.com/nodejs/string_decoder", + "keywords": [ + "string", + "decoder", + "browser", + "browserify" + ], + "license": "MIT" +} diff --git a/node_modules/util-deprecate/History.md b/node_modules/util-deprecate/History.md new file mode 100644 index 0000000000..acc8675372 --- /dev/null +++ b/node_modules/util-deprecate/History.md @@ -0,0 +1,16 @@ + +1.0.2 / 2015-10-07 +================== + + * use try/catch when checking `localStorage` (#3, @kumavis) + +1.0.1 / 2014-11-25 +================== + + * browser: use `console.warn()` for deprecation calls + * browser: more jsdocs + +1.0.0 / 2014-04-30 +================== + + * initial commit diff --git a/node_modules/util-deprecate/LICENSE b/node_modules/util-deprecate/LICENSE new file mode 100644 index 0000000000..6a60e8c225 --- /dev/null +++ b/node_modules/util-deprecate/LICENSE @@ -0,0 +1,24 @@ +(The MIT License) + +Copyright (c) 2014 Nathan Rajlich + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the 
"Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/node_modules/util-deprecate/README.md b/node_modules/util-deprecate/README.md new file mode 100644 index 0000000000..75622fa7c2 --- /dev/null +++ b/node_modules/util-deprecate/README.md @@ -0,0 +1,53 @@ +util-deprecate +============== +### The Node.js `util.deprecate()` function with browser support + +In Node.js, this module simply re-exports the `util.deprecate()` function. + +In the web browser (i.e. via browserify), a browser-specific implementation +of the `util.deprecate()` function is used. + + +## API + +A `deprecate()` function is the only thing exposed by this module. 
+ +``` javascript +// setup: +exports.foo = deprecate(foo, 'foo() is deprecated, use bar() instead'); + + +// users see: +foo(); +// foo() is deprecated, use bar() instead +foo(); +foo(); +``` + + +## License + +(The MIT License) + +Copyright (c) 2014 Nathan Rajlich + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/node_modules/util-deprecate/browser.js b/node_modules/util-deprecate/browser.js new file mode 100644 index 0000000000..549ae2f065 --- /dev/null +++ b/node_modules/util-deprecate/browser.js @@ -0,0 +1,67 @@ + +/** + * Module exports. + */ + +module.exports = deprecate; + +/** + * Mark that a method should not be used. + * Returns a modified function which warns once by default. + * + * If `localStorage.noDeprecation = true` is set, then it is a no-op. + * + * If `localStorage.throwDeprecation = true` is set, then deprecated functions + * will throw an Error when invoked. 
+ * + * If `localStorage.traceDeprecation = true` is set, then deprecated functions + * will invoke `console.trace()` instead of `console.error()`. + * + * @param {Function} fn - the function to deprecate + * @param {String} msg - the string to print to the console when `fn` is invoked + * @returns {Function} a new "deprecated" version of `fn` + * @api public + */ + +function deprecate (fn, msg) { + if (config('noDeprecation')) { + return fn; + } + + var warned = false; + function deprecated() { + if (!warned) { + if (config('throwDeprecation')) { + throw new Error(msg); + } else if (config('traceDeprecation')) { + console.trace(msg); + } else { + console.warn(msg); + } + warned = true; + } + return fn.apply(this, arguments); + } + + return deprecated; +} + +/** + * Checks `localStorage` for boolean values for the given `name`. + * + * @param {String} name + * @returns {Boolean} + * @api private + */ + +function config (name) { + // accessing global.localStorage can trigger a DOMException in sandboxed iframes + try { + if (!global.localStorage) return false; + } catch (_) { + return false; + } + var val = global.localStorage[name]; + if (null == val) return false; + return String(val).toLowerCase() === 'true'; +} diff --git a/node_modules/util-deprecate/node.js b/node_modules/util-deprecate/node.js new file mode 100644 index 0000000000..5e6fcff5dd --- /dev/null +++ b/node_modules/util-deprecate/node.js @@ -0,0 +1,6 @@ + +/** + * For Node.js, simply re-export the core `util.deprecate` function. 
+ */ + +module.exports = require('util').deprecate; diff --git a/node_modules/util-deprecate/package.json b/node_modules/util-deprecate/package.json new file mode 100644 index 0000000000..2e79f89a90 --- /dev/null +++ b/node_modules/util-deprecate/package.json @@ -0,0 +1,27 @@ +{ + "name": "util-deprecate", + "version": "1.0.2", + "description": "The Node.js `util.deprecate()` function with browser support", + "main": "node.js", + "browser": "browser.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "repository": { + "type": "git", + "url": "git://github.com/TooTallNate/util-deprecate.git" + }, + "keywords": [ + "util", + "deprecate", + "browserify", + "browser", + "node" + ], + "author": "Nathan Rajlich (http://n8.io/)", + "license": "MIT", + "bugs": { + "url": "https://github.com/TooTallNate/util-deprecate/issues" + }, + "homepage": "https://github.com/TooTallNate/util-deprecate" +} diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000000..8455638b1a --- /dev/null +++ b/package-lock.json @@ -0,0 +1,154 @@ +{ + "name": "flytekit", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "dependencies": { + "msgpack5": "^6.0.2" + } + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/bl": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-5.1.0.tgz", + "integrity": "sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ==", + "license": "MIT", + "dependencies": { + 
"buffer": "^6.0.3", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/msgpack5": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/msgpack5/-/msgpack5-6.0.2.tgz", + "integrity": "sha512-kBSpECAWslrciRF3jy6HkMckNa14j3VZwNUUe1ONO/yihs19MskiFnsWXm0Q0aPkDYDBRFvTKkEuEDY+HVxBvQ==", + "license": "MIT", + "dependencies": { + "bl": "^5.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.0.0", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": 
"sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 0000000000..6b25f8f14e --- /dev/null +++ b/package.json @@ -0,0 +1,5 @@ +{ + "dependencies": { + "msgpack5": "^6.0.2" + } +} From c4b641e08aec0fd01133f557fc69015ebc43d708 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 31 Jul 2024 07:09:56 +0800 Subject: [PATCH 30/38] Fix Secrets Signed-off-by: Future-Outlier --- flytekit/types/structured/snowflake.py | 2 - node_modules/.package-lock.json | 149 -- node_modules/base64-js/LICENSE | 21 - node_modules/base64-js/README.md | 34 - node_modules/base64-js/base64js.min.js | 1 - 
node_modules/base64-js/index.d.ts | 3 - node_modules/base64-js/index.js | 150 -- node_modules/base64-js/package.json | 47 - node_modules/bl/.github/dependabot.yml | 16 - .../bl/.github/workflows/test-and-release.yml | 61 - node_modules/bl/BufferList.d.ts | 382 --- node_modules/bl/BufferList.js | 396 ---- node_modules/bl/CHANGELOG.md | 17 - node_modules/bl/LICENSE.md | 13 - node_modules/bl/README.md | 247 -- node_modules/bl/bl.js | 84 - node_modules/bl/index.d.ts | 88 - node_modules/bl/package.json | 123 - node_modules/bl/test/convert.js | 21 - node_modules/bl/test/indexOf.js | 492 ---- node_modules/bl/test/isBufferList.js | 32 - node_modules/bl/test/test.js | 914 ------- node_modules/buffer/AUTHORS.md | 73 - node_modules/buffer/LICENSE | 21 - node_modules/buffer/README.md | 410 ---- node_modules/buffer/index.d.ts | 194 -- node_modules/buffer/index.js | 2106 ----------------- node_modules/buffer/package.json | 93 - node_modules/ieee754/LICENSE | 11 - node_modules/ieee754/README.md | 51 - node_modules/ieee754/index.d.ts | 10 - node_modules/ieee754/index.js | 85 - node_modules/ieee754/package.json | 52 - node_modules/inherits/LICENSE | 16 - node_modules/inherits/README.md | 42 - node_modules/inherits/inherits.js | 9 - node_modules/inherits/inherits_browser.js | 27 - node_modules/inherits/package.json | 29 - .../msgpack5/.github/workflows/ci.yml | 30 - node_modules/msgpack5/.jshintrc | 7 - node_modules/msgpack5/CONTRIBUTING.md | 41 - node_modules/msgpack5/LICENSE | 21 - node_modules/msgpack5/README.md | 250 -- .../msgpack5/benchmarks/encodedecode.js | 21 - .../msgpack5/benchmarks/parseshortmap.js | 21 - node_modules/msgpack5/example.js | 44 - node_modules/msgpack5/index.js | 91 - node_modules/msgpack5/lib/codecs/DateCodec.js | 131 - node_modules/msgpack5/lib/decoder.js | 268 --- node_modules/msgpack5/lib/encoder.js | 298 --- node_modules/msgpack5/lib/helpers.js | 20 - node_modules/msgpack5/lib/streams.js | 90 - node_modules/msgpack5/package.json | 52 - 
node_modules/msgpack5/spec.html | 459 ---- node_modules/msgpack5/spec.md | 499 ---- .../msgpack5/test/1-byte-length-buffers.js | 79 - .../msgpack5/test/1-byte-length-exts.js | 102 - .../msgpack5/test/1-byte-length-strings.js | 80 - .../test/1-byte-length-uint8arrays.js | 43 - .../msgpack5/test/15-elements-arrays.js | 84 - .../msgpack5/test/15-elements-maps.js | 119 - .../msgpack5/test/16-bits-signed-integers.js | 56 - .../test/16-bits-unsigned-integers.js | 56 - .../msgpack5/test/2-bytes-length-arrays.js | 84 - .../msgpack5/test/2-bytes-length-buffers.js | 79 - .../msgpack5/test/2-bytes-length-exts.js | 86 - .../msgpack5/test/2-bytes-length-maps.js | 85 - .../msgpack5/test/2-bytes-length-strings.js | 87 - .../test/2-bytes-length-uint8arrays.js | 43 - .../msgpack5/test/31-chars-strings.js | 59 - .../msgpack5/test/32-bits-signed-integers.js | 55 - .../test/32-bits-unsigned-integers.js | 56 - .../msgpack5/test/32-bytes-strings.js | 39 - .../msgpack5/test/4-bytes-length-arrays.js | 78 - .../msgpack5/test/4-bytes-length-buffers.js | 78 - .../msgpack5/test/4-bytes-length-exts.js | 86 - .../msgpack5/test/4-bytes-length-strings.js | 83 - .../test/4-bytes-length-uint8arrays.js | 42 - .../msgpack5/test/5-bits-negative-integers.js | 36 - .../msgpack5/test/64-bits-signed-integers.js | 48 - .../test/64-bits-unsigned-integers.js | 48 - .../msgpack5/test/7-bits-positive-integers.js | 36 - .../msgpack5/test/8-bits-positive-integers.js | 51 - .../msgpack5/test/8-bits-signed-integers.js | 53 - node_modules/msgpack5/test/NaN.js | 52 - node_modules/msgpack5/test/booleans.js | 21 - .../msgpack5/test/compatibility-mode.js | 73 - node_modules/msgpack5/test/datenull.js | 13 - node_modules/msgpack5/test/doubles.js | 57 - .../msgpack5/test/ext-custom-encode-check.js | 64 - node_modules/msgpack5/test/fixexts.js | 497 ---- node_modules/msgpack5/test/floats.js | 117 - node_modules/msgpack5/test/functions.js | 19 - .../msgpack5/test/levelup-encoding.js | 69 - 
.../msgpack5/test/map-with-object-key.js | 25 - .../msgpack5/test/nested-containers.js | 44 - node_modules/msgpack5/test/null.js | 16 - .../msgpack5/test/numerictypeasserts.js | 49 - .../test/object-prototype-poisoning.js | 49 - .../msgpack5/test/object-with-arrays.js | 69 - .../msgpack5/test/object-with-buffers.js | 33 - .../msgpack5/test/object-with-many-keys.js | 71 - .../msgpack5/test/object-with-strings.js | 32 - node_modules/msgpack5/test/prefer-map.js | 71 - node_modules/msgpack5/test/sparse-arrays.js | 18 - node_modules/msgpack5/test/streams.js | 261 -- node_modules/msgpack5/test/timestamps.js | 116 - node_modules/readable-stream/CONTRIBUTING.md | 38 - node_modules/readable-stream/GOVERNANCE.md | 136 -- node_modules/readable-stream/LICENSE | 47 - node_modules/readable-stream/README.md | 106 - .../readable-stream/errors-browser.js | 127 - node_modules/readable-stream/errors.js | 116 - .../readable-stream/experimentalWarning.js | 17 - .../readable-stream/lib/_stream_duplex.js | 126 - .../lib/_stream_passthrough.js | 37 - .../readable-stream/lib/_stream_readable.js | 1027 -------- .../readable-stream/lib/_stream_transform.js | 190 -- .../readable-stream/lib/_stream_writable.js | 641 ----- .../lib/internal/streams/async_iterator.js | 180 -- .../lib/internal/streams/buffer_list.js | 183 -- .../lib/internal/streams/destroy.js | 96 - .../lib/internal/streams/end-of-stream.js | 86 - .../lib/internal/streams/from-browser.js | 3 - .../lib/internal/streams/from.js | 52 - .../lib/internal/streams/pipeline.js | 86 - .../lib/internal/streams/state.js | 22 - .../lib/internal/streams/stream-browser.js | 1 - .../lib/internal/streams/stream.js | 1 - node_modules/readable-stream/package.json | 68 - .../readable-stream/readable-browser.js | 9 - node_modules/readable-stream/readable.js | 16 - node_modules/safe-buffer/LICENSE | 21 - node_modules/safe-buffer/README.md | 584 ----- node_modules/safe-buffer/index.d.ts | 187 -- node_modules/safe-buffer/index.js | 65 - 
node_modules/safe-buffer/package.json | 51 - node_modules/string_decoder/LICENSE | 48 - node_modules/string_decoder/README.md | 47 - .../string_decoder/lib/string_decoder.js | 296 --- node_modules/string_decoder/package.json | 34 - node_modules/util-deprecate/History.md | 16 - node_modules/util-deprecate/LICENSE | 24 - node_modules/util-deprecate/README.md | 53 - node_modules/util-deprecate/browser.js | 67 - node_modules/util-deprecate/node.js | 6 - node_modules/util-deprecate/package.json | 27 - 147 files changed, 17569 deletions(-) delete mode 100644 node_modules/.package-lock.json delete mode 100644 node_modules/base64-js/LICENSE delete mode 100644 node_modules/base64-js/README.md delete mode 100644 node_modules/base64-js/base64js.min.js delete mode 100644 node_modules/base64-js/index.d.ts delete mode 100644 node_modules/base64-js/index.js delete mode 100644 node_modules/base64-js/package.json delete mode 100644 node_modules/bl/.github/dependabot.yml delete mode 100644 node_modules/bl/.github/workflows/test-and-release.yml delete mode 100644 node_modules/bl/BufferList.d.ts delete mode 100644 node_modules/bl/BufferList.js delete mode 100644 node_modules/bl/CHANGELOG.md delete mode 100644 node_modules/bl/LICENSE.md delete mode 100644 node_modules/bl/README.md delete mode 100644 node_modules/bl/bl.js delete mode 100644 node_modules/bl/index.d.ts delete mode 100644 node_modules/bl/package.json delete mode 100644 node_modules/bl/test/convert.js delete mode 100644 node_modules/bl/test/indexOf.js delete mode 100644 node_modules/bl/test/isBufferList.js delete mode 100644 node_modules/bl/test/test.js delete mode 100644 node_modules/buffer/AUTHORS.md delete mode 100644 node_modules/buffer/LICENSE delete mode 100644 node_modules/buffer/README.md delete mode 100644 node_modules/buffer/index.d.ts delete mode 100644 node_modules/buffer/index.js delete mode 100644 node_modules/buffer/package.json delete mode 100644 node_modules/ieee754/LICENSE delete mode 100644 
node_modules/ieee754/README.md delete mode 100644 node_modules/ieee754/index.d.ts delete mode 100644 node_modules/ieee754/index.js delete mode 100644 node_modules/ieee754/package.json delete mode 100644 node_modules/inherits/LICENSE delete mode 100644 node_modules/inherits/README.md delete mode 100644 node_modules/inherits/inherits.js delete mode 100644 node_modules/inherits/inherits_browser.js delete mode 100644 node_modules/inherits/package.json delete mode 100644 node_modules/msgpack5/.github/workflows/ci.yml delete mode 100644 node_modules/msgpack5/.jshintrc delete mode 100644 node_modules/msgpack5/CONTRIBUTING.md delete mode 100644 node_modules/msgpack5/LICENSE delete mode 100644 node_modules/msgpack5/README.md delete mode 100644 node_modules/msgpack5/benchmarks/encodedecode.js delete mode 100644 node_modules/msgpack5/benchmarks/parseshortmap.js delete mode 100644 node_modules/msgpack5/example.js delete mode 100644 node_modules/msgpack5/index.js delete mode 100644 node_modules/msgpack5/lib/codecs/DateCodec.js delete mode 100644 node_modules/msgpack5/lib/decoder.js delete mode 100644 node_modules/msgpack5/lib/encoder.js delete mode 100644 node_modules/msgpack5/lib/helpers.js delete mode 100644 node_modules/msgpack5/lib/streams.js delete mode 100644 node_modules/msgpack5/package.json delete mode 100644 node_modules/msgpack5/spec.html delete mode 100644 node_modules/msgpack5/spec.md delete mode 100644 node_modules/msgpack5/test/1-byte-length-buffers.js delete mode 100644 node_modules/msgpack5/test/1-byte-length-exts.js delete mode 100644 node_modules/msgpack5/test/1-byte-length-strings.js delete mode 100644 node_modules/msgpack5/test/1-byte-length-uint8arrays.js delete mode 100644 node_modules/msgpack5/test/15-elements-arrays.js delete mode 100644 node_modules/msgpack5/test/15-elements-maps.js delete mode 100644 node_modules/msgpack5/test/16-bits-signed-integers.js delete mode 100644 node_modules/msgpack5/test/16-bits-unsigned-integers.js delete mode 100644 
node_modules/msgpack5/test/2-bytes-length-arrays.js delete mode 100644 node_modules/msgpack5/test/2-bytes-length-buffers.js delete mode 100644 node_modules/msgpack5/test/2-bytes-length-exts.js delete mode 100644 node_modules/msgpack5/test/2-bytes-length-maps.js delete mode 100644 node_modules/msgpack5/test/2-bytes-length-strings.js delete mode 100644 node_modules/msgpack5/test/2-bytes-length-uint8arrays.js delete mode 100644 node_modules/msgpack5/test/31-chars-strings.js delete mode 100644 node_modules/msgpack5/test/32-bits-signed-integers.js delete mode 100644 node_modules/msgpack5/test/32-bits-unsigned-integers.js delete mode 100644 node_modules/msgpack5/test/32-bytes-strings.js delete mode 100644 node_modules/msgpack5/test/4-bytes-length-arrays.js delete mode 100644 node_modules/msgpack5/test/4-bytes-length-buffers.js delete mode 100644 node_modules/msgpack5/test/4-bytes-length-exts.js delete mode 100644 node_modules/msgpack5/test/4-bytes-length-strings.js delete mode 100644 node_modules/msgpack5/test/4-bytes-length-uint8arrays.js delete mode 100644 node_modules/msgpack5/test/5-bits-negative-integers.js delete mode 100644 node_modules/msgpack5/test/64-bits-signed-integers.js delete mode 100644 node_modules/msgpack5/test/64-bits-unsigned-integers.js delete mode 100644 node_modules/msgpack5/test/7-bits-positive-integers.js delete mode 100644 node_modules/msgpack5/test/8-bits-positive-integers.js delete mode 100644 node_modules/msgpack5/test/8-bits-signed-integers.js delete mode 100644 node_modules/msgpack5/test/NaN.js delete mode 100644 node_modules/msgpack5/test/booleans.js delete mode 100644 node_modules/msgpack5/test/compatibility-mode.js delete mode 100644 node_modules/msgpack5/test/datenull.js delete mode 100644 node_modules/msgpack5/test/doubles.js delete mode 100644 node_modules/msgpack5/test/ext-custom-encode-check.js delete mode 100644 node_modules/msgpack5/test/fixexts.js delete mode 100644 node_modules/msgpack5/test/floats.js delete mode 100644 
node_modules/msgpack5/test/functions.js delete mode 100644 node_modules/msgpack5/test/levelup-encoding.js delete mode 100644 node_modules/msgpack5/test/map-with-object-key.js delete mode 100644 node_modules/msgpack5/test/nested-containers.js delete mode 100644 node_modules/msgpack5/test/null.js delete mode 100644 node_modules/msgpack5/test/numerictypeasserts.js delete mode 100644 node_modules/msgpack5/test/object-prototype-poisoning.js delete mode 100644 node_modules/msgpack5/test/object-with-arrays.js delete mode 100644 node_modules/msgpack5/test/object-with-buffers.js delete mode 100644 node_modules/msgpack5/test/object-with-many-keys.js delete mode 100644 node_modules/msgpack5/test/object-with-strings.js delete mode 100644 node_modules/msgpack5/test/prefer-map.js delete mode 100644 node_modules/msgpack5/test/sparse-arrays.js delete mode 100644 node_modules/msgpack5/test/streams.js delete mode 100644 node_modules/msgpack5/test/timestamps.js delete mode 100644 node_modules/readable-stream/CONTRIBUTING.md delete mode 100644 node_modules/readable-stream/GOVERNANCE.md delete mode 100644 node_modules/readable-stream/LICENSE delete mode 100644 node_modules/readable-stream/README.md delete mode 100644 node_modules/readable-stream/errors-browser.js delete mode 100644 node_modules/readable-stream/errors.js delete mode 100644 node_modules/readable-stream/experimentalWarning.js delete mode 100644 node_modules/readable-stream/lib/_stream_duplex.js delete mode 100644 node_modules/readable-stream/lib/_stream_passthrough.js delete mode 100644 node_modules/readable-stream/lib/_stream_readable.js delete mode 100644 node_modules/readable-stream/lib/_stream_transform.js delete mode 100644 node_modules/readable-stream/lib/_stream_writable.js delete mode 100644 node_modules/readable-stream/lib/internal/streams/async_iterator.js delete mode 100644 node_modules/readable-stream/lib/internal/streams/buffer_list.js delete mode 100644 
node_modules/readable-stream/lib/internal/streams/destroy.js delete mode 100644 node_modules/readable-stream/lib/internal/streams/end-of-stream.js delete mode 100644 node_modules/readable-stream/lib/internal/streams/from-browser.js delete mode 100644 node_modules/readable-stream/lib/internal/streams/from.js delete mode 100644 node_modules/readable-stream/lib/internal/streams/pipeline.js delete mode 100644 node_modules/readable-stream/lib/internal/streams/state.js delete mode 100644 node_modules/readable-stream/lib/internal/streams/stream-browser.js delete mode 100644 node_modules/readable-stream/lib/internal/streams/stream.js delete mode 100644 node_modules/readable-stream/package.json delete mode 100644 node_modules/readable-stream/readable-browser.js delete mode 100644 node_modules/readable-stream/readable.js delete mode 100644 node_modules/safe-buffer/LICENSE delete mode 100644 node_modules/safe-buffer/README.md delete mode 100644 node_modules/safe-buffer/index.d.ts delete mode 100644 node_modules/safe-buffer/index.js delete mode 100644 node_modules/safe-buffer/package.json delete mode 100644 node_modules/string_decoder/LICENSE delete mode 100644 node_modules/string_decoder/README.md delete mode 100644 node_modules/string_decoder/lib/string_decoder.js delete mode 100644 node_modules/string_decoder/package.json delete mode 100644 node_modules/util-deprecate/History.md delete mode 100644 node_modules/util-deprecate/LICENSE delete mode 100644 node_modules/util-deprecate/README.md delete mode 100644 node_modules/util-deprecate/browser.js delete mode 100644 node_modules/util-deprecate/node.js delete mode 100644 node_modules/util-deprecate/package.json diff --git a/flytekit/types/structured/snowflake.py b/flytekit/types/structured/snowflake.py index 5b2104041d..c603b55669 100644 --- a/flytekit/types/structured/snowflake.py +++ b/flytekit/types/structured/snowflake.py @@ -24,8 +24,6 @@ def get_private_key() -> bytes: from cryptography.hazmat.backends import 
default_backend from cryptography.hazmat.primitives import serialization - from flytekit.configuration.plugin import get_plugin - pk_string = flytekit.current_context().secrets.get("private_key", "snowflake", encode_mode="r") # Cryptography needs the string to be stripped and converted to bytes diff --git a/node_modules/.package-lock.json b/node_modules/.package-lock.json deleted file mode 100644 index d0d7582808..0000000000 --- a/node_modules/.package-lock.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "name": "flytekit", - "lockfileVersion": 3, - "requires": true, - "packages": { - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/bl": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-5.1.0.tgz", - "integrity": "sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ==", - "license": "MIT", - "dependencies": { - "buffer": "^6.0.3", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, - "node_modules/buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, 
- "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "BSD-3-Clause" - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "license": "ISC" - }, - "node_modules/msgpack5": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/msgpack5/-/msgpack5-6.0.2.tgz", - "integrity": "sha512-kBSpECAWslrciRF3jy6HkMckNa14j3VZwNUUe1ONO/yihs19MskiFnsWXm0Q0aPkDYDBRFvTKkEuEDY+HVxBvQ==", - "license": "MIT", - "dependencies": { - "bl": "^5.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.0.0", - "safe-buffer": "^5.1.2" - } - }, - "node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { 
- "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "license": "MIT" - } - } -} diff --git a/node_modules/base64-js/LICENSE b/node_modules/base64-js/LICENSE deleted file mode 100644 index 6d52b8acfb..0000000000 --- a/node_modules/base64-js/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Jameson Little - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/node_modules/base64-js/README.md b/node_modules/base64-js/README.md deleted file mode 100644 index b42a48f416..0000000000 --- a/node_modules/base64-js/README.md +++ /dev/null @@ -1,34 +0,0 @@ -base64-js -========= - -`base64-js` does basic base64 encoding/decoding in pure JS. - -[![build status](https://secure.travis-ci.org/beatgammit/base64-js.png)](http://travis-ci.org/beatgammit/base64-js) - -Many browsers already have base64 encoding/decoding functionality, but it is for text data, not all-purpose binary data. - -Sometimes encoding/decoding binary data in the browser is useful, and that is what this module does. - -## install - -With [npm](https://npmjs.org) do: - -`npm install base64-js` and `var base64js = require('base64-js')` - -For use in web browsers do: - -`` - -[Get supported base64-js with the Tidelift Subscription](https://tidelift.com/subscription/pkg/npm-base64-js?utm_source=npm-base64-js&utm_medium=referral&utm_campaign=readme) - -## methods - -`base64js` has three exposed functions, `byteLength`, `toByteArray` and `fromByteArray`, which both take a single argument. 
- -* `byteLength` - Takes a base64 string and returns length of byte array -* `toByteArray` - Takes a base64 string and returns a byte array -* `fromByteArray` - Takes a byte array and returns a base64 string - -## license - -MIT diff --git a/node_modules/base64-js/base64js.min.js b/node_modules/base64-js/base64js.min.js deleted file mode 100644 index 908ac83fd1..0000000000 --- a/node_modules/base64-js/base64js.min.js +++ /dev/null @@ -1 +0,0 @@ -(function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;b="undefined"==typeof window?"undefined"==typeof global?"undefined"==typeof self?this:self:global:window,b.base64js=a()}})(function(){return function(){function b(d,e,g){function a(j,i){if(!e[j]){if(!d[j]){var f="function"==typeof require&&require;if(!i&&f)return f(j,!0);if(h)return h(j,!0);var c=new Error("Cannot find module '"+j+"'");throw c.code="MODULE_NOT_FOUND",c}var k=e[j]={exports:{}};d[j][0].call(k.exports,function(b){var c=d[j][1][b];return a(c||b)},k,k.exports,b,d,e,g)}return e[j].exports}for(var h="function"==typeof require&&require,c=0;c>16,j[k++]=255&b>>8,j[k++]=255&b;return 2===h&&(b=l[a.charCodeAt(c)]<<2|l[a.charCodeAt(c+1)]>>4,j[k++]=255&b),1===h&&(b=l[a.charCodeAt(c)]<<10|l[a.charCodeAt(c+1)]<<4|l[a.charCodeAt(c+2)]>>2,j[k++]=255&b>>8,j[k++]=255&b),j}function g(a){return k[63&a>>18]+k[63&a>>12]+k[63&a>>6]+k[63&a]}function h(a,b,c){for(var d,e=[],f=b;fj?j:g+f));return 1===d?(b=a[c-1],e.push(k[b>>2]+k[63&b<<4]+"==")):2===d&&(b=(a[c-2]<<8)+a[c-1],e.push(k[b>>10]+k[63&b>>4]+k[63&b<<2]+"=")),e.join("")}c.byteLength=function(a){var b=d(a),c=b[0],e=b[1];return 3*(c+e)/4-e},c.toByteArray=f,c.fromByteArray=j;for(var k=[],l=[],m="undefined"==typeof Uint8Array?Array:Uint8Array,n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",o=0,p=n.length;o 0) { - throw new Error('Invalid string. 
Length must be a multiple of 4') - } - - // Trim off extra bytes after placeholder bytes are found - // See: https://github.com/beatgammit/base64-js/issues/42 - var validLen = b64.indexOf('=') - if (validLen === -1) validLen = len - - var placeHoldersLen = validLen === len - ? 0 - : 4 - (validLen % 4) - - return [validLen, placeHoldersLen] -} - -// base64 is 4/3 + up to two characters of the original data -function byteLength (b64) { - var lens = getLens(b64) - var validLen = lens[0] - var placeHoldersLen = lens[1] - return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen -} - -function _byteLength (b64, validLen, placeHoldersLen) { - return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen -} - -function toByteArray (b64) { - var tmp - var lens = getLens(b64) - var validLen = lens[0] - var placeHoldersLen = lens[1] - - var arr = new Arr(_byteLength(b64, validLen, placeHoldersLen)) - - var curByte = 0 - - // if there are placeholders, only get up to the last complete 4 chars - var len = placeHoldersLen > 0 - ? 
validLen - 4 - : validLen - - var i - for (i = 0; i < len; i += 4) { - tmp = - (revLookup[b64.charCodeAt(i)] << 18) | - (revLookup[b64.charCodeAt(i + 1)] << 12) | - (revLookup[b64.charCodeAt(i + 2)] << 6) | - revLookup[b64.charCodeAt(i + 3)] - arr[curByte++] = (tmp >> 16) & 0xFF - arr[curByte++] = (tmp >> 8) & 0xFF - arr[curByte++] = tmp & 0xFF - } - - if (placeHoldersLen === 2) { - tmp = - (revLookup[b64.charCodeAt(i)] << 2) | - (revLookup[b64.charCodeAt(i + 1)] >> 4) - arr[curByte++] = tmp & 0xFF - } - - if (placeHoldersLen === 1) { - tmp = - (revLookup[b64.charCodeAt(i)] << 10) | - (revLookup[b64.charCodeAt(i + 1)] << 4) | - (revLookup[b64.charCodeAt(i + 2)] >> 2) - arr[curByte++] = (tmp >> 8) & 0xFF - arr[curByte++] = tmp & 0xFF - } - - return arr -} - -function tripletToBase64 (num) { - return lookup[num >> 18 & 0x3F] + - lookup[num >> 12 & 0x3F] + - lookup[num >> 6 & 0x3F] + - lookup[num & 0x3F] -} - -function encodeChunk (uint8, start, end) { - var tmp - var output = [] - for (var i = start; i < end; i += 3) { - tmp = - ((uint8[i] << 16) & 0xFF0000) + - ((uint8[i + 1] << 8) & 0xFF00) + - (uint8[i + 2] & 0xFF) - output.push(tripletToBase64(tmp)) - } - return output.join('') -} - -function fromByteArray (uint8) { - var tmp - var len = uint8.length - var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes - var parts = [] - var maxChunkLength = 16383 // must be multiple of 3 - - // go through the array every three bytes, we'll deal with trailing stuff later - for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) { - parts.push(encodeChunk(uint8, i, (i + maxChunkLength) > len2 ? 
len2 : (i + maxChunkLength))) - } - - // pad the end with zeros, but make sure to not forget the extra bytes - if (extraBytes === 1) { - tmp = uint8[len - 1] - parts.push( - lookup[tmp >> 2] + - lookup[(tmp << 4) & 0x3F] + - '==' - ) - } else if (extraBytes === 2) { - tmp = (uint8[len - 2] << 8) + uint8[len - 1] - parts.push( - lookup[tmp >> 10] + - lookup[(tmp >> 4) & 0x3F] + - lookup[(tmp << 2) & 0x3F] + - '=' - ) - } - - return parts.join('') -} diff --git a/node_modules/base64-js/package.json b/node_modules/base64-js/package.json deleted file mode 100644 index c3972e39f2..0000000000 --- a/node_modules/base64-js/package.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "name": "base64-js", - "description": "Base64 encoding/decoding in pure JS", - "version": "1.5.1", - "author": "T. Jameson Little ", - "typings": "index.d.ts", - "bugs": { - "url": "https://github.com/beatgammit/base64-js/issues" - }, - "devDependencies": { - "babel-minify": "^0.5.1", - "benchmark": "^2.1.4", - "browserify": "^16.3.0", - "standard": "*", - "tape": "4.x" - }, - "homepage": "https://github.com/beatgammit/base64-js", - "keywords": [ - "base64" - ], - "license": "MIT", - "main": "index.js", - "repository": { - "type": "git", - "url": "git://github.com/beatgammit/base64-js.git" - }, - "scripts": { - "build": "browserify -s base64js -r ./ | minify > base64js.min.js", - "lint": "standard", - "test": "npm run lint && npm run unit", - "unit": "tape test/*.js" - }, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] -} diff --git a/node_modules/bl/.github/dependabot.yml b/node_modules/bl/.github/dependabot.yml deleted file mode 100644 index f4689933fb..0000000000 --- a/node_modules/bl/.github/dependabot.yml +++ /dev/null @@ -1,16 +0,0 @@ -version: 2 -updates: - - package-ecosystem: 'github-actions' - directory: '/' - 
schedule: - interval: 'daily' - commit-message: - prefix: 'chore' - include: 'scope' - - package-ecosystem: 'npm' - directory: '/' - schedule: - interval: 'daily' - commit-message: - prefix: 'chore' - include: 'scope' diff --git a/node_modules/bl/.github/workflows/test-and-release.yml b/node_modules/bl/.github/workflows/test-and-release.yml deleted file mode 100644 index 65887a0296..0000000000 --- a/node_modules/bl/.github/workflows/test-and-release.yml +++ /dev/null @@ -1,61 +0,0 @@ -name: Test & Maybe Release -on: [push, pull_request] -jobs: - test: - strategy: - fail-fast: false - matrix: - node: [14.x, 16.x, 18.x, lts/*, current] - os: [macos-latest, ubuntu-latest, windows-latest] - runs-on: ${{ matrix.os }} - steps: - - name: Checkout Repository - uses: actions/checkout@v3 - - name: Use Node.js ${{ matrix.node }} - uses: actions/setup-node@v3.5.1 - with: - node-version: ${{ matrix.node }} - - name: Install Dependencies - run: | - npm install --no-progress - - name: Run tests - run: | - npm config set script-shell bash - npm run test:ci - release: - name: Release - needs: test - runs-on: ubuntu-latest - if: github.event_name == 'push' && github.ref == 'refs/heads/master' - steps: - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - name: Setup Node.js - uses: actions/setup-node@v3.5.1 - with: - node-version: 14 - - name: Install dependencies - run: | - npm install --no-progress --no-package-lock --no-save - - name: Build - run: | - npm run build - - name: Install plugins - run: | - npm install \ - @semantic-release/commit-analyzer \ - conventional-changelog-conventionalcommits \ - @semantic-release/release-notes-generator \ - @semantic-release/npm \ - @semantic-release/github \ - @semantic-release/git \ - @semantic-release/changelog \ - --no-progress --no-package-lock --no-save - - name: Release - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - NPM_TOKEN: ${{ secrets.NPM_TOKEN }} - run: npx semantic-release - diff --git 
a/node_modules/bl/BufferList.d.ts b/node_modules/bl/BufferList.d.ts deleted file mode 100644 index 1561583997..0000000000 --- a/node_modules/bl/BufferList.d.ts +++ /dev/null @@ -1,382 +0,0 @@ -export type BufferListAcceptedTypes = - | Buffer - | BufferList - | Uint8Array - | BufferListAcceptedTypes[] - | string - | number; - -export interface BufferListConstructor { - new (initData?: BufferListAcceptedTypes): BufferList; - (initData?: BufferListAcceptedTypes): BufferList; - - /** - * Determines if the passed object is a BufferList. It will return true - * if the passed object is an instance of BufferList or BufferListStream - * and false otherwise. - * - * N.B. this won't return true for BufferList or BufferListStream instances - * created by versions of this library before this static method was added. - * - * @param other - */ - - isBufferList(other: unknown): boolean; -} - -interface BufferList { - prototype: Object - - /** - * Get the length of the list in bytes. This is the sum of the lengths - * of all of the buffers contained in the list, minus any initial offset - * for a semi-consumed buffer at the beginning. Should accurately - * represent the total number of bytes that can be read from the list. - */ - - length: number; - - /** - * Adds an additional buffer or BufferList to the internal list. - * this is returned so it can be chained. - * - * @param buffer - */ - - append(buffer: BufferListAcceptedTypes): this; - - /** - * Will return the byte at the specified index. - * @param index - */ - - get(index: number): number; - - /** - * Returns a new Buffer object containing the bytes within the - * range specified. Both start and end are optional and will - * default to the beginning and end of the list respectively. - * - * If the requested range spans a single internal buffer then a - * slice of that buffer will be returned which shares the original - * memory range of that Buffer. 
If the range spans multiple buffers - * then copy operations will likely occur to give you a uniform Buffer. - * - * @param start - * @param end - */ - - slice(start?: number, end?: number): Buffer; - - /** - * Returns a new BufferList object containing the bytes within the - * range specified. Both start and end are optional and will default - * to the beginning and end of the list respectively. - * - * No copies will be performed. All buffers in the result share - * memory with the original list. - * - * @param start - * @param end - */ - - shallowSlice(start?: number, end?: number): this; - - /** - * Copies the content of the list in the `dest` buffer, starting from - * `destStart` and containing the bytes within the range specified - * with `srcStart` to `srcEnd`. - * - * `destStart`, `start` and `end` are optional and will default to the - * beginning of the dest buffer, and the beginning and end of the - * list respectively. - * - * @param dest - * @param destStart - * @param srcStart - * @param srcEnd - */ - - copy( - dest: Buffer, - destStart?: number, - srcStart?: number, - srcEnd?: number - ): Buffer; - - /** - * Performs a shallow-copy of the list. The internal Buffers remains the - * same, so if you change the underlying Buffers, the change will be - * reflected in both the original and the duplicate. - * - * This method is needed if you want to call consume() or pipe() and - * still keep the original list. - * - * @example - * - * ```js - * var bl = new BufferListStream(); - * bl.append('hello'); - * bl.append(' world'); - * bl.append('\n'); - * bl.duplicate().pipe(process.stdout, { end: false }); - * - * console.log(bl.toString()) - * ``` - */ - - duplicate(): this; - - /** - * Will shift bytes off the start of the list. The number of bytes - * consumed don't need to line up with the sizes of the internal - * Buffers—initial offsets will be calculated accordingly in order - * to give you a consistent view of the data. 
- * - * @param bytes - */ - - consume(bytes?: number): void; - - /** - * Will return a string representation of the buffer. The optional - * `start` and `end` arguments are passed on to `slice()`, while - * the encoding is passed on to `toString()` of the resulting Buffer. - * - * See the [`Buffer#toString()`](http://nodejs.org/docs/latest/api/buffer.html#buffer_buf_tostring_encoding_start_end) - * documentation for more information. - * - * @param encoding - * @param start - * @param end - */ - - toString(encoding?: string, start?: number, end?: number): string; - - /** - * Will return the byte at the specified index. indexOf() method - * returns the first index at which a given element can be found - * in the BufferList, or -1 if it is not present. - * - * @param value - * @param byteOffset - * @param encoding - */ - - indexOf( - value: string | number | Uint8Array | BufferList | Buffer, - byteOffset?: number, - encoding?: string - ): number; - - /** - * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. - * - * @param offset - */ - - readDoubleBE: Buffer['readDoubleBE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. - * - * @param offset - */ - - readDoubleLE: Buffer['readDoubleLE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. 
- * - * @param offset - */ - - readFloatBE: Buffer['readFloatBE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. - * - * @param offset - */ - - readFloatLE: Buffer['readFloatLE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. - * - * @param offset - */ - - readInt32BE: Buffer['readInt32BE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. - * - * @param offset - */ - - readInt32LE: Buffer['readInt32LE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. - * - * @param offset - */ - - readUInt32BE: Buffer['readUInt32BE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. - * - * @param offset - */ - - readUInt32LE: Buffer['readUInt32LE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are implemented and will operate across internal Buffer boundaries transparently. - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. 
- * - * @param offset - */ - - readInt16BE: Buffer['readInt16BE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are - * implemented and will operate across internal Buffer boundaries transparently. - * - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) - * documentation for how these work. - * - * @param offset - */ - - readInt16LE: Buffer['readInt16LE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are - * implemented and will operate across internal Buffer boundaries transparently. - * - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) - * documentation for how these work. - * - * @param offset - */ - - readUInt16BE: Buffer['readUInt16BE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are - * implemented and will operate across internal Buffer boundaries transparently. - * - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) - * documentation for how these work. - * - * @param offset - */ - - readUInt16LE: Buffer['readUInt16LE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are - * implemented and will operate across internal Buffer boundaries transparently. - * - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) - * documentation for how these work. - * - * @param offset - */ - - readInt8: Buffer['readInt8']; - - /** - * All of the standard byte-reading methods of the Buffer interface are - * implemented and will operate across internal Buffer boundaries transparently. - * - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) - * documentation for how these work. - * - * @param offset - */ - - readUInt8: Buffer['readUInt8']; - - /** - * All of the standard byte-reading methods of the Buffer interface are - * implemented and will operate across internal Buffer boundaries transparently. 
- * - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) - * documentation for how these work. - * - * @param offset - */ - - readIntBE: Buffer['readIntBE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are - * implemented and will operate across internal Buffer boundaries transparently. - * - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) - * documentation for how these work. - * - * @param offset - */ - - readIntLE: Buffer['readIntLE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are - * implemented and will operate across internal Buffer boundaries transparently. - * - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) - * documentation for how these work. - * - * @param offset - */ - - readUIntBE: Buffer['readUIntBE']; - - /** - * All of the standard byte-reading methods of the Buffer interface are - * implemented and will operate across internal Buffer boundaries transparently. - * - * See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) - * documentation for how these work. - * - * @param offset - */ - - readUIntLE: Buffer['readUIntLE']; -} - -/** - * No arguments are required for the constructor, but you can initialise - * the list by passing in a single Buffer object or an array of Buffer - * objects. 
- * - * `new` is not strictly required, if you don't instantiate a new object, - * it will be done automatically for you so you can create a new instance - * simply with: - * - * ```js - * const { BufferList } = require('bl') - * const bl = BufferList() - * - * // equivalent to: - * - * const { BufferList } = require('bl') - * const bl = new BufferList() - * ``` - */ - -declare const BufferList: BufferListConstructor; diff --git a/node_modules/bl/BufferList.js b/node_modules/bl/BufferList.js deleted file mode 100644 index 471ee77889..0000000000 --- a/node_modules/bl/BufferList.js +++ /dev/null @@ -1,396 +0,0 @@ -'use strict' - -const { Buffer } = require('buffer') -const symbol = Symbol.for('BufferList') - -function BufferList (buf) { - if (!(this instanceof BufferList)) { - return new BufferList(buf) - } - - BufferList._init.call(this, buf) -} - -BufferList._init = function _init (buf) { - Object.defineProperty(this, symbol, { value: true }) - - this._bufs = [] - this.length = 0 - - if (buf) { - this.append(buf) - } -} - -BufferList.prototype._new = function _new (buf) { - return new BufferList(buf) -} - -BufferList.prototype._offset = function _offset (offset) { - if (offset === 0) { - return [0, 0] - } - - let tot = 0 - - for (let i = 0; i < this._bufs.length; i++) { - const _t = tot + this._bufs[i].length - if (offset < _t || i === this._bufs.length - 1) { - return [i, offset - tot] - } - tot = _t - } -} - -BufferList.prototype._reverseOffset = function (blOffset) { - const bufferId = blOffset[0] - let offset = blOffset[1] - - for (let i = 0; i < bufferId; i++) { - offset += this._bufs[i].length - } - - return offset -} - -BufferList.prototype.get = function get (index) { - if (index > this.length || index < 0) { - return undefined - } - - const offset = this._offset(index) - - return this._bufs[offset[0]][offset[1]] -} - -BufferList.prototype.slice = function slice (start, end) { - if (typeof start === 'number' && start < 0) { - start += this.length - } - - if 
(typeof end === 'number' && end < 0) { - end += this.length - } - - return this.copy(null, 0, start, end) -} - -BufferList.prototype.copy = function copy (dst, dstStart, srcStart, srcEnd) { - if (typeof srcStart !== 'number' || srcStart < 0) { - srcStart = 0 - } - - if (typeof srcEnd !== 'number' || srcEnd > this.length) { - srcEnd = this.length - } - - if (srcStart >= this.length) { - return dst || Buffer.alloc(0) - } - - if (srcEnd <= 0) { - return dst || Buffer.alloc(0) - } - - const copy = !!dst - const off = this._offset(srcStart) - const len = srcEnd - srcStart - let bytes = len - let bufoff = (copy && dstStart) || 0 - let start = off[1] - - // copy/slice everything - if (srcStart === 0 && srcEnd === this.length) { - if (!copy) { - // slice, but full concat if multiple buffers - return this._bufs.length === 1 - ? this._bufs[0] - : Buffer.concat(this._bufs, this.length) - } - - // copy, need to copy individual buffers - for (let i = 0; i < this._bufs.length; i++) { - this._bufs[i].copy(dst, bufoff) - bufoff += this._bufs[i].length - } - - return dst - } - - // easy, cheap case where it's a subset of one of the buffers - if (bytes <= this._bufs[off[0]].length - start) { - return copy - ? 
this._bufs[off[0]].copy(dst, dstStart, start, start + bytes) - : this._bufs[off[0]].slice(start, start + bytes) - } - - if (!copy) { - // a slice, we need something to copy in to - dst = Buffer.allocUnsafe(len) - } - - for (let i = off[0]; i < this._bufs.length; i++) { - const l = this._bufs[i].length - start - - if (bytes > l) { - this._bufs[i].copy(dst, bufoff, start) - bufoff += l - } else { - this._bufs[i].copy(dst, bufoff, start, start + bytes) - bufoff += l - break - } - - bytes -= l - - if (start) { - start = 0 - } - } - - // safeguard so that we don't return uninitialized memory - if (dst.length > bufoff) return dst.slice(0, bufoff) - - return dst -} - -BufferList.prototype.shallowSlice = function shallowSlice (start, end) { - start = start || 0 - end = typeof end !== 'number' ? this.length : end - - if (start < 0) { - start += this.length - } - - if (end < 0) { - end += this.length - } - - if (start === end) { - return this._new() - } - - const startOffset = this._offset(start) - const endOffset = this._offset(end) - const buffers = this._bufs.slice(startOffset[0], endOffset[0] + 1) - - if (endOffset[1] === 0) { - buffers.pop() - } else { - buffers[buffers.length - 1] = buffers[buffers.length - 1].slice(0, endOffset[1]) - } - - if (startOffset[1] !== 0) { - buffers[0] = buffers[0].slice(startOffset[1]) - } - - return this._new(buffers) -} - -BufferList.prototype.toString = function toString (encoding, start, end) { - return this.slice(start, end).toString(encoding) -} - -BufferList.prototype.consume = function consume (bytes) { - // first, normalize the argument, in accordance with how Buffer does it - bytes = Math.trunc(bytes) - // do nothing if not a positive number - if (Number.isNaN(bytes) || bytes <= 0) return this - - while (this._bufs.length) { - if (bytes >= this._bufs[0].length) { - bytes -= this._bufs[0].length - this.length -= this._bufs[0].length - this._bufs.shift() - } else { - this._bufs[0] = this._bufs[0].slice(bytes) - this.length -= bytes 
- break - } - } - - return this -} - -BufferList.prototype.duplicate = function duplicate () { - const copy = this._new() - - for (let i = 0; i < this._bufs.length; i++) { - copy.append(this._bufs[i]) - } - - return copy -} - -BufferList.prototype.append = function append (buf) { - if (buf == null) { - return this - } - - if (buf.buffer) { - // append a view of the underlying ArrayBuffer - this._appendBuffer(Buffer.from(buf.buffer, buf.byteOffset, buf.byteLength)) - } else if (Array.isArray(buf)) { - for (let i = 0; i < buf.length; i++) { - this.append(buf[i]) - } - } else if (this._isBufferList(buf)) { - // unwrap argument into individual BufferLists - for (let i = 0; i < buf._bufs.length; i++) { - this.append(buf._bufs[i]) - } - } else { - // coerce number arguments to strings, since Buffer(number) does - // uninitialized memory allocation - if (typeof buf === 'number') { - buf = buf.toString() - } - - this._appendBuffer(Buffer.from(buf)) - } - - return this -} - -BufferList.prototype._appendBuffer = function appendBuffer (buf) { - this._bufs.push(buf) - this.length += buf.length -} - -BufferList.prototype.indexOf = function (search, offset, encoding) { - if (encoding === undefined && typeof offset === 'string') { - encoding = offset - offset = undefined - } - - if (typeof search === 'function' || Array.isArray(search)) { - throw new TypeError('The "value" argument must be one of type string, Buffer, BufferList, or Uint8Array.') - } else if (typeof search === 'number') { - search = Buffer.from([search]) - } else if (typeof search === 'string') { - search = Buffer.from(search, encoding) - } else if (this._isBufferList(search)) { - search = search.slice() - } else if (Array.isArray(search.buffer)) { - search = Buffer.from(search.buffer, search.byteOffset, search.byteLength) - } else if (!Buffer.isBuffer(search)) { - search = Buffer.from(search) - } - - offset = Number(offset || 0) - - if (isNaN(offset)) { - offset = 0 - } - - if (offset < 0) { - offset = 
this.length + offset - } - - if (offset < 0) { - offset = 0 - } - - if (search.length === 0) { - return offset > this.length ? this.length : offset - } - - const blOffset = this._offset(offset) - let blIndex = blOffset[0] // index of which internal buffer we're working on - let buffOffset = blOffset[1] // offset of the internal buffer we're working on - - // scan over each buffer - for (; blIndex < this._bufs.length; blIndex++) { - const buff = this._bufs[blIndex] - - while (buffOffset < buff.length) { - const availableWindow = buff.length - buffOffset - - if (availableWindow >= search.length) { - const nativeSearchResult = buff.indexOf(search, buffOffset) - - if (nativeSearchResult !== -1) { - return this._reverseOffset([blIndex, nativeSearchResult]) - } - - buffOffset = buff.length - search.length + 1 // end of native search window - } else { - const revOffset = this._reverseOffset([blIndex, buffOffset]) - - if (this._match(revOffset, search)) { - return revOffset - } - - buffOffset++ - } - } - - buffOffset = 0 - } - - return -1 -} - -BufferList.prototype._match = function (offset, search) { - if (this.length - offset < search.length) { - return false - } - - for (let searchOffset = 0; searchOffset < search.length; searchOffset++) { - if (this.get(offset + searchOffset) !== search[searchOffset]) { - return false - } - } - return true -} - -;(function () { - const methods = { - readDoubleBE: 8, - readDoubleLE: 8, - readFloatBE: 4, - readFloatLE: 4, - readInt32BE: 4, - readInt32LE: 4, - readUInt32BE: 4, - readUInt32LE: 4, - readInt16BE: 2, - readInt16LE: 2, - readUInt16BE: 2, - readUInt16LE: 2, - readInt8: 1, - readUInt8: 1, - readIntBE: null, - readIntLE: null, - readUIntBE: null, - readUIntLE: null - } - - for (const m in methods) { - (function (m) { - if (methods[m] === null) { - BufferList.prototype[m] = function (offset, byteLength) { - return this.slice(offset, offset + byteLength)[m](0, byteLength) - } - } else { - BufferList.prototype[m] = function (offset 
= 0) { - return this.slice(offset, offset + methods[m])[m](0) - } - } - }(m)) - } -}()) - -// Used internally by the class and also as an indicator of this object being -// a `BufferList`. It's not possible to use `instanceof BufferList` in a browser -// environment because there could be multiple different copies of the -// BufferList class and some `BufferList`s might be `BufferList`s. -BufferList.prototype._isBufferList = function _isBufferList (b) { - return b instanceof BufferList || BufferList.isBufferList(b) -} - -BufferList.isBufferList = function isBufferList (b) { - return b != null && b[symbol] -} - -module.exports = BufferList diff --git a/node_modules/bl/CHANGELOG.md b/node_modules/bl/CHANGELOG.md deleted file mode 100644 index a6156dcf56..0000000000 --- a/node_modules/bl/CHANGELOG.md +++ /dev/null @@ -1,17 +0,0 @@ -## [5.1.0](https://github.com/rvagg/bl/compare/v5.0.0...v5.1.0) (2022-10-18) - - -### Features - -* added integrated TypeScript typings ([#108](https://github.com/rvagg/bl/issues/108)) ([433ff89](https://github.com/rvagg/bl/commit/433ff8942f47fab8a5c9d13b2c00989ccf8d0710)) - - -### Bug Fixes - -* windows support in tests ([387dfaf](https://github.com/rvagg/bl/commit/387dfaf9b2bca7849f12785436ceb01e42adac2c)) - - -### Trivial Changes - -* GH Actions, Dependabot, auto-release, remove Travis ([997f058](https://github.com/rvagg/bl/commit/997f058357de8f2a7f66998e80a72b491835573f)) -* **no-release:** bump standard from 16.0.4 to 17.0.0 ([#112](https://github.com/rvagg/bl/issues/112)) ([078bfe3](https://github.com/rvagg/bl/commit/078bfe33390d125297b1c946e5989c4aa9228961)) diff --git a/node_modules/bl/LICENSE.md b/node_modules/bl/LICENSE.md deleted file mode 100644 index ecbe516374..0000000000 --- a/node_modules/bl/LICENSE.md +++ /dev/null @@ -1,13 +0,0 @@ -The MIT License (MIT) -===================== - -Copyright (c) 2013-2019 bl contributors ----------------------------------- - -*bl contributors listed at * - -Permission is hereby granted, free 
of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/node_modules/bl/README.md b/node_modules/bl/README.md deleted file mode 100644 index 9680b1dcb4..0000000000 --- a/node_modules/bl/README.md +++ /dev/null @@ -1,247 +0,0 @@ -# bl *(BufferList)* - -[![Build Status](https://api.travis-ci.com/rvagg/bl.svg?branch=master)](https://travis-ci.com/rvagg/bl/) - -**A Node.js Buffer list collector, reader and streamer thingy.** - -[![NPM](https://nodei.co/npm/bl.svg)](https://nodei.co/npm/bl/) - -**bl** is a storage object for collections of Node Buffers, exposing them with the main Buffer readable API. Also works as a duplex stream so you can collect buffers from a stream that emits them and emit buffers to a stream that consumes them! - -The original buffers are kept intact and copies are only done as necessary. Any reads that require the use of a single original buffer will return a slice of that buffer only (which references the same memory as the original buffer). 
Reads that span buffers perform concatenation as required and return the results transparently. - -```js -const { BufferList } = require('bl') - -const bl = new BufferList() -bl.append(Buffer.from('abcd')) -bl.append(Buffer.from('efg')) -bl.append('hi') // bl will also accept & convert Strings -bl.append(Buffer.from('j')) -bl.append(Buffer.from([ 0x3, 0x4 ])) - -console.log(bl.length) // 12 - -console.log(bl.slice(0, 10).toString('ascii')) // 'abcdefghij' -console.log(bl.slice(3, 10).toString('ascii')) // 'defghij' -console.log(bl.slice(3, 6).toString('ascii')) // 'def' -console.log(bl.slice(3, 8).toString('ascii')) // 'defgh' -console.log(bl.slice(5, 10).toString('ascii')) // 'fghij' - -console.log(bl.indexOf('def')) // 3 -console.log(bl.indexOf('asdf')) // -1 - -// or just use toString! -console.log(bl.toString()) // 'abcdefghij\u0003\u0004' -console.log(bl.toString('ascii', 3, 8)) // 'defgh' -console.log(bl.toString('ascii', 5, 10)) // 'fghij' - -// other standard Buffer readables -console.log(bl.readUInt16BE(10)) // 0x0304 -console.log(bl.readUInt16LE(10)) // 0x0403 -``` - -Give it a callback in the constructor and use it just like **[concat-stream](https://github.com/maxogden/node-concat-stream)**: - -```js -const { BufferListStream } = require('bl') -const fs = require('fs') - -fs.createReadStream('README.md') - .pipe(BufferListStream((err, data) => { // note 'new' isn't strictly required - // `data` is a complete Buffer object containing the full data - console.log(data.toString()) - })) -``` - -Note that when you use the *callback* method like this, the resulting `data` parameter is a concatenation of all `Buffer` objects in the list. If you want to avoid the overhead of this concatenation (in cases of extreme performance consciousness), then avoid the *callback* method and just listen to `'end'` instead, like a standard Stream. 
- -Or to fetch a URL using [hyperquest](https://github.com/substack/hyperquest) (should work with [request](http://github.com/mikeal/request) and even plain Node http too!): - -```js -const hyperquest = require('hyperquest') -const { BufferListStream } = require('bl') - -const url = 'https://raw.github.com/rvagg/bl/master/README.md' - -hyperquest(url).pipe(BufferListStream((err, data) => { - console.log(data.toString()) -})) -``` - -Or, use it as a readable stream to recompose a list of Buffers to an output source: - -```js -const { BufferListStream } = require('bl') -const fs = require('fs') - -var bl = new BufferListStream() -bl.append(Buffer.from('abcd')) -bl.append(Buffer.from('efg')) -bl.append(Buffer.from('hi')) -bl.append(Buffer.from('j')) - -bl.pipe(fs.createWriteStream('gibberish.txt')) -``` - -## API - - * new BufferList([ buf ]) - * BufferList.isBufferList(obj) - * bl.length - * bl.append(buffer) - * bl.get(index) - * bl.indexOf(value[, byteOffset][, encoding]) - * bl.slice([ start[, end ] ]) - * bl.shallowSlice([ start[, end ] ]) - * bl.copy(dest, [ destStart, [ srcStart [, srcEnd ] ] ]) - * bl.duplicate() - * bl.consume(bytes) - * bl.toString([encoding, [ start, [ end ]]]) - * bl.readDoubleBE(), bl.readDoubleLE(), bl.readFloatBE(), bl.readFloatLE(), bl.readInt32BE(), bl.readInt32LE(), bl.readUInt32BE(), bl.readUInt32LE(), bl.readInt16BE(), bl.readInt16LE(), bl.readUInt16BE(), bl.readUInt16LE(), bl.readInt8(), bl.readUInt8() - * new BufferListStream([ callback ]) - --------------------------------------------------------- - -### new BufferList([ Buffer | Buffer array | BufferList | BufferList array | String ]) -No arguments are _required_ for the constructor, but you can initialise the list by passing in a single `Buffer` object or an array of `Buffer` objects. 
- -`new` is not strictly required, if you don't instantiate a new object, it will be done automatically for you so you can create a new instance simply with: - -```js -const { BufferList } = require('bl') -const bl = BufferList() - -// equivalent to: - -const { BufferList } = require('bl') -const bl = new BufferList() -``` - --------------------------------------------------------- - -### BufferList.isBufferList(obj) -Determines if the passed object is a `BufferList`. It will return `true` if the passed object is an instance of `BufferList` **or** `BufferListStream` and `false` otherwise. - -N.B. this won't return `true` for `BufferList` or `BufferListStream` instances created by versions of this library before this static method was added. - --------------------------------------------------------- - -### bl.length -Get the length of the list in bytes. This is the sum of the lengths of all of the buffers contained in the list, minus any initial offset for a semi-consumed buffer at the beginning. Should accurately represent the total number of bytes that can be read from the list. - --------------------------------------------------------- - -### bl.append(Buffer | Buffer array | BufferList | BufferList array | String) -`append(buffer)` adds an additional buffer or BufferList to the internal list. `this` is returned so it can be chained. - --------------------------------------------------------- - -### bl.get(index) -`get()` will return the byte at the specified index. - --------------------------------------------------------- - -### bl.indexOf(value[, byteOffset][, encoding]) -`get()` will return the byte at the specified index. -`indexOf()` method returns the first index at which a given element can be found in the BufferList, or -1 if it is not present. - --------------------------------------------------------- - -### bl.slice([ start, [ end ] ]) -`slice()` returns a new `Buffer` object containing the bytes within the range specified. 
Both `start` and `end` are optional and will default to the beginning and end of the list respectively. - -If the requested range spans a single internal buffer then a slice of that buffer will be returned which shares the original memory range of that Buffer. If the range spans multiple buffers then copy operations will likely occur to give you a uniform Buffer. - --------------------------------------------------------- - -### bl.shallowSlice([ start, [ end ] ]) -`shallowSlice()` returns a new `BufferList` object containing the bytes within the range specified. Both `start` and `end` are optional and will default to the beginning and end of the list respectively. - -No copies will be performed. All buffers in the result share memory with the original list. - --------------------------------------------------------- - -### bl.copy(dest, [ destStart, [ srcStart [, srcEnd ] ] ]) -`copy()` copies the content of the list in the `dest` buffer, starting from `destStart` and containing the bytes within the range specified with `srcStart` to `srcEnd`. `destStart`, `start` and `end` are optional and will default to the beginning of the `dest` buffer, and the beginning and end of the list respectively. - --------------------------------------------------------- - -### bl.duplicate() -`duplicate()` performs a **shallow-copy** of the list. The internal Buffers remains the same, so if you change the underlying Buffers, the change will be reflected in both the original and the duplicate. This method is needed if you want to call `consume()` or `pipe()` and still keep the original list.Example: - -```js -var bl = new BufferListStream() - -bl.append('hello') -bl.append(' world') -bl.append('\n') - -bl.duplicate().pipe(process.stdout, { end: false }) - -console.log(bl.toString()) -``` - --------------------------------------------------------- - -### bl.consume(bytes) -`consume()` will shift bytes *off the start of the list*. 
The number of bytes consumed don't need to line up with the sizes of the internal Buffers—initial offsets will be calculated accordingly in order to give you a consistent view of the data. - --------------------------------------------------------- - -### bl.toString([encoding, [ start, [ end ]]]) -`toString()` will return a string representation of the buffer. The optional `start` and `end` arguments are passed on to `slice()`, while the `encoding` is passed on to `toString()` of the resulting Buffer. See the [Buffer#toString()](http://nodejs.org/docs/latest/api/buffer.html#buffer_buf_tostring_encoding_start_end) documentation for more information. - --------------------------------------------------------- - -### bl.readDoubleBE(), bl.readDoubleLE(), bl.readFloatBE(), bl.readFloatLE(), bl.readInt32BE(), bl.readInt32LE(), bl.readUInt32BE(), bl.readUInt32LE(), bl.readInt16BE(), bl.readInt16LE(), bl.readUInt16BE(), bl.readUInt16LE(), bl.readInt8(), bl.readUInt8() - -All of the standard byte-reading methods of the `Buffer` interface are implemented and will operate across internal Buffer boundaries transparently. - -See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. - --------------------------------------------------------- - -### new BufferListStream([ callback | Buffer | Buffer array | BufferList | BufferList array | String ]) -**BufferListStream** is a Node **[Duplex Stream](http://nodejs.org/docs/latest/api/stream.html#stream_class_stream_duplex)**, so it can be read from and written to like a standard Node stream. You can also `pipe()` to and from a **BufferListStream** instance. - -The constructor takes an optional callback, if supplied, the callback will be called with an error argument followed by a reference to the **bl** instance, when `bl.end()` is called (i.e. from a piped stream). 
This is a convenient method of collecting the entire contents of a stream, particularly when the stream is *chunky*, such as a network stream. - -Normally, no arguments are required for the constructor, but you can initialise the list by passing in a single `Buffer` object or an array of `Buffer` object. - -`new` is not strictly required, if you don't instantiate a new object, it will be done automatically for you so you can create a new instance simply with: - -```js -const { BufferListStream } = require('bl') -const bl = BufferListStream() - -// equivalent to: - -const { BufferListStream } = require('bl') -const bl = new BufferListStream() -``` - -N.B. For backwards compatibility reasons, `BufferListStream` is the **default** export when you `require('bl')`: - -```js -const { BufferListStream } = require('bl') -// equivalent to: -const BufferListStream = require('bl') -``` - --------------------------------------------------------- - -## Contributors - -**bl** is brought to you by the following hackers: - - * [Rod Vagg](https://github.com/rvagg) - * [Matteo Collina](https://github.com/mcollina) - * [Jarett Cruger](https://github.com/jcrugzz) - - -## License & copyright - -Copyright (c) 2013-2019 bl contributors (listed above). - -bl is licensed under the MIT license. All rights not explicitly granted in the MIT license are reserved. See the included LICENSE.md file for more details. 
diff --git a/node_modules/bl/bl.js b/node_modules/bl/bl.js deleted file mode 100644 index 40228f8799..0000000000 --- a/node_modules/bl/bl.js +++ /dev/null @@ -1,84 +0,0 @@ -'use strict' - -const DuplexStream = require('readable-stream').Duplex -const inherits = require('inherits') -const BufferList = require('./BufferList') - -function BufferListStream (callback) { - if (!(this instanceof BufferListStream)) { - return new BufferListStream(callback) - } - - if (typeof callback === 'function') { - this._callback = callback - - const piper = function piper (err) { - if (this._callback) { - this._callback(err) - this._callback = null - } - }.bind(this) - - this.on('pipe', function onPipe (src) { - src.on('error', piper) - }) - this.on('unpipe', function onUnpipe (src) { - src.removeListener('error', piper) - }) - - callback = null - } - - BufferList._init.call(this, callback) - DuplexStream.call(this) -} - -inherits(BufferListStream, DuplexStream) -Object.assign(BufferListStream.prototype, BufferList.prototype) - -BufferListStream.prototype._new = function _new (callback) { - return new BufferListStream(callback) -} - -BufferListStream.prototype._write = function _write (buf, encoding, callback) { - this._appendBuffer(buf) - - if (typeof callback === 'function') { - callback() - } -} - -BufferListStream.prototype._read = function _read (size) { - if (!this.length) { - return this.push(null) - } - - size = Math.min(size, this.length) - this.push(this.slice(0, size)) - this.consume(size) -} - -BufferListStream.prototype.end = function end (chunk) { - DuplexStream.prototype.end.call(this, chunk) - - if (this._callback) { - this._callback(null, this.slice()) - this._callback = null - } -} - -BufferListStream.prototype._destroy = function _destroy (err, cb) { - this._bufs.length = 0 - this.length = 0 - cb(err) -} - -BufferListStream.prototype._isBufferList = function _isBufferList (b) { - return b instanceof BufferListStream || b instanceof BufferList || 
BufferListStream.isBufferList(b) -} - -BufferListStream.isBufferList = BufferList.isBufferList - -module.exports = BufferListStream -module.exports.BufferListStream = BufferListStream -module.exports.BufferList = BufferList diff --git a/node_modules/bl/index.d.ts b/node_modules/bl/index.d.ts deleted file mode 100644 index 07a8ee3d53..0000000000 --- a/node_modules/bl/index.d.ts +++ /dev/null @@ -1,88 +0,0 @@ -import { Duplex } from "readable-stream"; -import { - BufferList as BL, - BufferListConstructor, - BufferListAcceptedTypes, -} from "./BufferList"; - -type BufferListStreamInit = - | ((err: Error, buffer: Buffer) => void) - | BufferListAcceptedTypes; - -interface BufferListStreamConstructor { - new (initData?: BufferListStreamInit): BufferListStream; - (callback?: BufferListStreamInit): BufferListStream; - - /** - * Determines if the passed object is a BufferList. It will return true - * if the passed object is an instance of BufferList or BufferListStream - * and false otherwise. - * - * N.B. this won't return true for BufferList or BufferListStream instances - * created by versions of this library before this static method was added. - * - * @param other - */ - - isBufferList(other: unknown): boolean; - - /** - * Rexporting BufferList and BufferListStream to fix - * issue with require/commonjs import and "export = " below. - */ - - BufferList: BufferListConstructor; - BufferListStream: BufferListStreamConstructor; -} - -interface BufferListStream extends Duplex, BL { - prototype: BufferListStream & BL; -} - -/** - * BufferListStream is a Node Duplex Stream, so it can be read from - * and written to like a standard Node stream. You can also pipe() - * to and from a BufferListStream instance. - * - * The constructor takes an optional callback, if supplied, the - * callback will be called with an error argument followed by a - * reference to the bl instance, when bl.end() is called - * (i.e. from a piped stream). 
- * - * This is a convenient method of collecting the entire contents of - * a stream, particularly when the stream is chunky, such as a network - * stream. - * - * Normally, no arguments are required for the constructor, but you can - * initialise the list by passing in a single Buffer object or an array - * of Buffer object. - * - * `new` is not strictly required, if you don't instantiate a new object, - * it will be done automatically for you so you can create a new instance - * simply with: - * - * ```js - * const { BufferListStream } = require('bl'); - * const bl = BufferListStream(); - * - * // equivalent to: - * - * const { BufferListStream } = require('bl'); - * const bl = new BufferListStream(); - * ``` - * - * N.B. For backwards compatibility reasons, BufferListStream is the default - * export when you `require('bl')`: - * - * ```js - * const { BufferListStream } = require('bl') - * - * // equivalent to: - * - * const BufferListStream = require('bl') - * ``` - */ - -declare const BufferListStream: BufferListStreamConstructor; - -export = BufferListStream; diff --git a/node_modules/bl/package.json b/node_modules/bl/package.json deleted file mode 100644 index 92a08e3089..0000000000 --- a/node_modules/bl/package.json +++ /dev/null @@ -1,123 +0,0 @@ -{ - "name": "bl", - "version": "5.1.0", - "description": "Buffer List: collect buffers and access with a standard readable Buffer interface, streamable too!", - "license": "MIT", - "main": "bl.js", - "scripts": { - "lint": "standard *.js test/*.js", - "test": "npm run lint && npm run test:types && node test/test.js | faucet", - "test:ci": "npm run lint && node test/test.js && npm run test:types", - "test:types": "tsc --allowJs --noEmit test/test.js", - "build": "true" - }, - "repository": { - "type": "git", - "url": "https://github.com/rvagg/bl.git" - }, - "homepage": "https://github.com/rvagg/bl", - "authors": [ - "Rod Vagg (https://github.com/rvagg)", - "Matteo Collina (https://github.com/mcollina)", - "Jarett 
Cruger (https://github.com/jcrugzz)" - ], - "keywords": [ - "buffer", - "buffers", - "stream", - "awesomesauce" - ], - "dependencies": { - "buffer": "^6.0.3", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - }, - "devDependencies": { - "@types/readable-stream": "^2.3.13", - "faucet": "~0.0.1", - "standard": "^17.0.0", - "tape": "^5.2.2", - "typescript": "~4.7.3" - }, - "release": { - "branches": [ - "master" - ], - "plugins": [ - [ - "@semantic-release/commit-analyzer", - { - "preset": "conventionalcommits", - "releaseRules": [ - { - "breaking": true, - "release": "major" - }, - { - "revert": true, - "release": "patch" - }, - { - "type": "feat", - "release": "minor" - }, - { - "type": "fix", - "release": "patch" - }, - { - "type": "chore", - "release": "patch" - }, - { - "type": "docs", - "release": "patch" - }, - { - "type": "test", - "release": "patch" - }, - { - "scope": "no-release", - "release": false - } - ] - } - ], - [ - "@semantic-release/release-notes-generator", - { - "preset": "conventionalcommits", - "presetConfig": { - "types": [ - { - "type": "feat", - "section": "Features" - }, - { - "type": "fix", - "section": "Bug Fixes" - }, - { - "type": "chore", - "section": "Trivial Changes" - }, - { - "type": "docs", - "section": "Trivial Changes" - }, - { - "type": "test", - "section": "Tests" - } - ] - } - } - ], - "@semantic-release/changelog", - "@semantic-release/npm", - "@semantic-release/github", - "@semantic-release/git" - ] - } -} diff --git a/node_modules/bl/test/convert.js b/node_modules/bl/test/convert.js deleted file mode 100644 index 9f3e23599e..0000000000 --- a/node_modules/bl/test/convert.js +++ /dev/null @@ -1,21 +0,0 @@ -'use strict' - -const tape = require('tape') -const { BufferList, BufferListStream } = require('../') -const { Buffer } = require('buffer') - -tape('convert from BufferList to BufferListStream', (t) => { - const data = Buffer.from(`TEST-${Date.now()}`) - const bl = new BufferList(data) - const bls = new 
BufferListStream(bl) - t.ok(bl.slice().equals(bls.slice())) - t.end() -}) - -tape('convert from BufferListStream to BufferList', (t) => { - const data = Buffer.from(`TEST-${Date.now()}`) - const bls = new BufferListStream(data) - const bl = new BufferList(bls) - t.ok(bl.slice().equals(bls.slice())) - t.end() -}) diff --git a/node_modules/bl/test/indexOf.js b/node_modules/bl/test/indexOf.js deleted file mode 100644 index 62dcb01f3b..0000000000 --- a/node_modules/bl/test/indexOf.js +++ /dev/null @@ -1,492 +0,0 @@ -'use strict' - -const tape = require('tape') -const BufferList = require('../') -const { Buffer } = require('buffer') - -tape('indexOf single byte needle', (t) => { - const bl = new BufferList(['abcdefg', 'abcdefg', '12345']) - - t.equal(bl.indexOf('e'), 4) - t.equal(bl.indexOf('e', 5), 11) - t.equal(bl.indexOf('e', 12), -1) - t.equal(bl.indexOf('5'), 18) - - t.end() -}) - -tape('indexOf multiple byte needle', (t) => { - const bl = new BufferList(['abcdefg', 'abcdefg']) - - t.equal(bl.indexOf('ef'), 4) - t.equal(bl.indexOf('ef', 5), 11) - - t.end() -}) - -tape('indexOf multiple byte needles across buffer boundaries', (t) => { - const bl = new BufferList(['abcdefg', 'abcdefg']) - - t.equal(bl.indexOf('fgabc'), 5) - - t.end() -}) - -tape('indexOf takes a Uint8Array search', (t) => { - const bl = new BufferList(['abcdefg', 'abcdefg']) - const search = new Uint8Array([102, 103, 97, 98, 99]) // fgabc - - t.equal(bl.indexOf(search), 5) - - t.end() -}) - -tape('indexOf takes a buffer list search', (t) => { - const bl = new BufferList(['abcdefg', 'abcdefg']) - const search = new BufferList('fgabc') - - t.equal(bl.indexOf(search), 5) - - t.end() -}) - -tape('indexOf a zero byte needle', (t) => { - const b = new BufferList('abcdef') - const bufEmpty = Buffer.from('') - - t.equal(b.indexOf(''), 0) - t.equal(b.indexOf('', 1), 1) - t.equal(b.indexOf('', b.length + 1), b.length) - t.equal(b.indexOf('', Infinity), b.length) - t.equal(b.indexOf(bufEmpty), 0) - 
t.equal(b.indexOf(bufEmpty, 1), 1) - t.equal(b.indexOf(bufEmpty, b.length + 1), b.length) - t.equal(b.indexOf(bufEmpty, Infinity), b.length) - - t.end() -}) - -tape('indexOf buffers smaller and larger than the needle', (t) => { - const bl = new BufferList(['abcdefg', 'a', 'bcdefg', 'a', 'bcfgab']) - - t.equal(bl.indexOf('fgabc'), 5) - t.equal(bl.indexOf('fgabc', 6), 12) - t.equal(bl.indexOf('fgabc', 13), -1) - - t.end() -}) - -// only present in node 6+ -;(process.version.substr(1).split('.')[0] >= 6) && tape('indexOf latin1 and binary encoding', (t) => { - const b = new BufferList('abcdef') - - // test latin1 encoding - t.equal( - new BufferList(Buffer.from(b.toString('latin1'), 'latin1')) - .indexOf('d', 0, 'latin1'), - 3 - ) - t.equal( - new BufferList(Buffer.from(b.toString('latin1'), 'latin1')) - .indexOf(Buffer.from('d', 'latin1'), 0, 'latin1'), - 3 - ) - t.equal( - new BufferList(Buffer.from('aa\u00e8aa', 'latin1')) - .indexOf('\u00e8', 'latin1'), - 2 - ) - t.equal( - new BufferList(Buffer.from('\u00e8', 'latin1')) - .indexOf('\u00e8', 'latin1'), - 0 - ) - t.equal( - new BufferList(Buffer.from('\u00e8', 'latin1')) - .indexOf(Buffer.from('\u00e8', 'latin1'), 'latin1'), - 0 - ) - - // test binary encoding - t.equal( - new BufferList(Buffer.from(b.toString('binary'), 'binary')) - .indexOf('d', 0, 'binary'), - 3 - ) - t.equal( - new BufferList(Buffer.from(b.toString('binary'), 'binary')) - .indexOf(Buffer.from('d', 'binary'), 0, 'binary'), - 3 - ) - t.equal( - new BufferList(Buffer.from('aa\u00e8aa', 'binary')) - .indexOf('\u00e8', 'binary'), - 2 - ) - t.equal( - new BufferList(Buffer.from('\u00e8', 'binary')) - .indexOf('\u00e8', 'binary'), - 0 - ) - t.equal( - new BufferList(Buffer.from('\u00e8', 'binary')) - .indexOf(Buffer.from('\u00e8', 'binary'), 'binary'), - 0 - ) - - t.end() -}) - -tape('indexOf the entire nodejs10 buffer test suite', (t) => { - const b = new BufferList('abcdef') - const bufA = Buffer.from('a') - const bufBc = Buffer.from('bc') - const 
bufF = Buffer.from('f') - const bufZ = Buffer.from('z') - - const stringComparison = 'abcdef' - - t.equal(b.indexOf('a'), 0) - t.equal(b.indexOf('a', 1), -1) - t.equal(b.indexOf('a', -1), -1) - t.equal(b.indexOf('a', -4), -1) - t.equal(b.indexOf('a', -b.length), 0) - t.equal(b.indexOf('a', NaN), 0) - t.equal(b.indexOf('a', -Infinity), 0) - t.equal(b.indexOf('a', Infinity), -1) - t.equal(b.indexOf('bc'), 1) - t.equal(b.indexOf('bc', 2), -1) - t.equal(b.indexOf('bc', -1), -1) - t.equal(b.indexOf('bc', -3), -1) - t.equal(b.indexOf('bc', -5), 1) - t.equal(b.indexOf('bc', NaN), 1) - t.equal(b.indexOf('bc', -Infinity), 1) - t.equal(b.indexOf('bc', Infinity), -1) - t.equal(b.indexOf('f'), b.length - 1) - t.equal(b.indexOf('z'), -1) - - // empty search tests - t.equal(b.indexOf(bufA), 0) - t.equal(b.indexOf(bufA, 1), -1) - t.equal(b.indexOf(bufA, -1), -1) - t.equal(b.indexOf(bufA, -4), -1) - t.equal(b.indexOf(bufA, -b.length), 0) - t.equal(b.indexOf(bufA, NaN), 0) - t.equal(b.indexOf(bufA, -Infinity), 0) - t.equal(b.indexOf(bufA, Infinity), -1) - t.equal(b.indexOf(bufBc), 1) - t.equal(b.indexOf(bufBc, 2), -1) - t.equal(b.indexOf(bufBc, -1), -1) - t.equal(b.indexOf(bufBc, -3), -1) - t.equal(b.indexOf(bufBc, -5), 1) - t.equal(b.indexOf(bufBc, NaN), 1) - t.equal(b.indexOf(bufBc, -Infinity), 1) - t.equal(b.indexOf(bufBc, Infinity), -1) - t.equal(b.indexOf(bufF), b.length - 1) - t.equal(b.indexOf(bufZ), -1) - t.equal(b.indexOf(0x61), 0) - t.equal(b.indexOf(0x61, 1), -1) - t.equal(b.indexOf(0x61, -1), -1) - t.equal(b.indexOf(0x61, -4), -1) - t.equal(b.indexOf(0x61, -b.length), 0) - t.equal(b.indexOf(0x61, NaN), 0) - t.equal(b.indexOf(0x61, -Infinity), 0) - t.equal(b.indexOf(0x61, Infinity), -1) - t.equal(b.indexOf(0x0), -1) - - // test offsets - t.equal(b.indexOf('d', 2), 3) - t.equal(b.indexOf('f', 5), 5) - t.equal(b.indexOf('f', -1), 5) - t.equal(b.indexOf('f', 6), -1) - - t.equal(b.indexOf(Buffer.from('d'), 2), 3) - t.equal(b.indexOf(Buffer.from('f'), 5), 5) - 
t.equal(b.indexOf(Buffer.from('f'), -1), 5) - t.equal(b.indexOf(Buffer.from('f'), 6), -1) - - t.equal(Buffer.from('ff').indexOf(Buffer.from('f'), 1, 'ucs2'), -1) - - // test invalid and uppercase encoding - t.equal(b.indexOf('b', 'utf8'), 1) - t.equal(b.indexOf('b', 'UTF8'), 1) - t.equal(b.indexOf('62', 'HEX'), 1) - t.throws(() => b.indexOf('bad', 'enc'), TypeError) - - // test hex encoding - t.equal( - Buffer.from(b.toString('hex'), 'hex') - .indexOf('64', 0, 'hex'), - 3 - ) - t.equal( - Buffer.from(b.toString('hex'), 'hex') - .indexOf(Buffer.from('64', 'hex'), 0, 'hex'), - 3 - ) - - // test base64 encoding - t.equal( - Buffer.from(b.toString('base64'), 'base64') - .indexOf('ZA==', 0, 'base64'), - 3 - ) - t.equal( - Buffer.from(b.toString('base64'), 'base64') - .indexOf(Buffer.from('ZA==', 'base64'), 0, 'base64'), - 3 - ) - - // test ascii encoding - t.equal( - Buffer.from(b.toString('ascii'), 'ascii') - .indexOf('d', 0, 'ascii'), - 3 - ) - t.equal( - Buffer.from(b.toString('ascii'), 'ascii') - .indexOf(Buffer.from('d', 'ascii'), 0, 'ascii'), - 3 - ) - - // test optional offset with passed encoding - t.equal(Buffer.from('aaaa0').indexOf('30', 'hex'), 4) - t.equal(Buffer.from('aaaa00a').indexOf('3030', 'hex'), 4) - - { - // test usc2 encoding - const twoByteString = Buffer.from('\u039a\u0391\u03a3\u03a3\u0395', 'ucs2') - - t.equal(8, twoByteString.indexOf('\u0395', 4, 'ucs2')) - t.equal(6, twoByteString.indexOf('\u03a3', -4, 'ucs2')) - t.equal(4, twoByteString.indexOf('\u03a3', -6, 'ucs2')) - t.equal(4, twoByteString.indexOf( - Buffer.from('\u03a3', 'ucs2'), -6, 'ucs2')) - t.equal(-1, twoByteString.indexOf('\u03a3', -2, 'ucs2')) - } - - const mixedByteStringUcs2 = - Buffer.from('\u039a\u0391abc\u03a3\u03a3\u0395', 'ucs2') - - t.equal(6, mixedByteStringUcs2.indexOf('bc', 0, 'ucs2')) - t.equal(10, mixedByteStringUcs2.indexOf('\u03a3', 0, 'ucs2')) - t.equal(-1, mixedByteStringUcs2.indexOf('\u0396', 0, 'ucs2')) - - t.equal( - 6, 
mixedByteStringUcs2.indexOf(Buffer.from('bc', 'ucs2'), 0, 'ucs2')) - t.equal( - 10, mixedByteStringUcs2.indexOf(Buffer.from('\u03a3', 'ucs2'), 0, 'ucs2')) - t.equal( - -1, mixedByteStringUcs2.indexOf(Buffer.from('\u0396', 'ucs2'), 0, 'ucs2')) - - { - const twoByteString = Buffer.from('\u039a\u0391\u03a3\u03a3\u0395', 'ucs2') - - // Test single char pattern - t.equal(0, twoByteString.indexOf('\u039a', 0, 'ucs2')) - let index = twoByteString.indexOf('\u0391', 0, 'ucs2') - t.equal(2, index, `Alpha - at index ${index}`) - index = twoByteString.indexOf('\u03a3', 0, 'ucs2') - t.equal(4, index, `First Sigma - at index ${index}`) - index = twoByteString.indexOf('\u03a3', 6, 'ucs2') - t.equal(6, index, `Second Sigma - at index ${index}`) - index = twoByteString.indexOf('\u0395', 0, 'ucs2') - t.equal(8, index, `Epsilon - at index ${index}`) - index = twoByteString.indexOf('\u0392', 0, 'ucs2') - t.equal(-1, index, `Not beta - at index ${index}`) - - // Test multi-char pattern - index = twoByteString.indexOf('\u039a\u0391', 0, 'ucs2') - t.equal(0, index, `Lambda Alpha - at index ${index}`) - index = twoByteString.indexOf('\u0391\u03a3', 0, 'ucs2') - t.equal(2, index, `Alpha Sigma - at index ${index}`) - index = twoByteString.indexOf('\u03a3\u03a3', 0, 'ucs2') - t.equal(4, index, `Sigma Sigma - at index ${index}`) - index = twoByteString.indexOf('\u03a3\u0395', 0, 'ucs2') - t.equal(6, index, `Sigma Epsilon - at index ${index}`) - } - - const mixedByteStringUtf8 = Buffer.from('\u039a\u0391abc\u03a3\u03a3\u0395') - - t.equal(5, mixedByteStringUtf8.indexOf('bc')) - t.equal(5, mixedByteStringUtf8.indexOf('bc', 5)) - t.equal(5, mixedByteStringUtf8.indexOf('bc', -8)) - t.equal(7, mixedByteStringUtf8.indexOf('\u03a3')) - t.equal(-1, mixedByteStringUtf8.indexOf('\u0396')) - - // Test complex string indexOf algorithms. Only trigger for long strings. - // Long string that isn't a simple repeat of a shorter string. 
- let longString = 'A' - for (let i = 66; i < 76; i++) { // from 'B' to 'K' - longString = longString + String.fromCharCode(i) + longString - } - - const longBufferString = Buffer.from(longString) - - // pattern of 15 chars, repeated every 16 chars in long - let pattern = 'ABACABADABACABA' - for (let i = 0; i < longBufferString.length - pattern.length; i += 7) { - const index = longBufferString.indexOf(pattern, i) - t.equal((i + 15) & ~0xf, index, - `Long ABACABA...-string at index ${i}`) - } - - let index = longBufferString.indexOf('AJABACA') - t.equal(510, index, `Long AJABACA, First J - at index ${index}`) - index = longBufferString.indexOf('AJABACA', 511) - t.equal(1534, index, `Long AJABACA, Second J - at index ${index}`) - - pattern = 'JABACABADABACABA' - index = longBufferString.indexOf(pattern) - t.equal(511, index, `Long JABACABA..., First J - at index ${index}`) - index = longBufferString.indexOf(pattern, 512) - t.equal( - 1535, index, `Long JABACABA..., Second J - at index ${index}`) - - // Search for a non-ASCII string in a pure ASCII string. - const asciiString = Buffer.from( - 'somethingnotatallsinisterwhichalsoworks') - t.equal(-1, asciiString.indexOf('\x2061')) - t.equal(3, asciiString.indexOf('eth', 0)) - - // Search in string containing many non-ASCII chars. - const allCodePoints = [] - for (let i = 0; i < 65536; i++) { - allCodePoints[i] = i - } - - const allCharsString = String.fromCharCode.apply(String, allCodePoints) - const allCharsBufferUtf8 = Buffer.from(allCharsString) - const allCharsBufferUcs2 = Buffer.from(allCharsString, 'ucs2') - - // Search for string long enough to trigger complex search with ASCII pattern - // and UC16 subject. 
- t.equal(-1, allCharsBufferUtf8.indexOf('notfound')) - t.equal(-1, allCharsBufferUcs2.indexOf('notfound')) - - // Needle is longer than haystack, but only because it's encoded as UTF-16 - t.equal(Buffer.from('aaaa').indexOf('a'.repeat(4), 'ucs2'), -1) - - t.equal(Buffer.from('aaaa').indexOf('a'.repeat(4), 'utf8'), 0) - t.equal(Buffer.from('aaaa').indexOf('你好', 'ucs2'), -1) - - // Haystack has odd length, but the needle is UCS2. - t.equal(Buffer.from('aaaaa').indexOf('b', 'ucs2'), -1) - - { - // Find substrings in Utf8. - const lengths = [1, 3, 15] // Single char, simple and complex. - const indices = [0x5, 0x60, 0x400, 0x680, 0x7ee, 0xFF02, 0x16610, 0x2f77b] - for (let lengthIndex = 0; lengthIndex < lengths.length; lengthIndex++) { - for (let i = 0; i < indices.length; i++) { - const index = indices[i] - let length = lengths[lengthIndex] - - if (index + length > 0x7F) { - length = 2 * length - } - - if (index + length > 0x7FF) { - length = 3 * length - } - - if (index + length > 0xFFFF) { - length = 4 * length - } - - const patternBufferUtf8 = allCharsBufferUtf8.slice(index, index + length) - t.equal(index, allCharsBufferUtf8.indexOf(patternBufferUtf8)) - - const patternStringUtf8 = patternBufferUtf8.toString() - t.equal(index, allCharsBufferUtf8.indexOf(patternStringUtf8)) - } - } - } - - { - // Find substrings in Usc2. - const lengths = [2, 4, 16] // Single char, simple and complex. 
- const indices = [0x5, 0x65, 0x105, 0x205, 0x285, 0x2005, 0x2085, 0xfff0] - - for (let lengthIndex = 0; lengthIndex < lengths.length; lengthIndex++) { - for (let i = 0; i < indices.length; i++) { - const index = indices[i] * 2 - const length = lengths[lengthIndex] - - const patternBufferUcs2 = - allCharsBufferUcs2.slice(index, index + length) - t.equal( - index, allCharsBufferUcs2.indexOf(patternBufferUcs2, 0, 'ucs2')) - - const patternStringUcs2 = patternBufferUcs2.toString('ucs2') - t.equal( - index, allCharsBufferUcs2.indexOf(patternStringUcs2, 0, 'ucs2')) - } - } - } - - [ - () => {}, - {}, - [] - ].forEach((val) => { - t.throws(() => b.indexOf(val), TypeError, `"${JSON.stringify(val)}" should throw`) - }) - - // Test weird offset arguments. - // The following offsets coerce to NaN or 0, searching the whole Buffer - t.equal(b.indexOf('b', undefined), 1) - t.equal(b.indexOf('b', {}), 1) - t.equal(b.indexOf('b', 0), 1) - t.equal(b.indexOf('b', null), 1) - t.equal(b.indexOf('b', []), 1) - - // The following offset coerces to 2, in other words +[2] === 2 - t.equal(b.indexOf('b', [2]), -1) - - // Behavior should match String.indexOf() - t.equal( - b.indexOf('b', undefined), - stringComparison.indexOf('b', undefined)) - t.equal( - b.indexOf('b', {}), - stringComparison.indexOf('b', {})) - t.equal( - b.indexOf('b', 0), - stringComparison.indexOf('b', 0)) - t.equal( - b.indexOf('b', null), - stringComparison.indexOf('b', null)) - t.equal( - b.indexOf('b', []), - stringComparison.indexOf('b', [])) - t.equal( - b.indexOf('b', [2]), - stringComparison.indexOf('b', [2])) - - // test truncation of Number arguments to uint8 - { - const buf = Buffer.from('this is a test') - - t.equal(buf.indexOf(0x6973), 3) - t.equal(buf.indexOf(0x697320), 4) - t.equal(buf.indexOf(0x69732069), 2) - t.equal(buf.indexOf(0x697374657374), 0) - t.equal(buf.indexOf(0x69737374), 0) - t.equal(buf.indexOf(0x69737465), 11) - t.equal(buf.indexOf(0x69737465), 11) - t.equal(buf.indexOf(-140), 0) - 
t.equal(buf.indexOf(-152), 1) - t.equal(buf.indexOf(0xff), -1) - t.equal(buf.indexOf(0xffff), -1) - } - - // Test that Uint8Array arguments are okay. - { - const needle = new Uint8Array([0x66, 0x6f, 0x6f]) - const haystack = new BufferList(Buffer.from('a foo b foo')) - t.equal(haystack.indexOf(needle), 2) - } - - t.end() -}) diff --git a/node_modules/bl/test/isBufferList.js b/node_modules/bl/test/isBufferList.js deleted file mode 100644 index 9d895d59b3..0000000000 --- a/node_modules/bl/test/isBufferList.js +++ /dev/null @@ -1,32 +0,0 @@ -'use strict' - -const tape = require('tape') -const { BufferList, BufferListStream } = require('../') -const { Buffer } = require('buffer') - -tape('isBufferList positives', (t) => { - t.ok(BufferList.isBufferList(new BufferList())) - t.ok(BufferList.isBufferList(new BufferListStream())) - - t.end() -}) - -tape('isBufferList negatives', (t) => { - const types = [ - null, - undefined, - NaN, - true, - false, - {}, - [], - Buffer.alloc(0), - [Buffer.alloc(0)] - ] - - for (const obj of types) { - t.notOk(BufferList.isBufferList(obj)) - } - - t.end() -}) diff --git a/node_modules/bl/test/test.js b/node_modules/bl/test/test.js deleted file mode 100644 index 668dc170b3..0000000000 --- a/node_modules/bl/test/test.js +++ /dev/null @@ -1,914 +0,0 @@ -// @ts-check -'use strict' - -const tape = require('tape') -const crypto = require('crypto') -const fs = require('fs') -const path = require('path') -const os = require('os') -const BufferListStream = require('../') -const { Buffer } = require('buffer') - -/** - * This typedef allows us to add _bufs to the API without declaring it publicly on types. 
- * @typedef { BufferListStream & { _bufs?: Buffer[] }} BufferListStreamWithPrivate - */ - -/** - * Just for typechecking in js - * @type { NodeJS.Process & { browser?: boolean }} - */ - -const process = globalThis.process - -/** @type {BufferEncoding[]} */ -const encodings = ['ascii', 'utf8', 'utf-8', 'hex', 'binary', 'base64'] - -if (process.browser) { - encodings.push( - 'ucs2', - 'ucs-2', - 'utf16le', - /** - * This alias is not in typescript typings for BufferEncoding. Still have to fix - * @see https://nodejs.org/api/buffer.html#buffers-and-character-encodings - */ - // @ts-ignore - 'utf-16le' - ) -} - -require('./indexOf') -require('./isBufferList') -require('./convert') - -tape('single bytes from single buffer', function (t) { - const bl = new BufferListStream() - - bl.append(Buffer.from('abcd')) - - t.equal(bl.length, 4) - t.equal(bl.get(-1), undefined) - t.equal(bl.get(0), 97) - t.equal(bl.get(1), 98) - t.equal(bl.get(2), 99) - t.equal(bl.get(3), 100) - t.equal(bl.get(4), undefined) - - t.end() -}) - -tape('single bytes from multiple buffers', function (t) { - const bl = new BufferListStream() - - bl.append(Buffer.from('abcd')) - bl.append(Buffer.from('efg')) - bl.append(Buffer.from('hi')) - bl.append(Buffer.from('j')) - - t.equal(bl.length, 10) - - t.equal(bl.get(0), 97) - t.equal(bl.get(1), 98) - t.equal(bl.get(2), 99) - t.equal(bl.get(3), 100) - t.equal(bl.get(4), 101) - t.equal(bl.get(5), 102) - t.equal(bl.get(6), 103) - t.equal(bl.get(7), 104) - t.equal(bl.get(8), 105) - t.equal(bl.get(9), 106) - - t.end() -}) - -tape('multi bytes from single buffer', function (t) { - const bl = new BufferListStream() - - bl.append(Buffer.from('abcd')) - - t.equal(bl.length, 4) - - t.equal(bl.slice(0, 4).toString('ascii'), 'abcd') - t.equal(bl.slice(0, 3).toString('ascii'), 'abc') - t.equal(bl.slice(1, 4).toString('ascii'), 'bcd') - t.equal(bl.slice(-4, -1).toString('ascii'), 'abc') - - t.end() -}) - -tape('multi bytes from single buffer (negative indexes)', function 
(t) { - const bl = new BufferListStream() - - bl.append(Buffer.from('buffer')) - - t.equal(bl.length, 6) - - t.equal(bl.slice(-6, -1).toString('ascii'), 'buffe') - t.equal(bl.slice(-6, -2).toString('ascii'), 'buff') - t.equal(bl.slice(-5, -2).toString('ascii'), 'uff') - - t.end() -}) - -tape('multiple bytes from multiple buffers', function (t) { - const bl = new BufferListStream() - - bl.append(Buffer.from('abcd')) - bl.append(Buffer.from('efg')) - bl.append(Buffer.from('hi')) - bl.append(Buffer.from('j')) - - t.equal(bl.length, 10) - - t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') - t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') - t.equal(bl.slice(3, 6).toString('ascii'), 'def') - t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') - t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') - t.equal(bl.slice(-7, -4).toString('ascii'), 'def') - - t.end() -}) - -tape('multiple bytes from multiple buffer lists', function (t) { - const bl = new BufferListStream() - - bl.append(new BufferListStream([Buffer.from('abcd'), Buffer.from('efg')])) - bl.append(new BufferListStream([Buffer.from('hi'), Buffer.from('j')])) - - t.equal(bl.length, 10) - - t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') - - t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') - t.equal(bl.slice(3, 6).toString('ascii'), 'def') - t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') - t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') - - t.end() -}) - -// same data as previous test, just using nested constructors -tape('multiple bytes from crazy nested buffer lists', function (t) { - const bl = new BufferListStream() - - bl.append( - new BufferListStream([ - new BufferListStream([ - new BufferListStream(Buffer.from('abc')), - Buffer.from('d'), - new BufferListStream(Buffer.from('efg')) - ]), - new BufferListStream([Buffer.from('hi')]), - new BufferListStream(Buffer.from('j')) - ]) - ) - - t.equal(bl.length, 10) - - t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') - - 
t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') - t.equal(bl.slice(3, 6).toString('ascii'), 'def') - t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') - t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') - - t.end() -}) - -tape('append accepts arrays of Buffers', function (t) { - const bl = new BufferListStream() - - bl.append(Buffer.from('abc')) - bl.append([Buffer.from('def')]) - bl.append([Buffer.from('ghi'), Buffer.from('jkl')]) - bl.append([Buffer.from('mnop'), Buffer.from('qrstu'), Buffer.from('vwxyz')]) - t.equal(bl.length, 26) - t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') - - t.end() -}) - -tape('append accepts arrays of Uint8Arrays', function (t) { - const bl = new BufferListStream() - - bl.append(new Uint8Array([97, 98, 99])) - bl.append([Uint8Array.from([100, 101, 102])]) - bl.append([new Uint8Array([103, 104, 105]), new Uint8Array([106, 107, 108])]) - bl.append([new Uint8Array([109, 110, 111, 112]), new Uint8Array([113, 114, 115, 116, 117]), new Uint8Array([118, 119, 120, 121, 122])]) - t.equal(bl.length, 26) - t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') - - t.end() -}) - -tape('append accepts arrays of BufferLists', function (t) { - const bl = new BufferListStream() - - bl.append(Buffer.from('abc')) - bl.append([new BufferListStream('def')]) - bl.append( - new BufferListStream([Buffer.from('ghi'), new BufferListStream('jkl')]) - ) - bl.append([ - Buffer.from('mnop'), - new BufferListStream([Buffer.from('qrstu'), Buffer.from('vwxyz')]) - ]) - t.equal(bl.length, 26) - t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') - - t.end() -}) - -tape('append chainable', function (t) { - const bl = new BufferListStream() - - t.ok(bl.append(Buffer.from('abcd')) === bl) - t.ok(bl.append([Buffer.from('abcd')]) === bl) - t.ok(bl.append(new BufferListStream(Buffer.from('abcd'))) === bl) - t.ok(bl.append([new BufferListStream(Buffer.from('abcd'))]) === bl) - - t.end() -}) - -tape('append chainable 
(test results)', function (t) { - const bl = new BufferListStream('abc') - .append([new BufferListStream('def')]) - .append( - new BufferListStream([Buffer.from('ghi'), new BufferListStream('jkl')]) - ) - .append([ - Buffer.from('mnop'), - new BufferListStream([Buffer.from('qrstu'), Buffer.from('vwxyz')]) - ]) - - t.equal(bl.length, 26) - t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') - - t.end() -}) - -tape('consuming from multiple buffers', function (t) { - const bl = new BufferListStream() - - bl.append(Buffer.from('abcd')) - bl.append(Buffer.from('efg')) - bl.append(Buffer.from('hi')) - bl.append(Buffer.from('j')) - - t.equal(bl.length, 10) - - t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') - - bl.consume(3) - t.equal(bl.length, 7) - t.equal(bl.slice(0, 7).toString('ascii'), 'defghij') - - bl.consume(2) - t.equal(bl.length, 5) - t.equal(bl.slice(0, 5).toString('ascii'), 'fghij') - - bl.consume(1) - t.equal(bl.length, 4) - t.equal(bl.slice(0, 4).toString('ascii'), 'ghij') - - bl.consume(1) - t.equal(bl.length, 3) - t.equal(bl.slice(0, 3).toString('ascii'), 'hij') - - bl.consume(2) - t.equal(bl.length, 1) - t.equal(bl.slice(0, 1).toString('ascii'), 'j') - - t.end() -}) - -tape('complete consumption', function (t) { - /** @type {BufferListStreamWithPrivate} */ - const bl = new BufferListStream() - - bl.append(Buffer.from('a')) - bl.append(Buffer.from('b')) - - bl.consume(2) - - t.equal(bl.length, 0) - t.equal(bl._bufs.length, 0) - - t.end() -}) - -tape('test readUInt8 / readInt8', function (t) { - const buf1 = Buffer.alloc(1) - const buf2 = Buffer.alloc(3) - const buf3 = Buffer.alloc(3) - const bl = new BufferListStream() - - buf1[0] = 0x1 - buf2[1] = 0x3 - buf2[2] = 0x4 - buf3[0] = 0x23 - buf3[1] = 0x42 - - bl.append(buf1) - bl.append(buf2) - bl.append(buf3) - - t.equal(bl.readUInt8(), 0x1) - t.equal(bl.readUInt8(2), 0x3) - t.equal(bl.readInt8(2), 0x3) - t.equal(bl.readUInt8(3), 0x4) - t.equal(bl.readInt8(3), 0x4) - 
t.equal(bl.readUInt8(4), 0x23) - t.equal(bl.readInt8(4), 0x23) - t.equal(bl.readUInt8(5), 0x42) - t.equal(bl.readInt8(5), 0x42) - - t.end() -}) - -tape('test readUInt16LE / readUInt16BE / readInt16LE / readInt16BE', function (t) { - const buf1 = Buffer.alloc(1) - const buf2 = Buffer.alloc(3) - const buf3 = Buffer.alloc(3) - const bl = new BufferListStream() - - buf1[0] = 0x1 - buf2[1] = 0x3 - buf2[2] = 0x4 - buf3[0] = 0x23 - buf3[1] = 0x42 - - bl.append(buf1) - bl.append(buf2) - bl.append(buf3) - - t.equal(bl.readUInt16BE(), 0x0100) - t.equal(bl.readUInt16LE(), 0x0001) - t.equal(bl.readUInt16BE(2), 0x0304) - t.equal(bl.readUInt16LE(2), 0x0403) - t.equal(bl.readInt16BE(2), 0x0304) - t.equal(bl.readInt16LE(2), 0x0403) - t.equal(bl.readUInt16BE(3), 0x0423) - t.equal(bl.readUInt16LE(3), 0x2304) - t.equal(bl.readInt16BE(3), 0x0423) - t.equal(bl.readInt16LE(3), 0x2304) - t.equal(bl.readUInt16BE(4), 0x2342) - t.equal(bl.readUInt16LE(4), 0x4223) - t.equal(bl.readInt16BE(4), 0x2342) - t.equal(bl.readInt16LE(4), 0x4223) - - t.end() -}) - -tape('test readUInt32LE / readUInt32BE / readInt32LE / readInt32BE', function (t) { - const buf1 = Buffer.alloc(1) - const buf2 = Buffer.alloc(3) - const buf3 = Buffer.alloc(3) - const bl = new BufferListStream() - - buf1[0] = 0x1 - buf2[1] = 0x3 - buf2[2] = 0x4 - buf3[0] = 0x23 - buf3[1] = 0x42 - - bl.append(buf1) - bl.append(buf2) - bl.append(buf3) - - t.equal(bl.readUInt32BE(), 0x01000304) - t.equal(bl.readUInt32LE(), 0x04030001) - t.equal(bl.readUInt32BE(2), 0x03042342) - t.equal(bl.readUInt32LE(2), 0x42230403) - t.equal(bl.readInt32BE(2), 0x03042342) - t.equal(bl.readInt32LE(2), 0x42230403) - - t.end() -}) - -tape('test readUIntLE / readUIntBE / readIntLE / readIntBE', function (t) { - const buf1 = Buffer.alloc(1) - const buf2 = Buffer.alloc(3) - const buf3 = Buffer.alloc(3) - const bl = new BufferListStream() - - buf2[0] = 0x2 - buf2[1] = 0x3 - buf2[2] = 0x4 - buf3[0] = 0x23 - buf3[1] = 0x42 - buf3[2] = 0x61 - - bl.append(buf1) - 
bl.append(buf2) - bl.append(buf3) - - t.equal(bl.readUIntBE(1, 1), 0x02) - t.equal(bl.readUIntBE(1, 2), 0x0203) - t.equal(bl.readUIntBE(1, 3), 0x020304) - t.equal(bl.readUIntBE(1, 4), 0x02030423) - t.equal(bl.readUIntBE(1, 5), 0x0203042342) - t.equal(bl.readUIntBE(1, 6), 0x020304234261) - t.equal(bl.readUIntLE(1, 1), 0x02) - t.equal(bl.readUIntLE(1, 2), 0x0302) - t.equal(bl.readUIntLE(1, 3), 0x040302) - t.equal(bl.readUIntLE(1, 4), 0x23040302) - t.equal(bl.readUIntLE(1, 5), 0x4223040302) - t.equal(bl.readUIntLE(1, 6), 0x614223040302) - t.equal(bl.readIntBE(1, 1), 0x02) - t.equal(bl.readIntBE(1, 2), 0x0203) - t.equal(bl.readIntBE(1, 3), 0x020304) - t.equal(bl.readIntBE(1, 4), 0x02030423) - t.equal(bl.readIntBE(1, 5), 0x0203042342) - t.equal(bl.readIntBE(1, 6), 0x020304234261) - t.equal(bl.readIntLE(1, 1), 0x02) - t.equal(bl.readIntLE(1, 2), 0x0302) - t.equal(bl.readIntLE(1, 3), 0x040302) - t.equal(bl.readIntLE(1, 4), 0x23040302) - t.equal(bl.readIntLE(1, 5), 0x4223040302) - t.equal(bl.readIntLE(1, 6), 0x614223040302) - - t.end() -}) - -tape('test readFloatLE / readFloatBE', function (t) { - const buf1 = Buffer.alloc(1) - const buf2 = Buffer.alloc(3) - const buf3 = Buffer.alloc(3) - const bl = new BufferListStream() - - buf1[0] = 0x01 - buf2[1] = 0x00 - buf2[2] = 0x00 - buf3[0] = 0x80 - buf3[1] = 0x3f - - bl.append(buf1) - bl.append(buf2) - bl.append(buf3) - - const canonical = Buffer.concat([buf1, buf2, buf3]) - t.equal(bl.readFloatLE(), canonical.readFloatLE()) - t.equal(bl.readFloatBE(), canonical.readFloatBE()) - t.equal(bl.readFloatLE(2), canonical.readFloatLE(2)) - t.equal(bl.readFloatBE(2), canonical.readFloatBE(2)) - - t.end() -}) - -tape('test readDoubleLE / readDoubleBE', function (t) { - const buf1 = Buffer.alloc(1) - const buf2 = Buffer.alloc(3) - const buf3 = Buffer.alloc(10) - const bl = new BufferListStream() - - buf1[0] = 0x01 - buf2[1] = 0x55 - buf2[2] = 0x55 - buf3[0] = 0x55 - buf3[1] = 0x55 - buf3[2] = 0x55 - buf3[3] = 0x55 - buf3[4] = 0xd5 - 
buf3[5] = 0x3f - - bl.append(buf1) - bl.append(buf2) - bl.append(buf3) - - const canonical = Buffer.concat([buf1, buf2, buf3]) - t.equal(bl.readDoubleBE(), canonical.readDoubleBE()) - t.equal(bl.readDoubleLE(), canonical.readDoubleLE()) - t.equal(bl.readDoubleBE(2), canonical.readDoubleBE(2)) - t.equal(bl.readDoubleLE(2), canonical.readDoubleLE(2)) - - t.end() -}) - -tape('test toString', function (t) { - const bl = new BufferListStream() - - bl.append(Buffer.from('abcd')) - bl.append(Buffer.from('efg')) - bl.append(Buffer.from('hi')) - bl.append(Buffer.from('j')) - - t.equal(bl.toString('ascii', 0, 10), 'abcdefghij') - t.equal(bl.toString('ascii', 3, 10), 'defghij') - t.equal(bl.toString('ascii', 3, 6), 'def') - t.equal(bl.toString('ascii', 3, 8), 'defgh') - t.equal(bl.toString('ascii', 5, 10), 'fghij') - - t.end() -}) - -tape('test toString encoding', function (t) { - const bl = new BufferListStream() - const b = Buffer.from('abcdefghij\xff\x00') - - bl.append(Buffer.from('abcd')) - bl.append(Buffer.from('efg')) - bl.append(Buffer.from('hi')) - bl.append(Buffer.from('j')) - bl.append(Buffer.from('\xff\x00')) - - encodings.forEach(function (enc) { - t.equal(bl.toString(enc), b.toString(enc), enc) - }) - - t.end() -}) - -tape('uninitialized memory', function (t) { - const secret = crypto.randomBytes(256) - for (let i = 0; i < 1e6; i++) { - const clone = Buffer.from(secret) - const bl = new BufferListStream() - bl.append(Buffer.from('a')) - bl.consume(-1024) - const buf = bl.slice(1) - if (buf.indexOf(clone) !== -1) { - t.fail(`Match (at ${i})`) - break - } - } - t.end() -}) - -!process.browser && tape('test stream', function (t) { - const random = crypto.randomBytes(65534) - - const bl = new BufferListStream((err, buf) => { - t.ok(Buffer.isBuffer(buf)) - t.ok(err === null) - t.ok(random.equals(bl.slice())) - t.ok(random.equals(buf.slice())) - - bl.pipe(fs.createWriteStream(path.join(os.tmpdir(), 'bl_test_rnd_out.dat'))) - .on('close', function () { - const rndhash 
= crypto.createHash('md5').update(random).digest('hex') - const md5sum = crypto.createHash('md5') - const s = fs.createReadStream(path.join(os.tmpdir(), 'bl_test_rnd_out.dat')) - - s.on('data', md5sum.update.bind(md5sum)) - s.on('end', function () { - t.equal(rndhash, md5sum.digest('hex'), 'woohoo! correct hash!') - t.end() - }) - }) - }) - - fs.writeFileSync(path.join(os.tmpdir(), 'bl_test_rnd.dat'), random) - fs.createReadStream(path.join(os.tmpdir(), 'bl_test_rnd.dat')).pipe(bl) -}) - -tape('instantiation with Buffer', function (t) { - const buf = crypto.randomBytes(1024) - const buf2 = crypto.randomBytes(1024) - let b = BufferListStream(buf) - - t.equal(buf.toString('hex'), b.slice().toString('hex'), 'same buffer') - b = BufferListStream([buf, buf2]) - t.equal(b.slice().toString('hex'), Buffer.concat([buf, buf2]).toString('hex'), 'same buffer') - - t.end() -}) - -tape('test String appendage', function (t) { - const bl = new BufferListStream() - const b = Buffer.from('abcdefghij\xff\x00') - - bl.append('abcd') - bl.append('efg') - bl.append('hi') - bl.append('j') - bl.append('\xff\x00') - - encodings.forEach(function (enc) { - t.equal(bl.toString(enc), b.toString(enc)) - }) - - t.end() -}) - -tape('test Number appendage', function (t) { - const bl = new BufferListStream() - const b = Buffer.from('1234567890') - - bl.append(1234) - bl.append(567) - bl.append(89) - bl.append(0) - - encodings.forEach(function (enc) { - t.equal(bl.toString(enc), b.toString(enc)) - }) - - t.end() -}) - -tape('write nothing, should get empty buffer', function (t) { - t.plan(3) - BufferListStream(function (err, data) { - t.notOk(err, 'no error') - t.ok(Buffer.isBuffer(data), 'got a buffer') - t.equal(0, data.length, 'got a zero-length buffer') - t.end() - }).end() -}) - -tape('unicode string', function (t) { - t.plan(2) - - const inp1 = '\u2600' - const inp2 = '\u2603' - const exp = inp1 + ' and ' + inp2 - const bl = BufferListStream() - - bl.write(inp1) - bl.write(' and ') - 
bl.write(inp2) - t.equal(exp, bl.toString()) - t.equal(Buffer.from(exp).toString('hex'), bl.toString('hex')) -}) - -tape('should emit finish', function (t) { - const source = BufferListStream() - const dest = BufferListStream() - - source.write('hello') - source.pipe(dest) - - dest.on('finish', function () { - t.equal(dest.toString('utf8'), 'hello') - t.end() - }) -}) - -tape('basic copy', function (t) { - const buf = crypto.randomBytes(1024) - const buf2 = Buffer.alloc(1024) - const b = BufferListStream(buf) - - b.copy(buf2) - t.equal(b.slice().toString('hex'), buf2.toString('hex'), 'same buffer') - - t.end() -}) - -tape('copy after many appends', function (t) { - const buf = crypto.randomBytes(512) - const buf2 = Buffer.alloc(1024) - const b = BufferListStream(buf) - - b.append(buf) - b.copy(buf2) - t.equal(b.slice().toString('hex'), buf2.toString('hex'), 'same buffer') - - t.end() -}) - -tape('copy at a precise position', function (t) { - const buf = crypto.randomBytes(1004) - const buf2 = Buffer.alloc(1024) - const b = BufferListStream(buf) - - b.copy(buf2, 20) - t.equal(b.slice().toString('hex'), buf2.slice(20).toString('hex'), 'same buffer') - - t.end() -}) - -tape('copy starting from a precise location', function (t) { - const buf = crypto.randomBytes(10) - const buf2 = Buffer.alloc(5) - const b = BufferListStream(buf) - - b.copy(buf2, 0, 5) - t.equal(b.slice(5).toString('hex'), buf2.toString('hex'), 'same buffer') - - t.end() -}) - -tape('copy in an interval', function (t) { - const rnd = crypto.randomBytes(10) - const b = BufferListStream(rnd) // put the random bytes there - const actual = Buffer.alloc(3) - const expected = Buffer.alloc(3) - - rnd.copy(expected, 0, 5, 8) - b.copy(actual, 0, 5, 8) - - t.equal(actual.toString('hex'), expected.toString('hex'), 'same buffer') - - t.end() -}) - -tape('copy an interval between two buffers', function (t) { - const buf = crypto.randomBytes(10) - const buf2 = Buffer.alloc(10) - const b = BufferListStream(buf) - - 
b.append(buf) - b.copy(buf2, 0, 5, 15) - - t.equal(b.slice(5, 15).toString('hex'), buf2.toString('hex'), 'same buffer') - - t.end() -}) - -tape('shallow slice across buffer boundaries', function (t) { - const bl = new BufferListStream(['First', 'Second', 'Third']) - - t.equal(bl.shallowSlice(3, 13).toString(), 'stSecondTh') - - t.end() -}) - -tape('shallow slice within single buffer', function (t) { - t.plan(2) - - const bl = new BufferListStream(['First', 'Second', 'Third']) - - t.equal(bl.shallowSlice(5, 10).toString(), 'Secon') - t.equal(bl.shallowSlice(7, 10).toString(), 'con') - - t.end() -}) - -tape('shallow slice single buffer', function (t) { - t.plan(3) - - const bl = new BufferListStream(['First', 'Second', 'Third']) - - t.equal(bl.shallowSlice(0, 5).toString(), 'First') - t.equal(bl.shallowSlice(5, 11).toString(), 'Second') - t.equal(bl.shallowSlice(11, 16).toString(), 'Third') -}) - -tape('shallow slice with negative or omitted indices', function (t) { - t.plan(4) - - const bl = new BufferListStream(['First', 'Second', 'Third']) - - t.equal(bl.shallowSlice().toString(), 'FirstSecondThird') - t.equal(bl.shallowSlice(5).toString(), 'SecondThird') - t.equal(bl.shallowSlice(5, -3).toString(), 'SecondTh') - t.equal(bl.shallowSlice(-8).toString(), 'ondThird') -}) - -tape('shallow slice does not make a copy', function (t) { - t.plan(1) - - const buffers = [Buffer.from('First'), Buffer.from('Second'), Buffer.from('Third')] - const bl = new BufferListStream(buffers).shallowSlice(5, -3) - - buffers[1].fill('h') - buffers[2].fill('h') - - t.equal(bl.toString(), 'hhhhhhhh') -}) - -tape('shallow slice with 0 length', function (t) { - t.plan(1) - - const buffers = [Buffer.from('First'), Buffer.from('Second'), Buffer.from('Third')] - const bl = (new BufferListStream(buffers)).shallowSlice(0, 0) - - t.equal(bl.length, 0) -}) - -tape('shallow slice with 0 length from middle', function (t) { - t.plan(1) - - const buffers = [Buffer.from('First'), Buffer.from('Second'), 
Buffer.from('Third')] - const bl = (new BufferListStream(buffers)).shallowSlice(10, 10) - - t.equal(bl.length, 0) -}) - -tape('duplicate', function (t) { - t.plan(2) - - const bl = new BufferListStream('abcdefghij\xff\x00') - const dup = bl.duplicate() - - t.equal(bl.prototype, dup.prototype) - t.equal(bl.toString('hex'), dup.toString('hex')) -}) - -tape('destroy no pipe', function (t) { - t.plan(2) - - /** @type {BufferListStreamWithPrivate} */ - const bl = new BufferListStream('alsdkfja;lsdkfja;lsdk') - - bl.destroy() - - t.equal(bl._bufs.length, 0) - t.equal(bl.length, 0) -}) - -tape('destroy with error', function (t) { - t.plan(3) - - /** @type {BufferListStreamWithPrivate} */ - const bl = new BufferListStream('alsdkfja;lsdkfja;lsdk') - const err = new Error('kaboom') - - bl.destroy(err) - bl.on('error', function (_err) { - t.equal(_err, err) - }) - - t.equal(bl._bufs.length, 0) - t.equal(bl.length, 0) -}) - -!process.browser && tape('destroy with pipe before read end', function (t) { - t.plan(2) - - /** @type {BufferListStreamWithPrivate} */ - const bl = new BufferListStream() - fs.createReadStream(path.join(__dirname, '/test.js')) - .pipe(bl) - - bl.destroy() - - t.equal(bl._bufs.length, 0) - t.equal(bl.length, 0) -}) - -!process.browser && tape('destroy with pipe before read end with race', function (t) { - t.plan(2) - - /** @type {BufferListStreamWithPrivate} */ - const bl = new BufferListStream() - - fs.createReadStream(path.join(__dirname, '/test.js')) - .pipe(bl) - - setTimeout(function () { - bl.destroy() - setTimeout(function () { - t.equal(bl._bufs.length, 0) - t.equal(bl.length, 0) - }, 500) - }, 500) -}) - -!process.browser && tape('destroy with pipe after read end', function (t) { - t.plan(2) - - /** @type {BufferListStreamWithPrivate} */ - const bl = new BufferListStream() - fs.createReadStream(path.join(__dirname, '/test.js')) - .on('end', onEnd) - .pipe(bl) - - function onEnd () { - bl.destroy() - - t.equal(bl._bufs.length, 0) - 
t.equal(bl.length, 0) - } -}) - -!process.browser && tape('destroy with pipe while writing to a destination', function (t) { - t.plan(4) - - /** @type {BufferListStreamWithPrivate} */ - const bl = new BufferListStream() - const ds = new BufferListStream() - - fs.createReadStream(path.join(__dirname, '/test.js')) - .on('end', onEnd) - .pipe(bl) - - function onEnd () { - bl.pipe(ds) - - setTimeout(function () { - bl.destroy() - - t.equals(bl._bufs.length, 0) - t.equals(bl.length, 0) - - ds.destroy() - - t.equals(bl._bufs.length, 0) - t.equals(bl.length, 0) - }, 100) - } -}) - -!process.browser && tape('handle error', function (t) { - t.plan(2) - - fs.createReadStream('/does/not/exist').pipe(BufferListStream(function (err, data) { - t.ok(err instanceof Error, 'has error') - t.notOk(data, 'no data') - })) -}) diff --git a/node_modules/buffer/AUTHORS.md b/node_modules/buffer/AUTHORS.md deleted file mode 100644 index 468aa1908c..0000000000 --- a/node_modules/buffer/AUTHORS.md +++ /dev/null @@ -1,73 +0,0 @@ -# Authors - -#### Ordered by first contribution. 
- -- Romain Beauxis (toots@rastageeks.org) -- Tobias Koppers (tobias.koppers@googlemail.com) -- Janus (ysangkok@gmail.com) -- Rainer Dreyer (rdrey1@gmail.com) -- Tõnis Tiigi (tonistiigi@gmail.com) -- James Halliday (mail@substack.net) -- Michael Williamson (mike@zwobble.org) -- elliottcable (github@elliottcable.name) -- rafael (rvalle@livelens.net) -- Andrew Kelley (superjoe30@gmail.com) -- Andreas Madsen (amwebdk@gmail.com) -- Mike Brevoort (mike.brevoort@pearson.com) -- Brian White (mscdex@mscdex.net) -- Feross Aboukhadijeh (feross@feross.org) -- Ruben Verborgh (ruben@verborgh.org) -- eliang (eliang.cs@gmail.com) -- Jesse Tane (jesse.tane@gmail.com) -- Alfonso Boza (alfonso@cloud.com) -- Mathias Buus (mathiasbuus@gmail.com) -- Devon Govett (devongovett@gmail.com) -- Daniel Cousens (github@dcousens.com) -- Joseph Dykstra (josephdykstra@gmail.com) -- Parsha Pourkhomami (parshap+git@gmail.com) -- Damjan Košir (damjan.kosir@gmail.com) -- daverayment (dave.rayment@gmail.com) -- kawanet (u-suke@kawa.net) -- Linus Unnebäck (linus@folkdatorn.se) -- Nolan Lawson (nolan.lawson@gmail.com) -- Calvin Metcalf (calvin.metcalf@gmail.com) -- Koki Takahashi (hakatasiloving@gmail.com) -- Guy Bedford (guybedford@gmail.com) -- Jan Schär (jscissr@gmail.com) -- RaulTsc (tomescu.raul@gmail.com) -- Matthieu Monsch (monsch@alum.mit.edu) -- Dan Ehrenberg (littledan@chromium.org) -- Kirill Fomichev (fanatid@ya.ru) -- Yusuke Kawasaki (u-suke@kawa.net) -- DC (dcposch@dcpos.ch) -- John-David Dalton (john.david.dalton@gmail.com) -- adventure-yunfei (adventure030@gmail.com) -- Emil Bay (github@tixz.dk) -- Sam Sudar (sudar.sam@gmail.com) -- Volker Mische (volker.mische@gmail.com) -- David Walton (support@geekstocks.com) -- Сковорода Никита Андреевич (chalkerx@gmail.com) -- greenkeeper[bot] (greenkeeper[bot]@users.noreply.github.com) -- ukstv (sergey.ukustov@machinomy.com) -- Renée Kooi (renee@kooi.me) -- ranbochen (ranbochen@qq.com) -- Vladimir Borovik (bobahbdb@gmail.com) -- greenkeeper[bot] 
(23040076+greenkeeper[bot]@users.noreply.github.com) -- kumavis (aaron@kumavis.me) -- Sergey Ukustov (sergey.ukustov@machinomy.com) -- Fei Liu (liu.feiwood@gmail.com) -- Blaine Bublitz (blaine.bublitz@gmail.com) -- clement (clement@seald.io) -- Koushik Dutta (koushd@gmail.com) -- Jordan Harband (ljharb@gmail.com) -- Niklas Mischkulnig (mischnic@users.noreply.github.com) -- Nikolai Vavilov (vvnicholas@gmail.com) -- Fedor Nezhivoi (gyzerok@users.noreply.github.com) -- shuse2 (shus.toda@gmail.com) -- Peter Newman (peternewman@users.noreply.github.com) -- mathmakgakpak (44949126+mathmakgakpak@users.noreply.github.com) -- jkkang (jkkang@smartauth.kr) -- Deklan Webster (deklanw@gmail.com) -- Martin Heidegger (martin.heidegger@gmail.com) - -#### Generated by bin/update-authors.sh. diff --git a/node_modules/buffer/LICENSE b/node_modules/buffer/LICENSE deleted file mode 100644 index d6bf75dcf1..0000000000 --- a/node_modules/buffer/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) Feross Aboukhadijeh, and other contributors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/node_modules/buffer/README.md b/node_modules/buffer/README.md deleted file mode 100644 index 451e23576b..0000000000 --- a/node_modules/buffer/README.md +++ /dev/null @@ -1,410 +0,0 @@ -# buffer [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url] [![javascript style guide][standard-image]][standard-url] - -[travis-image]: https://img.shields.io/travis/feross/buffer/master.svg -[travis-url]: https://travis-ci.org/feross/buffer -[npm-image]: https://img.shields.io/npm/v/buffer.svg -[npm-url]: https://npmjs.org/package/buffer -[downloads-image]: https://img.shields.io/npm/dm/buffer.svg -[downloads-url]: https://npmjs.org/package/buffer -[standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg -[standard-url]: https://standardjs.com - -#### The buffer module from [node.js](https://nodejs.org/), for the browser. - -[![saucelabs][saucelabs-image]][saucelabs-url] - -[saucelabs-image]: https://saucelabs.com/browser-matrix/buffer.svg -[saucelabs-url]: https://saucelabs.com/u/buffer - -With [browserify](http://browserify.org), simply `require('buffer')` or use the `Buffer` global and you will get this module. - -The goal is to provide an API that is 100% identical to -[node's Buffer API](https://nodejs.org/api/buffer.html). Read the -[official docs](https://nodejs.org/api/buffer.html) for the full list of properties, -instance methods, and class methods that are supported. - -## features - -- Manipulate binary data like a boss, in all browsers! -- Super fast. 
Backed by Typed Arrays (`Uint8Array`/`ArrayBuffer`, not `Object`) -- Extremely small bundle size (**6.75KB minified + gzipped**, 51.9KB with comments) -- Excellent browser support (Chrome, Firefox, Edge, Safari 11+, iOS 11+, Android, etc.) -- Preserves Node API exactly, with one minor difference (see below) -- Square-bracket `buf[4]` notation works! -- Does not modify any browser prototypes or put anything on `window` -- Comprehensive test suite (including all buffer tests from node.js core) - -## install - -To use this module directly (without browserify), install it: - -```bash -npm install buffer -``` - -This module was previously called **native-buffer-browserify**, but please use **buffer** -from now on. - -If you do not use a bundler, you can use the [standalone script](https://bundle.run/buffer). - -## usage - -The module's API is identical to node's `Buffer` API. Read the -[official docs](https://nodejs.org/api/buffer.html) for the full list of properties, -instance methods, and class methods that are supported. - -As mentioned above, `require('buffer')` or use the `Buffer` global with -[browserify](http://browserify.org) and this module will automatically be included -in your bundle. Almost any npm module will work in the browser, even if it assumes that -the node `Buffer` API will be available. - -To depend on this module explicitly (without browserify), require it like this: - -```js -var Buffer = require('buffer/').Buffer // note: the trailing slash is important! -``` - -To require this module explicitly, use `require('buffer/')` which tells the node.js module -lookup algorithm (also used by browserify) to use the **npm module** named `buffer` -instead of the **node.js core** module named `buffer`! - - -## how does it work? - -The Buffer constructor returns instances of `Uint8Array` that have their prototype -changed to `Buffer.prototype`. 
Furthermore, `Buffer` is a subclass of `Uint8Array`, -so the returned instances will have all the node `Buffer` methods and the -`Uint8Array` methods. Square bracket notation works as expected -- it returns a -single octet. - -The `Uint8Array` prototype remains unmodified. - - -## tracking the latest node api - -This module tracks the Buffer API in the latest (unstable) version of node.js. The Buffer -API is considered **stable** in the -[node stability index](https://nodejs.org/docs/latest/api/documentation.html#documentation_stability_index), -so it is unlikely that there will ever be breaking changes. -Nonetheless, when/if the Buffer API changes in node, this module's API will change -accordingly. - -## related packages - -- [`buffer-reverse`](https://www.npmjs.com/package/buffer-reverse) - Reverse a buffer -- [`buffer-xor`](https://www.npmjs.com/package/buffer-xor) - Bitwise xor a buffer -- [`is-buffer`](https://www.npmjs.com/package/is-buffer) - Determine if an object is a Buffer without including the whole `Buffer` package - -## conversion packages - -### convert typed array to buffer - -Use [`typedarray-to-buffer`](https://www.npmjs.com/package/typedarray-to-buffer) to convert any kind of typed array to a `Buffer`. Does not perform a copy, so it's super fast. - -### convert buffer to typed array - -`Buffer` is a subclass of `Uint8Array` (which is a typed array). So there is no need to explicitly convert to typed array. Just use the buffer as a `Uint8Array`. - -### convert blob to buffer - -Use [`blob-to-buffer`](https://www.npmjs.com/package/blob-to-buffer) to convert a `Blob` to a `Buffer`. - -### convert buffer to blob - -To convert a `Buffer` to a `Blob`, use the `Blob` constructor: - -```js -var blob = new Blob([ buffer ]) -``` - -Optionally, specify a mimetype: - -```js -var blob = new Blob([ buffer ], { type: 'text/html' }) -``` - -### convert arraybuffer to buffer - -To convert an `ArrayBuffer` to a `Buffer`, use the `Buffer.from` function. 
Does not perform a copy, so it's super fast. - -```js -var buffer = Buffer.from(arrayBuffer) -``` - -### convert buffer to arraybuffer - -To convert a `Buffer` to an `ArrayBuffer`, use the `.buffer` property (which is present on all `Uint8Array` objects): - -```js -var arrayBuffer = buffer.buffer.slice( - buffer.byteOffset, buffer.byteOffset + buffer.byteLength -) -``` - -Alternatively, use the [`to-arraybuffer`](https://www.npmjs.com/package/to-arraybuffer) module. - -## performance - -See perf tests in `/perf`. - -`BrowserBuffer` is the browser `buffer` module (this repo). `Uint8Array` is included as a -sanity check (since `BrowserBuffer` uses `Uint8Array` under the hood, `Uint8Array` will -always be at least a bit faster). Finally, `NodeBuffer` is the node.js buffer module, -which is included to compare against. - -NOTE: Performance has improved since these benchmarks were taken. PR welcome to update the README. - -### Chrome 38 - -| Method | Operations | Accuracy | Sampled | Fastest | -|:-------|:-----------|:---------|:--------|:-------:| -| BrowserBuffer#bracket-notation | 11,457,464 ops/sec | ±0.86% | 66 | ✓ | -| Uint8Array#bracket-notation | 10,824,332 ops/sec | ±0.74% | 65 | | -| | | | | -| BrowserBuffer#concat | 450,532 ops/sec | ±0.76% | 68 | | -| Uint8Array#concat | 1,368,911 ops/sec | ±1.50% | 62 | ✓ | -| | | | | -| BrowserBuffer#copy(16000) | 903,001 ops/sec | ±0.96% | 67 | | -| Uint8Array#copy(16000) | 1,422,441 ops/sec | ±1.04% | 66 | ✓ | -| | | | | -| BrowserBuffer#copy(16) | 11,431,358 ops/sec | ±0.46% | 69 | | -| Uint8Array#copy(16) | 13,944,163 ops/sec | ±1.12% | 68 | ✓ | -| | | | | -| BrowserBuffer#new(16000) | 106,329 ops/sec | ±6.70% | 44 | | -| Uint8Array#new(16000) | 131,001 ops/sec | ±2.85% | 31 | ✓ | -| | | | | -| BrowserBuffer#new(16) | 1,554,491 ops/sec | ±1.60% | 65 | | -| Uint8Array#new(16) | 6,623,930 ops/sec | ±1.66% | 65 | ✓ | -| | | | | -| BrowserBuffer#readDoubleBE | 112,830 ops/sec | ±0.51% | 69 | ✓ | -| DataView#getFloat64 | 
93,500 ops/sec | ±0.57% | 68 | | -| | | | | -| BrowserBuffer#readFloatBE | 146,678 ops/sec | ±0.95% | 68 | ✓ | -| DataView#getFloat32 | 99,311 ops/sec | ±0.41% | 67 | | -| | | | | -| BrowserBuffer#readUInt32LE | 843,214 ops/sec | ±0.70% | 69 | ✓ | -| DataView#getUint32 | 103,024 ops/sec | ±0.64% | 67 | | -| | | | | -| BrowserBuffer#slice | 1,013,941 ops/sec | ±0.75% | 67 | | -| Uint8Array#subarray | 1,903,928 ops/sec | ±0.53% | 67 | ✓ | -| | | | | -| BrowserBuffer#writeFloatBE | 61,387 ops/sec | ±0.90% | 67 | | -| DataView#setFloat32 | 141,249 ops/sec | ±0.40% | 66 | ✓ | - - -### Firefox 33 - -| Method | Operations | Accuracy | Sampled | Fastest | -|:-------|:-----------|:---------|:--------|:-------:| -| BrowserBuffer#bracket-notation | 20,800,421 ops/sec | ±1.84% | 60 | | -| Uint8Array#bracket-notation | 20,826,235 ops/sec | ±2.02% | 61 | ✓ | -| | | | | -| BrowserBuffer#concat | 153,076 ops/sec | ±2.32% | 61 | | -| Uint8Array#concat | 1,255,674 ops/sec | ±8.65% | 52 | ✓ | -| | | | | -| BrowserBuffer#copy(16000) | 1,105,312 ops/sec | ±1.16% | 63 | | -| Uint8Array#copy(16000) | 1,615,911 ops/sec | ±0.55% | 66 | ✓ | -| | | | | -| BrowserBuffer#copy(16) | 16,357,599 ops/sec | ±0.73% | 68 | | -| Uint8Array#copy(16) | 31,436,281 ops/sec | ±1.05% | 68 | ✓ | -| | | | | -| BrowserBuffer#new(16000) | 52,995 ops/sec | ±6.01% | 35 | | -| Uint8Array#new(16000) | 87,686 ops/sec | ±5.68% | 45 | ✓ | -| | | | | -| BrowserBuffer#new(16) | 252,031 ops/sec | ±1.61% | 66 | | -| Uint8Array#new(16) | 8,477,026 ops/sec | ±0.49% | 68 | ✓ | -| | | | | -| BrowserBuffer#readDoubleBE | 99,871 ops/sec | ±0.41% | 69 | | -| DataView#getFloat64 | 285,663 ops/sec | ±0.70% | 68 | ✓ | -| | | | | -| BrowserBuffer#readFloatBE | 115,540 ops/sec | ±0.42% | 69 | | -| DataView#getFloat32 | 288,722 ops/sec | ±0.82% | 68 | ✓ | -| | | | | -| BrowserBuffer#readUInt32LE | 633,926 ops/sec | ±1.08% | 67 | ✓ | -| DataView#getUint32 | 294,808 ops/sec | ±0.79% | 64 | | -| | | | | -| BrowserBuffer#slice | 349,425 
ops/sec | ±0.46% | 69 | | -| Uint8Array#subarray | 5,965,819 ops/sec | ±0.60% | 65 | ✓ | -| | | | | -| BrowserBuffer#writeFloatBE | 59,980 ops/sec | ±0.41% | 67 | | -| DataView#setFloat32 | 317,634 ops/sec | ±0.63% | 68 | ✓ | - -### Safari 8 - -| Method | Operations | Accuracy | Sampled | Fastest | -|:-------|:-----------|:---------|:--------|:-------:| -| BrowserBuffer#bracket-notation | 10,279,729 ops/sec | ±2.25% | 56 | ✓ | -| Uint8Array#bracket-notation | 10,030,767 ops/sec | ±2.23% | 59 | | -| | | | | -| BrowserBuffer#concat | 144,138 ops/sec | ±1.38% | 65 | | -| Uint8Array#concat | 4,950,764 ops/sec | ±1.70% | 63 | ✓ | -| | | | | -| BrowserBuffer#copy(16000) | 1,058,548 ops/sec | ±1.51% | 64 | | -| Uint8Array#copy(16000) | 1,409,666 ops/sec | ±1.17% | 65 | ✓ | -| | | | | -| BrowserBuffer#copy(16) | 6,282,529 ops/sec | ±1.88% | 58 | | -| Uint8Array#copy(16) | 11,907,128 ops/sec | ±2.87% | 58 | ✓ | -| | | | | -| BrowserBuffer#new(16000) | 101,663 ops/sec | ±3.89% | 57 | | -| Uint8Array#new(16000) | 22,050,818 ops/sec | ±6.51% | 46 | ✓ | -| | | | | -| BrowserBuffer#new(16) | 176,072 ops/sec | ±2.13% | 64 | | -| Uint8Array#new(16) | 24,385,731 ops/sec | ±5.01% | 51 | ✓ | -| | | | | -| BrowserBuffer#readDoubleBE | 41,341 ops/sec | ±1.06% | 67 | | -| DataView#getFloat64 | 322,280 ops/sec | ±0.84% | 68 | ✓ | -| | | | | -| BrowserBuffer#readFloatBE | 46,141 ops/sec | ±1.06% | 65 | | -| DataView#getFloat32 | 337,025 ops/sec | ±0.43% | 69 | ✓ | -| | | | | -| BrowserBuffer#readUInt32LE | 151,551 ops/sec | ±1.02% | 66 | | -| DataView#getUint32 | 308,278 ops/sec | ±0.94% | 67 | ✓ | -| | | | | -| BrowserBuffer#slice | 197,365 ops/sec | ±0.95% | 66 | | -| Uint8Array#subarray | 9,558,024 ops/sec | ±3.08% | 58 | ✓ | -| | | | | -| BrowserBuffer#writeFloatBE | 17,518 ops/sec | ±1.03% | 63 | | -| DataView#setFloat32 | 319,751 ops/sec | ±0.48% | 68 | ✓ | - - -### Node 0.11.14 - -| Method | Operations | Accuracy | Sampled | Fastest | 
-|:-------|:-----------|:---------|:--------|:-------:| -| BrowserBuffer#bracket-notation | 10,489,828 ops/sec | ±3.25% | 90 | | -| Uint8Array#bracket-notation | 10,534,884 ops/sec | ±0.81% | 92 | ✓ | -| NodeBuffer#bracket-notation | 10,389,910 ops/sec | ±0.97% | 87 | | -| | | | | -| BrowserBuffer#concat | 487,830 ops/sec | ±2.58% | 88 | | -| Uint8Array#concat | 1,814,327 ops/sec | ±1.28% | 88 | ✓ | -| NodeBuffer#concat | 1,636,523 ops/sec | ±1.88% | 73 | | -| | | | | -| BrowserBuffer#copy(16000) | 1,073,665 ops/sec | ±0.77% | 90 | | -| Uint8Array#copy(16000) | 1,348,517 ops/sec | ±0.84% | 89 | ✓ | -| NodeBuffer#copy(16000) | 1,289,533 ops/sec | ±0.82% | 93 | | -| | | | | -| BrowserBuffer#copy(16) | 12,782,706 ops/sec | ±0.74% | 85 | | -| Uint8Array#copy(16) | 14,180,427 ops/sec | ±0.93% | 92 | ✓ | -| NodeBuffer#copy(16) | 11,083,134 ops/sec | ±1.06% | 89 | | -| | | | | -| BrowserBuffer#new(16000) | 141,678 ops/sec | ±3.30% | 67 | | -| Uint8Array#new(16000) | 161,491 ops/sec | ±2.96% | 60 | | -| NodeBuffer#new(16000) | 292,699 ops/sec | ±3.20% | 55 | ✓ | -| | | | | -| BrowserBuffer#new(16) | 1,655,466 ops/sec | ±2.41% | 82 | | -| Uint8Array#new(16) | 14,399,926 ops/sec | ±0.91% | 94 | ✓ | -| NodeBuffer#new(16) | 3,894,696 ops/sec | ±0.88% | 92 | | -| | | | | -| BrowserBuffer#readDoubleBE | 109,582 ops/sec | ±0.75% | 93 | ✓ | -| DataView#getFloat64 | 91,235 ops/sec | ±0.81% | 90 | | -| NodeBuffer#readDoubleBE | 88,593 ops/sec | ±0.96% | 81 | | -| | | | | -| BrowserBuffer#readFloatBE | 139,854 ops/sec | ±1.03% | 85 | ✓ | -| DataView#getFloat32 | 98,744 ops/sec | ±0.80% | 89 | | -| NodeBuffer#readFloatBE | 92,769 ops/sec | ±0.94% | 93 | | -| | | | | -| BrowserBuffer#readUInt32LE | 710,861 ops/sec | ±0.82% | 92 | | -| DataView#getUint32 | 117,893 ops/sec | ±0.84% | 91 | | -| NodeBuffer#readUInt32LE | 851,412 ops/sec | ±0.72% | 93 | ✓ | -| | | | | -| BrowserBuffer#slice | 1,673,877 ops/sec | ±0.73% | 94 | | -| Uint8Array#subarray | 6,919,243 ops/sec | ±0.67% | 90 | ✓ | 
-| NodeBuffer#slice | 4,617,604 ops/sec | ±0.79% | 93 | | -| | | | | -| BrowserBuffer#writeFloatBE | 66,011 ops/sec | ±0.75% | 93 | | -| DataView#setFloat32 | 127,760 ops/sec | ±0.72% | 93 | ✓ | -| NodeBuffer#writeFloatBE | 103,352 ops/sec | ±0.83% | 93 | | - -### iojs 1.8.1 - -| Method | Operations | Accuracy | Sampled | Fastest | -|:-------|:-----------|:---------|:--------|:-------:| -| BrowserBuffer#bracket-notation | 10,990,488 ops/sec | ±1.11% | 91 | | -| Uint8Array#bracket-notation | 11,268,757 ops/sec | ±0.65% | 97 | | -| NodeBuffer#bracket-notation | 11,353,260 ops/sec | ±0.83% | 94 | ✓ | -| | | | | -| BrowserBuffer#concat | 378,954 ops/sec | ±0.74% | 94 | | -| Uint8Array#concat | 1,358,288 ops/sec | ±0.97% | 87 | | -| NodeBuffer#concat | 1,934,050 ops/sec | ±1.11% | 78 | ✓ | -| | | | | -| BrowserBuffer#copy(16000) | 894,538 ops/sec | ±0.56% | 84 | | -| Uint8Array#copy(16000) | 1,442,656 ops/sec | ±0.71% | 96 | | -| NodeBuffer#copy(16000) | 1,457,898 ops/sec | ±0.53% | 92 | ✓ | -| | | | | -| BrowserBuffer#copy(16) | 12,870,457 ops/sec | ±0.67% | 95 | | -| Uint8Array#copy(16) | 16,643,989 ops/sec | ±0.61% | 93 | ✓ | -| NodeBuffer#copy(16) | 14,885,848 ops/sec | ±0.74% | 94 | | -| | | | | -| BrowserBuffer#new(16000) | 109,264 ops/sec | ±4.21% | 63 | | -| Uint8Array#new(16000) | 138,916 ops/sec | ±1.87% | 61 | | -| NodeBuffer#new(16000) | 281,449 ops/sec | ±3.58% | 51 | ✓ | -| | | | | -| BrowserBuffer#new(16) | 1,362,935 ops/sec | ±0.56% | 99 | | -| Uint8Array#new(16) | 6,193,090 ops/sec | ±0.64% | 95 | ✓ | -| NodeBuffer#new(16) | 4,745,425 ops/sec | ±1.56% | 90 | | -| | | | | -| BrowserBuffer#readDoubleBE | 118,127 ops/sec | ±0.59% | 93 | ✓ | -| DataView#getFloat64 | 107,332 ops/sec | ±0.65% | 91 | | -| NodeBuffer#readDoubleBE | 116,274 ops/sec | ±0.94% | 95 | | -| | | | | -| BrowserBuffer#readFloatBE | 150,326 ops/sec | ±0.58% | 95 | ✓ | -| DataView#getFloat32 | 110,541 ops/sec | ±0.57% | 98 | | -| NodeBuffer#readFloatBE | 121,599 ops/sec | ±0.60% | 87 | | 
-| | | | | -| BrowserBuffer#readUInt32LE | 814,147 ops/sec | ±0.62% | 93 | | -| DataView#getUint32 | 137,592 ops/sec | ±0.64% | 90 | | -| NodeBuffer#readUInt32LE | 931,650 ops/sec | ±0.71% | 96 | ✓ | -| | | | | -| BrowserBuffer#slice | 878,590 ops/sec | ±0.68% | 93 | | -| Uint8Array#subarray | 2,843,308 ops/sec | ±1.02% | 90 | | -| NodeBuffer#slice | 4,998,316 ops/sec | ±0.68% | 90 | ✓ | -| | | | | -| BrowserBuffer#writeFloatBE | 65,927 ops/sec | ±0.74% | 93 | | -| DataView#setFloat32 | 139,823 ops/sec | ±0.97% | 89 | ✓ | -| NodeBuffer#writeFloatBE | 135,763 ops/sec | ±0.65% | 96 | | -| | | | | - -## Testing the project - -First, install the project: - - npm install - -Then, to run tests in Node.js, run: - - npm run test-node - -To test locally in a browser, you can run: - - npm run test-browser-es5-local # For ES5 browsers that don't support ES6 - npm run test-browser-es6-local # For ES6 compliant browsers - -This will print out a URL that you can then open in a browser to run the tests, using [airtap](https://www.npmjs.com/package/airtap). - -To run automated browser tests using Saucelabs, ensure that your `SAUCE_USERNAME` and `SAUCE_ACCESS_KEY` environment variables are set, then run: - - npm test - -This is what's run in Travis, to check against various browsers. The list of browsers is kept in the `bin/airtap-es5.yml` and `bin/airtap-es6.yml` files. - -## JavaScript Standard Style - -This module uses [JavaScript Standard Style](https://github.com/feross/standard). - -[![JavaScript Style Guide](https://cdn.rawgit.com/feross/standard/master/badge.svg)](https://github.com/feross/standard) - -To test that the code conforms to the style, `npm install` and run: - - ./node_modules/.bin/standard - -## credit - -This was originally forked from [buffer-browserify](https://github.com/toots/buffer-browserify). - -## Security Policies and Procedures - -The `buffer` team and community take all security bugs in `buffer` seriously. 
Please see our [security policies and procedures](https://github.com/feross/security) document to learn how to report issues. - -## license - -MIT. Copyright (C) [Feross Aboukhadijeh](http://feross.org), and other contributors. Originally forked from an MIT-licensed module by Romain Beauxis. diff --git a/node_modules/buffer/index.d.ts b/node_modules/buffer/index.d.ts deleted file mode 100644 index 07096a2f72..0000000000 --- a/node_modules/buffer/index.d.ts +++ /dev/null @@ -1,194 +0,0 @@ -export class Buffer extends Uint8Array { - length: number - write(string: string, offset?: number, length?: number, encoding?: string): number; - toString(encoding?: string, start?: number, end?: number): string; - toJSON(): { type: 'Buffer', data: any[] }; - equals(otherBuffer: Buffer): boolean; - compare(otherBuffer: Uint8Array, targetStart?: number, targetEnd?: number, sourceStart?: number, sourceEnd?: number): number; - copy(targetBuffer: Buffer, targetStart?: number, sourceStart?: number, sourceEnd?: number): number; - slice(start?: number, end?: number): Buffer; - writeUIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; - writeUIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; - writeIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; - writeIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; - readUIntLE(offset: number, byteLength: number, noAssert?: boolean): number; - readUIntBE(offset: number, byteLength: number, noAssert?: boolean): number; - readIntLE(offset: number, byteLength: number, noAssert?: boolean): number; - readIntBE(offset: number, byteLength: number, noAssert?: boolean): number; - readUInt8(offset: number, noAssert?: boolean): number; - readUInt16LE(offset: number, noAssert?: boolean): number; - readUInt16BE(offset: number, noAssert?: boolean): number; - readUInt32LE(offset: number, noAssert?: boolean): number; - 
readUInt32BE(offset: number, noAssert?: boolean): number; - readBigUInt64LE(offset: number): BigInt; - readBigUInt64BE(offset: number): BigInt; - readInt8(offset: number, noAssert?: boolean): number; - readInt16LE(offset: number, noAssert?: boolean): number; - readInt16BE(offset: number, noAssert?: boolean): number; - readInt32LE(offset: number, noAssert?: boolean): number; - readInt32BE(offset: number, noAssert?: boolean): number; - readBigInt64LE(offset: number): BigInt; - readBigInt64BE(offset: number): BigInt; - readFloatLE(offset: number, noAssert?: boolean): number; - readFloatBE(offset: number, noAssert?: boolean): number; - readDoubleLE(offset: number, noAssert?: boolean): number; - readDoubleBE(offset: number, noAssert?: boolean): number; - reverse(): this; - swap16(): Buffer; - swap32(): Buffer; - swap64(): Buffer; - writeUInt8(value: number, offset: number, noAssert?: boolean): number; - writeUInt16LE(value: number, offset: number, noAssert?: boolean): number; - writeUInt16BE(value: number, offset: number, noAssert?: boolean): number; - writeUInt32LE(value: number, offset: number, noAssert?: boolean): number; - writeUInt32BE(value: number, offset: number, noAssert?: boolean): number; - writeBigUInt64LE(value: number, offset: number): BigInt; - writeBigUInt64BE(value: number, offset: number): BigInt; - writeInt8(value: number, offset: number, noAssert?: boolean): number; - writeInt16LE(value: number, offset: number, noAssert?: boolean): number; - writeInt16BE(value: number, offset: number, noAssert?: boolean): number; - writeInt32LE(value: number, offset: number, noAssert?: boolean): number; - writeInt32BE(value: number, offset: number, noAssert?: boolean): number; - writeBigInt64LE(value: number, offset: number): BigInt; - writeBigInt64BE(value: number, offset: number): BigInt; - writeFloatLE(value: number, offset: number, noAssert?: boolean): number; - writeFloatBE(value: number, offset: number, noAssert?: boolean): number; - writeDoubleLE(value: 
number, offset: number, noAssert?: boolean): number; - writeDoubleBE(value: number, offset: number, noAssert?: boolean): number; - fill(value: any, offset?: number, end?: number): this; - indexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number; - lastIndexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number; - includes(value: string | number | Buffer, byteOffset?: number, encoding?: string): boolean; - - /** - * Allocates a new buffer containing the given {str}. - * - * @param str String to store in buffer. - * @param encoding encoding to use, optional. Default is 'utf8' - */ - constructor (str: string, encoding?: string); - /** - * Allocates a new buffer of {size} octets. - * - * @param size count of octets to allocate. - */ - constructor (size: number); - /** - * Allocates a new buffer containing the given {array} of octets. - * - * @param array The octets to store. - */ - constructor (array: Uint8Array); - /** - * Produces a Buffer backed by the same allocated memory as - * the given {ArrayBuffer}. - * - * - * @param arrayBuffer The ArrayBuffer with which to share memory. - */ - constructor (arrayBuffer: ArrayBuffer); - /** - * Allocates a new buffer containing the given {array} of octets. - * - * @param array The octets to store. - */ - constructor (array: any[]); - /** - * Copies the passed {buffer} data onto a new {Buffer} instance. - * - * @param buffer The buffer to copy. - */ - constructor (buffer: Buffer); - prototype: Buffer; - /** - * Allocates a new Buffer using an {array} of octets. - * - * @param array - */ - static from(array: any[]): Buffer; - /** - * When passed a reference to the .buffer property of a TypedArray instance, - * the newly created Buffer will share the same allocated memory as the TypedArray. - * The optional {byteOffset} and {length} arguments specify a memory range - * within the {arrayBuffer} that will be shared by the Buffer. 
- * - * @param arrayBuffer The .buffer property of a TypedArray or a new ArrayBuffer() - * @param byteOffset - * @param length - */ - static from(arrayBuffer: ArrayBuffer, byteOffset?: number, length?: number): Buffer; - /** - * Copies the passed {buffer} data onto a new Buffer instance. - * - * @param buffer - */ - static from(buffer: Buffer | Uint8Array): Buffer; - /** - * Creates a new Buffer containing the given JavaScript string {str}. - * If provided, the {encoding} parameter identifies the character encoding. - * If not provided, {encoding} defaults to 'utf8'. - * - * @param str - */ - static from(str: string, encoding?: string): Buffer; - /** - * Returns true if {obj} is a Buffer - * - * @param obj object to test. - */ - static isBuffer(obj: any): obj is Buffer; - /** - * Returns true if {encoding} is a valid encoding argument. - * Valid string encodings in Node 0.12: 'ascii'|'utf8'|'utf16le'|'ucs2'(alias of 'utf16le')|'base64'|'binary'(deprecated)|'hex' - * - * @param encoding string to test. - */ - static isEncoding(encoding: string): boolean; - /** - * Gives the actual byte length of a string. encoding defaults to 'utf8'. - * This is not the same as String.prototype.length since that returns the number of characters in a string. - * - * @param string string to test. - * @param encoding encoding used to evaluate (defaults to 'utf8') - */ - static byteLength(string: string, encoding?: string): number; - /** - * Returns a buffer which is the result of concatenating all the buffers in the list together. - * - * If the list has no items, or if the totalLength is 0, then it returns a zero-length buffer. - * If the list has exactly one item, then the first item of the list is returned. - * If the list has more than one item, then a new Buffer is created. - * - * @param list An array of Buffer objects to concatenate - * @param totalLength Total length of the buffers when concatenated. - * If totalLength is not provided, it is read from the buffers in the list. 
However, this adds an additional loop to the function, so it is faster to provide the length explicitly. - */ - static concat(list: Uint8Array[], totalLength?: number): Buffer; - /** - * The same as buf1.compare(buf2). - */ - static compare(buf1: Uint8Array, buf2: Uint8Array): number; - /** - * Allocates a new buffer of {size} octets. - * - * @param size count of octets to allocate. - * @param fill if specified, buffer will be initialized by calling buf.fill(fill). - * If parameter is omitted, buffer will be filled with zeros. - * @param encoding encoding used for call to buf.fill while initializing - */ - static alloc(size: number, fill?: string | Buffer | number, encoding?: string): Buffer; - /** - * Allocates a new buffer of {size} octets, leaving memory not initialized, so the contents - * of the newly created Buffer are unknown and may contain sensitive data. - * - * @param size count of octets to allocate - */ - static allocUnsafe(size: number): Buffer; - /** - * Allocates a new non-pooled buffer of {size} octets, leaving memory not initialized, so the contents - * of the newly created Buffer are unknown and may contain sensitive data. - * - * @param size count of octets to allocate - */ - static allocUnsafeSlow(size: number): Buffer; -} diff --git a/node_modules/buffer/index.js b/node_modules/buffer/index.js deleted file mode 100644 index 7a0e9c2a12..0000000000 --- a/node_modules/buffer/index.js +++ /dev/null @@ -1,2106 +0,0 @@ -/*! - * The buffer module from node.js, for the browser. - * - * @author Feross Aboukhadijeh - * @license MIT - */ -/* eslint-disable no-proto */ - -'use strict' - -const base64 = require('base64-js') -const ieee754 = require('ieee754') -const customInspectSymbol = - (typeof Symbol === 'function' && typeof Symbol['for'] === 'function') // eslint-disable-line dot-notation - ? 
Symbol['for']('nodejs.util.inspect.custom') // eslint-disable-line dot-notation - : null - -exports.Buffer = Buffer -exports.SlowBuffer = SlowBuffer -exports.INSPECT_MAX_BYTES = 50 - -const K_MAX_LENGTH = 0x7fffffff -exports.kMaxLength = K_MAX_LENGTH - -/** - * If `Buffer.TYPED_ARRAY_SUPPORT`: - * === true Use Uint8Array implementation (fastest) - * === false Print warning and recommend using `buffer` v4.x which has an Object - * implementation (most compatible, even IE6) - * - * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+, - * Opera 11.6+, iOS 4.2+. - * - * We report that the browser does not support typed arrays if the are not subclassable - * using __proto__. Firefox 4-29 lacks support for adding new properties to `Uint8Array` - * (See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438). IE 10 lacks support - * for __proto__ and has a buggy typed array implementation. - */ -Buffer.TYPED_ARRAY_SUPPORT = typedArraySupport() - -if (!Buffer.TYPED_ARRAY_SUPPORT && typeof console !== 'undefined' && - typeof console.error === 'function') { - console.error( - 'This browser lacks typed array (Uint8Array) support which is required by ' + - '`buffer` v5.x. Use `buffer` v4.x if you require old browser support.' - ) -} - -function typedArraySupport () { - // Can typed array instances can be augmented? 
- try { - const arr = new Uint8Array(1) - const proto = { foo: function () { return 42 } } - Object.setPrototypeOf(proto, Uint8Array.prototype) - Object.setPrototypeOf(arr, proto) - return arr.foo() === 42 - } catch (e) { - return false - } -} - -Object.defineProperty(Buffer.prototype, 'parent', { - enumerable: true, - get: function () { - if (!Buffer.isBuffer(this)) return undefined - return this.buffer - } -}) - -Object.defineProperty(Buffer.prototype, 'offset', { - enumerable: true, - get: function () { - if (!Buffer.isBuffer(this)) return undefined - return this.byteOffset - } -}) - -function createBuffer (length) { - if (length > K_MAX_LENGTH) { - throw new RangeError('The value "' + length + '" is invalid for option "size"') - } - // Return an augmented `Uint8Array` instance - const buf = new Uint8Array(length) - Object.setPrototypeOf(buf, Buffer.prototype) - return buf -} - -/** - * The Buffer constructor returns instances of `Uint8Array` that have their - * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of - * `Uint8Array`, so the returned instances will have all the node `Buffer` methods - * and the `Uint8Array` methods. Square bracket notation works as expected -- it - * returns a single octet. - * - * The `Uint8Array` prototype remains unmodified. - */ - -function Buffer (arg, encodingOrOffset, length) { - // Common case. - if (typeof arg === 'number') { - if (typeof encodingOrOffset === 'string') { - throw new TypeError( - 'The "string" argument must be of type string. 
Received type number' - ) - } - return allocUnsafe(arg) - } - return from(arg, encodingOrOffset, length) -} - -Buffer.poolSize = 8192 // not used by this implementation - -function from (value, encodingOrOffset, length) { - if (typeof value === 'string') { - return fromString(value, encodingOrOffset) - } - - if (ArrayBuffer.isView(value)) { - return fromArrayView(value) - } - - if (value == null) { - throw new TypeError( - 'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' + - 'or Array-like Object. Received type ' + (typeof value) - ) - } - - if (isInstance(value, ArrayBuffer) || - (value && isInstance(value.buffer, ArrayBuffer))) { - return fromArrayBuffer(value, encodingOrOffset, length) - } - - if (typeof SharedArrayBuffer !== 'undefined' && - (isInstance(value, SharedArrayBuffer) || - (value && isInstance(value.buffer, SharedArrayBuffer)))) { - return fromArrayBuffer(value, encodingOrOffset, length) - } - - if (typeof value === 'number') { - throw new TypeError( - 'The "value" argument must not be of type number. Received type number' - ) - } - - const valueOf = value.valueOf && value.valueOf() - if (valueOf != null && valueOf !== value) { - return Buffer.from(valueOf, encodingOrOffset, length) - } - - const b = fromObject(value) - if (b) return b - - if (typeof Symbol !== 'undefined' && Symbol.toPrimitive != null && - typeof value[Symbol.toPrimitive] === 'function') { - return Buffer.from(value[Symbol.toPrimitive]('string'), encodingOrOffset, length) - } - - throw new TypeError( - 'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' + - 'or Array-like Object. Received type ' + (typeof value) - ) -} - -/** - * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError - * if value is a number. 
- * Buffer.from(str[, encoding]) - * Buffer.from(array) - * Buffer.from(buffer) - * Buffer.from(arrayBuffer[, byteOffset[, length]]) - **/ -Buffer.from = function (value, encodingOrOffset, length) { - return from(value, encodingOrOffset, length) -} - -// Note: Change prototype *after* Buffer.from is defined to workaround Chrome bug: -// https://github.com/feross/buffer/pull/148 -Object.setPrototypeOf(Buffer.prototype, Uint8Array.prototype) -Object.setPrototypeOf(Buffer, Uint8Array) - -function assertSize (size) { - if (typeof size !== 'number') { - throw new TypeError('"size" argument must be of type number') - } else if (size < 0) { - throw new RangeError('The value "' + size + '" is invalid for option "size"') - } -} - -function alloc (size, fill, encoding) { - assertSize(size) - if (size <= 0) { - return createBuffer(size) - } - if (fill !== undefined) { - // Only pay attention to encoding if it's a string. This - // prevents accidentally sending in a number that would - // be interpreted as a start offset. - return typeof encoding === 'string' - ? createBuffer(size).fill(fill, encoding) - : createBuffer(size).fill(fill) - } - return createBuffer(size) -} - -/** - * Creates a new filled Buffer instance. - * alloc(size[, fill[, encoding]]) - **/ -Buffer.alloc = function (size, fill, encoding) { - return alloc(size, fill, encoding) -} - -function allocUnsafe (size) { - assertSize(size) - return createBuffer(size < 0 ? 0 : checked(size) | 0) -} - -/** - * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance. - * */ -Buffer.allocUnsafe = function (size) { - return allocUnsafe(size) -} -/** - * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance. 
- */ -Buffer.allocUnsafeSlow = function (size) { - return allocUnsafe(size) -} - -function fromString (string, encoding) { - if (typeof encoding !== 'string' || encoding === '') { - encoding = 'utf8' - } - - if (!Buffer.isEncoding(encoding)) { - throw new TypeError('Unknown encoding: ' + encoding) - } - - const length = byteLength(string, encoding) | 0 - let buf = createBuffer(length) - - const actual = buf.write(string, encoding) - - if (actual !== length) { - // Writing a hex string, for example, that contains invalid characters will - // cause everything after the first invalid character to be ignored. (e.g. - // 'abxxcd' will be treated as 'ab') - buf = buf.slice(0, actual) - } - - return buf -} - -function fromArrayLike (array) { - const length = array.length < 0 ? 0 : checked(array.length) | 0 - const buf = createBuffer(length) - for (let i = 0; i < length; i += 1) { - buf[i] = array[i] & 255 - } - return buf -} - -function fromArrayView (arrayView) { - if (isInstance(arrayView, Uint8Array)) { - const copy = new Uint8Array(arrayView) - return fromArrayBuffer(copy.buffer, copy.byteOffset, copy.byteLength) - } - return fromArrayLike(arrayView) -} - -function fromArrayBuffer (array, byteOffset, length) { - if (byteOffset < 0 || array.byteLength < byteOffset) { - throw new RangeError('"offset" is outside of buffer bounds') - } - - if (array.byteLength < byteOffset + (length || 0)) { - throw new RangeError('"length" is outside of buffer bounds') - } - - let buf - if (byteOffset === undefined && length === undefined) { - buf = new Uint8Array(array) - } else if (length === undefined) { - buf = new Uint8Array(array, byteOffset) - } else { - buf = new Uint8Array(array, byteOffset, length) - } - - // Return an augmented `Uint8Array` instance - Object.setPrototypeOf(buf, Buffer.prototype) - - return buf -} - -function fromObject (obj) { - if (Buffer.isBuffer(obj)) { - const len = checked(obj.length) | 0 - const buf = createBuffer(len) - - if (buf.length === 0) { - 
return buf - } - - obj.copy(buf, 0, 0, len) - return buf - } - - if (obj.length !== undefined) { - if (typeof obj.length !== 'number' || numberIsNaN(obj.length)) { - return createBuffer(0) - } - return fromArrayLike(obj) - } - - if (obj.type === 'Buffer' && Array.isArray(obj.data)) { - return fromArrayLike(obj.data) - } -} - -function checked (length) { - // Note: cannot use `length < K_MAX_LENGTH` here because that fails when - // length is NaN (which is otherwise coerced to zero.) - if (length >= K_MAX_LENGTH) { - throw new RangeError('Attempt to allocate Buffer larger than maximum ' + - 'size: 0x' + K_MAX_LENGTH.toString(16) + ' bytes') - } - return length | 0 -} - -function SlowBuffer (length) { - if (+length != length) { // eslint-disable-line eqeqeq - length = 0 - } - return Buffer.alloc(+length) -} - -Buffer.isBuffer = function isBuffer (b) { - return b != null && b._isBuffer === true && - b !== Buffer.prototype // so Buffer.isBuffer(Buffer.prototype) will be false -} - -Buffer.compare = function compare (a, b) { - if (isInstance(a, Uint8Array)) a = Buffer.from(a, a.offset, a.byteLength) - if (isInstance(b, Uint8Array)) b = Buffer.from(b, b.offset, b.byteLength) - if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) { - throw new TypeError( - 'The "buf1", "buf2" arguments must be one of type Buffer or Uint8Array' - ) - } - - if (a === b) return 0 - - let x = a.length - let y = b.length - - for (let i = 0, len = Math.min(x, y); i < len; ++i) { - if (a[i] !== b[i]) { - x = a[i] - y = b[i] - break - } - } - - if (x < y) return -1 - if (y < x) return 1 - return 0 -} - -Buffer.isEncoding = function isEncoding (encoding) { - switch (String(encoding).toLowerCase()) { - case 'hex': - case 'utf8': - case 'utf-8': - case 'ascii': - case 'latin1': - case 'binary': - case 'base64': - case 'ucs2': - case 'ucs-2': - case 'utf16le': - case 'utf-16le': - return true - default: - return false - } -} - -Buffer.concat = function concat (list, length) { - if (!Array.isArray(list)) { 
- throw new TypeError('"list" argument must be an Array of Buffers') - } - - if (list.length === 0) { - return Buffer.alloc(0) - } - - let i - if (length === undefined) { - length = 0 - for (i = 0; i < list.length; ++i) { - length += list[i].length - } - } - - const buffer = Buffer.allocUnsafe(length) - let pos = 0 - for (i = 0; i < list.length; ++i) { - let buf = list[i] - if (isInstance(buf, Uint8Array)) { - if (pos + buf.length > buffer.length) { - if (!Buffer.isBuffer(buf)) buf = Buffer.from(buf) - buf.copy(buffer, pos) - } else { - Uint8Array.prototype.set.call( - buffer, - buf, - pos - ) - } - } else if (!Buffer.isBuffer(buf)) { - throw new TypeError('"list" argument must be an Array of Buffers') - } else { - buf.copy(buffer, pos) - } - pos += buf.length - } - return buffer -} - -function byteLength (string, encoding) { - if (Buffer.isBuffer(string)) { - return string.length - } - if (ArrayBuffer.isView(string) || isInstance(string, ArrayBuffer)) { - return string.byteLength - } - if (typeof string !== 'string') { - throw new TypeError( - 'The "string" argument must be one of type string, Buffer, or ArrayBuffer. ' + - 'Received type ' + typeof string - ) - } - - const len = string.length - const mustMatch = (arguments.length > 2 && arguments[2] === true) - if (!mustMatch && len === 0) return 0 - - // Use a for loop to avoid recursion - let loweredCase = false - for (;;) { - switch (encoding) { - case 'ascii': - case 'latin1': - case 'binary': - return len - case 'utf8': - case 'utf-8': - return utf8ToBytes(string).length - case 'ucs2': - case 'ucs-2': - case 'utf16le': - case 'utf-16le': - return len * 2 - case 'hex': - return len >>> 1 - case 'base64': - return base64ToBytes(string).length - default: - if (loweredCase) { - return mustMatch ? 
-1 : utf8ToBytes(string).length // assume utf8 - } - encoding = ('' + encoding).toLowerCase() - loweredCase = true - } - } -} -Buffer.byteLength = byteLength - -function slowToString (encoding, start, end) { - let loweredCase = false - - // No need to verify that "this.length <= MAX_UINT32" since it's a read-only - // property of a typed array. - - // This behaves neither like String nor Uint8Array in that we set start/end - // to their upper/lower bounds if the value passed is out of range. - // undefined is handled specially as per ECMA-262 6th Edition, - // Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization. - if (start === undefined || start < 0) { - start = 0 - } - // Return early if start > this.length. Done here to prevent potential uint32 - // coercion fail below. - if (start > this.length) { - return '' - } - - if (end === undefined || end > this.length) { - end = this.length - } - - if (end <= 0) { - return '' - } - - // Force coercion to uint32. This will also coerce falsey/NaN values to 0. - end >>>= 0 - start >>>= 0 - - if (end <= start) { - return '' - } - - if (!encoding) encoding = 'utf8' - - while (true) { - switch (encoding) { - case 'hex': - return hexSlice(this, start, end) - - case 'utf8': - case 'utf-8': - return utf8Slice(this, start, end) - - case 'ascii': - return asciiSlice(this, start, end) - - case 'latin1': - case 'binary': - return latin1Slice(this, start, end) - - case 'base64': - return base64Slice(this, start, end) - - case 'ucs2': - case 'ucs-2': - case 'utf16le': - case 'utf-16le': - return utf16leSlice(this, start, end) - - default: - if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding) - encoding = (encoding + '').toLowerCase() - loweredCase = true - } - } -} - -// This property is used by `Buffer.isBuffer` (and the `is-buffer` npm package) -// to detect a Buffer instance. 
It's not possible to use `instanceof Buffer` -// reliably in a browserify context because there could be multiple different -// copies of the 'buffer' package in use. This method works even for Buffer -// instances that were created from another copy of the `buffer` package. -// See: https://github.com/feross/buffer/issues/154 -Buffer.prototype._isBuffer = true - -function swap (b, n, m) { - const i = b[n] - b[n] = b[m] - b[m] = i -} - -Buffer.prototype.swap16 = function swap16 () { - const len = this.length - if (len % 2 !== 0) { - throw new RangeError('Buffer size must be a multiple of 16-bits') - } - for (let i = 0; i < len; i += 2) { - swap(this, i, i + 1) - } - return this -} - -Buffer.prototype.swap32 = function swap32 () { - const len = this.length - if (len % 4 !== 0) { - throw new RangeError('Buffer size must be a multiple of 32-bits') - } - for (let i = 0; i < len; i += 4) { - swap(this, i, i + 3) - swap(this, i + 1, i + 2) - } - return this -} - -Buffer.prototype.swap64 = function swap64 () { - const len = this.length - if (len % 8 !== 0) { - throw new RangeError('Buffer size must be a multiple of 64-bits') - } - for (let i = 0; i < len; i += 8) { - swap(this, i, i + 7) - swap(this, i + 1, i + 6) - swap(this, i + 2, i + 5) - swap(this, i + 3, i + 4) - } - return this -} - -Buffer.prototype.toString = function toString () { - const length = this.length - if (length === 0) return '' - if (arguments.length === 0) return utf8Slice(this, 0, length) - return slowToString.apply(this, arguments) -} - -Buffer.prototype.toLocaleString = Buffer.prototype.toString - -Buffer.prototype.equals = function equals (b) { - if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer') - if (this === b) return true - return Buffer.compare(this, b) === 0 -} - -Buffer.prototype.inspect = function inspect () { - let str = '' - const max = exports.INSPECT_MAX_BYTES - str = this.toString('hex', 0, max).replace(/(.{2})/g, '$1 ').trim() - if (this.length > max) str += ' 
... ' - return '' -} -if (customInspectSymbol) { - Buffer.prototype[customInspectSymbol] = Buffer.prototype.inspect -} - -Buffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) { - if (isInstance(target, Uint8Array)) { - target = Buffer.from(target, target.offset, target.byteLength) - } - if (!Buffer.isBuffer(target)) { - throw new TypeError( - 'The "target" argument must be one of type Buffer or Uint8Array. ' + - 'Received type ' + (typeof target) - ) - } - - if (start === undefined) { - start = 0 - } - if (end === undefined) { - end = target ? target.length : 0 - } - if (thisStart === undefined) { - thisStart = 0 - } - if (thisEnd === undefined) { - thisEnd = this.length - } - - if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) { - throw new RangeError('out of range index') - } - - if (thisStart >= thisEnd && start >= end) { - return 0 - } - if (thisStart >= thisEnd) { - return -1 - } - if (start >= end) { - return 1 - } - - start >>>= 0 - end >>>= 0 - thisStart >>>= 0 - thisEnd >>>= 0 - - if (this === target) return 0 - - let x = thisEnd - thisStart - let y = end - start - const len = Math.min(x, y) - - const thisCopy = this.slice(thisStart, thisEnd) - const targetCopy = target.slice(start, end) - - for (let i = 0; i < len; ++i) { - if (thisCopy[i] !== targetCopy[i]) { - x = thisCopy[i] - y = targetCopy[i] - break - } - } - - if (x < y) return -1 - if (y < x) return 1 - return 0 -} - -// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`, -// OR the last index of `val` in `buffer` at offset <= `byteOffset`. 
-// -// Arguments: -// - buffer - a Buffer to search -// - val - a string, Buffer, or number -// - byteOffset - an index into `buffer`; will be clamped to an int32 -// - encoding - an optional encoding, relevant is val is a string -// - dir - true for indexOf, false for lastIndexOf -function bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) { - // Empty buffer means no match - if (buffer.length === 0) return -1 - - // Normalize byteOffset - if (typeof byteOffset === 'string') { - encoding = byteOffset - byteOffset = 0 - } else if (byteOffset > 0x7fffffff) { - byteOffset = 0x7fffffff - } else if (byteOffset < -0x80000000) { - byteOffset = -0x80000000 - } - byteOffset = +byteOffset // Coerce to Number. - if (numberIsNaN(byteOffset)) { - // byteOffset: it it's undefined, null, NaN, "foo", etc, search whole buffer - byteOffset = dir ? 0 : (buffer.length - 1) - } - - // Normalize byteOffset: negative offsets start from the end of the buffer - if (byteOffset < 0) byteOffset = buffer.length + byteOffset - if (byteOffset >= buffer.length) { - if (dir) return -1 - else byteOffset = buffer.length - 1 - } else if (byteOffset < 0) { - if (dir) byteOffset = 0 - else return -1 - } - - // Normalize val - if (typeof val === 'string') { - val = Buffer.from(val, encoding) - } - - // Finally, search either indexOf (if dir is true) or lastIndexOf - if (Buffer.isBuffer(val)) { - // Special case: looking for empty string/buffer always fails - if (val.length === 0) { - return -1 - } - return arrayIndexOf(buffer, val, byteOffset, encoding, dir) - } else if (typeof val === 'number') { - val = val & 0xFF // Search for a byte value [0-255] - if (typeof Uint8Array.prototype.indexOf === 'function') { - if (dir) { - return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset) - } else { - return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset) - } - } - return arrayIndexOf(buffer, [val], byteOffset, encoding, dir) - } - - throw new TypeError('val must be string, 
number or Buffer') -} - -function arrayIndexOf (arr, val, byteOffset, encoding, dir) { - let indexSize = 1 - let arrLength = arr.length - let valLength = val.length - - if (encoding !== undefined) { - encoding = String(encoding).toLowerCase() - if (encoding === 'ucs2' || encoding === 'ucs-2' || - encoding === 'utf16le' || encoding === 'utf-16le') { - if (arr.length < 2 || val.length < 2) { - return -1 - } - indexSize = 2 - arrLength /= 2 - valLength /= 2 - byteOffset /= 2 - } - } - - function read (buf, i) { - if (indexSize === 1) { - return buf[i] - } else { - return buf.readUInt16BE(i * indexSize) - } - } - - let i - if (dir) { - let foundIndex = -1 - for (i = byteOffset; i < arrLength; i++) { - if (read(arr, i) === read(val, foundIndex === -1 ? 0 : i - foundIndex)) { - if (foundIndex === -1) foundIndex = i - if (i - foundIndex + 1 === valLength) return foundIndex * indexSize - } else { - if (foundIndex !== -1) i -= i - foundIndex - foundIndex = -1 - } - } - } else { - if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength - for (i = byteOffset; i >= 0; i--) { - let found = true - for (let j = 0; j < valLength; j++) { - if (read(arr, i + j) !== read(val, j)) { - found = false - break - } - } - if (found) return i - } - } - - return -1 -} - -Buffer.prototype.includes = function includes (val, byteOffset, encoding) { - return this.indexOf(val, byteOffset, encoding) !== -1 -} - -Buffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) { - return bidirectionalIndexOf(this, val, byteOffset, encoding, true) -} - -Buffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) { - return bidirectionalIndexOf(this, val, byteOffset, encoding, false) -} - -function hexWrite (buf, string, offset, length) { - offset = Number(offset) || 0 - const remaining = buf.length - offset - if (!length) { - length = remaining - } else { - length = Number(length) - if (length > remaining) { - length = remaining - } - } - - const strLen = 
string.length - - if (length > strLen / 2) { - length = strLen / 2 - } - let i - for (i = 0; i < length; ++i) { - const parsed = parseInt(string.substr(i * 2, 2), 16) - if (numberIsNaN(parsed)) return i - buf[offset + i] = parsed - } - return i -} - -function utf8Write (buf, string, offset, length) { - return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length) -} - -function asciiWrite (buf, string, offset, length) { - return blitBuffer(asciiToBytes(string), buf, offset, length) -} - -function base64Write (buf, string, offset, length) { - return blitBuffer(base64ToBytes(string), buf, offset, length) -} - -function ucs2Write (buf, string, offset, length) { - return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length) -} - -Buffer.prototype.write = function write (string, offset, length, encoding) { - // Buffer#write(string) - if (offset === undefined) { - encoding = 'utf8' - length = this.length - offset = 0 - // Buffer#write(string, encoding) - } else if (length === undefined && typeof offset === 'string') { - encoding = offset - length = this.length - offset = 0 - // Buffer#write(string, offset[, length][, encoding]) - } else if (isFinite(offset)) { - offset = offset >>> 0 - if (isFinite(length)) { - length = length >>> 0 - if (encoding === undefined) encoding = 'utf8' - } else { - encoding = length - length = undefined - } - } else { - throw new Error( - 'Buffer.write(string, encoding, offset[, length]) is no longer supported' - ) - } - - const remaining = this.length - offset - if (length === undefined || length > remaining) length = remaining - - if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) { - throw new RangeError('Attempt to write outside buffer bounds') - } - - if (!encoding) encoding = 'utf8' - - let loweredCase = false - for (;;) { - switch (encoding) { - case 'hex': - return hexWrite(this, string, offset, length) - - case 'utf8': - case 'utf-8': - return utf8Write(this, 
string, offset, length) - - case 'ascii': - case 'latin1': - case 'binary': - return asciiWrite(this, string, offset, length) - - case 'base64': - // Warning: maxLength not taken into account in base64Write - return base64Write(this, string, offset, length) - - case 'ucs2': - case 'ucs-2': - case 'utf16le': - case 'utf-16le': - return ucs2Write(this, string, offset, length) - - default: - if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding) - encoding = ('' + encoding).toLowerCase() - loweredCase = true - } - } -} - -Buffer.prototype.toJSON = function toJSON () { - return { - type: 'Buffer', - data: Array.prototype.slice.call(this._arr || this, 0) - } -} - -function base64Slice (buf, start, end) { - if (start === 0 && end === buf.length) { - return base64.fromByteArray(buf) - } else { - return base64.fromByteArray(buf.slice(start, end)) - } -} - -function utf8Slice (buf, start, end) { - end = Math.min(buf.length, end) - const res = [] - - let i = start - while (i < end) { - const firstByte = buf[i] - let codePoint = null - let bytesPerSequence = (firstByte > 0xEF) - ? 4 - : (firstByte > 0xDF) - ? 3 - : (firstByte > 0xBF) - ? 
2 - : 1 - - if (i + bytesPerSequence <= end) { - let secondByte, thirdByte, fourthByte, tempCodePoint - - switch (bytesPerSequence) { - case 1: - if (firstByte < 0x80) { - codePoint = firstByte - } - break - case 2: - secondByte = buf[i + 1] - if ((secondByte & 0xC0) === 0x80) { - tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F) - if (tempCodePoint > 0x7F) { - codePoint = tempCodePoint - } - } - break - case 3: - secondByte = buf[i + 1] - thirdByte = buf[i + 2] - if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) { - tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F) - if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) { - codePoint = tempCodePoint - } - } - break - case 4: - secondByte = buf[i + 1] - thirdByte = buf[i + 2] - fourthByte = buf[i + 3] - if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) { - tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F) - if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) { - codePoint = tempCodePoint - } - } - } - } - - if (codePoint === null) { - // we did not generate a valid codePoint so insert a - // replacement char (U+FFFD) and advance only 1 byte - codePoint = 0xFFFD - bytesPerSequence = 1 - } else if (codePoint > 0xFFFF) { - // encode to utf16 (surrogate pair dance) - codePoint -= 0x10000 - res.push(codePoint >>> 10 & 0x3FF | 0xD800) - codePoint = 0xDC00 | codePoint & 0x3FF - } - - res.push(codePoint) - i += bytesPerSequence - } - - return decodeCodePointsArray(res) -} - -// Based on http://stackoverflow.com/a/22747272/680742, the browser with -// the lowest limit is Chrome, with 0x10000 args. 
-// We go 1 magnitude less, for safety -const MAX_ARGUMENTS_LENGTH = 0x1000 - -function decodeCodePointsArray (codePoints) { - const len = codePoints.length - if (len <= MAX_ARGUMENTS_LENGTH) { - return String.fromCharCode.apply(String, codePoints) // avoid extra slice() - } - - // Decode in chunks to avoid "call stack size exceeded". - let res = '' - let i = 0 - while (i < len) { - res += String.fromCharCode.apply( - String, - codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH) - ) - } - return res -} - -function asciiSlice (buf, start, end) { - let ret = '' - end = Math.min(buf.length, end) - - for (let i = start; i < end; ++i) { - ret += String.fromCharCode(buf[i] & 0x7F) - } - return ret -} - -function latin1Slice (buf, start, end) { - let ret = '' - end = Math.min(buf.length, end) - - for (let i = start; i < end; ++i) { - ret += String.fromCharCode(buf[i]) - } - return ret -} - -function hexSlice (buf, start, end) { - const len = buf.length - - if (!start || start < 0) start = 0 - if (!end || end < 0 || end > len) end = len - - let out = '' - for (let i = start; i < end; ++i) { - out += hexSliceLookupTable[buf[i]] - } - return out -} - -function utf16leSlice (buf, start, end) { - const bytes = buf.slice(start, end) - let res = '' - // If bytes.length is odd, the last 8 bits must be ignored (same as node.js) - for (let i = 0; i < bytes.length - 1; i += 2) { - res += String.fromCharCode(bytes[i] + (bytes[i + 1] * 256)) - } - return res -} - -Buffer.prototype.slice = function slice (start, end) { - const len = this.length - start = ~~start - end = end === undefined ? 
len : ~~end - - if (start < 0) { - start += len - if (start < 0) start = 0 - } else if (start > len) { - start = len - } - - if (end < 0) { - end += len - if (end < 0) end = 0 - } else if (end > len) { - end = len - } - - if (end < start) end = start - - const newBuf = this.subarray(start, end) - // Return an augmented `Uint8Array` instance - Object.setPrototypeOf(newBuf, Buffer.prototype) - - return newBuf -} - -/* - * Need to make sure that buffer isn't trying to write out of bounds. - */ -function checkOffset (offset, ext, length) { - if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint') - if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length') -} - -Buffer.prototype.readUintLE = -Buffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) { - offset = offset >>> 0 - byteLength = byteLength >>> 0 - if (!noAssert) checkOffset(offset, byteLength, this.length) - - let val = this[offset] - let mul = 1 - let i = 0 - while (++i < byteLength && (mul *= 0x100)) { - val += this[offset + i] * mul - } - - return val -} - -Buffer.prototype.readUintBE = -Buffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) { - offset = offset >>> 0 - byteLength = byteLength >>> 0 - if (!noAssert) { - checkOffset(offset, byteLength, this.length) - } - - let val = this[offset + --byteLength] - let mul = 1 - while (byteLength > 0 && (mul *= 0x100)) { - val += this[offset + --byteLength] * mul - } - - return val -} - -Buffer.prototype.readUint8 = -Buffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 1, this.length) - return this[offset] -} - -Buffer.prototype.readUint16LE = -Buffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 2, this.length) - return this[offset] | (this[offset + 1] << 8) -} - -Buffer.prototype.readUint16BE = 
-Buffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 2, this.length) - return (this[offset] << 8) | this[offset + 1] -} - -Buffer.prototype.readUint32LE = -Buffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 4, this.length) - - return ((this[offset]) | - (this[offset + 1] << 8) | - (this[offset + 2] << 16)) + - (this[offset + 3] * 0x1000000) -} - -Buffer.prototype.readUint32BE = -Buffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 4, this.length) - - return (this[offset] * 0x1000000) + - ((this[offset + 1] << 16) | - (this[offset + 2] << 8) | - this[offset + 3]) -} - -Buffer.prototype.readBigUInt64LE = defineBigIntMethod(function readBigUInt64LE (offset) { - offset = offset >>> 0 - validateNumber(offset, 'offset') - const first = this[offset] - const last = this[offset + 7] - if (first === undefined || last === undefined) { - boundsError(offset, this.length - 8) - } - - const lo = first + - this[++offset] * 2 ** 8 + - this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 24 - - const hi = this[++offset] + - this[++offset] * 2 ** 8 + - this[++offset] * 2 ** 16 + - last * 2 ** 24 - - return BigInt(lo) + (BigInt(hi) << BigInt(32)) -}) - -Buffer.prototype.readBigUInt64BE = defineBigIntMethod(function readBigUInt64BE (offset) { - offset = offset >>> 0 - validateNumber(offset, 'offset') - const first = this[offset] - const last = this[offset + 7] - if (first === undefined || last === undefined) { - boundsError(offset, this.length - 8) - } - - const hi = first * 2 ** 24 + - this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 8 + - this[++offset] - - const lo = this[++offset] * 2 ** 24 + - this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 8 + - last - - return (BigInt(hi) << BigInt(32)) + BigInt(lo) -}) - 
-Buffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) { - offset = offset >>> 0 - byteLength = byteLength >>> 0 - if (!noAssert) checkOffset(offset, byteLength, this.length) - - let val = this[offset] - let mul = 1 - let i = 0 - while (++i < byteLength && (mul *= 0x100)) { - val += this[offset + i] * mul - } - mul *= 0x80 - - if (val >= mul) val -= Math.pow(2, 8 * byteLength) - - return val -} - -Buffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) { - offset = offset >>> 0 - byteLength = byteLength >>> 0 - if (!noAssert) checkOffset(offset, byteLength, this.length) - - let i = byteLength - let mul = 1 - let val = this[offset + --i] - while (i > 0 && (mul *= 0x100)) { - val += this[offset + --i] * mul - } - mul *= 0x80 - - if (val >= mul) val -= Math.pow(2, 8 * byteLength) - - return val -} - -Buffer.prototype.readInt8 = function readInt8 (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 1, this.length) - if (!(this[offset] & 0x80)) return (this[offset]) - return ((0xff - this[offset] + 1) * -1) -} - -Buffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 2, this.length) - const val = this[offset] | (this[offset + 1] << 8) - return (val & 0x8000) ? val | 0xFFFF0000 : val -} - -Buffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 2, this.length) - const val = this[offset + 1] | (this[offset] << 8) - return (val & 0x8000) ? 
val | 0xFFFF0000 : val -} - -Buffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 4, this.length) - - return (this[offset]) | - (this[offset + 1] << 8) | - (this[offset + 2] << 16) | - (this[offset + 3] << 24) -} - -Buffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 4, this.length) - - return (this[offset] << 24) | - (this[offset + 1] << 16) | - (this[offset + 2] << 8) | - (this[offset + 3]) -} - -Buffer.prototype.readBigInt64LE = defineBigIntMethod(function readBigInt64LE (offset) { - offset = offset >>> 0 - validateNumber(offset, 'offset') - const first = this[offset] - const last = this[offset + 7] - if (first === undefined || last === undefined) { - boundsError(offset, this.length - 8) - } - - const val = this[offset + 4] + - this[offset + 5] * 2 ** 8 + - this[offset + 6] * 2 ** 16 + - (last << 24) // Overflow - - return (BigInt(val) << BigInt(32)) + - BigInt(first + - this[++offset] * 2 ** 8 + - this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 24) -}) - -Buffer.prototype.readBigInt64BE = defineBigIntMethod(function readBigInt64BE (offset) { - offset = offset >>> 0 - validateNumber(offset, 'offset') - const first = this[offset] - const last = this[offset + 7] - if (first === undefined || last === undefined) { - boundsError(offset, this.length - 8) - } - - const val = (first << 24) + // Overflow - this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 8 + - this[++offset] - - return (BigInt(val) << BigInt(32)) + - BigInt(this[++offset] * 2 ** 24 + - this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 8 + - last) -}) - -Buffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 4, this.length) - return ieee754.read(this, offset, true, 23, 4) -} - -Buffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) { - offset = 
offset >>> 0 - if (!noAssert) checkOffset(offset, 4, this.length) - return ieee754.read(this, offset, false, 23, 4) -} - -Buffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 8, this.length) - return ieee754.read(this, offset, true, 52, 8) -} - -Buffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) { - offset = offset >>> 0 - if (!noAssert) checkOffset(offset, 8, this.length) - return ieee754.read(this, offset, false, 52, 8) -} - -function checkInt (buf, value, offset, ext, max, min) { - if (!Buffer.isBuffer(buf)) throw new TypeError('"buffer" argument must be a Buffer instance') - if (value > max || value < min) throw new RangeError('"value" argument is out of bounds') - if (offset + ext > buf.length) throw new RangeError('Index out of range') -} - -Buffer.prototype.writeUintLE = -Buffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) { - value = +value - offset = offset >>> 0 - byteLength = byteLength >>> 0 - if (!noAssert) { - const maxBytes = Math.pow(2, 8 * byteLength) - 1 - checkInt(this, value, offset, byteLength, maxBytes, 0) - } - - let mul = 1 - let i = 0 - this[offset] = value & 0xFF - while (++i < byteLength && (mul *= 0x100)) { - this[offset + i] = (value / mul) & 0xFF - } - - return offset + byteLength -} - -Buffer.prototype.writeUintBE = -Buffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) { - value = +value - offset = offset >>> 0 - byteLength = byteLength >>> 0 - if (!noAssert) { - const maxBytes = Math.pow(2, 8 * byteLength) - 1 - checkInt(this, value, offset, byteLength, maxBytes, 0) - } - - let i = byteLength - 1 - let mul = 1 - this[offset + i] = value & 0xFF - while (--i >= 0 && (mul *= 0x100)) { - this[offset + i] = (value / mul) & 0xFF - } - - return offset + byteLength -} - -Buffer.prototype.writeUint8 = -Buffer.prototype.writeUInt8 = function writeUInt8 (value, 
offset, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0) - this[offset] = (value & 0xff) - return offset + 1 -} - -Buffer.prototype.writeUint16LE = -Buffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0) - this[offset] = (value & 0xff) - this[offset + 1] = (value >>> 8) - return offset + 2 -} - -Buffer.prototype.writeUint16BE = -Buffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0) - this[offset] = (value >>> 8) - this[offset + 1] = (value & 0xff) - return offset + 2 -} - -Buffer.prototype.writeUint32LE = -Buffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0) - this[offset + 3] = (value >>> 24) - this[offset + 2] = (value >>> 16) - this[offset + 1] = (value >>> 8) - this[offset] = (value & 0xff) - return offset + 4 -} - -Buffer.prototype.writeUint32BE = -Buffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0) - this[offset] = (value >>> 24) - this[offset + 1] = (value >>> 16) - this[offset + 2] = (value >>> 8) - this[offset + 3] = (value & 0xff) - return offset + 4 -} - -function wrtBigUInt64LE (buf, value, offset, min, max) { - checkIntBI(value, min, max, buf, offset, 7) - - let lo = Number(value & BigInt(0xffffffff)) - buf[offset++] = lo - lo = lo >> 8 - buf[offset++] = lo - lo = lo >> 8 - buf[offset++] = lo - lo = lo >> 8 - buf[offset++] = lo - let hi = Number(value >> BigInt(32) & BigInt(0xffffffff)) - buf[offset++] = hi - hi = hi >> 8 - buf[offset++] = hi - hi = hi >> 8 - 
buf[offset++] = hi - hi = hi >> 8 - buf[offset++] = hi - return offset -} - -function wrtBigUInt64BE (buf, value, offset, min, max) { - checkIntBI(value, min, max, buf, offset, 7) - - let lo = Number(value & BigInt(0xffffffff)) - buf[offset + 7] = lo - lo = lo >> 8 - buf[offset + 6] = lo - lo = lo >> 8 - buf[offset + 5] = lo - lo = lo >> 8 - buf[offset + 4] = lo - let hi = Number(value >> BigInt(32) & BigInt(0xffffffff)) - buf[offset + 3] = hi - hi = hi >> 8 - buf[offset + 2] = hi - hi = hi >> 8 - buf[offset + 1] = hi - hi = hi >> 8 - buf[offset] = hi - return offset + 8 -} - -Buffer.prototype.writeBigUInt64LE = defineBigIntMethod(function writeBigUInt64LE (value, offset = 0) { - return wrtBigUInt64LE(this, value, offset, BigInt(0), BigInt('0xffffffffffffffff')) -}) - -Buffer.prototype.writeBigUInt64BE = defineBigIntMethod(function writeBigUInt64BE (value, offset = 0) { - return wrtBigUInt64BE(this, value, offset, BigInt(0), BigInt('0xffffffffffffffff')) -}) - -Buffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) { - const limit = Math.pow(2, (8 * byteLength) - 1) - - checkInt(this, value, offset, byteLength, limit - 1, -limit) - } - - let i = 0 - let mul = 1 - let sub = 0 - this[offset] = value & 0xFF - while (++i < byteLength && (mul *= 0x100)) { - if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) { - sub = 1 - } - this[offset + i] = ((value / mul) >> 0) - sub & 0xFF - } - - return offset + byteLength -} - -Buffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) { - const limit = Math.pow(2, (8 * byteLength) - 1) - - checkInt(this, value, offset, byteLength, limit - 1, -limit) - } - - let i = byteLength - 1 - let mul = 1 - let sub = 0 - this[offset + i] = value & 0xFF - while (--i >= 0 && (mul *= 0x100)) { - if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) { - 
sub = 1 - } - this[offset + i] = ((value / mul) >> 0) - sub & 0xFF - } - - return offset + byteLength -} - -Buffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80) - if (value < 0) value = 0xff + value + 1 - this[offset] = (value & 0xff) - return offset + 1 -} - -Buffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000) - this[offset] = (value & 0xff) - this[offset + 1] = (value >>> 8) - return offset + 2 -} - -Buffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000) - this[offset] = (value >>> 8) - this[offset + 1] = (value & 0xff) - return offset + 2 -} - -Buffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000) - this[offset] = (value & 0xff) - this[offset + 1] = (value >>> 8) - this[offset + 2] = (value >>> 16) - this[offset + 3] = (value >>> 24) - return offset + 4 -} - -Buffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000) - if (value < 0) value = 0xffffffff + value + 1 - this[offset] = (value >>> 24) - this[offset + 1] = (value >>> 16) - this[offset + 2] = (value >>> 8) - this[offset + 3] = (value & 0xff) - return offset + 4 -} - -Buffer.prototype.writeBigInt64LE = defineBigIntMethod(function writeBigInt64LE (value, offset = 0) { - return wrtBigUInt64LE(this, value, offset, -BigInt('0x8000000000000000'), BigInt('0x7fffffffffffffff')) -}) - -Buffer.prototype.writeBigInt64BE = 
defineBigIntMethod(function writeBigInt64BE (value, offset = 0) { - return wrtBigUInt64BE(this, value, offset, -BigInt('0x8000000000000000'), BigInt('0x7fffffffffffffff')) -}) - -function checkIEEE754 (buf, value, offset, ext, max, min) { - if (offset + ext > buf.length) throw new RangeError('Index out of range') - if (offset < 0) throw new RangeError('Index out of range') -} - -function writeFloat (buf, value, offset, littleEndian, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) { - checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38) - } - ieee754.write(buf, value, offset, littleEndian, 23, 4) - return offset + 4 -} - -Buffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) { - return writeFloat(this, value, offset, true, noAssert) -} - -Buffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) { - return writeFloat(this, value, offset, false, noAssert) -} - -function writeDouble (buf, value, offset, littleEndian, noAssert) { - value = +value - offset = offset >>> 0 - if (!noAssert) { - checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308) - } - ieee754.write(buf, value, offset, littleEndian, 52, 8) - return offset + 8 -} - -Buffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) { - return writeDouble(this, value, offset, true, noAssert) -} - -Buffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) { - return writeDouble(this, value, offset, false, noAssert) -} - -// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length) -Buffer.prototype.copy = function copy (target, targetStart, start, end) { - if (!Buffer.isBuffer(target)) throw new TypeError('argument should be a Buffer') - if (!start) start = 0 - if (!end && end !== 0) end = this.length - if (targetStart >= target.length) targetStart = target.length - if (!targetStart) targetStart = 0 - if (end > 0 && end < 
start) end = start - - // Copy 0 bytes; we're done - if (end === start) return 0 - if (target.length === 0 || this.length === 0) return 0 - - // Fatal error conditions - if (targetStart < 0) { - throw new RangeError('targetStart out of bounds') - } - if (start < 0 || start >= this.length) throw new RangeError('Index out of range') - if (end < 0) throw new RangeError('sourceEnd out of bounds') - - // Are we oob? - if (end > this.length) end = this.length - if (target.length - targetStart < end - start) { - end = target.length - targetStart + start - } - - const len = end - start - - if (this === target && typeof Uint8Array.prototype.copyWithin === 'function') { - // Use built-in when available, missing from IE11 - this.copyWithin(targetStart, start, end) - } else { - Uint8Array.prototype.set.call( - target, - this.subarray(start, end), - targetStart - ) - } - - return len -} - -// Usage: -// buffer.fill(number[, offset[, end]]) -// buffer.fill(buffer[, offset[, end]]) -// buffer.fill(string[, offset[, end]][, encoding]) -Buffer.prototype.fill = function fill (val, start, end, encoding) { - // Handle string cases: - if (typeof val === 'string') { - if (typeof start === 'string') { - encoding = start - start = 0 - end = this.length - } else if (typeof end === 'string') { - encoding = end - end = this.length - } - if (encoding !== undefined && typeof encoding !== 'string') { - throw new TypeError('encoding must be a string') - } - if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) { - throw new TypeError('Unknown encoding: ' + encoding) - } - if (val.length === 1) { - const code = val.charCodeAt(0) - if ((encoding === 'utf8' && code < 128) || - encoding === 'latin1') { - // Fast path: If `val` fits into a single byte, use that numeric value. 
- val = code - } - } - } else if (typeof val === 'number') { - val = val & 255 - } else if (typeof val === 'boolean') { - val = Number(val) - } - - // Invalid ranges are not set to a default, so can range check early. - if (start < 0 || this.length < start || this.length < end) { - throw new RangeError('Out of range index') - } - - if (end <= start) { - return this - } - - start = start >>> 0 - end = end === undefined ? this.length : end >>> 0 - - if (!val) val = 0 - - let i - if (typeof val === 'number') { - for (i = start; i < end; ++i) { - this[i] = val - } - } else { - const bytes = Buffer.isBuffer(val) - ? val - : Buffer.from(val, encoding) - const len = bytes.length - if (len === 0) { - throw new TypeError('The value "' + val + - '" is invalid for argument "value"') - } - for (i = 0; i < end - start; ++i) { - this[i + start] = bytes[i % len] - } - } - - return this -} - -// CUSTOM ERRORS -// ============= - -// Simplified versions from Node, changed for Buffer-only usage -const errors = {} -function E (sym, getMessage, Base) { - errors[sym] = class NodeError extends Base { - constructor () { - super() - - Object.defineProperty(this, 'message', { - value: getMessage.apply(this, arguments), - writable: true, - configurable: true - }) - - // Add the error code to the name to include it in the stack trace. - this.name = `${this.name} [${sym}]` - // Access the stack to generate the error message including the error code - // from the name. - this.stack // eslint-disable-line no-unused-expressions - // Reset the name to the actual name. 
- delete this.name - } - - get code () { - return sym - } - - set code (value) { - Object.defineProperty(this, 'code', { - configurable: true, - enumerable: true, - value, - writable: true - }) - } - - toString () { - return `${this.name} [${sym}]: ${this.message}` - } - } -} - -E('ERR_BUFFER_OUT_OF_BOUNDS', - function (name) { - if (name) { - return `${name} is outside of buffer bounds` - } - - return 'Attempt to access memory outside buffer bounds' - }, RangeError) -E('ERR_INVALID_ARG_TYPE', - function (name, actual) { - return `The "${name}" argument must be of type number. Received type ${typeof actual}` - }, TypeError) -E('ERR_OUT_OF_RANGE', - function (str, range, input) { - let msg = `The value of "${str}" is out of range.` - let received = input - if (Number.isInteger(input) && Math.abs(input) > 2 ** 32) { - received = addNumericalSeparator(String(input)) - } else if (typeof input === 'bigint') { - received = String(input) - if (input > BigInt(2) ** BigInt(32) || input < -(BigInt(2) ** BigInt(32))) { - received = addNumericalSeparator(received) - } - received += 'n' - } - msg += ` It must be ${range}. Received ${received}` - return msg - }, RangeError) - -function addNumericalSeparator (val) { - let res = '' - let i = val.length - const start = val[0] === '-' ? 1 : 0 - for (; i >= start + 4; i -= 3) { - res = `_${val.slice(i - 3, i)}${res}` - } - return `${val.slice(0, i)}${res}` -} - -// CHECK FUNCTIONS -// =============== - -function checkBounds (buf, offset, byteLength) { - validateNumber(offset, 'offset') - if (buf[offset] === undefined || buf[offset + byteLength] === undefined) { - boundsError(offset, buf.length - (byteLength + 1)) - } -} - -function checkIntBI (value, min, max, buf, offset, byteLength) { - if (value > max || value < min) { - const n = typeof min === 'bigint' ? 
'n' : '' - let range - if (byteLength > 3) { - if (min === 0 || min === BigInt(0)) { - range = `>= 0${n} and < 2${n} ** ${(byteLength + 1) * 8}${n}` - } else { - range = `>= -(2${n} ** ${(byteLength + 1) * 8 - 1}${n}) and < 2 ** ` + - `${(byteLength + 1) * 8 - 1}${n}` - } - } else { - range = `>= ${min}${n} and <= ${max}${n}` - } - throw new errors.ERR_OUT_OF_RANGE('value', range, value) - } - checkBounds(buf, offset, byteLength) -} - -function validateNumber (value, name) { - if (typeof value !== 'number') { - throw new errors.ERR_INVALID_ARG_TYPE(name, 'number', value) - } -} - -function boundsError (value, length, type) { - if (Math.floor(value) !== value) { - validateNumber(value, type) - throw new errors.ERR_OUT_OF_RANGE(type || 'offset', 'an integer', value) - } - - if (length < 0) { - throw new errors.ERR_BUFFER_OUT_OF_BOUNDS() - } - - throw new errors.ERR_OUT_OF_RANGE(type || 'offset', - `>= ${type ? 1 : 0} and <= ${length}`, - value) -} - -// HELPER FUNCTIONS -// ================ - -const INVALID_BASE64_RE = /[^+/0-9A-Za-z-_]/g - -function base64clean (str) { - // Node takes equal signs as end of the Base64 encoding - str = str.split('=')[0] - // Node strips out invalid characters like \n and \t from the string, base64-js does not - str = str.trim().replace(INVALID_BASE64_RE, '') - // Node converts strings with length < 2 to '' - if (str.length < 2) return '' - // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not - while (str.length % 4 !== 0) { - str = str + '=' - } - return str -} - -function utf8ToBytes (string, units) { - units = units || Infinity - let codePoint - const length = string.length - let leadSurrogate = null - const bytes = [] - - for (let i = 0; i < length; ++i) { - codePoint = string.charCodeAt(i) - - // is surrogate component - if (codePoint > 0xD7FF && codePoint < 0xE000) { - // last char was a lead - if (!leadSurrogate) { - // no lead yet - if (codePoint > 0xDBFF) { - // unexpected trail - if ((units 
-= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) - continue - } else if (i + 1 === length) { - // unpaired lead - if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) - continue - } - - // valid lead - leadSurrogate = codePoint - - continue - } - - // 2 leads in a row - if (codePoint < 0xDC00) { - if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) - leadSurrogate = codePoint - continue - } - - // valid surrogate pair - codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000 - } else if (leadSurrogate) { - // valid bmp char, but last char was a lead - if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) - } - - leadSurrogate = null - - // encode utf8 - if (codePoint < 0x80) { - if ((units -= 1) < 0) break - bytes.push(codePoint) - } else if (codePoint < 0x800) { - if ((units -= 2) < 0) break - bytes.push( - codePoint >> 0x6 | 0xC0, - codePoint & 0x3F | 0x80 - ) - } else if (codePoint < 0x10000) { - if ((units -= 3) < 0) break - bytes.push( - codePoint >> 0xC | 0xE0, - codePoint >> 0x6 & 0x3F | 0x80, - codePoint & 0x3F | 0x80 - ) - } else if (codePoint < 0x110000) { - if ((units -= 4) < 0) break - bytes.push( - codePoint >> 0x12 | 0xF0, - codePoint >> 0xC & 0x3F | 0x80, - codePoint >> 0x6 & 0x3F | 0x80, - codePoint & 0x3F | 0x80 - ) - } else { - throw new Error('Invalid code point') - } - } - - return bytes -} - -function asciiToBytes (str) { - const byteArray = [] - for (let i = 0; i < str.length; ++i) { - // Node's code seems to be doing this and not & 0x7F.. 
- byteArray.push(str.charCodeAt(i) & 0xFF) - } - return byteArray -} - -function utf16leToBytes (str, units) { - let c, hi, lo - const byteArray = [] - for (let i = 0; i < str.length; ++i) { - if ((units -= 2) < 0) break - - c = str.charCodeAt(i) - hi = c >> 8 - lo = c % 256 - byteArray.push(lo) - byteArray.push(hi) - } - - return byteArray -} - -function base64ToBytes (str) { - return base64.toByteArray(base64clean(str)) -} - -function blitBuffer (src, dst, offset, length) { - let i - for (i = 0; i < length; ++i) { - if ((i + offset >= dst.length) || (i >= src.length)) break - dst[i + offset] = src[i] - } - return i -} - -// ArrayBuffer or Uint8Array objects from other contexts (i.e. iframes) do not pass -// the `instanceof` check but they should be treated as of that type. -// See: https://github.com/feross/buffer/issues/166 -function isInstance (obj, type) { - return obj instanceof type || - (obj != null && obj.constructor != null && obj.constructor.name != null && - obj.constructor.name === type.name) -} -function numberIsNaN (obj) { - // For IE11 support - return obj !== obj // eslint-disable-line no-self-compare -} - -// Create lookup table for `toString('hex')` -// See: https://github.com/feross/buffer/issues/219 -const hexSliceLookupTable = (function () { - const alphabet = '0123456789abcdef' - const table = new Array(256) - for (let i = 0; i < 16; ++i) { - const i16 = i * 16 - for (let j = 0; j < 16; ++j) { - table[i16 + j] = alphabet[i] + alphabet[j] - } - } - return table -})() - -// Return not function with Error if BigInt not supported -function defineBigIntMethod (fn) { - return typeof BigInt === 'undefined' ? 
BufferBigIntNotDefined : fn -} - -function BufferBigIntNotDefined () { - throw new Error('BigInt not supported') -} diff --git a/node_modules/buffer/package.json b/node_modules/buffer/package.json deleted file mode 100644 index ca1ad9a707..0000000000 --- a/node_modules/buffer/package.json +++ /dev/null @@ -1,93 +0,0 @@ -{ - "name": "buffer", - "description": "Node.js Buffer API, for the browser", - "version": "6.0.3", - "author": { - "name": "Feross Aboukhadijeh", - "email": "feross@feross.org", - "url": "https://feross.org" - }, - "bugs": { - "url": "https://github.com/feross/buffer/issues" - }, - "contributors": [ - "Romain Beauxis ", - "James Halliday " - ], - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - }, - "devDependencies": { - "airtap": "^3.0.0", - "benchmark": "^2.1.4", - "browserify": "^17.0.0", - "concat-stream": "^2.0.0", - "hyperquest": "^2.1.3", - "is-buffer": "^2.0.5", - "is-nan": "^1.3.0", - "split": "^1.0.1", - "standard": "*", - "tape": "^5.0.1", - "through2": "^4.0.2", - "uglify-js": "^3.11.5" - }, - "homepage": "https://github.com/feross/buffer", - "jspm": { - "map": { - "./index.js": { - "node": "@node/buffer" - } - } - }, - "keywords": [ - "arraybuffer", - "browser", - "browserify", - "buffer", - "compatible", - "dataview", - "uint8array" - ], - "license": "MIT", - "main": "index.js", - "types": "index.d.ts", - "repository": { - "type": "git", - "url": "git://github.com/feross/buffer.git" - }, - "scripts": { - "perf": "browserify --debug perf/bracket-notation.js > perf/bundle.js && open perf/index.html", - "perf-node": "node perf/bracket-notation.js && node perf/concat.js && node perf/copy-big.js && node perf/copy.js && node perf/new-big.js && node perf/new.js && node perf/readDoubleBE.js && node perf/readFloatBE.js && node perf/readUInt32LE.js && node perf/slice.js && node perf/writeFloatBE.js", - "size": "browserify -r ./ | uglifyjs -c -m | gzip | wc -c", - "test": "standard && node ./bin/test.js", - "test-browser-old": 
"airtap -- test/*.js", - "test-browser-old-local": "airtap --local -- test/*.js", - "test-browser-new": "airtap -- test/*.js test/node/*.js", - "test-browser-new-local": "airtap --local -- test/*.js test/node/*.js", - "test-node": "tape test/*.js test/node/*.js", - "update-authors": "./bin/update-authors.sh" - }, - "standard": { - "ignore": [ - "test/node/**/*.js", - "test/common.js", - "test/_polyfill.js", - "perf/**/*.js" - ] - }, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] -} diff --git a/node_modules/ieee754/LICENSE b/node_modules/ieee754/LICENSE deleted file mode 100644 index 5aac82c78c..0000000000 --- a/node_modules/ieee754/LICENSE +++ /dev/null @@ -1,11 +0,0 @@ -Copyright 2008 Fair Oaks Labs, Inc. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/node_modules/ieee754/README.md b/node_modules/ieee754/README.md deleted file mode 100644 index cb7527b3ce..0000000000 --- a/node_modules/ieee754/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# ieee754 [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url] [![javascript style guide][standard-image]][standard-url] - -[travis-image]: https://img.shields.io/travis/feross/ieee754/master.svg -[travis-url]: https://travis-ci.org/feross/ieee754 -[npm-image]: https://img.shields.io/npm/v/ieee754.svg -[npm-url]: https://npmjs.org/package/ieee754 -[downloads-image]: https://img.shields.io/npm/dm/ieee754.svg -[downloads-url]: https://npmjs.org/package/ieee754 -[standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg -[standard-url]: https://standardjs.com - -[![saucelabs][saucelabs-image]][saucelabs-url] - -[saucelabs-image]: https://saucelabs.com/browser-matrix/ieee754.svg -[saucelabs-url]: https://saucelabs.com/u/ieee754 - -### Read/write IEEE754 floating point numbers from/to a Buffer or array-like object. 
- -## install - -``` -npm install ieee754 -``` - -## methods - -`var ieee754 = require('ieee754')` - -The `ieee754` object has the following functions: - -``` -ieee754.read = function (buffer, offset, isLE, mLen, nBytes) -ieee754.write = function (buffer, value, offset, isLE, mLen, nBytes) -``` - -The arguments mean the following: - -- buffer = the buffer -- offset = offset into the buffer -- value = value to set (only for `write`) -- isLe = is little endian? -- mLen = mantissa length -- nBytes = number of bytes - -## what is ieee754? - -The IEEE Standard for Floating-Point Arithmetic (IEEE 754) is a technical standard for floating-point computation. [Read more](http://en.wikipedia.org/wiki/IEEE_floating_point). - -## license - -BSD 3 Clause. Copyright (c) 2008, Fair Oaks Labs, Inc. diff --git a/node_modules/ieee754/index.d.ts b/node_modules/ieee754/index.d.ts deleted file mode 100644 index f1e435487f..0000000000 --- a/node_modules/ieee754/index.d.ts +++ /dev/null @@ -1,10 +0,0 @@ -declare namespace ieee754 { - export function read( - buffer: Uint8Array, offset: number, isLE: boolean, mLen: number, - nBytes: number): number; - export function write( - buffer: Uint8Array, value: number, offset: number, isLE: boolean, - mLen: number, nBytes: number): void; - } - - export = ieee754; \ No newline at end of file diff --git a/node_modules/ieee754/index.js b/node_modules/ieee754/index.js deleted file mode 100644 index 81d26c343c..0000000000 --- a/node_modules/ieee754/index.js +++ /dev/null @@ -1,85 +0,0 @@ -/*! ieee754. BSD-3-Clause License. Feross Aboukhadijeh */ -exports.read = function (buffer, offset, isLE, mLen, nBytes) { - var e, m - var eLen = (nBytes * 8) - mLen - 1 - var eMax = (1 << eLen) - 1 - var eBias = eMax >> 1 - var nBits = -7 - var i = isLE ? (nBytes - 1) : 0 - var d = isLE ? 
-1 : 1 - var s = buffer[offset + i] - - i += d - - e = s & ((1 << (-nBits)) - 1) - s >>= (-nBits) - nBits += eLen - for (; nBits > 0; e = (e * 256) + buffer[offset + i], i += d, nBits -= 8) {} - - m = e & ((1 << (-nBits)) - 1) - e >>= (-nBits) - nBits += mLen - for (; nBits > 0; m = (m * 256) + buffer[offset + i], i += d, nBits -= 8) {} - - if (e === 0) { - e = 1 - eBias - } else if (e === eMax) { - return m ? NaN : ((s ? -1 : 1) * Infinity) - } else { - m = m + Math.pow(2, mLen) - e = e - eBias - } - return (s ? -1 : 1) * m * Math.pow(2, e - mLen) -} - -exports.write = function (buffer, value, offset, isLE, mLen, nBytes) { - var e, m, c - var eLen = (nBytes * 8) - mLen - 1 - var eMax = (1 << eLen) - 1 - var eBias = eMax >> 1 - var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0) - var i = isLE ? 0 : (nBytes - 1) - var d = isLE ? 1 : -1 - var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0 - - value = Math.abs(value) - - if (isNaN(value) || value === Infinity) { - m = isNaN(value) ? 
1 : 0 - e = eMax - } else { - e = Math.floor(Math.log(value) / Math.LN2) - if (value * (c = Math.pow(2, -e)) < 1) { - e-- - c *= 2 - } - if (e + eBias >= 1) { - value += rt / c - } else { - value += rt * Math.pow(2, 1 - eBias) - } - if (value * c >= 2) { - e++ - c /= 2 - } - - if (e + eBias >= eMax) { - m = 0 - e = eMax - } else if (e + eBias >= 1) { - m = ((value * c) - 1) * Math.pow(2, mLen) - e = e + eBias - } else { - m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen) - e = 0 - } - } - - for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {} - - e = (e << mLen) | m - eLen += mLen - for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {} - - buffer[offset + i - d] |= s * 128 -} diff --git a/node_modules/ieee754/package.json b/node_modules/ieee754/package.json deleted file mode 100644 index 7b23851384..0000000000 --- a/node_modules/ieee754/package.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "ieee754", - "description": "Read/write IEEE754 floating point numbers from/to a Buffer or array-like object", - "version": "1.2.1", - "author": { - "name": "Feross Aboukhadijeh", - "email": "feross@feross.org", - "url": "https://feross.org" - }, - "contributors": [ - "Romain Beauxis " - ], - "devDependencies": { - "airtap": "^3.0.0", - "standard": "*", - "tape": "^5.0.1" - }, - "keywords": [ - "IEEE 754", - "buffer", - "convert", - "floating point", - "ieee754" - ], - "license": "BSD-3-Clause", - "main": "index.js", - "types": "index.d.ts", - "repository": { - "type": "git", - "url": "git://github.com/feross/ieee754.git" - }, - "scripts": { - "test": "standard && npm run test-node && npm run test-browser", - "test-browser": "airtap -- test/*.js", - "test-browser-local": "airtap --local -- test/*.js", - "test-node": "tape test/*.js" - }, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": 
"consulting", - "url": "https://feross.org/support" - } - ] -} diff --git a/node_modules/inherits/LICENSE b/node_modules/inherits/LICENSE deleted file mode 100644 index dea3013d67..0000000000 --- a/node_modules/inherits/LICENSE +++ /dev/null @@ -1,16 +0,0 @@ -The ISC License - -Copyright (c) Isaac Z. Schlueter - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR -OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - diff --git a/node_modules/inherits/README.md b/node_modules/inherits/README.md deleted file mode 100644 index b1c5665855..0000000000 --- a/node_modules/inherits/README.md +++ /dev/null @@ -1,42 +0,0 @@ -Browser-friendly inheritance fully compatible with standard node.js -[inherits](http://nodejs.org/api/util.html#util_util_inherits_constructor_superconstructor). - -This package exports standard `inherits` from node.js `util` module in -node environment, but also provides alternative browser-friendly -implementation through [browser -field](https://gist.github.com/shtylman/4339901). Alternative -implementation is a literal copy of standard one located in standalone -module to avoid requiring of `util`. It also has a shim for old -browsers with no `Object.create` support. 
- -While keeping you sure you are using standard `inherits` -implementation in node.js environment, it allows bundlers such as -[browserify](https://github.com/substack/node-browserify) to not -include full `util` package to your client code if all you need is -just `inherits` function. It worth, because browser shim for `util` -package is large and `inherits` is often the single function you need -from it. - -It's recommended to use this package instead of -`require('util').inherits` for any code that has chances to be used -not only in node.js but in browser too. - -## usage - -```js -var inherits = require('inherits'); -// then use exactly as the standard one -``` - -## note on version ~1.0 - -Version ~1.0 had completely different motivation and is not compatible -neither with 2.0 nor with standard node.js `inherits`. - -If you are using version ~1.0 and planning to switch to ~2.0, be -careful: - -* new version uses `super_` instead of `super` for referencing - superclass -* new version overwrites current prototype while old one preserves any - existing fields on it diff --git a/node_modules/inherits/inherits.js b/node_modules/inherits/inherits.js deleted file mode 100644 index f71f2d9329..0000000000 --- a/node_modules/inherits/inherits.js +++ /dev/null @@ -1,9 +0,0 @@ -try { - var util = require('util'); - /* istanbul ignore next */ - if (typeof util.inherits !== 'function') throw ''; - module.exports = util.inherits; -} catch (e) { - /* istanbul ignore next */ - module.exports = require('./inherits_browser.js'); -} diff --git a/node_modules/inherits/inherits_browser.js b/node_modules/inherits/inherits_browser.js deleted file mode 100644 index 86bbb3dc29..0000000000 --- a/node_modules/inherits/inherits_browser.js +++ /dev/null @@ -1,27 +0,0 @@ -if (typeof Object.create === 'function') { - // implementation from standard node.js 'util' module - module.exports = function inherits(ctor, superCtor) { - if (superCtor) { - ctor.super_ = superCtor - ctor.prototype = 
Object.create(superCtor.prototype, { - constructor: { - value: ctor, - enumerable: false, - writable: true, - configurable: true - } - }) - } - }; -} else { - // old school shim for old browsers - module.exports = function inherits(ctor, superCtor) { - if (superCtor) { - ctor.super_ = superCtor - var TempCtor = function () {} - TempCtor.prototype = superCtor.prototype - ctor.prototype = new TempCtor() - ctor.prototype.constructor = ctor - } - } -} diff --git a/node_modules/inherits/package.json b/node_modules/inherits/package.json deleted file mode 100644 index 37b4366b83..0000000000 --- a/node_modules/inherits/package.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "name": "inherits", - "description": "Browser-friendly inheritance fully compatible with standard node.js inherits()", - "version": "2.0.4", - "keywords": [ - "inheritance", - "class", - "klass", - "oop", - "object-oriented", - "inherits", - "browser", - "browserify" - ], - "main": "./inherits.js", - "browser": "./inherits_browser.js", - "repository": "git://github.com/isaacs/inherits", - "license": "ISC", - "scripts": { - "test": "tap" - }, - "devDependencies": { - "tap": "^14.2.4" - }, - "files": [ - "inherits.js", - "inherits_browser.js" - ] -} diff --git a/node_modules/msgpack5/.github/workflows/ci.yml b/node_modules/msgpack5/.github/workflows/ci.yml deleted file mode 100644 index b9109a0c3b..0000000000 --- a/node_modules/msgpack5/.github/workflows/ci.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: CI -on: [push, pull_request] -jobs: - test: - name: ${{ matrix.node-version }} ${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [macOS-latest, windows-latest, ubuntu-latest] - node-version: [10, 12, 14, 16] - steps: - - uses: actions/checkout@v2 - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v1 - with: - node-version: ${{ matrix.node-version }} - - name: Install - run: npm i - - name: Tests - run: npm test - - automerge: - needs: test - runs-on: 
ubuntu-latest - steps: - - uses: fastify/github-action-merge-dependabot@v1 - if: ${{ github.actor == 'dependabot[bot]' && github.event_name == 'pull_request' }} - with: - github-token: ${{secrets.github_token}} diff --git a/node_modules/msgpack5/.jshintrc b/node_modules/msgpack5/.jshintrc deleted file mode 100644 index 8863fcbaf5..0000000000 --- a/node_modules/msgpack5/.jshintrc +++ /dev/null @@ -1,7 +0,0 @@ -{ - "node": true, - "laxcomma": true, - "undef": true, - "unused": true, - "asi": true -} diff --git a/node_modules/msgpack5/CONTRIBUTING.md b/node_modules/msgpack5/CONTRIBUTING.md deleted file mode 100644 index 57210d6d52..0000000000 --- a/node_modules/msgpack5/CONTRIBUTING.md +++ /dev/null @@ -1,41 +0,0 @@ -Contributing -============ - -The main development is on GitHub at http://github.com/mcollina/msgpack5. -In order to contribute, fork the repo on github and send a pull requests with topic branches. -Do not forget to provide tests for your contribution. - -Contact the lead dev --------------------- - -You can reach [@matteocollina](http://twitter.com/matteocollina) on -twitter of via email at hello@matteocollina.com. - -Running the tests -------------- - -* Fork and clone the repository -* Run `npm install` -* Run `npm test` - - -Coding guidelines ----------------- - -Adopt the prevailing code style in the repository. -This project use [JSHint](http://www.jshint.com/) to validate the -source code formatting with a pre commit hook: please respect that. - - -Contribution License Agreement ----------------- - -Project license: MIT - -* You will only Submit Contributions where You have authored 100% of - the content. -* You will only Submit Contributions to which You have the necessary - rights. This means that if You are employed You have received the - necessary permissions from Your employer to make the Contributions. -* Whatever content You Contribute will be provided under the Project - License. 
diff --git a/node_modules/msgpack5/LICENSE b/node_modules/msgpack5/LICENSE deleted file mode 100644 index 63ac963572..0000000000 --- a/node_modules/msgpack5/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Matteo Collina - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/node_modules/msgpack5/README.md b/node_modules/msgpack5/README.md deleted file mode 100644 index e330218c1c..0000000000 --- a/node_modules/msgpack5/README.md +++ /dev/null @@ -1,250 +0,0 @@ -msgpack5  [![CI](https://github.com/mcollina/msgpack5/workflows/CI/badge.svg)](https://github.com/mcollina/msgpack5/actions?query=workflow%3ACI) -======== - -A msgpack v5 implementation for node.js and the browser, with extension point support. 
- -Install -------- - -```bash -npm install msgpack5 --save -``` - - -Usage ------ - -```js -var msgpack = require('msgpack5')() // namespace our extensions - , a = new MyType(2, 'a') - , encode = msgpack.encode - , decode = msgpack.decode - -msgpack.register(0x42, MyType, mytipeEncode, mytipeDecode) - -console.log(encode({ 'hello': 'world' }).toString('hex')) -// 81a568656c6c6fa5776f726c64 -console.log(decode(encode({ 'hello': 'world' }))) -// { hello: 'world' } -console.log(encode(a).toString('hex')) -// d5426161 -console.log(decode(encode(a)) instanceof MyType) -// true -console.log(decode(encode(a))) -// { value: 'a', size: 2 } - -function MyType(size, value) { - this.value = value - this.size = size -} - -function mytipeEncode(obj) { - var buf = new Buffer(obj.size) - buf.fill(obj.value) - return buf -} - -function mytipeDecode(data) { - var result = new MyType(data.length, data.toString('utf8', 0, 1)) - , i - - for (i = 0; i < data.length; i++) { - if (data.readUInt8(0) != data.readUInt8(i)) { - throw new Error('should all be the same') - } - } - - return result -} -``` - -In the Browser ------------ - -This library is compatible with [Browserify](http://npm.im/browserify). - -If you want to use standalone, grab the file in the `dist` folder of -this repo, and use in your own HTML page, the module will expose a -`msgpack5` global. - - -``` - -``` - -### To build - -``` - npm run build -``` - -API ---- - - - -## API - - * msgpack() - * msgpack().encode() - * msgpack().decode() - * msgpack().registerEncoder() - * msgpack().registerDecoder() - * msgpack().register() - * msgpack().encoder() - * msgpack().decoder() - -------------------------------------------------------- - - -### msgpack(options(obj)) - -Creates a new instance on which you can register new types for being -encoded. - -options: - -- `forceFloat64`, a boolean to that forces all floats to be encoded as 64-bits floats. Defaults to false. 
-- `sortKeys`, a boolean to force a determinate keys order -- `compatibilityMode`, a boolean that enables "compatibility mode" which doesn't use bin format family and str 8 format. Defaults to false. -- `disableTimestampEncoding`, a boolean that when set disables the encoding of Dates into the [timestamp extension type](https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type). Defaults to false. -- `preferMap`, a boolean that forces all maps to be decoded to `Map`s rather than plain objects. This ensures that `decode(encode(new Map())) instanceof Map` and that iteration order is preserved. Defaults to false. -- `protoAction`, a string which can be `error|ignore|remove` that determines what happens when decoding a plain object with a `__proto__` property which would cause prototype poisoning. `error` (default) throws an error, `remove` removes the property, `ignore` (not recommended) allows the property, thereby causing prototype poisoning on the decoded object. - -------------------------------------------------------- - - -### encode(object) - -Encodes `object` in msgpack, returns a [bl](http://npm.im/bl). - -------------------------------------------------------- - - -### decode(buf) - -Decodes buf from in msgpack. `buf` can be a `Buffer` or a [bl](http://npm.im/bl) instance. - -In order to support a stream interface, a user must pass in a [bl](http://npm.im/bl) instance. - -------------------------------------------------------- - - -### registerEncoder(check(obj), encode(obj)) - -Register a new custom object type for being automatically encoded. -The arguments are: - -- `check`, a function that will be called to check if the passed - object should be encoded with the `encode` function -- `encode`, a function that will be called to encode an object in binary - form; this function __must__ return a `Buffer` which include the same type - for [registerDecoder](#registerDecoder). 
- -------------------------------------------------------- - - -### registerDecoder(type, decode(buf)) - -Register a new custom object type for being automatically decoded. -The arguments are: - -- `type`, is a greater than zero integer identificating the type once serialized -- `decode`, a function that will be called to decode the object from - the passed `Buffer` - - -------------------------------------------------------- - - -### register(type, constructor, encode(obj), decode(buf)) - -Register a new custom object type for being automatically encoded and -decoded. The arguments are: - -- `type`, is a greater than zero integer identificating the type once serialized -- `constructor`, the function that will be used to match the objects - with `instanceof` -- `encode`, a function that will be called to encode an object in binary - form; this function __must__ return a `Buffer` that can be - deserialized by the `decode` function -- `decode`, a function that will be called to decode the object from - the passed `Buffer` - -This is just a commodity that calls -[`registerEncoder`](#registerEncoder) and -[`registerDecoder`](#registerDecoder) internally. - -------------------------------------------------------- - - -### encoder(options) - -Builds a stream in object mode that encodes msgpack. - -Supported options: -* `wrap`, objects should be passed to encoder in wrapped object {value: data}. Wrap option should be used if you need to pass null to encoder. - - -------------------------------------------------------- - - -### decoder(options) - -Builds a stream in object mode that decodes msgpack. - -Supported options: -* `wrap`, decoded objects returned in wrapped object {value: data}. Wrap option should be used if stream contains msgpack nil. 
- - -LevelUp Support ---------------- - -__msgpack5__ can be used as a LevelUp -[`valueEncoding`](https://github.com/rvagg/node-levelup#leveluplocation-options-callback) straight away: - -```js -var level = require('level') - , pack = msgpack() - , db = level('foo', { - valueEncoding: pack - }) - , obj = { my: 'obj' } - -db.put('hello', obj, function(err) { - db.get('hello', function(err, result) { - console.log(result) - db.close() - }) -}) - -``` - -Related projects ----------------- - -- [msgpack5rpc](http://npmjs.com/package/msgpack5rpc): An implementation of the - [msgpack-rpc spec](https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md) - on top of this library. - -Disclaimer ----------- - -This library is built fully on JS and on [bl](http://npm.im/bl) to -simplify the code. Every improvement that keeps the same API is welcome. - -Acknowledgements ----------------- - -This project was kindly sponsored by [nearForm](http://nearform.com). - - -This library was originally built as the data format for -[JSChan](http://npm.im/jschan). 
- -License -------- - -MIT diff --git a/node_modules/msgpack5/benchmarks/encodedecode.js b/node_modules/msgpack5/benchmarks/encodedecode.js deleted file mode 100644 index 384b8c1f51..0000000000 --- a/node_modules/msgpack5/benchmarks/encodedecode.js +++ /dev/null @@ -1,21 +0,0 @@ -const msgpack = require('../')() -const msg = { hello: 'world' } -const encode = msgpack.encode -const decode = msgpack.decode -const max = 100000 -let i - -function run () { - for (i = 0; i < max; i++) { - decode(encode(msg)) - } -} - -// preheat -run() - -const start = Date.now() -run() -const stop = Date.now() -console.log('time', stop - start) -console.log('decode/s', max / (stop - start) * 1000) diff --git a/node_modules/msgpack5/benchmarks/parseshortmap.js b/node_modules/msgpack5/benchmarks/parseshortmap.js deleted file mode 100644 index d9fded4412..0000000000 --- a/node_modules/msgpack5/benchmarks/parseshortmap.js +++ /dev/null @@ -1,21 +0,0 @@ -const msgpack = require('../')() -const bl = require('bl') -const msg = bl(msgpack.encode({ hello: 'world' })) -const decode = msgpack.decode -const max = 1000000 -let i - -function run () { - for (i = 0; i < max; i++) { - decode(msg.duplicate()) - } -} - -// preheat -run() - -const start = Date.now() -run() -const stop = Date.now() -console.log('time', stop - start) -console.log('decode/s', max / (stop - start) * 1000) diff --git a/node_modules/msgpack5/example.js b/node_modules/msgpack5/example.js deleted file mode 100644 index 048d23290b..0000000000 --- a/node_modules/msgpack5/example.js +++ /dev/null @@ -1,44 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const msgpack = require('./')() // namespace our extensions -const a = new MyType(2, 'a') -const encode = msgpack.encode -const decode = msgpack.decode - -msgpack.register(0x42, MyType, mytipeEncode, mytipeDecode) - -console.log(encode({ hello: 'world' }).toString('hex')) -// 81a568656c6c6fa5776f726c64 -console.log(decode(encode({ hello: 'world' }))) -// { hello: 
'world' } -console.log(encode(a).toString('hex')) -// d5426161 -console.log(decode(encode(a)) instanceof MyType) -// true -console.log(decode(encode(a))) -// { value: 'a', size: 2 } - -function MyType (size, value) { - this.value = value - this.size = size -} - -function mytipeEncode (obj) { - const buf = Buffer.allocUnsafe(obj.size) - buf.fill(obj.value) - return buf -} - -function mytipeDecode (data) { - const result = new MyType(data.length, data.toString('utf8', 0, 1)) - let i - - for (i = 0; i < data.length; i++) { - if (data.readUInt8(0) != data.readUInt8(i)) { // eslint-disable-line - throw new Error('should all be the same') - } - } - - return result -} diff --git a/node_modules/msgpack5/index.js b/node_modules/msgpack5/index.js deleted file mode 100644 index b12620dfd2..0000000000 --- a/node_modules/msgpack5/index.js +++ /dev/null @@ -1,91 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const assert = require('assert') -const bl = require('bl') -const streams = require('./lib/streams') -const buildDecode = require('./lib/decoder') -const buildEncode = require('./lib/encoder') -const IncompleteBufferError = require('./lib/helpers.js').IncompleteBufferError -const DateCodec = require('./lib/codecs/DateCodec') - -function msgpack (options) { - const encodingTypes = [] - const decodingTypes = new Map() - - options = options || { - forceFloat64: false, - compatibilityMode: false, - // if true, skips encoding Dates using the msgpack - // timestamp ext format (-1) - disableTimestampEncoding: false, - preferMap: false, - // options.protoAction: 'error' (default) / 'remove' / 'ignore' - protoAction: 'error' - } - - decodingTypes.set(DateCodec.type, DateCodec.decode) - if (!options.disableTimestampEncoding) { - encodingTypes.push(DateCodec) - } - - function registerEncoder (check, encode) { - assert(check, 'must have an encode function') - assert(encode, 'must have an encode function') - - encodingTypes.push({ check, encode }) - - return this 
- } - - function registerDecoder (type, decode) { - assert(type >= 0, 'must have a non-negative type') - assert(decode, 'must have a decode function') - decodingTypes.set(type, decode) - return this - } - - function register (type, constructor, encode, decode) { - assert(constructor, 'must have a constructor') - assert(encode, 'must have an encode function') - assert(type >= 0, 'must have a non-negative type') - assert(decode, 'must have a decode function') - - function check (obj) { - return (obj instanceof constructor) - } - - function reEncode (obj) { - const buf = bl() - const header = Buffer.allocUnsafe(1) - - header.writeInt8(type, 0) - - buf.append(header) - buf.append(encode(obj)) - - return buf - } - - this.registerEncoder(check, reEncode) - this.registerDecoder(type, decode) - - return this - } - - return { - encode: buildEncode(encodingTypes, options), - decode: buildDecode(decodingTypes, options), - register, - registerEncoder, - registerDecoder, - encoder: streams.encoder, - decoder: streams.decoder, - // needed for levelup support - buffer: true, - type: 'msgpack5', - IncompleteBufferError - } -} - -module.exports = msgpack diff --git a/node_modules/msgpack5/lib/codecs/DateCodec.js b/node_modules/msgpack5/lib/codecs/DateCodec.js deleted file mode 100644 index a073d7c560..0000000000 --- a/node_modules/msgpack5/lib/codecs/DateCodec.js +++ /dev/null @@ -1,131 +0,0 @@ -const type = -1 - -function encode (dt) { - if (dt === null) { - return - } - - const millis = dt * 1 - const seconds = Math.floor(millis / 1000) - const nanos = (millis - seconds * 1000) * 1e6 - - if (seconds < 0 || seconds > 0x400000000) { - // Timestamp96 - const encoded = Buffer.allocUnsafe(13) - encoded[0] = -1 - - encoded.writeUInt32BE(nanos, 1) - - let hex = '' - if (seconds >= 0) { - const padhex = '0000000000000000' - hex = seconds.toString(16) - // add some padding - hex = padhex.slice(0, hex.length * -1) + hex - } else { - // encode seconds in 2's Complement 64Bit - // reverse 
sign - // keep all bits 0 and first 1 from right - // reverse all other bits - let bin = (seconds * -1).toString(2) - let i = bin.length - 1 - while (bin[i] === '0') { - i-- - } - bin = bin.slice(0, i).split('').map(function (bit) { return bit === '1' ? 0 : 1 }).join('') + bin.slice(i, bin.length) - // add some padding - const pad64 = '1111111111111111111111111111111111111111111111111111111111111111' - bin = pad64.slice(0, bin.length * -1) + bin - // convert to hex - bin.match(/.{1,8}/g).forEach(function (byte) { - byte = parseInt(byte, 2).toString(16) - if (byte.length === 1) { - byte = '0' + byte - } - hex += byte - }) - } - encoded.write(hex, 5, 'hex') - return encoded - } else if (nanos || seconds > 0xffffffff) { - // Timestamp64 - const encoded = Buffer.allocUnsafe(9) - encoded[0] = -1 - - const upperNanos = nanos * 4 - const upperSeconds = seconds / Math.pow(2, 32) - const upper = (upperNanos + upperSeconds) & 0xffffffff - const lower = seconds & 0xffffffff - - encoded.writeInt32BE(upper, 1) - encoded.writeInt32BE(lower, 5) - return encoded - } else { - // Timestamp32 - const encoded = Buffer.allocUnsafe(5) - encoded[0] = -1 - encoded.writeUInt32BE(Math.floor(millis / 1000), 1) - return encoded - } -} - -function check (obj) { - return typeof obj.getDate === 'function' -} - -function decode (buf) { - let seconds - let nanoseconds = 0 - let upper - let lower - let hex - - switch (buf.length) { - case 4: - // timestamp 32 stores the number of seconds that have elapsed since 1970-01-01 00:00:00 UTC in an 32-bit unsigned integer - seconds = buf.readUInt32BE(0) - break - - case 8: - // Timestamp 64 stores the number of seconds and nanoseconds that have elapsed - // since 1970-01-01 00:00:00 UTC in 32-bit unsigned integers, split 30/34 bits - upper = buf.readUInt32BE(0) - lower = buf.readUInt32BE(4) - nanoseconds = upper / 4 - seconds = ((upper & 0x03) * Math.pow(2, 32)) + lower // If we use bitwise operators, we get truncated to 32bits - break - - case 12: - // 
timestamp 96 stores the number of seconds and nanoseconds that have elapsed - // since 1970-01-01 00:00:00 UTC in 64-bit signed integer and 32-bit unsigned integer - - // get seconds in hex - hex = buf.toString('hex', 4, 12) - // check if seconds is a negative number - if (parseInt(buf.toString('hex', 4, 6), 16) & 0x80) { - // convert to binary - let bin = '' - const pad8 = '00000000' - hex.match(/.{1,2}/g).forEach(function (byte) { - byte = parseInt(byte, 16).toString(2) - byte = pad8.slice(0, byte.length * -1) + byte - bin += byte - }) - // decode seconds from 2's Complement 64Bit - // reverse all bits - // reverse sign - // remove one - seconds = -1 * parseInt(bin.split('').map(function (bit) { return bit === '1' ? 0 : 1 }).join(''), 2) - 1 - } else { - seconds = parseInt(hex, 16) - } - - nanoseconds = buf.readUInt32BE(0) - } - - const millis = (seconds * 1000) + Math.round(nanoseconds / 1E6) - - return new Date(millis) -} - -module.exports = { check, type, encode, decode } diff --git a/node_modules/msgpack5/lib/decoder.js b/node_modules/msgpack5/lib/decoder.js deleted file mode 100644 index 9296d75731..0000000000 --- a/node_modules/msgpack5/lib/decoder.js +++ /dev/null @@ -1,268 +0,0 @@ -'use strict' - -const bl = require('bl') -const IncompleteBufferError = require('./helpers.js').IncompleteBufferError - -const SIZES = { - 0xc4: 2, - 0xc5: 3, - 0xc6: 5, - 0xc7: 3, - 0xc8: 4, - 0xc9: 6, - 0xca: 5, - 0xcb: 9, - 0xcc: 2, - 0xcd: 3, - 0xce: 5, - 0xcf: 9, - 0xd0: 2, - 0xd1: 3, - 0xd2: 5, - 0xd3: 9, - 0xd4: 3, - 0xd5: 4, - 0xd6: 6, - 0xd7: 10, - 0xd8: 18, - 0xd9: 2, - 0xda: 3, - 0xdb: 5, - 0xde: 3, - 0xdc: 3, - 0xdd: 5 -} - -function isValidDataSize (dataLength, bufLength, headerLength) { - return bufLength >= headerLength + dataLength -} - -module.exports = function buildDecode (decodingTypes, options) { - const context = { decodingTypes, options, decode } - return decode - - function decode (buf) { - if (!bl.isBufferList(buf)) { - buf = bl(buf) - } - - const 
result = tryDecode(buf, 0, context) - // Handle worst case ASAP and keep code flat - if (!result) throw new IncompleteBufferError() - - buf.consume(result[1]) - return result[0] - } -} - -function decodeArray (buf, initialOffset, length, headerLength, context) { - let offset = initialOffset - const result = [] - let i = 0 - - while (i++ < length) { - const decodeResult = tryDecode(buf, offset, context) - if (!decodeResult) return null - - result.push(decodeResult[0]) - offset += decodeResult[1] - } - return [result, headerLength + offset - initialOffset] -} - -function decodeMap (buf, offset, length, headerLength, context) { - const _temp = decodeArray(buf, offset, 2 * length, headerLength, context) - if (!_temp) return null - const [result, consumedBytes] = _temp - - let isPlainObject = !context.options.preferMap - - if (isPlainObject) { - for (let i = 0; i < 2 * length; i += 2) { - if (typeof result[i] !== 'string') { - isPlainObject = false - break - } - } - } - - if (isPlainObject) { - const object = {} - for (let i = 0; i < 2 * length; i += 2) { - const key = result[i] - const val = result[i + 1] - - if (key === '__proto__') { - if (context.options.protoAction === 'error') { - throw new SyntaxError('Object contains forbidden prototype property') - } - - if (context.options.protoAction === 'remove') { - continue - } - } - - object[key] = val - } - return [object, consumedBytes] - } else { - const mapping = new Map() - for (let i = 0; i < 2 * length; i += 2) { - const key = result[i] - const val = result[i + 1] - mapping.set(key, val) - } - return [mapping, consumedBytes] - } -} - -function tryDecode (buf, initialOffset, context) { - if (buf.length <= initialOffset) return null - - const bufLength = buf.length - initialOffset - let offset = initialOffset - - const first = buf.readUInt8(offset) - offset += 1 - - const size = SIZES[first] || -1 - if (bufLength < size) return null - - if (first < 0x80) return [first, 1] // 7-bits positive ints - if ((first & 0xf0) 
=== 0x80) { - const length = first & 0x0f - const headerSize = offset - initialOffset - // we have a map with less than 15 elements - return decodeMap(buf, offset, length, headerSize, context) - } - if ((first & 0xf0) === 0x90) { - const length = first & 0x0f - const headerSize = offset - initialOffset - // we have an array with less than 15 elements - return decodeArray(buf, offset, length, headerSize, context) - } - - if ((first & 0xe0) === 0xa0) { - // fixstr up to 31 bytes - const length = first & 0x1f - if (!isValidDataSize(length, bufLength, 1)) return null - const result = buf.toString('utf8', offset, offset + length) - return [result, length + 1] - } - if (first >= 0xc0 && first <= 0xc3) return decodeConstants(first) - if (first >= 0xc4 && first <= 0xc6) { - const length = buf.readUIntBE(offset, size - 1) - offset += size - 1 - - if (!isValidDataSize(length, bufLength, size)) return null - const result = buf.slice(offset, offset + length) - return [result, size + length] - } - if (first >= 0xc7 && first <= 0xc9) { - const length = buf.readUIntBE(offset, size - 2) - offset += size - 2 - - const type = buf.readInt8(offset) - offset += 1 - - if (!isValidDataSize(length, bufLength, size)) return null - return decodeExt(buf, offset, type, length, size, context) - } - if (first >= 0xca && first <= 0xcb) return decodeFloat(buf, offset, size - 1) - if (first >= 0xcc && first <= 0xcf) return decodeUnsignedInt(buf, offset, size - 1) - if (first >= 0xd0 && first <= 0xd3) return decodeSigned(buf, offset, size - 1) - if (first >= 0xd4 && first <= 0xd8) { - const type = buf.readInt8(offset) // Signed - offset += 1 - return decodeExt(buf, offset, type, size - 2, 2, context) - } - - if (first >= 0xd9 && first <= 0xdb) { - const length = buf.readUIntBE(offset, size - 1) - offset += size - 1 - - if (!isValidDataSize(length, bufLength, size)) return null - const result = buf.toString('utf8', offset, offset + length) - return [result, size + length] - } - if (first >= 0xdc && 
first <= 0xdd) { - const length = buf.readUIntBE(offset, size - 1) - offset += size - 1 - return decodeArray(buf, offset, length, size, context) - } - if (first >= 0xde && first <= 0xdf) { - let length - switch (first) { - case 0xde: - // maps up to 2^16 elements - 2 bytes - length = buf.readUInt16BE(offset) - offset += 2 - // console.log(offset - initialOffset) - return decodeMap(buf, offset, length, 3, context) - - case 0xdf: - length = buf.readUInt32BE(offset) - offset += 4 - return decodeMap(buf, offset, length, 5, context) - } - } - if (first >= 0xe0) return [first - 0x100, 1] // 5 bits negative ints - - throw new Error('not implemented yet') -} - -function decodeSigned (buf, offset, size) { - let result - if (size === 1) result = buf.readInt8(offset) - if (size === 2) result = buf.readInt16BE(offset) - if (size === 4) result = buf.readInt32BE(offset) - if (size === 8) result = readInt64BE(buf.slice(offset, offset + 8), 0) - return [result, size + 1] -} - -function decodeExt (buf, offset, type, size, headerSize, context) { - const toDecode = buf.slice(offset, offset + size) - - const decode = context.decodingTypes.get(type) - if (!decode) throw new Error('unable to find ext type ' + type) - - const value = decode(toDecode) - return [value, headerSize + size] -} - -function decodeUnsignedInt (buf, offset, size) { - const maxOffset = offset + size - let result = 0 - while (offset < maxOffset) { result += buf.readUInt8(offset++) * Math.pow(256, maxOffset - offset) } - return [result, size + 1] -} - -function decodeConstants (first) { - if (first === 0xc0) return [null, 1] - if (first === 0xc2) return [false, 1] - if (first === 0xc3) return [true, 1] -} - -function decodeFloat (buf, offset, size) { - let result - if (size === 4) result = buf.readFloatBE(offset) - if (size === 8) result = buf.readDoubleBE(offset) - return [result, size + 1] -} - -function readInt64BE (buf, offset) { - var negate = (buf[offset] & 0x80) == 0x80; // eslint-disable-line - - if (negate) 
{ - let carry = 1 - for (let i = offset + 7; i >= offset; i--) { - const v = (buf[i] ^ 0xff) + carry - buf[i] = v & 0xff - carry = v >> 8 - } - } - - const hi = buf.readUInt32BE(offset + 0) - const lo = buf.readUInt32BE(offset + 4) - return (hi * 4294967296 + lo) * (negate ? -1 : +1) -} diff --git a/node_modules/msgpack5/lib/encoder.js b/node_modules/msgpack5/lib/encoder.js deleted file mode 100644 index 0ed736062b..0000000000 --- a/node_modules/msgpack5/lib/encoder.js +++ /dev/null @@ -1,298 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const bl = require('bl') -const isFloat = require('./helpers.js').isFloat - -module.exports = function buildEncode (encodingTypes, options) { - function encode (obj) { - if (obj === undefined) throw new Error('undefined is not encodable in msgpack!') - - if (obj === null) return Buffer.from([0xc0]) - if (obj === true) return Buffer.from([0xc3]) - if (obj === false) return Buffer.from([0xc2]) - - if (obj instanceof Map) return encodeMap(obj, options, encode) - - if (typeof obj === 'string') return encodeString(obj, options) - - if (obj && (obj.readUInt32LE || obj instanceof Uint8Array)) { - if (obj instanceof Uint8Array) { - obj = Buffer.from(obj) - } - // weird hack to support Buffer - // and Buffer-like objects - const _getBufferHeader = options.compatibilityMode ? 
getCompatibleBufferHeader : getBufferHeader - return bl([_getBufferHeader(obj.length), obj]) - } - if (Array.isArray(obj)) return encodeArray(obj, encode) - if (typeof obj === 'object') return encodeExt(obj, encodingTypes) || encodeObject(obj, options, encode) - if (typeof obj === 'number') return encodeNumber(obj, options) - - throw new Error('not implemented yet') - } - - return function (obj) { - return encode(obj).slice() - } -} - -// -// -// === MENTAL SEPARATOR === -// -// - -function encodeArray (array, encode) { - const acc = [getHeader(array.length, 0x90, 0xdc)] - - // This has to be forEach; Array.prototype.map preserves missing values and - // Array.prototype.values yields them as undefined - array.forEach(item => { - acc.push(encode(item)) - }) - - if (acc.length !== array.length + 1) { - throw new Error('Sparse arrays are not encodable in msgpack') - } - - return bl(acc) -} - -function encodeMap (map, options, encode) { - const acc = [getHeader(map.size, 0x80, 0xde)] - const keys = [...map.keys()] - - if (!options.preferMap) { - if (keys.every(item => typeof item === 'string')) { - console.warn('Map with string only keys will be deserialized as an object!') - } - } - - keys.forEach(key => { - acc.push(encode(key), encode(map.get(key))) - }) - return bl(acc) -} - -function encodeObject (obj, options, encode) { - const keys = [] - - for (const key in obj) { - if (Object.prototype.hasOwnProperty.call(obj, key) && - obj[key] !== undefined && - typeof obj[key] !== 'function') { - keys.push(key) - } - } - - const acc = [getHeader(keys.length, 0x80, 0xde)] - - if (options.sortKeys) keys.sort() - - keys.forEach(key => { - acc.push(encode(key), encode(obj[key])) - }) - - return bl(acc) -} - -function write64BitUint (buf, offset, num) { - const lo = num % 4294967296 - const hi = Math.floor(num / 4294967296) - - buf.writeUInt32BE(hi, offset + 0) - buf.writeUInt32BE(lo, offset + 4) -} - -function write64BitInt (buf, offset, num) { - const negate = num < 0 - num = 
Math.abs(num) - write64BitUint(buf, offset, num) - if (negate) negate64BitInt(buf, offset) -} - -function negate64BitInt (buf, offset) { - let i = offset + 8 - - // Optimization based on the fact that: - // buf[i] == 0x00 => (buf[i] ^ 0xff) + 1 = 0x100 = 0x00 + 1 curry - - while (i-- > offset) { - if (buf[i] === 0x00) continue - buf[i] = (buf[i] ^ 0xff) + 1 - break - } - - while (i-- > offset) { - buf[i] = buf[i] ^ 0xff - } -} - -const fround = Math.fround - -function encodeFloat (obj, forceFloat64) { - let buf - - if (forceFloat64 || !fround || !Object.is(fround(obj), obj)) { - buf = Buffer.allocUnsafe(9) - buf[0] = 0xcb - buf.writeDoubleBE(obj, 1) - } else { - buf = Buffer.allocUnsafe(5) - buf[0] = 0xca - buf.writeFloatBE(obj, 1) - } - - return buf -} - -function encodeExt (obj, encodingTypes) { - const codec = encodingTypes.find(codec => codec.check(obj)) - if (!codec) return null - const encoded = codec.encode(obj) - if (!encoded) return null - - return bl([getExtHeader(encoded.length - 1), encoded]) -} - -function getExtHeader (length) { - if (length === 1) return Buffer.from([0xd4]) - if (length === 2) return Buffer.from([0xd5]) - if (length === 4) return Buffer.from([0xd6]) - if (length === 8) return Buffer.from([0xd7]) - if (length === 16) return Buffer.from([0xd8]) - - if (length < 256) return Buffer.from([0xc7, length]) - if (length < 0x10000) return Buffer.from([0xc8, length >> 8, length & 0x00ff]) - return Buffer.from([0xc9, length >> 24, (length >> 16) & 0x000000ff, (length >> 8) & 0x000000ff, length & 0x000000ff]) -} - -function getHeader (length, tag1, tag2) { - if (length < 16) return Buffer.from([tag1 | length]) - const size = length < 0x10000 ? 2 : 4 - const buf = Buffer.allocUnsafe(1 + size) - buf[0] = length < 0x10000 ? 
tag2 : tag2 + 1 - buf.writeUIntBE(length, 1, size) - - return buf -} - -function encodeString (obj, options) { - const len = Buffer.byteLength(obj) - let buf - if (len < 32) { - buf = Buffer.allocUnsafe(1 + len) - buf[0] = 0xa0 | len - if (len > 0) { - buf.write(obj, 1) - } - } else if (len <= 0xff && !options.compatibilityMode) { - // str8, but only when not in compatibility mode - buf = Buffer.allocUnsafe(2 + len) - buf[0] = 0xd9 - buf[1] = len - buf.write(obj, 2) - } else if (len <= 0xffff) { - buf = Buffer.allocUnsafe(3 + len) - buf[0] = 0xda - buf.writeUInt16BE(len, 1) - buf.write(obj, 3) - } else { - buf = Buffer.allocUnsafe(5 + len) - buf[0] = 0xdb - buf.writeUInt32BE(len, 1) - buf.write(obj, 5) - } - return buf -} - -function getBufferHeader (length) { - let header - if (length <= 0xff) { - header = Buffer.allocUnsafe(2) - header[0] = 0xc4 - header[1] = length - } else if (length <= 0xffff) { - header = Buffer.allocUnsafe(3) - header[0] = 0xc5 - header.writeUInt16BE(length, 1) - } else { - header = Buffer.allocUnsafe(5) - header[0] = 0xc6 - header.writeUInt32BE(length, 1) - } - - return header -} - -function getCompatibleBufferHeader (length) { - let header - if (length <= 0x1f) { - // fix raw header: 101XXXXX - header = Buffer.allocUnsafe(1) - header[0] = 0xa0 | length - } else if (length <= 0xffff) { - // raw 16 header: 0xda, XXXXXXXX, XXXXXXXX - header = Buffer.allocUnsafe(3) - header[0] = 0xda - header.writeUInt16BE(length, 1) - } else { - // raw 32 header: 0xdb, XXXXXXXX, XXXXXXXX, XXXXXXXX, XXXXXXXX - header = Buffer.allocUnsafe(5) - header[0] = 0xdb - header.writeUInt32BE(length, 1) - } - return header -} - -function encodeNumber (obj, options) { - let buf - if (isFloat(obj)) return encodeFloat(obj, options.forceFloat64) - if (Math.abs(obj) > 9007199254740991) { - return encodeFloat(obj, true) - } - - if (obj >= 0) { - if (obj < 128) { - return Buffer.from([obj]) - } else if (obj < 256) { - return Buffer.from([0xcc, obj]) - } else if (obj < 65536) { 
- return Buffer.from([0xcd, 0xff & (obj >> 8), 0xff & (obj)]) - } else if (obj <= 0xffffffff) { - return Buffer.from([0xce, 0xff & (obj >> 24), 0xff & (obj >> 16), 0xff & (obj >> 8), 0xff & (obj)]) - } else if (obj <= 9007199254740991) { - buf = Buffer.allocUnsafe(9) - buf[0] = 0xcf - write64BitUint(buf, 1, obj) - } - } else { - if (obj >= -32) { - buf = Buffer.allocUnsafe(1) - buf[0] = 0x100 + obj - } else if (obj >= -128) { - buf = Buffer.allocUnsafe(2) - buf[0] = 0xd0 - buf.writeInt8(obj, 1) - } else if (obj >= -32768) { - buf = Buffer.allocUnsafe(3) - buf[0] = 0xd1 - buf.writeInt16BE(obj, 1) - } else if (obj > -214748365) { - buf = Buffer.allocUnsafe(5) - buf[0] = 0xd2 - buf.writeInt32BE(obj, 1) - } else if (obj >= -9007199254740991) { - buf = Buffer.allocUnsafe(9) - buf[0] = 0xd3 - write64BitInt(buf, 1, obj) - } - } - return buf -} - -// function order(num, n = 1, step = 2) { -// while (num = num >> step) n++; -// return n -// } diff --git a/node_modules/msgpack5/lib/helpers.js b/node_modules/msgpack5/lib/helpers.js deleted file mode 100644 index 26fdf21ea4..0000000000 --- a/node_modules/msgpack5/lib/helpers.js +++ /dev/null @@ -1,20 +0,0 @@ -'use strict' - -const util = require('util') - -exports.IncompleteBufferError = IncompleteBufferError - -function IncompleteBufferError (message) { - Error.call(this) // super constructor - if (Error.captureStackTrace) { - Error.captureStackTrace(this, this.constructor) // super helper method to include stack trace in error object - } - this.name = this.constructor.name - this.message = message || 'unable to decode' -} - -util.inherits(IncompleteBufferError, Error) - -exports.isFloat = function isFloat (n) { - return n % 1 !== 0 -} diff --git a/node_modules/msgpack5/lib/streams.js b/node_modules/msgpack5/lib/streams.js deleted file mode 100644 index 714f798c7d..0000000000 --- a/node_modules/msgpack5/lib/streams.js +++ /dev/null @@ -1,90 +0,0 @@ -'use strict' - -const Transform = require('readable-stream').Transform -const 
inherits = require('inherits') -const bl = require('bl') - -function Base (opts) { - opts = opts || {} - - opts.objectMode = true - opts.highWaterMark = 16 - - Transform.call(this, opts) - - this._msgpack = opts.msgpack -} - -inherits(Base, Transform) - -function Encoder (opts) { - if (!(this instanceof Encoder)) { - opts = opts || {} - opts.msgpack = this - return new Encoder(opts) - } - - Base.call(this, opts) - this._wrap = ('wrap' in opts) && opts.wrap -} - -inherits(Encoder, Base) - -Encoder.prototype._transform = function (obj, enc, done) { - let buf = null - - try { - buf = this._msgpack.encode(this._wrap ? obj.value : obj).slice(0) - } catch (err) { - this.emit('error', err) - return done() - } - - this.push(buf) - done() -} - -function Decoder (opts) { - if (!(this instanceof Decoder)) { - opts = opts || {} - opts.msgpack = this - return new Decoder(opts) - } - - Base.call(this, opts) - - this._chunks = bl() - this._wrap = ('wrap' in opts) && opts.wrap -} - -inherits(Decoder, Base) - -Decoder.prototype._transform = function (buf, enc, done) { - if (buf) { - this._chunks.append(buf) - } - - try { - let result = this._msgpack.decode(this._chunks) - if (this._wrap) { - result = { value: result } - } - this.push(result) - } catch (err) { - if (err instanceof this._msgpack.IncompleteBufferError) { - done() - } else { - this.emit('error', err) - } - return - } - - if (this._chunks.length > 0) { - this._transform(null, enc, done) - } else { - done() - } -} - -module.exports.decoder = Decoder -module.exports.encoder = Encoder diff --git a/node_modules/msgpack5/package.json b/node_modules/msgpack5/package.json deleted file mode 100644 index 0f5136439c..0000000000 --- a/node_modules/msgpack5/package.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "msgpack5", - "version": "6.0.2", - "description": "A msgpack v5 implementation for node.js and the browser, with extension points", - "main": "index.js", - "scripts": { - "test": "standard && tape test/* | 
tap-mocha-reporter dot", - "build": "npm run browserify && npm run dist", - "browserify": "browserify index.js -o dist/msgpack5.js -s msgpack5", - "dist": "uglifyjs dist/msgpack5.js -o dist/msgpack5.min.js" - }, - "pre-commit": [ - "test" - ], - "repository": { - "type": "git", - "url": "git://github.com/mcollina/msgpack5.git" - }, - "keywords": [ - "msgpack", - "extension", - "v5", - "MessagePack", - "ext" - ], - "author": "Matteo collina ", - "license": "MIT", - "bugs": { - "url": "https://github.com/mcollina/msgpack5/issues" - }, - "homepage": "https://github.com/mcollina/msgpack5", - "devDependencies": { - "browserify": "^17.0.0", - "memdb": "^1.3.1", - "pre-commit": "^1.2.2", - "standard": "^16.0.0", - "tap-mocha-reporter": "^5.0.0", - "tape": "^5.0.0", - "uglify-js": "^3.4.9" - }, - "standard": { - "ignore": [ - "dist/" - ] - }, - "dependencies": { - "bl": "^5.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.0.0", - "safe-buffer": "^5.1.2" - } -} diff --git a/node_modules/msgpack5/spec.html b/node_modules/msgpack5/spec.html deleted file mode 100644 index 61d925e330..0000000000 --- a/node_modules/msgpack5/spec.html +++ /dev/null @@ -1,459 +0,0 @@ -

MessagePack specification

-

MessagePack is an object serialization specification like JSON.

-

MessagePack has two concepts: type system and formats.

-

Serialization is conversion from application objects into MessagePack formats via MessagePack type system.

-

Deserialization is conversion from MessagePack formats into application objects via MessagePack type system.

-
Serialization:
-    Application objects
-    -->  MessagePack type system
-    -->  MessagePack formats (byte array)
-
-Deserialization:
-    MessagePack formats (byte array)
-    -->  MessagePack type system
-    -->  Application objects
-

This document describes the MessagePack type system, MesagePack formats and conversion of them.

-

Table of contents

- -

-

Type system

-
    -
  • Types
      -
    • Integer represents an integer
    • -
    • Nil represents nil
    • -
    • Boolean represents true or false
    • -
    • Float represents a floating point number
    • -
    • Raw
        -
      • String extending Raw type represents a UTF-8 string
      • -
      • Binary extending Raw type represents a byte array
      • -
      -
    • -
    • Array represents a sequence of objects
    • -
    • Map represents key-value pairs of objects
    • -
    • Extended implements Extension interface: represents a tuple of type information and a byte array where type information is an integer whose meaning is defined by applications
    • -
    -
  • -
  • Interfaces
      -
    • Extension represents a tuple of an integer and a byte array where the integer represents type information and the byte array represents data. The format of the data is defined by concrete types
    • -
    -
  • -
-

-

Limitation

-
    -
  • a value of an Integer object is limited from -(2^63) upto (2^64)-1
  • -
  • a value of a Float object is IEEE 754 single or double precision floating-point number
  • -
  • maximum length of a Binary object is (2^32)-1
  • -
  • maximum byte size of a String object is (2^32)-1
  • -
  • String objects may contain invalid byte sequence and the behavior of a deserializer depends on the actual implementation when it received invalid byte sequence
      -
    • Deserializers should provide functionality to get the original byte array so that applications can decide how to handle the object
    • -
    -
  • -
  • maximum number of elements of an Array object is (2^32)-1
  • -
  • maximum number of key-value associations of a Map object is (2^32)-1
  • -
-

-

Extension type

-

MessagePack allows applications to define application-specific types using the Extended type. -Extended type consists of an integer and a byte array where the integer represents a kind of types and the byte array represents data.

-

Applications can assign 0 to 127 to store application-specific type information.

-

MessagePack reserves -1 to -128 for future extension to add predefined types which will be described in separated documents.

-
[0, 127]: application-specific types
-[-1, -128]: reserved for predefined types
-

-

Formats

-

-

Overview

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
format namefirst byte (in binary)first byte (in hex)
positive fixint0xxxxxxx0x00 - 0x7f
fixmap1000xxxx0x80 - 0x8f
fixarray1001xxxx0x90 - 0x9f
fixstr101xxxxx0xa0 - 0xbf
nil110000000xc0
(never used)110000010xc1
false110000100xc2
true110000110xc3
bin 8110001000xc4
bin 16110001010xc5
bin 32110001100xc6
ext 8110001110xc7
ext 16110010000xc8
ext 32110010010xc9
float 32110010100xca
float 64110010110xcb
uint 8110011000xcc
uint 16110011010xcd
uint 32110011100xce
uint 64110011110xcf
int 8110100000xd0
int 16110100010xd1
int 32110100100xd2
int 64110100110xd3
fixext 1110101000xd4
fixext 2110101010xd5
fixext 4110101100xd6
fixext 8110101110xd7
fixext 16110110000xd8
str 8110110010xd9
str 16110110100xda
str 32110110110xdb
array 16110111000xdc
array 32110111010xdd
map 16110111100xde
map 32110111110xdf
negative fixint111xxxxx0xe0 - 0xff
- - -

-

Notation in diagrams

-
one byte:
-+--------+
-|        |
-+--------+
-
-a variable number of bytes:
-+========+
-|        |
-+========+
-
-variable number of objects stored in MessagePack format:
-+~~~~~~~~~~~~~~~~~+
-|                 |
-+~~~~~~~~~~~~~~~~~+
-

X, Y, Z and A are the symbols that will be replaced by an actual bit.

-

-

nil format

-

Nil format stores nil in 1 byte.

-
nil:
-+--------+
-|  0xc0  |
-+--------+
-

-

bool format family

-

Bool format family stores false or true in 1 byte.

-
false:
-+--------+
-|  0xc2  |
-+--------+
-
-true:
-+--------+
-|  0xc3  |
-+--------+
-

-

int format family

-

Int format family stores an integer in 1, 2, 3, 5, or 9 bytes.

-
positive fixnum stores 7-bit positive integer
-+--------+
-|0XXXXXXX|
-+--------+
-
-negative fixnum stores 5-bit negative integer
-+--------+
-|111YYYYY|
-+--------+
-
-* 0XXXXXXX is 8-bit unsigned integer
-* 111YYYYY is 8-bit signed integer
-
-uint 8 stores a 8-bit unsigned integer
-+--------+--------+
-|  0xcc  |ZZZZZZZZ|
-+--------+--------+
-
-uint 16 stores a 16-bit big-endian unsigned integer
-+--------+--------+--------+
-|  0xcd  |ZZZZZZZZ|ZZZZZZZZ|
-+--------+--------+--------+
-
-uint 32 stores a 32-bit big-endian unsigned integer
-+--------+--------+--------+--------+--------+
-|  0xce  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ
-+--------+--------+--------+--------+--------+
-
-uint 64 stores a 64-bit big-endian unsigned integer
-+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-|  0xcf  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|
-+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-
-int 8 stores a 8-bit signed integer
-+--------+--------+
-|  0xd0  |ZZZZZZZZ|
-+--------+--------+
-
-int 16 stores a 16-bit big-endian signed integer
-+--------+--------+--------+
-|  0xd1  |ZZZZZZZZ|ZZZZZZZZ|
-+--------+--------+--------+
-
-int 32 stores a 32-bit big-endian signed integer
-+--------+--------+--------+--------+--------+
-|  0xd2  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|
-+--------+--------+--------+--------+--------+
-
-int 64 stores a 64-bit big-endian signed integer
-+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-|  0xd3  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|
-+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-

-

float format family

-

Float format family stores an floating point number in 5 bytes or 9 bytes.

-
float 32 stores a floating point number in IEEE 754 single precision floating point number format:
-+--------+--------+--------+--------+--------+
-|  0xca  |XXXXXXXX|XXXXXXXX|XXXXXXXX|XXXXXXXX
-+--------+--------+--------+--------+--------+
-
-float 64 stores a floating point number in IEEE 754 double precision floating point number format:
-+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-|  0xcb  |YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|
-+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-
-where
-* XXXXXXXX_XXXXXXXX_XXXXXXXX_XXXXXXXX is a big-endian IEEE 754 single precision floating point number
-* YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY is a big-endian
-  IEEE 754 double precision floating point number
-

-

str format family

-

Str format family stores an byte array in 1, 2, 3, or 5 bytes of extra bytes in addition to the size of the byte array.

-
fixstr stores a byte array whose length is upto 31 bytes:
-+--------+========+
-|101XXXXX|  data  |
-+--------+========+
-
-str 8 stores a byte array whose length is upto (2^8)-1 bytes:
-+--------+--------+========+
-|  0xd9  |YYYYYYYY|  data  |
-+--------+--------+========+
-
-str 16 stores a byte array whose length is upto (2^16)-1 bytes:
-+--------+--------+--------+========+
-|  0xda  |ZZZZZZZZ|ZZZZZZZZ|  data  |
-+--------+--------+--------+========+
-
-str 32 stores a byte array whose length is upto (2^32)-1 bytes:
-+--------+--------+--------+--------+--------+========+
-|  0xdb  |AAAAAAAA|AAAAAAAA|AAAAAAAA|AAAAAAAA|  data  |
-+--------+--------+--------+--------+--------+========+
-
-where
-* XXXXX is a 5-bit unsigned integer which represents N
-* YYYYYYYY is a 8-bit unsigned integer which represents N
-* ZZZZZZZZ_ZZZZZZZZ is a 16-bit big-endian unsigned integer which represents N
-* AAAAAAAA_AAAAAAAA_AAAAAAAA_AAAAAAAA is a 32-bit big-endian unsigned integer which represents N
-* N is the length of data
-

-

bin format family

-

Bin format family stores an byte array in 2, 3, or 5 bytes of extra bytes in addition to the size of the byte array.

-
bin 8 stores a byte array whose length is upto (2^8)-1 bytes:
-+--------+--------+========+
-|  0xc4  |XXXXXXXX|  data  |
-+--------+--------+========+
-
-bin 16 stores a byte array whose length is upto (2^16)-1 bytes:
-+--------+--------+--------+========+
-|  0xc5  |YYYYYYYY|YYYYYYYY|  data  |
-+--------+--------+--------+========+
-
-bin 32 stores a byte array whose length is upto (2^32)-1 bytes:
-+--------+--------+--------+--------+--------+========+
-|  0xc6  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|  data  |
-+--------+--------+--------+--------+--------+========+
-
-where
-* XXXXXXXX is a 8-bit unsigned integer which represents N
-* YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N
-* ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N
-* N is the length of data
-

-

array format family

-

Array format family stores a sequence of elements in 1, 3, or 5 bytes of extra bytes in addition to the elements.

-
fixarray stores an array whose length is upto 15 elements:
-+--------+~~~~~~~~~~~~~~~~~+
-|1001XXXX|    N objects    |
-+--------+~~~~~~~~~~~~~~~~~+
-
-array 16 stores an array whose length is upto (2^16)-1 elements:
-+--------+--------+--------+~~~~~~~~~~~~~~~~~+
-|  0xdc  |YYYYYYYY|YYYYYYYY|    N objects    |
-+--------+--------+--------+~~~~~~~~~~~~~~~~~+
-
-array 32 stores an array whose length is upto (2^32)-1 elements:
-+--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+
-|  0xdd  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|    N objects    |
-+--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+
-
-where
-* XXXX is a 4-bit unsigned integer which represents N
-* YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N
-* ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N
-    N is the size of an array
-

-

map format family

-

Map format family stores a sequence of key-value pairs in 1, 3, or 5 bytes of extra bytes in addition to the key-value pairs.

-
fixmap stores a map whose length is upto 15 elements
-+--------+~~~~~~~~~~~~~~~~~+
-|1000XXXX|   N*2 objects   |
-+--------+~~~~~~~~~~~~~~~~~+
-
-map 16 stores a map whose length is upto (2^16)-1 elements
-+--------+--------+--------+~~~~~~~~~~~~~~~~~+
-|  0xde  |YYYYYYYY|YYYYYYYY|   N*2 objects   |
-+--------+--------+--------+~~~~~~~~~~~~~~~~~+
-
-map 32 stores a map whose length is upto (2^32)-1 elements
-+--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+
-|  0xdf  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|   N*2 objects   |
-+--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+
-
-where
-* XXXX is a 4-bit unsigned integer which represents N
-* YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N
-* ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N
-* N is the size of a map
-* odd elements in objects are keys of a map
-* the next element of a key is its associated value
-

-

ext format family

-

Ext format family stores a tuple of an integer and a byte array.

-
fixext 1 stores an integer and a byte array whose length is 1 byte
-+--------+--------+--------+
-|  0xd4  |  type  |  data  |
-+--------+--------+--------+
-
-fixext 2 stores an integer and a byte array whose length is 2 bytes
-+--------+--------+--------+--------+
-|  0xd5  |  type  |       data      |
-+--------+--------+--------+--------+
-
-fixext 4 stores an integer and a byte array whose length is 4 bytes
-+--------+--------+--------+--------+--------+--------+
-|  0xd6  |  type  |                data               |
-+--------+--------+--------+--------+--------+--------+
-
-fixext 8 stores an integer and a byte array whose length is 8 bytes
-+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-|  0xd7  |  type  |                                  data                                 |
-+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-
-fixext 16 stores an integer and a byte array whose length is 16 bytes
-+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-|  0xd8  |  type  |                                  data                                  
-+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-+--------+--------+--------+--------+--------+--------+--------+--------+
-                              data (cont.)                              |
-+--------+--------+--------+--------+--------+--------+--------+--------+
-
-ext 8 stores an integer and a byte array whose length is upto (2^8)-1 bytes:
-+--------+--------+--------+========+
-|  0xc7  |XXXXXXXX|  type  |  data  |
-+--------+--------+--------+========+
-
-ext 16 stores an integer and a byte array whose length is upto (2^16)-1 bytes:
-+--------+--------+--------+--------+========+
-|  0xc8  |YYYYYYYY|YYYYYYYY|  type  |  data  |
-+--------+--------+--------+--------+========+
-
-ext 32 stores an integer and a byte array whose length is upto (2^32)-1 bytes:
-+--------+--------+--------+--------+--------+--------+========+
-|  0xc9  |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|  type  |  data  |
-+--------+--------+--------+--------+--------+--------+========+
-
-where
-* XXXXXXXX is a 8-bit unsigned integer which represents N
-* YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N
-* ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a big-endian 32-bit unsigned integer which represents N
-* N is the length of data
-* type is a signed 8-bit integer
-* type < 0 is reserved for future extension including 2-byte type information
-

-

Serialization: type to format conversion

-

MessagePack serializers convert MessagePack types into formats as following:

- - - - - - - - - - - -
source typesoutput format
Integerint format family (positive fixint, negative fixint, int 8/16/32/64 or uint 8/16/32/64)
Nilnil
Booleanbool format family (false or true)
Floatfloat format family (float 32/64)
Stringstr format family (fixstr or str 8/16/32)
Binarybin format family (bin 8/16/32)
Arrayarray format family (fixarray or array 16/32)
Mapmap format family (fixmap or map 16/32)
Extendedext format family (fixext or ext 8/16/32)
- -

If an object can be represented in multiple possible output formats, serializers SHOULD use the format which represents the data in the smallest number of bytes.

-

-

Deserialization: format to type conversion

-

MessagePack deserializers convert MessagePack formats into types as following:

- - - - - - - - - - - -
source formatsoutput type
positive fixint, negative fixint, int 8/16/32/64 and uint 8/16/32/64Integer
nilNil
false and trueBoolean
float 32/64Float
fixstr and str 8/16/32String
bin 8/16/32Binary
fixarray and array 16/32Array
fixmap and map 16/32Map
fixext and ext 8/16/32Extended
- -

-

Future discussion

-

-

Profile

-

Profile is an idea that Applications restrict the semantics of MessagePack while sharing the same syntax to adapt MessagePack for certain use cases.

-

For example, applications may remove Binary type, restrict keys of map objects to be String type, and put some restrictions to make the semantics compatible with JSON. Applications which use schema may remove String and Binary types and deal with byte arrays as Raw type. Applications which use hash (digest) of serialized data may sort keys of maps to make the serialized data deterministic.

-

-

implementation guidelines

-

-

Upgrading MessagePack specification

-

MessagePack specification is changed at this time. -Here is a guideline to upgrade existent MessagePack implementations:

-
    -
  • In a minor release, deserializers support the bin format family and str 8 format. The type of deserialized objects should be same with raw 16 (== str 16) or raw 32 (== str 32)
  • -
  • In a major release, serializers distinguish Binary type and String type using bin format family and str format family
      -
    • At the same time, serializers should offer "compatibility mode" which doesn't use bin format family and str 8 format
    • -
    -
  • -
-
-
MessagePack specification
-Last modified at 2013-04-21 21:52:33 -0700
-Sadayuki Furuhashi © 2013-04-21 21:52:33 -0700
-
diff --git a/node_modules/msgpack5/spec.md b/node_modules/msgpack5/spec.md deleted file mode 100644 index 0523896a58..0000000000 --- a/node_modules/msgpack5/spec.md +++ /dev/null @@ -1,499 +0,0 @@ -# MessagePack specification - -MessagePack is an object serialization specification like JSON. - -MessagePack has two concepts: **type system** and **formats**. - -Serialization is conversion from application objects into MessagePack formats via MessagePack type system. - -Deserialization is conversion from MessagePack formats into application objects via MessagePack type system. - - Serialization: - Application objects - --> MessagePack type system - --> MessagePack formats (byte array) - - Deserialization: - MessagePack formats (byte array) - --> MessagePack type system - --> Application objects - -This document describes the MessagePack type system, MesagePack formats and conversion of them. - -## Table of contents - -* MessagePack specification - * [Type system](#types) - * [Limitation](#types-limitation) - * [Extension type](#types-extension-type) - * [Formats](#formats) - * [Overview](#formats-overview) - * [Notation in diagrams](#formats-notation) - * [nil format family](#formats-nil) - * [bool format family](#formats-bool) - * [int format family](#formats-int) - * [float format family](#formats-float) - * [str format family](#formats-str) - * [bin format family](#formats-bin) - * [array format family](#formats-array) - * [map format family](#formats-map) - * [ext format family](#formats-ext) - * [Serialization: type to format conversion](#serialization) - * [Deserialization: format to type conversion](#deserialization) - * [Future discussion](#future) - * [Profile](#future-profiles) - * [Implementation guidelines](#impl) - * [Upgrade MessagePack specification](#impl-upgrade) - -
-## Type system - -* Types - * **Integer** represents an integer - * **Nil** represents nil - * **Boolean** represents true or false - * **Float** represents a floating point number - * **Raw** - * **String** extending Raw type represents a UTF-8 string - * **Binary** extending Raw type represents a byte array - * **Array** represents a sequence of objects - * **Map** represents key-value pairs of objects - * **Extended** implements Extension interface: represents a tuple of type information and a byte array where type information is an integer whose meaning is defined by applications -* Interfaces - * **Extension** represents a tuple of an integer and a byte array where the integer represents type information and the byte array represents data. The format of the data is defined by concrete types - - -### Limitation - -* a value of an Integer object is limited from `-(2^63)` upto `(2^64)-1` -* a value of a Float object is IEEE 754 single or double precision floating-point number -* maximum length of a Binary object is `(2^32)-1` -* maximum byte size of a String object is `(2^32)-1` -* String objects may contain invalid byte sequence and the behavior of a deserializer depends on the actual implementation when it received invalid byte sequence - * Deserializers should provide functionality to get the original byte array so that applications can decide how to handle the object -* maximum number of elements of an Array object is `(2^32)-1` -* maximum number of key-value associations of a Map object is `(2^32)-1` - - -### Extension type - -MessagePack allows applications to define application-specific types using the Extended type. -Extended type consists of an integer and a byte array where the integer represents a kind of types and the byte array represents data. - -Applications can assign `0` to `127` to store application-specific type information. 
- -MessagePack reserves `-1` to `-128` for future extension to add predefined types which will be described in separated documents. - - [0, 127]: application-specific types - [-1, -128]: reserved for predefined types - - - -## Formats - - -### Overview - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
format namefirst byte (in binary)first byte (in hex)
positive fixint0xxxxxxx0x00 - 0x7f
fixmap1000xxxx0x80 - 0x8f
fixarray1001xxxx0x90 - 0x9f
fixstr101xxxxx0xa0 - 0xbf
nil110000000xc0
(never used)110000010xc1
false110000100xc2
true110000110xc3
bin 8110001000xc4
bin 16110001010xc5
bin 32110001100xc6
ext 8110001110xc7
ext 16110010000xc8
ext 32110010010xc9
float 32110010100xca
float 64110010110xcb
uint 8110011000xcc
uint 16110011010xcd
uint 32110011100xce
uint 64110011110xcf
int 8110100000xd0
int 16110100010xd1
int 32110100100xd2
int 64110100110xd3
fixext 1110101000xd4
fixext 2110101010xd5
fixext 4110101100xd6
fixext 8110101110xd7
fixext 16110110000xd8
str 8110110010xd9
str 16110110100xda
str 32110110110xdb
array 16110111000xdc
array 32110111010xdd
map 16110111100xde
map 32110111110xdf
negative fixint111xxxxx0xe0 - 0xff
- - -
-### Notation in diagrams - - one byte: - +--------+ - | | - +--------+ - - a variable number of bytes: - +========+ - | | - +========+ - - variable number of objects stored in MessagePack format: - +~~~~~~~~~~~~~~~~~+ - | | - +~~~~~~~~~~~~~~~~~+ - -`X`, `Y`, `Z` and `A` are the symbols that will be replaced by an actual bit. - - -### nil format - -Nil format stores nil in 1 byte. - - nil: - +--------+ - | 0xc0 | - +--------+ - - -### bool format family - -Bool format family stores false or true in 1 byte. - - false: - +--------+ - | 0xc2 | - +--------+ - - true: - +--------+ - | 0xc3 | - +--------+ - - -### int format family - -Int format family stores an integer in 1, 2, 3, 5, or 9 bytes. - - positive fixnum stores 7-bit positive integer - +--------+ - |0XXXXXXX| - +--------+ - - negative fixnum stores 5-bit negative integer - +--------+ - |111YYYYY| - +--------+ - - * 0XXXXXXX is 8-bit unsigned integer - * 111YYYYY is 8-bit signed integer - - uint 8 stores a 8-bit unsigned integer - +--------+--------+ - | 0xcc |ZZZZZZZZ| - +--------+--------+ - - uint 16 stores a 16-bit big-endian unsigned integer - +--------+--------+--------+ - | 0xcd |ZZZZZZZZ|ZZZZZZZZ| - +--------+--------+--------+ - - uint 32 stores a 32-bit big-endian unsigned integer - +--------+--------+--------+--------+--------+ - | 0xce |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ - +--------+--------+--------+--------+--------+ - - uint 64 stores a 64-bit big-endian unsigned integer - +--------+--------+--------+--------+--------+--------+--------+--------+--------+ - | 0xcf |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| - +--------+--------+--------+--------+--------+--------+--------+--------+--------+ - - int 8 stores a 8-bit signed integer - +--------+--------+ - | 0xd0 |ZZZZZZZZ| - +--------+--------+ - - int 16 stores a 16-bit big-endian signed integer - +--------+--------+--------+ - | 0xd1 |ZZZZZZZZ|ZZZZZZZZ| - +--------+--------+--------+ - - int 32 stores a 32-bit 
big-endian signed integer - +--------+--------+--------+--------+--------+ - | 0xd2 |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| - +--------+--------+--------+--------+--------+ - - int 64 stores a 64-bit big-endian signed integer - +--------+--------+--------+--------+--------+--------+--------+--------+--------+ - | 0xd3 |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| - +--------+--------+--------+--------+--------+--------+--------+--------+--------+ - - -### float format family - -Float format family stores an floating point number in 5 bytes or 9 bytes. - - float 32 stores a floating point number in IEEE 754 single precision floating point number format: - +--------+--------+--------+--------+--------+ - | 0xca |XXXXXXXX|XXXXXXXX|XXXXXXXX|XXXXXXXX - +--------+--------+--------+--------+--------+ - - float 64 stores a floating point number in IEEE 754 double precision floating point number format: - +--------+--------+--------+--------+--------+--------+--------+--------+--------+ - | 0xcb |YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY|YYYYYYYY| - +--------+--------+--------+--------+--------+--------+--------+--------+--------+ - - where - * XXXXXXXX_XXXXXXXX_XXXXXXXX_XXXXXXXX is a big-endian IEEE 754 single precision floating point number - * YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY_YYYYYYYY is a big-endian - IEEE 754 double precision floating point number - - - -### str format family - -Str format family stores an byte array in 1, 2, 3, or 5 bytes of extra bytes in addition to the size of the byte array. 
- - fixstr stores a byte array whose length is upto 31 bytes: - +--------+========+ - |101XXXXX| data | - +--------+========+ - - str 8 stores a byte array whose length is upto (2^8)-1 bytes: - +--------+--------+========+ - | 0xd9 |YYYYYYYY| data | - +--------+--------+========+ - - str 16 stores a byte array whose length is upto (2^16)-1 bytes: - +--------+--------+--------+========+ - | 0xda |ZZZZZZZZ|ZZZZZZZZ| data | - +--------+--------+--------+========+ - - str 32 stores a byte array whose length is upto (2^32)-1 bytes: - +--------+--------+--------+--------+--------+========+ - | 0xdb |AAAAAAAA|AAAAAAAA|AAAAAAAA|AAAAAAAA| data | - +--------+--------+--------+--------+--------+========+ - - where - * XXXXX is a 5-bit unsigned integer which represents N - * YYYYYYYY is a 8-bit unsigned integer which represents N - * ZZZZZZZZ_ZZZZZZZZ is a 16-bit big-endian unsigned integer which represents N - * AAAAAAAA_AAAAAAAA_AAAAAAAA_AAAAAAAA is a 32-bit big-endian unsigned integer which represents N - * N is the length of data - - -### bin format family - -Bin format family stores an byte array in 2, 3, or 5 bytes of extra bytes in addition to the size of the byte array. 
- - bin 8 stores a byte array whose length is upto (2^8)-1 bytes: - +--------+--------+========+ - | 0xc4 |XXXXXXXX| data | - +--------+--------+========+ - - bin 16 stores a byte array whose length is upto (2^16)-1 bytes: - +--------+--------+--------+========+ - | 0xc5 |YYYYYYYY|YYYYYYYY| data | - +--------+--------+--------+========+ - - bin 32 stores a byte array whose length is upto (2^32)-1 bytes: - +--------+--------+--------+--------+--------+========+ - | 0xc6 |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| data | - +--------+--------+--------+--------+--------+========+ - - where - * XXXXXXXX is a 8-bit unsigned integer which represents N - * YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N - * ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N - * N is the length of data - - -### array format family - -Array format family stores a sequence of elements in 1, 3, or 5 bytes of extra bytes in addition to the elements. - - fixarray stores an array whose length is upto 15 elements: - +--------+~~~~~~~~~~~~~~~~~+ - |1001XXXX| N objects | - +--------+~~~~~~~~~~~~~~~~~+ - - array 16 stores an array whose length is upto (2^16)-1 elements: - +--------+--------+--------+~~~~~~~~~~~~~~~~~+ - | 0xdc |YYYYYYYY|YYYYYYYY| N objects | - +--------+--------+--------+~~~~~~~~~~~~~~~~~+ - - array 32 stores an array whose length is upto (2^32)-1 elements: - +--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+ - | 0xdd |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| N objects | - +--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+ - - where - * XXXX is a 4-bit unsigned integer which represents N - * YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N - * ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N - N is the size of a array - - -### map format family - -Map format family stores a sequence of key-value pairs in 1, 3, or 5 bytes 
of extra bytes in addition to the key-value pairs. - - fixmap stores a map whose length is upto 15 elements - +--------+~~~~~~~~~~~~~~~~~+ - |1000XXXX| N*2 objects | - +--------+~~~~~~~~~~~~~~~~~+ - - map 16 stores a map whose length is upto (2^16)-1 elements - +--------+--------+--------+~~~~~~~~~~~~~~~~~+ - | 0xde |YYYYYYYY|YYYYYYYY| N*2 objects | - +--------+--------+--------+~~~~~~~~~~~~~~~~~+ - - map 32 stores a map whose length is upto (2^32)-1 elements - +--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+ - | 0xdf |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| N*2 objects | - +--------+--------+--------+--------+--------+~~~~~~~~~~~~~~~~~+ - - where - * XXXX is a 4-bit unsigned integer which represents N - * YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N - * ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a 32-bit big-endian unsigned integer which represents N - * N is the size of a map - * odd elements in objects are keys of a map - * the next element of a key is its associated value - - -### ext format family - -Ext format family stores a tuple of an integer and a byte array. 
- - fixext 1 stores an integer and a byte array whose length is 1 byte - +--------+--------+--------+ - | 0xd4 | type | data | - +--------+--------+--------+ - - fixext 2 stores an integer and a byte array whose length is 2 bytes - +--------+--------+--------+--------+ - | 0xd5 | type | data | - +--------+--------+--------+--------+ - - fixext 4 stores an integer and a byte array whose length is 4 bytes - +--------+--------+--------+--------+--------+--------+ - | 0xd6 | type | data | - +--------+--------+--------+--------+--------+--------+ - - fixext 8 stores an integer and a byte array whose length is 8 bytes - +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ - | 0xd7 | type | data | - +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ - - fixext 16 stores an integer and a byte array whose length is 16 bytes - +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ - | 0xd8 | type | data - +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ - +--------+--------+--------+--------+--------+--------+--------+--------+ - data (cont.) 
| - +--------+--------+--------+--------+--------+--------+--------+--------+ - - ext 8 stores an integer and a byte array whose length is upto (2^8)-1 bytes: - +--------+--------+--------+========+ - | 0xc7 |XXXXXXXX| type | data | - +--------+--------+--------+========+ - - ext 16 stores an integer and a byte array whose length is upto (2^16)-1 bytes: - +--------+--------+--------+--------+========+ - | 0xc8 |YYYYYYYY|YYYYYYYY| type | data | - +--------+--------+--------+--------+========+ - - ext 32 stores an integer and a byte array whose length is upto (2^32)-1 bytes: - +--------+--------+--------+--------+--------+--------+========+ - | 0xc9 |ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ|ZZZZZZZZ| type | data | - +--------+--------+--------+--------+--------+--------+========+ - - where - * XXXXXXXX is a 8-bit unsigned integer which represents N - * YYYYYYYY_YYYYYYYY is a 16-bit big-endian unsigned integer which represents N - * ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ_ZZZZZZZZ is a big-endian 32-bit unsigned integer which represents N - * N is a length of data - * type is a signed 8-bit signed integer - * type < 0 is reserved for future extension including 2-byte type information - - - -## Serialization: type to format conversion - -MessagePack serializers convert MessagePack types into formats as following: - - - - - - - - - - - - -
source typesoutput format
Integerint format family (positive fixint, negative fixint, int 8/16/32/64 or uint 8/16/32/64)
Nilnil
Booleanbool format family (false or true)
Floatfloat format family (float 32/64)
Stringstr format family (fixstr or str 8/16/32)
Binarybin format family (bin 8/16/32)
Arrayarray format family (fixarray or array 16/32)
Mapmap format family (fixmap or map 16/32)
Extendedext format family (fixext or ext 8/16/32)
- -If an object can be represented in multiple possible output formats, serializers SHOULD use the format which represents the data in the smallest number of bytes. - - -
-## Deserialization: format to type conversion - -MessagePack deserializers convert convert MessagePack formats into types as following: - - - - - - - - - - - - -
source formatsoutput type
positive fixint, negative fixint, int 8/16/32/64 and uint 8/16/32/64Integer
nilNil
false and trueBoolean
float 32/64Float
fixstr and str 8/16/32String
bin 8/16/32Binary
fixarray and array 16/32Array
fixmap and map 16/32Map
fixext and ext 8/16/32Extended
- -
-## Future discussion - - -### Profile - -Profile is an idea that Applications restrict the semantics of MessagePack while sharing the same syntax to adapt MessagePack for certain use cases. - -For example, applications may remove Binary type, restrict keys of map objects to be String type, and put some restrictions to make the semantics compatible with JSON. Applications which use schema may remove String and Binary types and deal with byte arrays as Raw type. Applications which use hash (digest) of serialized data may sort keys of maps to make the serialized data deterministic. - - -## implementation guidelines - - -### Upgrading MessagePack specification - -MessagePack specification is changed at this time. -Here is a guideline to upgrade existent MessagePack implementations: - -* In a minor release, deserializers support the bin format family and str 8 format. The type of deserialized objects should be same with raw 16 (== str 16) or raw 32 (== str 32) -* In a major release, serializers distinguish Binary type and String type using bin format family and str format family - * At the same time, serializers should offer "compatibility mode" which doesn't use bin format family and str 8 format - - -___ - - MessagePack specification - Last modified at 2013-04-21 21:52:33 -0700 - Sadayuki Furuhashi © 2013-04-21 21:52:33 -0700 diff --git a/node_modules/msgpack5/test/1-byte-length-buffers.js b/node_modules/msgpack5/test/1-byte-length-buffers.js deleted file mode 100644 index 701b88d23c..0000000000 --- a/node_modules/msgpack5/test/1-byte-length-buffers.js +++ /dev/null @@ -1,79 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -function build (size) { - const buf = Buffer.allocUnsafe(size) - buf.fill('a') - - return buf -} - -test('encode/decode 2^8-1 bytes buffers', function (t) { - const encoder = msgpack() - const all = [] - - all.push(build(Math.pow(2, 
8) - 1)) - all.push(build(Math.pow(2, 6) + 1)) - all.push(build(1)) - all.push(Buffer.allocUnsafe(0)) - - all.forEach(function (orig) { - t.test('encoding a buffer of length ' + orig.length, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 2 + orig.length, 'must have the right length') - t.equal(buf.readUInt8(0), 0xc4, 'must have the proper header') - t.equal(buf.readUInt8(1), orig.length, 'must include the buf length') - t.equal(buf.toString('utf8', 2), orig.toString('utf8'), 'must decode correctly') - t.end() - }) - - t.test('decoding a buffer of length ' + orig.length, function (t) { - const buf = Buffer.allocUnsafe(2 + orig.length) - buf[0] = 0xc4 - buf[1] = orig.length - orig.copy(buf, 2) - t.equal(encoder.decode(buf).toString('utf8'), orig.toString('utf8'), 'must decode correctly') - t.end() - }) - - t.test('mirror test a buffer of length ' + orig.length, function (t) { - t.equal(encoder.decode(encoder.encode(orig)).toString(), orig.toString(), 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding a chopped 2^8-1 bytes buffer', function (t) { - const encoder = msgpack() - const orig = build(Math.pow(2, 6)) - let buf = Buffer.allocUnsafe(2 + orig.length) - buf[0] = 0xc4 - buf[1] = Math.pow(2, 8) - 1 // set bigger size - orig.copy(buf, 2) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) - -test('decoding an incomplete header of 2^8-1 bytes buffer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(1) - buf[0] = 0xc4 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git 
a/node_modules/msgpack5/test/1-byte-length-exts.js b/node_modules/msgpack5/test/1-byte-length-exts.js deleted file mode 100644 index 6066bbcbbd..0000000000 --- a/node_modules/msgpack5/test/1-byte-length-exts.js +++ /dev/null @@ -1,102 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encode/decode variable ext data up to 0xff', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (size, value) { - this.value = value - this.size = size - } - - function mytipeEncode (obj) { - const buf = Buffer.allocUnsafe(obj.size) - buf.fill(obj.value) - return buf - } - - function mytipeDecode (data) { - const result = new MyType(data.length, data.toString('utf8', 0, 1)) - - for (let i = 0; i < data.length; i++) { - if (data.readUInt8(0) !== data.readUInt8(i)) { - throw new Error('should all be the same') - } - } - - return result - } - - encoder.register(0x42, MyType, mytipeEncode, mytipeDecode) - - // no 1 as it's a fixext - // no 2 as it's a fixext - all.push(new MyType(3, 'a')) - // no 4 as it's a fixext - all.push(new MyType(5, 'a')) - all.push(new MyType(6, 'a')) - all.push(new MyType(7, 'a')) - // no 8 as it's a fixext - all.push(new MyType(9, 'a')) - all.push(new MyType(10, 'a')) - all.push(new MyType(11, 'a')) - all.push(new MyType(12, 'a')) - all.push(new MyType(13, 'a')) - all.push(new MyType(14, 'a')) - all.push(new MyType(15, 'a')) - // no 16 as it's a fixext - all.push(new MyType(17, 'a')) - - all.push(new MyType(255, 'a')) - - all.forEach(function (orig) { - t.test('encoding a custom obj of length ' + orig.size, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 3 + orig.size, 'must have the right length') - t.equal(buf.readUInt8(0), 0xc7, 'must have the ext header') - t.equal(buf.readUInt8(1), orig.size, 'must include the data length') - t.equal(buf.readUInt8(2), 0x42, 'must include the custom type 
id') - t.equal(buf.toString('utf8', 3, 4), orig.value, 'must decode correctly') - t.end() - }) - - t.test('mirror test with a custom obj of length ' + orig.size, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.test('decoding an incomplete variable ext data up to 0xff', function (t) { - const obj = encoder.encode(new MyType(250, 'a')) - let buf = Buffer.allocUnsafe(obj.length) - buf[0] = 0xc7 - buf.writeUInt8(obj.length + 2, 1) // set bigger size - obj.copy(buf, 2, 2, obj.length) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.test('decoding an incomplete header of variable ext data up to 0xff', function (t) { - let buf = Buffer.allocUnsafe(2) - buf[0] = 0xc7 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.end() -}) diff --git a/node_modules/msgpack5/test/1-byte-length-strings.js b/node_modules/msgpack5/test/1-byte-length-strings.js deleted file mode 100644 index d5efbe4ebb..0000000000 --- a/node_modules/msgpack5/test/1-byte-length-strings.js +++ /dev/null @@ -1,80 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encode/decode 32 <-> (2^8-1) bytes strings', function (t) { - const encoder = msgpack() - const all = [] - let i - - // build base - for (i = 'a'; i.length < 32;) { - i += 'a' - } - - for (; i.length < Math.pow(2, 8); i += 'aaaaa') { - all.push(i) - } - - all.forEach(function (str) { - t.test('encoding a string of length ' + str.length, function 
(t) { - const buf = encoder.encode(str) - t.equal(buf.length, 2 + Buffer.byteLength(str), 'must be the proper length') - t.equal(buf.readUInt8(0), 0xd9, 'must have the proper header') - t.equal(buf.readUInt8(1), Buffer.byteLength(str), 'must include the str length') - t.equal(buf.toString('utf8', 2, Buffer.byteLength(str) + 2), str, 'must decode correctly') - t.end() - }) - - t.test('decoding a string of length ' + str.length, function (t) { - const buf = Buffer.allocUnsafe(2 + Buffer.byteLength(str)) - buf[0] = 0xd9 - buf[1] = Buffer.byteLength(str) - buf.write(str, 2) - t.equal(encoder.decode(buf), str, 'must decode correctly') - t.end() - }) - - t.test('mirror test a string of length ' + str.length, function (t) { - t.equal(encoder.decode(encoder.encode(str)), str, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding a chopped string', function (t) { - const encoder = msgpack() - let str - for (str = 'a'; str.length < 40;) { - str += 'a' - } - let buf = Buffer.allocUnsafe(2 + Buffer.byteLength(str)) - buf[0] = 0xd9 - buf[1] = Buffer.byteLength(str) + 10 // set bigger size - buf.write(str, 2) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) - -test('decoding an incomplete header of a string', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(1) - buf[0] = 0xd9 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/1-byte-length-uint8arrays.js b/node_modules/msgpack5/test/1-byte-length-uint8arrays.js deleted file mode 100644 index 3b6253925d..0000000000 --- 
a/node_modules/msgpack5/test/1-byte-length-uint8arrays.js +++ /dev/null @@ -1,43 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -function build (size) { - const array = [] - let i - - for (i = 0; i < size; i++) { - array.push(42) - } - - return new Uint8Array(array) -} - -test('encode/decode 2^8-1 Uint8Arrays', function (t) { - const encoder = msgpack() - const all = [] - - all.push(build(Math.pow(2, 8) - 1)) - all.push(build(Math.pow(2, 6) + 1)) - all.push(build(1)) - all.push(new Uint8Array(0)) - - all.forEach(function (array) { - t.test('encoding Uint8Array of length ' + array.byteLength + ' bytes', function (t) { - const buf = encoder.encode(array) - t.equal(buf.length, 2 + array.byteLength, 'must have the right length') - t.equal(buf.readUInt8(0), 0xc4, 'must have the proper header') - t.equal(buf.readUInt8(1), array.byteLength, 'must include the buf length') - t.end() - }) - - t.test('mirror test for an Uint8Array of length ' + array.byteLength + ' bytes', function (t) { - t.deepEqual(encoder.decode(encoder.encode(array)), Buffer.from(array), 'must stay the same') - t.end() - }) - }) - - t.end() -}) diff --git a/node_modules/msgpack5/test/15-elements-arrays.js b/node_modules/msgpack5/test/15-elements-arrays.js deleted file mode 100644 index bf76904907..0000000000 --- a/node_modules/msgpack5/test/15-elements-arrays.js +++ /dev/null @@ -1,84 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -function build (size, obj) { - const array = [] - let i - - for (i = 0; i < size; i++) { - array.push(obj) - } - - return array -} - -function computeLength (array) { - let length = 1 // the header - let multi = 1 - - if (array[0] && typeof array[0] === 'string') { - multi += array[0].length - } - - length += array.length * multi - - return length -} - 
-test('encode/decode arrays up to 15 elements', function (t) { - const encoder = msgpack() - const all = [] - let i - - for (i = 0; i < 16; i++) { - all.push(build(i, 42)) - } - - for (i = 0; i < 16; i++) { - all.push(build(i, 'aaa')) - } - - all.forEach(function (array) { - t.test('encoding an array with ' + array.length + ' elements of ' + array[0], function (t) { - const buf = encoder.encode(array) - // the array is full of 1-byte integers - t.equal(buf.length, computeLength(array), 'must have the right length') - t.equal(buf.readUInt8(0) & 0xf0, 0x90, 'must have the proper header') - t.equal(buf.readUInt8(0) & 0x0f, array.length, 'must include the array length') - t.end() - }) - - t.test('mirror test for an array of length ' + array.length + ' with ' + array[0], function (t) { - t.deepEqual(encoder.decode(encoder.encode(array)), array, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete array', function (t) { - const encoder = msgpack() - - const array = ['a', 'b', 'c'] - const size = computeLength(array) - let buf = Buffer.allocUnsafe(size) - buf[0] = 0x90 | array.length + 2 // set bigger size - let pos = 1 - for (let i = 0; i < array.length; i++) { - const obj = encoder.encode(array[i], true) - obj.copy(buf, pos) - pos += obj.length - } - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(origLength, buf.length, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/15-elements-maps.js b/node_modules/msgpack5/test/15-elements-maps.js deleted file mode 100644 index 8506c0fb4f..0000000000 --- a/node_modules/msgpack5/test/15-elements-maps.js +++ /dev/null @@ -1,119 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -function build (size, value) { - const 
map = {} - let i - - for (i = 0; i < size; i++) { - map[i + 100 + ''] = value - } - - return map -} - -function computeLength (map) { - let length = 1 // the header - let multi = 5 // we have 4 bytes for each key, plus 1 byte for the value - - if (map[100] && typeof map[100] === 'string') { - multi += map[100].length - } - - length += Object.keys(map).length * multi - - return length -} - -test('encode/decode maps up to 15 elements', function (t) { - const encoder = msgpack() - const all = [] - let i - - for (i = 0; i < 16; i++) { - all.push(build(i, 42)) - } - - for (i = 0; i < 16; i++) { - all.push(build(i, 'aaa')) - } - - all.forEach(function (map) { - const length = Object.keys(map).length - t.test('encoding a map with ' + length + ' elements of ' + map[100], function (t) { - const buf = encoder.encode(map) - t.equal(buf.length, computeLength(map), 'must have the right length') - t.equal(buf.readUInt8(0) & 0xf0, 0x80, 'must have the proper header') - t.equal(buf.readUInt8(0) & 0x0f, length, 'must include the map length') - t.end() - }) - - t.test('mirror test for a map of length ' + length + ' with ' + map[100], function (t) { - t.deepEqual(encoder.decode(encoder.encode(map)), map, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('do not encode undefined in a map', function (t) { - const instance = msgpack() - const expected = { hello: 'world' } - const toEncode = { a: undefined, hello: 'world' } - const buf = instance.encode(toEncode) - - t.deepEqual(expected, instance.decode(buf), 'must ignore undefined') - t.end() -}) - -test('encode NaN in a map', function (t) { - const instance = msgpack() - const toEncode = { a: NaN, hello: 'world' } - - const buf = instance.encode(toEncode) - - t.assert(Object.is(instance.decode(buf).a, NaN)) - - const expected = { ...toEncode } - delete toEncode.a - const actual = instance.decode(buf) - delete buf.a - - t.deepEqual(actual, expected) - - t.end() -}) - -test('encode/decode map with buf, ints and strings', 
function (t) { - const map = { - topic: 'hello', - qos: 1, - payload: Buffer.from('world'), - messageId: '42', - ttl: 1416309270167 - } - const pack = msgpack() - - t.deepEqual(pack.decode(pack.encode(map)), map) - t.end() -}) - -test('decoding a chopped map', function (t) { - const encoder = msgpack() - const map = encoder.encode({ a: 'b', c: 'd', e: 'f' }) - let buf = Buffer.allocUnsafe(map.length) - buf[0] = 0x80 | 5 // set bigger size - map.copy(buf, 1, 1, map.length) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/16-bits-signed-integers.js b/node_modules/msgpack5/test/16-bits-signed-integers.js deleted file mode 100644 index 0b2a545186..0000000000 --- a/node_modules/msgpack5/test/16-bits-signed-integers.js +++ /dev/null @@ -1,56 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encoding/decoding 16-bits big-endian signed integers', function (t) { - const encoder = msgpack() - const allNum = [] - let i - - for (i = 129; i < 32768; i += 1423) { - allNum.push(-i) - } - - allNum.push(-32768) - - allNum.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 3, 'must have 3 bytes') - t.equal(buf[0], 0xd1, 'must have the proper header') - t.equal(buf.readInt16BE(1), num, 'must decode correctly') - t.end() - }) - - t.test('decoding ' + num, function (t) { - const buf = Buffer.allocUnsafe(3) - buf[0] = 0xd1 - buf.writeInt16BE(num, 1) - t.equal(encoder.decode(buf), num, 'must decode correctly') - t.end() - }) - - t.test('mirror test ' + num, function (t) { - t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') - t.end() - 
}) - }) - - t.end() -}) - -test('decoding an incomplete 16-bits big-endian integer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(2) - buf[0] = 0xd1 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/16-bits-unsigned-integers.js b/node_modules/msgpack5/test/16-bits-unsigned-integers.js deleted file mode 100644 index 78d59089fe..0000000000 --- a/node_modules/msgpack5/test/16-bits-unsigned-integers.js +++ /dev/null @@ -1,56 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encoding/decoding 16-bits big-endian unsigned integers', function (t) { - const encoder = msgpack() - const allNum = [] - let i - - for (i = 256; i < 65536; i += 1423) { - allNum.push(i) - } - - allNum.push(65535) - - allNum.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 3, 'must have 3 bytes') - t.equal(buf[0], 0xcd, 'must have the proper header') - t.equal(buf.readUInt16BE(1), num, 'must decode correctly') - t.end() - }) - - t.test('decoding ' + num, function (t) { - const buf = Buffer.allocUnsafe(3) - buf[0] = 0xcd - buf.writeUInt16BE(num, 1) - t.equal(encoder.decode(buf), num, 'must decode correctly') - t.end() - }) - - t.test('mirror test ' + num, function (t) { - t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete 16-bits big-endian unsigned integer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(2) - buf[0] = 0xcd - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) 
- }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/2-bytes-length-arrays.js b/node_modules/msgpack5/test/2-bytes-length-arrays.js deleted file mode 100644 index 5ff658b32c..0000000000 --- a/node_modules/msgpack5/test/2-bytes-length-arrays.js +++ /dev/null @@ -1,84 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -function build (size) { - const array = [] - let i - - for (i = 0; i < size; i++) { - array.push(42) - } - - return array -} - -test('encode/decode arrays up to 0xffff elements', function (t) { - const encoder = msgpack() - const all = [] - let i - - for (i = 16; i < 0xffff; i += 4242) { - all.push(build(i)) - } - - all.push(build(0xff)) - all.push(build(0xffff)) - - all.forEach(function (array) { - t.test('encoding an array with ' + array.length + ' elements', function (t) { - const buf = encoder.encode(array) - // the array is full of 1-byte integers - t.equal(buf.length, 3 + array.length, 'must have the right length') - t.equal(buf.readUInt8(0), 0xdc, 'must have the proper header') - t.equal(buf.readUInt16BE(1), array.length, 'must include the array length') - t.end() - }) - - t.test('mirror test for an array of length ' + array.length, function (t) { - t.deepEqual(encoder.decode(encoder.encode(array)), array, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete array', function (t) { - const encoder = msgpack() - - const array = build(0xffff / 2) - let buf = Buffer.allocUnsafe(3 + array.length) - buf[0] = 0xdc - buf.writeUInt16BE(array.length + 10, 1) // set bigger size - let pos = 3 - for (let i = 0; i < array.length; i++) { - const obj = encoder.encode(array[i], true) - obj.copy(buf, pos) - pos += obj.length - } - buf = bl().append(buf) - const origLength = 
buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(origLength, buf.length, 'must not consume any byte') - t.end() -}) - -test('decoding an incomplete header', function (t) { - const encoder = msgpack() - - let buf = Buffer.allocUnsafe(2) - buf[0] = 0xdc - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/2-bytes-length-buffers.js b/node_modules/msgpack5/test/2-bytes-length-buffers.js deleted file mode 100644 index 5fdc3ee594..0000000000 --- a/node_modules/msgpack5/test/2-bytes-length-buffers.js +++ /dev/null @@ -1,79 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -function build (size) { - const buf = Buffer.allocUnsafe(size) - buf.fill('a') - - return buf -} - -test('encode/decode 2^16-1 bytes buffers', function (t) { - const encoder = msgpack() - const all = [] - - all.push(build(Math.pow(2, 8))) - all.push(build(Math.pow(2, 8) + 1)) - all.push(build(Math.pow(2, 12) + 1)) - all.push(build(Math.pow(2, 16) - 1)) - - all.forEach(function (orig) { - t.test('encoding a buffer of length ' + orig.length, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 3 + orig.length, 'must have the right length') - t.equal(buf.readUInt8(0), 0xc5, 'must have the proper header') - t.equal(buf.readUInt16BE(1), orig.length, 'must include the buf length') - t.equal(buf.toString('utf8', 3), orig.toString('utf8'), 'must decode correctly') - t.end() - }) - - t.test('decoding a buffer of length ' + orig.length, function (t) { - const buf = Buffer.allocUnsafe(3 + orig.length) - buf[0] = 0xc5 - buf.writeUInt16BE(orig.length, 1) - 
orig.copy(buf, 3) - t.equal(encoder.decode(buf).toString('utf8'), orig.toString('utf8'), 'must decode correctly') - t.end() - }) - - t.test('mirror test a buffer of length ' + orig.length, function (t) { - t.equal(encoder.decode(encoder.encode(orig)).toString(), orig.toString(), 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding a chopped 2^16-1 bytes buffer', function (t) { - const encoder = msgpack() - const orig = build(Math.pow(2, 12)) - let buf = Buffer.allocUnsafe(3 + orig.length) - buf[0] = 0xc5 - buf[1] = Math.pow(2, 16) - 1 // set bigger size - orig.copy(buf, 3) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) - -test('decoding an incomplete header of 2^16-1 bytes buffer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(2) - buf[0] = 0xc5 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/2-bytes-length-exts.js b/node_modules/msgpack5/test/2-bytes-length-exts.js deleted file mode 100644 index f5aefb633a..0000000000 --- a/node_modules/msgpack5/test/2-bytes-length-exts.js +++ /dev/null @@ -1,86 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encode/decode variable ext data up between 0x0100 and 0xffff', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (size, value) { - this.value = value - this.size = size - } - - function mytipeEncode (obj) { - const buf = Buffer.allocUnsafe(obj.size) - buf.fill(obj.value) - return 
buf - } - - function mytipeDecode (data) { - const result = new MyType(data.length, data.toString('utf8', 0, 1)) - - for (let i = 0; i < data.length; i++) { - if (data.readUInt8(0) !== data.readUInt8(i)) { - throw new Error('should all be the same') - } - } - - return result - } - - encoder.register(0x42, MyType, mytipeEncode, mytipeDecode) - - all.push(new MyType(0x0100, 'a')) - all.push(new MyType(0x0101, 'a')) - all.push(new MyType(0xffff, 'a')) - - all.forEach(function (orig) { - t.test('encoding a custom obj of length ' + orig.size, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 4 + orig.size, 'must have the right length') - t.equal(buf.readUInt8(0), 0xc8, 'must have the ext header') - t.equal(buf.readUInt16BE(1), orig.size, 'must include the data length') - t.equal(buf.readUInt8(3), 0x42, 'must include the custom type id') - t.equal(buf.toString('utf8', 4, 5), orig.value, 'must decode correctly') - t.end() - }) - - t.test('mirror test with a custom obj of length ' + orig.size, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.test('decoding an incomplete variable ext data up between 0x0100 and 0xffff', function (t) { - const obj = encoder.encode(new MyType(0xfff0, 'a')) - let buf = Buffer.allocUnsafe(obj.length) - buf[0] = 0xc8 - buf.writeUInt16BE(obj.length + 2, 1) // set bigger size - obj.copy(buf, 3, 3, obj.length) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.test('decoding an incomplete header of variable ext data up between 0x0100 and 0xffff', function (t) { - let buf = Buffer.allocUnsafe(3) - buf[0] = 0xc8 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 
'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.end() -}) diff --git a/node_modules/msgpack5/test/2-bytes-length-maps.js b/node_modules/msgpack5/test/2-bytes-length-maps.js deleted file mode 100644 index 0df6321ec3..0000000000 --- a/node_modules/msgpack5/test/2-bytes-length-maps.js +++ /dev/null @@ -1,85 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') -const base = 100000 - -function build (size, value) { - const map = {} - - for (let i = 0; i < size; i++) { - map[i + base] = value - } - - return map -} - -function computeLength (mapLength) { - let length = 3 // the header - const multi = ('' + base).length + 1 + 1 // we have bytes for each key, plus 1 byte for the value - - length += mapLength * multi - - return length -} - -test('encode/decode maps up to 2^16-1 elements', function (t) { - const encoder = msgpack() - - function doTest (length) { - const map = build(length, 42) - const buf = encoder.encode(map) - - t.test('encoding a map with ' + length + ' elements of ' + map[base], function (t) { - // the map is full of 1-byte integers - t.equal(buf.length, computeLength(length), 'must have the right length') - t.equal(buf.readUInt8(0), 0xde, 'must have the proper header') - t.equal(buf.readUInt16BE(1), length, 'must include the map length') - t.end() - }) - - t.test('mirror test for a map of length ' + length + ' with ' + map[base], function (t) { - t.deepEqual(encoder.decode(buf), map, 'must stay the same') - t.end() - }) - } - - doTest(Math.pow(2, 8)) - doTest(Math.pow(2, 8) + 1) - doTest(Math.pow(2, 12) + 1) - // too slow - // doTest(Math.pow(2, 16) - 1) - - t.end() -}) - -test('decoding a chopped map', function (t) { - const encoder = msgpack() - const map = encoder.encode(build(Math.pow(2, 12) + 1, 42)) - let buf = Buffer.allocUnsafe(map.length) - buf[0] = 0xde - 
buf.writeUInt16BE(Math.pow(2, 16) - 1, 1) // set bigger size - map.copy(buf, 3, 3, map.length) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) - -test('decoding an incomplete header of a map', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(2) - buf[0] = 0xde - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/2-bytes-length-strings.js b/node_modules/msgpack5/test/2-bytes-length-strings.js deleted file mode 100644 index ee9f8c1b1d..0000000000 --- a/node_modules/msgpack5/test/2-bytes-length-strings.js +++ /dev/null @@ -1,87 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encode/decode 2^8 <-> (2^16-1) bytes strings', function (t) { - const encoder = msgpack() - const all = [] - let str - - str = Buffer.allocUnsafe(Math.pow(2, 8)) - str.fill('a') - all.push(str.toString()) - - str = Buffer.allocUnsafe(Math.pow(2, 8) + 1) - str.fill('a') - all.push(str.toString()) - - str = Buffer.allocUnsafe(Math.pow(2, 14)) - str.fill('a') - all.push(str.toString()) - - str = Buffer.allocUnsafe(Math.pow(2, 16) - 1) - str.fill('a') - all.push(str.toString()) - - all.forEach(function (str) { - t.test('encoding a string of length ' + str.length, function (t) { - const buf = encoder.encode(str) - t.equal(buf.length, 3 + Buffer.byteLength(str), 'must be the proper length') - t.equal(buf[0], 0xda, 'must have the proper header') - t.equal(buf.readUInt16BE(1), Buffer.byteLength(str), 'must include the 
str length') - t.equal(buf.toString('utf8', 3, Buffer.byteLength(str) + 3), str, 'must decode correctly') - t.end() - }) - - t.test('decoding a string of length ' + str.length, function (t) { - const buf = Buffer.allocUnsafe(3 + Buffer.byteLength(str)) - buf[0] = 0xda - buf.writeUInt16BE(Buffer.byteLength(str), 1) - buf.write(str, 3) - t.equal(encoder.decode(buf), str, 'must decode correctly') - t.end() - }) - - t.test('mirror test a string of length ' + str.length, function (t) { - t.equal(encoder.decode(encoder.encode(str)), str, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding a chopped string', function (t) { - const encoder = msgpack() - let str - for (str = 'a'; str.length < 0xff + 100;) { - str += 'a' - } - let buf = Buffer.allocUnsafe(3 + Buffer.byteLength(str)) - buf[0] = 0xda - buf.writeUInt16BE(Buffer.byteLength(str) + 10, 1) // set bigger size - buf.write(str, 3) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) - -test('decoding an incomplete header of a string', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(2) - buf[0] = 0xda - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/2-bytes-length-uint8arrays.js b/node_modules/msgpack5/test/2-bytes-length-uint8arrays.js deleted file mode 100644 index c824a30998..0000000000 --- a/node_modules/msgpack5/test/2-bytes-length-uint8arrays.js +++ /dev/null @@ -1,43 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -function build (size) 
{ - const array = [] - let i - - for (i = 0; i < size; i++) { - array.push(42) - } - - return new Uint8Array(array) -} - -test('encode/decode 2^8-1 Uint8Arrays', function (t) { - const encoder = msgpack() - const all = [] - - all.push(build(Math.pow(2, 8))) - all.push(build(Math.pow(2, 8) + 1)) - all.push(build(Math.pow(2, 12) + 1)) - all.push(build(Math.pow(2, 16) - 1)) - - all.forEach(function (array) { - t.test('encoding Uint8Array of length ' + array.byteLength + ' bytes', function (t) { - const buf = encoder.encode(array) - t.equal(buf.length, 3 + array.byteLength, 'must have the right length') - t.equal(buf.readUInt8(0), 0xc5, 'must have the proper header') - t.equal(buf.readUInt16BE(1), array.byteLength, 'must include the buf length') - t.end() - }) - - t.test('mirror test for an Uint8Array of length ' + array.byteLength + ' bytes', function (t) { - t.deepEqual(encoder.decode(encoder.encode(array)), Buffer.from(array), 'must stay the same') - t.end() - }) - }) - - t.end() -}) diff --git a/node_modules/msgpack5/test/31-chars-strings.js b/node_modules/msgpack5/test/31-chars-strings.js deleted file mode 100644 index 239aeb6105..0000000000 --- a/node_modules/msgpack5/test/31-chars-strings.js +++ /dev/null @@ -1,59 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encode/decode strings with max 31 of length', function (t) { - const encoder = msgpack() - const all = [] - - // build base - for (let i = ''; i.length < 32; i += 'a') { - all.push(i) - } - - all.forEach(function (str) { - t.test('encoding a string of length ' + str.length, function (t) { - const buf = encoder.encode(str) - t.equal(buf.length, 1 + Buffer.byteLength(str), 'must be the proper length') - t.equal(buf.readUInt8(0) & 0xe0, 0xa0, 'must have the proper header') - t.equal(buf.readUInt8(0) & 0x1f, Buffer.byteLength(str), 'must include the str length') - 
t.equal(buf.toString('utf8', 1, Buffer.byteLength(str) + 2), str, 'must decode correctly') - t.end() - }) - - t.test('decoding a string of length ' + str.length, function (t) { - const buf = Buffer.allocUnsafe(1 + Buffer.byteLength(str)) - buf[0] = 0xa0 | Buffer.byteLength(str) - if (str.length > 0) { - buf.write(str, 1) - } - t.equal(encoder.decode(buf), str, 'must decode correctly') - t.end() - }) - - t.test('mirror test a string of length ' + str.length, function (t) { - t.equal(encoder.decode(encoder.encode(str)), str, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding a chopped string', function (t) { - const encoder = msgpack() - const str = 'aaa' - let buf = Buffer.allocUnsafe(1 + Buffer.byteLength(str)) - buf[0] = 0xa0 | Buffer.byteLength(str) + 2 // set bigger size - buf.write(str, 1) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/32-bits-signed-integers.js b/node_modules/msgpack5/test/32-bits-signed-integers.js deleted file mode 100644 index 1bb9c4ad4b..0000000000 --- a/node_modules/msgpack5/test/32-bits-signed-integers.js +++ /dev/null @@ -1,55 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encoding/decoding 32-bits big-endian signed integers', function (t) { - const encoder = msgpack() - const allNum = [] - - for (let i = 32769; i < 214748364; i += 10235023) { - allNum.push(-i) - } - - allNum.push(-214748364) - - allNum.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 5, 'must have 5 bytes') - t.equal(buf[0], 0xd2, 'must have the proper header') - t.equal(buf.readInt32BE(1), num, 'must 
decode correctly') - t.end() - }) - - t.test('decoding ' + num, function (t) { - const buf = Buffer.allocUnsafe(5) - buf[0] = 0xd2 - buf.writeInt32BE(num, 1) - t.equal(encoder.decode(buf), num, 'must decode correctly') - t.end() - }) - - t.test('mirror test ' + num, function (t) { - t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete 32-bits big-endian integer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(4) - buf[0] = 0xd2 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/32-bits-unsigned-integers.js b/node_modules/msgpack5/test/32-bits-unsigned-integers.js deleted file mode 100644 index 4d73505f20..0000000000 --- a/node_modules/msgpack5/test/32-bits-unsigned-integers.js +++ /dev/null @@ -1,56 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encoding/decoding 32-bits big-endian unsigned integers', function (t) { - const encoder = msgpack() - const allNum = [] - - for (let i = 65536; i < 0xffffffff; i += 102350237) { - allNum.push(i) - } - - allNum.push(0xfffffffe) - allNum.push(0xffffffff) - - allNum.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 5, 'must have 5 bytes') - t.equal(buf[0], 0xce, 'must have the proper header') - t.equal(buf.readUInt32BE(1), num, 'must decode correctly') - t.end() - }) - - t.test('decoding ' + num, function (t) { - const buf = Buffer.allocUnsafe(5) - buf[0] = 0xce - buf.writeUInt32BE(num, 1) - t.equal(encoder.decode(buf), num, 'must decode correctly') - t.end() - }) - - 
t.test('mirror test ' + num, function (t) { - t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete 32-bits big-endian unsigned integer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(4) - buf[0] = 0xce - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/32-bytes-strings.js b/node_modules/msgpack5/test/32-bytes-strings.js deleted file mode 100644 index b9c2d0eb9d..0000000000 --- a/node_modules/msgpack5/test/32-bytes-strings.js +++ /dev/null @@ -1,39 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -test('encode/decode up to 31 bytes strings', function (t) { - const encoder = msgpack() - const all = [] - - for (let i = 'a'; i.length < 32; i += 'a') { - all.push(i) - } - - all.forEach(function (str) { - t.test('encoding a string of length ' + str.length, function (t) { - const buf = encoder.encode(str) - t.equal(buf.length, 1 + Buffer.byteLength(str), 'must have 2 bytes') - t.equal(buf[0] & 0xe0, 0xa0, 'must have the proper header') - t.equal(buf.toString('utf8', 1, Buffer.byteLength(str) + 1), str, 'must decode correctly') - t.end() - }) - - t.test('decoding a string of length ' + str.length, function (t) { - const buf = Buffer.allocUnsafe(1 + Buffer.byteLength(str)) - buf[0] = 0xa0 | Buffer.byteLength(str) - buf.write(str, 1) - t.equal(encoder.decode(buf), str, 'must decode correctly') - t.end() - }) - - t.test('mirror test a string of length ' + str.length, function (t) { - t.equal(encoder.decode(encoder.encode(str)), str, 'must stay the same') - t.end() - }) - }) - - t.end() -}) diff --git 
a/node_modules/msgpack5/test/4-bytes-length-arrays.js b/node_modules/msgpack5/test/4-bytes-length-arrays.js deleted file mode 100644 index 46e92cccbd..0000000000 --- a/node_modules/msgpack5/test/4-bytes-length-arrays.js +++ /dev/null @@ -1,78 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -function build (size) { - const array = [] - - for (let i = 0; i < size; i++) { - array.push(42) - } - - return array -} - -test('encode/decode arrays up to 0xffffffff elements', function (t) { - const encoder = msgpack() - - function doTest (array) { - t.test('encoding an array with ' + array.length + ' elements', function (t) { - const buf = encoder.encode(array) - // the array is full of 1-byte integers - t.equal(buf.length, 5 + array.length, 'must have the right length') - t.equal(buf.readUInt8(0), 0xdd, 'must have the proper header') - t.equal(buf.readUInt32BE(1), array.length, 'must include the array length') - t.end() - }) - - t.test('mirror test for an array of length ' + array.length, function (t) { - t.deepEqual(encoder.decode(encoder.encode(array)), array, 'must stay the same') - t.end() - }) - } - - doTest(build(0xffff + 1)) - doTest(build(0xffff + 42)) - // unable to test bigger arrays do to out of memory errors - - t.end() -}) - -test('decoding an incomplete array', function (t) { - const encoder = msgpack() - - const array = build(0xffff + 42) - let buf = Buffer.allocUnsafe(5 + array.length) - buf[0] = 0xdd - buf.writeUInt32BE(array.length + 10, 1) // set bigger size - let pos = 5 - for (let i = 0; i < array.length; i++) { - const obj = encoder.encode(array[i], true) - obj.copy(buf, pos) - pos += obj.length - } - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - 
t.end() -}) - -test('decoding an incomplete header', function (t) { - const encoder = msgpack() - - let buf = Buffer.allocUnsafe(4) - buf[0] = 0xdd - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/4-bytes-length-buffers.js b/node_modules/msgpack5/test/4-bytes-length-buffers.js deleted file mode 100644 index 27749da5ab..0000000000 --- a/node_modules/msgpack5/test/4-bytes-length-buffers.js +++ /dev/null @@ -1,78 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -function build (size) { - const buf = Buffer.allocUnsafe(size) - buf.fill('a') - - return buf -} - -test('encode/decode 2^32-1 bytes buffers', function (t) { - const encoder = msgpack() - const all = [] - - all.push(build(Math.pow(2, 16))) - all.push(build(Math.pow(2, 16) + 1)) - all.push(build(Math.pow(2, 18) + 1)) - - all.forEach(function (orig) { - t.test('encoding a buffer of length ' + orig.length, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 5 + orig.length, 'must have the right length') - t.equal(buf.readUInt8(0), 0xc6, 'must have the proper header') - t.equal(buf.readUInt32BE(1), orig.length, 'must include the buf length') - t.equal(buf.toString('utf8', 5), orig.toString('utf8'), 'must decode correctly') - t.end() - }) - - t.test('decoding a buffer of length ' + orig.length, function (t) { - const buf = Buffer.allocUnsafe(5 + orig.length) - buf[0] = 0xc6 - buf.writeUInt32BE(orig.length, 1) - orig.copy(buf, 5) - t.equal(encoder.decode(buf).toString('utf8'), orig.toString('utf8'), 'must decode correctly') - t.end() - }) - - t.test('mirror test a buffer of length ' + orig.length, function (t) { - 
t.equal(encoder.decode(encoder.encode(orig)).toString(), orig.toString(), 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding a chopped 2^32-1 bytes buffer', function (t) { - const encoder = msgpack() - const orig = build(Math.pow(2, 18)) - let buf = Buffer.allocUnsafe(5 + orig.length) - buf[0] = 0xc6 - buf[1] = Math.pow(2, 32) - 1 // set bigger size - orig.copy(buf, 5) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) - -test('decoding an incomplete header of 2^32-1 bytes buffer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(4) - buf[0] = 0xc6 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/4-bytes-length-exts.js b/node_modules/msgpack5/test/4-bytes-length-exts.js deleted file mode 100644 index e1e539f84e..0000000000 --- a/node_modules/msgpack5/test/4-bytes-length-exts.js +++ /dev/null @@ -1,86 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encode/decode variable ext data up between 0x10000 and 0xffffffff', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (size, value) { - this.value = value - this.size = size - } - - function mytipeEncode (obj) { - const buf = Buffer.allocUnsafe(obj.size) - buf.fill(obj.value) - return buf - } - - function mytipeDecode (data) { - const result = new MyType(data.length, data.toString('utf8', 0, 1)) - - for (let i = 0; i < data.length; i++) { - if (data.readUInt8(0) !== 
data.readUInt8(i)) { - throw new Error('should all be the same') - } - } - - return result - } - - encoder.register(0x52, MyType, mytipeEncode, mytipeDecode) - - all.push(new MyType(0x10000, 'a')) - all.push(new MyType(0x10001, 'a')) - all.push(new MyType(0xffffff, 'a')) - - all.forEach(function (orig) { - t.test('encoding a custom obj of length ' + orig.size, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 6 + orig.size, 'must have the right length') - t.equal(buf.readUInt8(0), 0xc9, 'must have the ext header') - t.equal(buf.readUInt32BE(1), orig.size, 'must include the data length') - t.equal(buf.readUInt8(5), 0x52, 'must include the custom type id') - t.equal(buf.toString('utf8', 6, 7), orig.value, 'must decode correctly') - t.end() - }) - - t.test('mirror test with a custom obj of length ' + orig.size, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.test('decoding an incomplete variable ext data up between 0x10000 and 0xffffffff', function (t) { - const obj = encoder.encode(new MyType(0xffffff, 'a')) - let buf = Buffer.allocUnsafe(obj.length) - buf[0] = 0xc9 - buf.writeUInt32BE(obj.length + 2, 1) // set bigger size - obj.copy(buf, 5, 5, obj.length) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.test('decoding an incomplete header of variable ext data up between 0x10000 and 0xffffffff', function (t) { - let buf = Buffer.allocUnsafe(5) - buf[0] = 0xc9 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.end() -}) diff --git 
a/node_modules/msgpack5/test/4-bytes-length-strings.js b/node_modules/msgpack5/test/4-bytes-length-strings.js deleted file mode 100644 index 7670151a6e..0000000000 --- a/node_modules/msgpack5/test/4-bytes-length-strings.js +++ /dev/null @@ -1,83 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encode/decode 2^16 <-> (2^32 - 1) bytes strings', function (t) { - const encoder = msgpack() - const all = [] - let str - - str = Buffer.allocUnsafe(Math.pow(2, 16)) - str.fill('a') - all.push(str.toString()) - - str = Buffer.allocUnsafe(Math.pow(2, 16) + 1) - str.fill('a') - all.push(str.toString()) - - str = Buffer.allocUnsafe(Math.pow(2, 20)) - str.fill('a') - all.push(str.toString()) - - all.forEach(function (str) { - t.test('encoding a string of length ' + str.length, function (t) { - const buf = encoder.encode(str) - t.equal(buf.length, 5 + Buffer.byteLength(str), 'must be the proper length') - t.equal(buf[0], 0xdb, 'must have the proper header') - t.equal(buf.readUInt32BE(1), Buffer.byteLength(str), 'must include the str length') - t.equal(buf.toString('utf8', 5, Buffer.byteLength(str) + 5), str, 'must decode correctly') - t.end() - }) - - t.test('decoding a string of length ' + str.length, function (t) { - const buf = Buffer.allocUnsafe(5 + Buffer.byteLength(str)) - buf[0] = 0xdb - buf.writeUInt32BE(Buffer.byteLength(str), 1) - buf.write(str, 5) - t.equal(encoder.decode(buf), str, 'must decode correctly') - t.end() - }) - - t.test('mirror test a string of length ' + str.length, function (t) { - t.equal(encoder.decode(encoder.encode(str)), str, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding a chopped string', function (t) { - const encoder = msgpack() - let str - for (str = 'a'; str.length < 0xffff + 100;) { - str += 'a' - } - let buf = Buffer.allocUnsafe(5 + Buffer.byteLength(str)) - buf[0] = 0xdb - 
buf.writeUInt32BE(Buffer.byteLength(str) + 10, 1) // set bigger size - buf.write(str, 5) - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) - -test('decoding an incomplete header of a string', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(4) - buf[0] = 0xdb - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/4-bytes-length-uint8arrays.js b/node_modules/msgpack5/test/4-bytes-length-uint8arrays.js deleted file mode 100644 index 46147d9ad9..0000000000 --- a/node_modules/msgpack5/test/4-bytes-length-uint8arrays.js +++ /dev/null @@ -1,42 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -function build (size) { - const array = [] - let i - - for (i = 0; i < size; i++) { - array.push(42) - } - - return new Uint8Array(array) -} - -test('encode/decode 2^8-1 Uint8Arrays', function (t) { - const encoder = msgpack() - const all = [] - - all.push(build(Math.pow(2, 16))) - all.push(build(Math.pow(2, 16) + 1)) - all.push(build(Math.pow(2, 18) + 1)) - - all.forEach(function (array) { - t.test('encoding Uint8Array of length ' + array.byteLength + ' bytes', function (t) { - const buf = encoder.encode(array) - t.equal(buf.length, 5 + array.byteLength, 'must have the right length') - t.equal(buf.readUInt8(0), 0xc6, 'must have the proper header') - t.equal(buf.readUInt32BE(1), array.byteLength, 'must include the buf length') - t.end() - }) - - t.test('mirror test for an Uint8Array of length ' + array.byteLength + ' bytes', function 
(t) { - t.deepEqual(encoder.decode(encoder.encode(array)), Buffer.from(array), 'must stay the same') - t.end() - }) - }) - - t.end() -}) diff --git a/node_modules/msgpack5/test/5-bits-negative-integers.js b/node_modules/msgpack5/test/5-bits-negative-integers.js deleted file mode 100644 index 76d44368dc..0000000000 --- a/node_modules/msgpack5/test/5-bits-negative-integers.js +++ /dev/null @@ -1,36 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -test('encoding/decoding 5-bits negative ints', function (t) { - const encoder = msgpack() - const allNum = [] - - for (let i = 1; i <= 32; i++) { - allNum.push(-i) - } - - allNum.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 1, 'must have 1 byte') - t.equal(buf[0], num + 0x100, 'must encode correctly') - t.end() - }) - - t.test('decoding' + num, function (t) { - const buf = Buffer.from([num + 0x100]) - t.equal(encoder.decode(buf), num, 'must decode correctly') - t.end() - }) - - t.test('mirror test' + num, function (t) { - t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') - t.end() - }) - }) - - t.end() -}) diff --git a/node_modules/msgpack5/test/64-bits-signed-integers.js b/node_modules/msgpack5/test/64-bits-signed-integers.js deleted file mode 100644 index 86b8342540..0000000000 --- a/node_modules/msgpack5/test/64-bits-signed-integers.js +++ /dev/null @@ -1,48 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encoding/decoding 64-bits big-endian signed integers', function (t) { - const encoder = msgpack() - const table = [ - { num: -9007199254740991, hi: 0xffe00000, lo: 0x00000001 }, - { num: -4294967297, hi: 0xfffffffe, lo: 0xffffffff }, - { num: -4294967296, hi: 0xffffffff, lo: 0x00000000 }, - { num: -4294967295, 
hi: 0xffffffff, lo: 0x00000001 }, - { num: -214748365, hi: 0xffffffff, lo: 0xf3333333 } - ] - - table.forEach(function (testCase) { - t.test('encoding ' + testCase.num, function (t) { - const buf = encoder.encode(testCase.num) - t.equal(buf.length, 9, 'must have 9 bytes') - t.equal(buf[0], 0xd3, 'must have the proper header') - t.equal(buf.readUInt32BE(1), testCase.hi, 'hi word must be properly written') - t.equal(buf.readUInt32BE(5), testCase.lo, 'lo word must be properly written') - t.end() - }) - - t.test('mirror test ' + testCase.num, function (t) { - t.equal(encoder.decode(encoder.encode(testCase.num)), testCase.num, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete 64-bits big-endian signed integer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(8) - buf[0] = 0xd3 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/64-bits-unsigned-integers.js b/node_modules/msgpack5/test/64-bits-unsigned-integers.js deleted file mode 100644 index f5d628155b..0000000000 --- a/node_modules/msgpack5/test/64-bits-unsigned-integers.js +++ /dev/null @@ -1,48 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encoding/decoding 64-bits big-endian unsigned integers', function (t) { - const encoder = msgpack() - const allNum = [] - - allNum.push(0x0000000100000000) - allNum.push(0xffffffffeeeee) - - allNum.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 9, 'must have 9 bytes') - t.equal(buf[0], 0xcf, 'must have the proper header') - let result = 0 - for (let k = 7; k >= 0; k--) { - 
result += (buf.readUInt8(k + 1) * Math.pow(2, (8 * (7 - k)))) - } - t.equal(result, num, 'must decode correctly') - t.end() - }) - - t.test('mirror test ' + num, function (t) { - t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete 64-bits big-endian unsigned integer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(8) - buf[0] = 0xcf - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/7-bits-positive-integers.js b/node_modules/msgpack5/test/7-bits-positive-integers.js deleted file mode 100644 index 16685ea48f..0000000000 --- a/node_modules/msgpack5/test/7-bits-positive-integers.js +++ /dev/null @@ -1,36 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -test('encoding/decoding 7-bits positive ints', function (t) { - const encoder = msgpack() - const allNum = [] - - for (let i = 0; i < 126; i++) { - allNum.push(i) - } - - allNum.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 1, 'must have 1 byte') - t.equal(buf[0], num, 'must decode correctly') - t.end() - }) - - t.test('decoding ' + num, function (t) { - const buf = Buffer.from([num]) - t.equal(encoder.decode(buf), num, 'must decode correctly') - t.end() - }) - - t.test('mirror test' + num, function (t) { - t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') - t.end() - }) - }) - - t.end() -}) diff --git a/node_modules/msgpack5/test/8-bits-positive-integers.js b/node_modules/msgpack5/test/8-bits-positive-integers.js deleted file mode 100644 index b30fb2cfed..0000000000 --- 
a/node_modules/msgpack5/test/8-bits-positive-integers.js +++ /dev/null @@ -1,51 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encoding/decoding 8-bits integers', function (t) { - const encoder = msgpack() - const allNum = [] - - for (let i = 128; i < 256; i++) { - allNum.push(i) - } - - allNum.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 2, 'must have 2 bytes') - t.equal(buf[0], 0xcc, 'must have the proper header') - t.equal(buf[1], num, 'must decode correctly') - t.end() - }) - - t.test('decoding ' + num, function (t) { - const buf = Buffer.from([0xcc, num]) - t.equal(encoder.decode(buf), num, 'must decode correctly') - t.end() - }) - - t.test('mirror test ' + num, function (t) { - t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete 8-bits unsigned integer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(1) - buf[0] = 0xcc - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/8-bits-signed-integers.js b/node_modules/msgpack5/test/8-bits-signed-integers.js deleted file mode 100644 index e074d3563f..0000000000 --- a/node_modules/msgpack5/test/8-bits-signed-integers.js +++ /dev/null @@ -1,53 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encoding/decoding 8-bits big-endian signed integers', function (t) { - const encoder = msgpack() - const allNum = [] - - for (let i = 33; i <= 128; i++) { - 
allNum.push(-i) - } - - allNum.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 2, 'must have 2 bytes') - t.equal(buf[0], 0xd0, 'must have the proper header') - t.equal(buf.readInt8(1), num, 'must decode correctly') - t.end() - }) - - t.test('decoding ' + num, function (t) { - const buf = Buffer.allocUnsafe(3) - buf[0] = 0xd0 - buf.writeInt8(num, 1) - t.equal(encoder.decode(buf), num, 'must decode correctly') - t.end() - }) - - t.test('mirror test ' + num, function (t) { - t.equal(encoder.decode(encoder.encode(num)), num, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete 8-bits big-endian signed integer', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(1) - buf[0] = 0xd0 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/NaN.js b/node_modules/msgpack5/test/NaN.js deleted file mode 100644 index bf4c1b2539..0000000000 --- a/node_modules/msgpack5/test/NaN.js +++ /dev/null @@ -1,52 +0,0 @@ -'use strict' - -const test = require('tape').test -const msgpack = require('../') - -test('encode NaN as 32-bit float', function (t) { - const encoder = msgpack() - - const buf = encoder.encode(NaN) - t.equal(buf[0], 0xca) - t.equal(buf.byteLength, 5) - - t.end() -}) - -test('encode NaN as 64-bit float with forceFloat64', function (t) { - const encoder = msgpack({ forceFloat64: true }) - - const buf = encoder.encode(NaN) - - t.equal(buf[0], 0xcb) - t.equal(buf.byteLength, 9) - - t.end() -}) - -test('round-trip 32-bit NaN', function (t) { - const encoder = msgpack() - - t.assert(Object.is(encoder.decode(encoder.encode(NaN)), NaN)) - - t.end() -}) - -test('round-trip 64-bit NaN with forceFloat64', function 
(t) { - const encoder = msgpack({ forceFloat64: true }) - - t.assert(Object.is(encoder.decode(encoder.encode(NaN)), NaN)) - - t.end() -}) - -test('decode 64-bit NaN', function (t) { - const encoder = msgpack() - const buf = Buffer.alloc(9) - buf.writeUInt8(0xcb, 0) - buf.writeDoubleBE(NaN, 1) - - t.assert(Object.is(encoder.decode(buf), NaN)) - - t.end() -}) diff --git a/node_modules/msgpack5/test/booleans.js b/node_modules/msgpack5/test/booleans.js deleted file mode 100644 index 02fb5be37f..0000000000 --- a/node_modules/msgpack5/test/booleans.js +++ /dev/null @@ -1,21 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -test('encode/decode booleans', function (t) { - const encoder = msgpack() - - t.equal(encoder.encode(true)[0], 0xc3, 'encode true as 0xc3') - t.equal(encoder.encode(true).length, 1, 'encode true as a buffer of length 1') - t.equal(encoder.decode(Buffer.from([0xc3])), true, 'decode 0xc3 as true') - t.equal(encoder.decode(encoder.encode(true)), true, 'mirror test true') - - t.equal(encoder.encode(false)[0], 0xc2, 'encode false as 0xc2') - t.equal(encoder.encode(false).length, 1, 'encode false as a buffer of length 1') - t.equal(encoder.decode(Buffer.from([0xc2])), false, 'decode 0xc2 as false') - t.equal(encoder.decode(encoder.encode(false)), false, 'mirror test false') - - t.end() -}) diff --git a/node_modules/msgpack5/test/compatibility-mode.js b/node_modules/msgpack5/test/compatibility-mode.js deleted file mode 100644 index 185a6ad5e8..0000000000 --- a/node_modules/msgpack5/test/compatibility-mode.js +++ /dev/null @@ -1,73 +0,0 @@ -'use strict' - -const test = require('tape').test -const msgpack = require('../') - -function buildBuffer (size) { - const buf = Buffer.allocUnsafe(size) - buf.fill('a') - - return buf -} - -test('encode/compatibility mode', function (t) { - const compatEncoder = msgpack({ - compatibilityMode: true - }) - const defaultEncoder = 
msgpack({ - compatibilityMode: false - }) - - const oneBytesStr = Array(31 + 2).join('x') - const twoBytesStr = Array(255 + 2).join('x') - - t.test('default encoding a string of length ' + oneBytesStr.length, function (t) { - // Default: use 1 byte length string (str8) - const buf = defaultEncoder.encode(oneBytesStr) - t.equal(buf[0], 0xd9, 'must have the proper header (str8)') - t.equal(buf.toString('utf8', 2, Buffer.byteLength(oneBytesStr) + 2), oneBytesStr, 'must decode correctly') - t.end() - }) - - t.test('compat. encoding a string of length ' + oneBytesStr.length, function (t) { - // Compat. mode: use 2 byte length string (str16) - const buf = compatEncoder.encode(oneBytesStr) - t.equal(buf[0], 0xda, 'must have the proper header (str16)') - t.equal(buf.toString('utf8', 3, Buffer.byteLength(oneBytesStr) + 3), oneBytesStr, 'must decode correctly') - t.end() - }) - - t.test('encoding for a string of length ' + twoBytesStr.length, function (t) { - // Two byte strings: compat. mode should make no difference - const buf1 = defaultEncoder.encode(twoBytesStr) - const buf2 = compatEncoder.encode(twoBytesStr) - t.deepEqual(buf1, buf2, 'must be equal for two byte strings') - t.end() - }) - - const fixRawBuffer = buildBuffer(1) - const raw16Buffer = buildBuffer(Math.pow(2, 16) - 1) - const raw32Buffer = buildBuffer(Math.pow(2, 16) + 1) - - t.test('compat. encoding a Buffer of length ' + fixRawBuffer.length, function (t) { - // fix raw header: 0xa0 | 1 = 0xa1 - const buf = compatEncoder.encode(fixRawBuffer) - t.equal(buf[0], 0xa1, 'must have the proper header (fix raw)') - t.equal(buf.toString('utf8', 1, Buffer.byteLength(fixRawBuffer) + 1), fixRawBuffer.toString('utf8'), 'must decode correctly') - t.end() - }) - - t.test('compat. 
encoding a Buffer of length ' + raw16Buffer.length, function (t) { - const buf = compatEncoder.encode(raw16Buffer) - t.equal(buf[0], 0xda, 'must have the proper header (raw 16)') - t.equal(buf.toString('utf8', 3, Buffer.byteLength(raw16Buffer) + 3), raw16Buffer.toString('utf8'), 'must decode correctly') - t.end() - }) - - t.test('compat. encoding a Buffer of length ' + raw32Buffer.length, function (t) { - const buf = compatEncoder.encode(raw32Buffer) - t.equal(buf[0], 0xdb, 'must have the proper header (raw 32)') - t.equal(buf.toString('utf8', 5, Buffer.byteLength(raw32Buffer) + 5), raw32Buffer.toString('utf8'), 'must decode correctly') - t.end() - }) -}) diff --git a/node_modules/msgpack5/test/datenull.js b/node_modules/msgpack5/test/datenull.js deleted file mode 100644 index f1933372c2..0000000000 --- a/node_modules/msgpack5/test/datenull.js +++ /dev/null @@ -1,13 +0,0 @@ -'use strict' -const test = require('tape').test -const msgpack = require('../') - -test('encode date is null ', function (t) { - const encoder = msgpack({ - disableTimestampEncoding: true - }) - - t.equal(encoder.encode(null)[0], 0xc0, 'encode null as null') - - t.end() -}) diff --git a/node_modules/msgpack5/test/doubles.js b/node_modules/msgpack5/test/doubles.js deleted file mode 100644 index 5fe8002d06..0000000000 --- a/node_modules/msgpack5/test/doubles.js +++ /dev/null @@ -1,57 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encoding/decoding 64-bits float numbers', function (t) { - const encoder = msgpack() - const allNum = [] - - allNum.push(748365544534.2) - allNum.push(-222111111000004.2) - allNum.push(9007199254740992) - allNum.push(-9007199254740992) - - allNum.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - const dec = buf.readDoubleBE(1) - t.equal(buf.length, 9, 'must have 9 bytes') - t.equal(buf[0], 0xcb, 
'must have the proper header') - t.true(Math.abs(dec - num) < 0.1, 'must decode correctly') - t.end() - }) - - t.test('decoding ' + num, function (t) { - const buf = Buffer.allocUnsafe(9) - buf[0] = 0xcb - buf.writeDoubleBE(num, 1) - const dec = encoder.decode(buf) - t.true(Math.abs(dec - num) < 0.1, 'must decode correctly') - t.end() - }) - - t.test('mirror test ' + num, function (t) { - const dec = encoder.decode(encoder.encode(num)) - t.true(Math.abs(dec - num) < 0.1, 'must decode correctly') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete 64-bits float numbers', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(8) - buf[0] = 0xcb - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/ext-custom-encode-check.js b/node_modules/msgpack5/test/ext-custom-encode-check.js deleted file mode 100644 index 22f591f946..0000000000 --- a/node_modules/msgpack5/test/ext-custom-encode-check.js +++ /dev/null @@ -1,64 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -test('encode/decode ext with a custom object check', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (data) { - this.data = data - } - - function checkForMyType (obj) { - return obj instanceof MyType - } - - function mytypeEncode (obj) { - const buf = Buffer.allocUnsafe(2) - buf.writeUInt8(0x42, 0) - buf.writeUInt8(obj.data, 1) - return buf - } - - function mytypeDecode (data) { - return new MyType(data.readUInt8(0)) - } - - encoder.registerEncoder(checkForMyType, mytypeEncode) - encoder.registerDecoder(0x42, mytypeDecode) - - all.push(new MyType(0)) - all.push(new MyType(1)) - all.push(new MyType(42)) - - 
all.forEach(function (orig) { - t.test('encoding a custom obj encoded as ' + orig.data, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 3, 'must have the right length') - t.equal(buf.readUInt8(0), 0xd4, 'must have the fixext header') - t.equal(buf.readUInt8(1), 0x42, 'must include the custom type id') - t.equal(buf.readUInt8(2), orig.data, 'must decode correctly') - t.end() - }) - - t.test('decoding a custom obj encoded as ' + orig.data, function (t) { - const buf = Buffer.allocUnsafe(3) - buf[0] = 0xd4 - buf[1] = 0x42 - buf.writeUInt8(orig.data, 2) - t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') - t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') - t.end() - }) - - t.test('mirror test with a custom obj containing ' + orig.data, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.end() -}) diff --git a/node_modules/msgpack5/test/fixexts.js b/node_modules/msgpack5/test/fixexts.js deleted file mode 100644 index c045984432..0000000000 --- a/node_modules/msgpack5/test/fixexts.js +++ /dev/null @@ -1,497 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encode/decode 1 byte fixext data', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (data) { - this.data = data - } - - function mytypeEncode (obj) { - const buf = Buffer.allocUnsafe(1) - buf.writeUInt8(obj.data, 0) - return buf - } - - function mytypeDecode (data) { - return new MyType(data.readUInt8(0)) - } - - encoder.register(0x42, MyType, mytypeEncode, mytypeDecode) - - all.push(new MyType(0)) - all.push(new MyType(1)) - all.push(new MyType(42)) - - all.forEach(function (orig) { - t.test('encoding a custom obj encoded as ' + orig.data, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 3, 'must have the right 
length') - t.equal(buf.readUInt8(0), 0xd4, 'must have the fixext header') - t.equal(buf.readUInt8(1), 0x42, 'must include the custom type id') - t.equal(buf.readUInt8(2), orig.data, 'must decode correctly') - t.end() - }) - - t.test('decoding a custom obj encoded as ' + orig.data, function (t) { - const buf = Buffer.allocUnsafe(3) - buf[0] = 0xd4 - buf[1] = 0x42 - buf.writeUInt8(orig.data, 2) - t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') - t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') - t.end() - }) - - t.test('mirror test with a custom obj containing ' + orig.data, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('encode/decode 2 bytes fixext data', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (data) { - this.data = data - } - - function mytypeEncode (obj) { - const buf = Buffer.allocUnsafe(2) - buf.writeUInt16BE(obj.data, 0) - return buf - } - - function mytypeDecode (data) { - return new MyType(data.readUInt16BE(0)) - } - - encoder.register(0x42, MyType, mytypeEncode, mytypeDecode) - - all.push(new MyType(0)) - all.push(new MyType(1)) - all.push(new MyType(42)) - - all.forEach(function (orig) { - t.test('encoding a custom obj encoded as ' + orig.data, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 4, 'must have the right length') - t.equal(buf.readUInt8(0), 0xd5, 'must have the fixext header') - t.equal(buf.readUInt8(1), 0x42, 'must include the custom type id') - t.equal(buf.readUInt16BE(2), orig.data, 'must decode correctly') - t.end() - }) - - t.test('decoding a custom obj encoded as ' + orig.data, function (t) { - const buf = Buffer.allocUnsafe(4) - buf[0] = 0xd5 - buf[1] = 0x42 - buf.writeUInt16BE(orig.data, 2) - t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') - t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') - 
t.end() - }) - - t.test('mirror test with a custom obj containing ' + orig.data, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('encode/decode 4 bytes fixext data', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (data) { - this.data = data - } - - function mytypeEncode (obj) { - const buf = Buffer.allocUnsafe(4) - buf.writeUInt32BE(obj.data, 0) - return buf - } - - function mytypeDecode (data) { - return new MyType(data.readUInt32BE(0)) - } - - encoder.register(0x44, MyType, mytypeEncode, mytypeDecode) - - all.push(new MyType(0)) - all.push(new MyType(1)) - all.push(new MyType(42)) - - all.forEach(function (orig) { - t.test('encoding a custom obj encoded as ' + orig.data, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 6, 'must have the right length') - t.equal(buf.readUInt8(0), 0xd6, 'must have the fixext header') - t.equal(buf.readUInt8(1), 0x44, 'must include the custom type id') - t.equal(buf.readUInt32BE(2), orig.data, 'must decode correctly') - t.end() - }) - - t.test('decoding a custom obj encoded as ' + orig.data, function (t) { - const buf = Buffer.allocUnsafe(6) - buf[0] = 0xd6 - buf[1] = 0x44 - buf.writeUInt32BE(orig.data, 2) - t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') - t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') - t.end() - }) - - t.test('mirror test with a custom obj containing ' + orig.data, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('encode/decode 8 bytes fixext data', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (data) { - this.data = data - } - - function mytypeEncode (obj) { - const buf = Buffer.allocUnsafe(8) - buf.writeUInt32BE(obj.data / 2, 0) - buf.writeUInt32BE(obj.data / 2, 4) - return buf - } - - function 
mytypeDecode (data) { - return new MyType(data.readUInt32BE(0) + data.readUInt32BE(4)) - } - - encoder.register(0x44, MyType, mytypeEncode, mytypeDecode) - - all.push(new MyType(2)) - all.push(new MyType(4)) - all.push(new MyType(42)) - - all.forEach(function (orig) { - t.test('encoding a custom obj encoded as ' + orig.data, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 10, 'must have the right length') - t.equal(buf.readUInt8(0), 0xd7, 'must have the fixext header') - t.equal(buf.readUInt8(1), 0x44, 'must include the custom type id') - t.equal(buf.readUInt32BE(2) + buf.readUInt32BE(6), orig.data, 'must decode correctly') - t.end() - }) - - t.test('decoding a custom obj encoded as ' + orig.data, function (t) { - const buf = Buffer.allocUnsafe(10) - buf[0] = 0xd7 - buf[1] = 0x44 - buf.writeUInt32BE(orig.data / 2, 2) - buf.writeUInt32BE(orig.data / 2, 6) - t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') - t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') - t.end() - }) - - t.test('mirror test with a custom obj containing ' + orig.data, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('encode/decode 16 bytes fixext data', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (data) { - this.data = data - } - - function mytypeEncode (obj) { - const buf = Buffer.allocUnsafe(16) - buf.writeUInt32BE(obj.data / 4, 0) - buf.writeUInt32BE(obj.data / 4, 4) - buf.writeUInt32BE(obj.data / 4, 8) - buf.writeUInt32BE(obj.data / 4, 12) - return buf - } - - function mytypeDecode (data) { - return new MyType(data.readUInt32BE(0) + data.readUInt32BE(4) + data.readUInt32BE(8) + data.readUInt32BE(12)) - } - - encoder.register(0x46, MyType, mytypeEncode, mytypeDecode) - - all.push(new MyType(4)) - all.push(new MyType(8)) - all.push(new MyType(44)) - - all.forEach(function (orig) { - t.test('encoding a 
custom obj encoded as ' + orig.data, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 18, 'must have the right length') - t.equal(buf.readUInt8(0), 0xd8, 'must have the fixext header') - t.equal(buf.readUInt8(1), 0x46, 'must include the custom type id') - t.equal(buf.readUInt32BE(2) + buf.readUInt32BE(6) + buf.readUInt32BE(10) + buf.readUInt32BE(14), orig.data, 'must decode correctly') - t.end() - }) - - t.test('decoding a custom obj encoded as ' + orig.data, function (t) { - const buf = Buffer.allocUnsafe(18) - buf[0] = 0xd8 - buf[1] = 0x46 - buf.writeUInt32BE(orig.data / 4, 2) - buf.writeUInt32BE(orig.data / 4, 6) - buf.writeUInt32BE(orig.data / 4, 10) - buf.writeUInt32BE(orig.data / 4, 14) - t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') - t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') - t.end() - }) - - t.test('mirror test with a custom obj containing ' + orig.data, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('encode/decode fixext inside a map', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (data) { - this.data = data - } - - function mytypeEncode (obj) { - const buf = Buffer.allocUnsafe(4) - buf.writeUInt32BE(obj.data, 0) - return buf - } - - function mytypeDecode (data) { - return new MyType(data.readUInt32BE(0)) - } - - encoder.register(0x42, MyType, mytypeEncode, mytypeDecode) - - all.push({ ret: new MyType(42) }) - all.push({ a: new MyType(42), b: new MyType(43) }) - - all.push([1, 2, 3, 4, 5, 6].reduce(function (acc, key) { - acc[key] = new MyType(key) - return acc - }, {})) - - all.forEach(function (orig) { - t.test('mirror test with a custom obj inside a map', function (t) { - const encoded = encoder.encode(orig) - t.deepEqual(encoder.decode(encoded), orig, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('encode/decode 8 bytes fixext 
data', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (data) { - this.data = data - } - - function mytypeEncode (obj) { - const buf = Buffer.allocUnsafe(8) - buf.writeUInt32BE(obj.data / 2, 0) - buf.writeUInt32BE(obj.data / 2, 4) - return buf - } - - function mytypeDecode (data) { - return new MyType(data.readUInt32BE(0) + data.readUInt32BE(4)) - } - - encoder.register(0x44, MyType, mytypeEncode, mytypeDecode) - - all.push(new MyType(2)) - all.push(new MyType(4)) - all.push(new MyType(42)) - - all.forEach(function (orig) { - t.test('encoding a custom obj encoded as ' + orig.data, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 10, 'must have the right length') - t.equal(buf.readUInt8(0), 0xd7, 'must have the fixext header') - t.equal(buf.readUInt8(1), 0x44, 'must include the custom type id') - t.equal(buf.readUInt32BE(2) + buf.readUInt32BE(6), orig.data, 'must decode correctly') - t.end() - }) - - t.test('decoding a custom obj encoded as ' + orig.data, function (t) { - const buf = Buffer.allocUnsafe(10) - buf[0] = 0xd7 - buf[1] = 0x44 - buf.writeUInt32BE(orig.data / 2, 2) - buf.writeUInt32BE(orig.data / 2, 6) - t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') - t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') - t.end() - }) - - t.test('mirror test with a custom obj containing ' + orig.data, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - -test('encode/decode 16 bytes fixext data', function (t) { - const encoder = msgpack() - const all = [] - - function MyType (data) { - this.data = data - } - - function mytypeEncode (obj) { - const buf = Buffer.allocUnsafe(16) - buf.writeUInt32BE(obj.data / 4, 0) - buf.writeUInt32BE(obj.data / 4, 4) - buf.writeUInt32BE(obj.data / 4, 8) - buf.writeUInt32BE(obj.data / 4, 12) - return buf - } - - function mytypeDecode (data) { - return new 
MyType(data.readUInt32BE(0) + data.readUInt32BE(4) + data.readUInt32BE(8) + data.readUInt32BE(12)) - } - - encoder.register(0x46, MyType, mytypeEncode, mytypeDecode) - - all.push(new MyType(4)) - all.push(new MyType(8)) - all.push(new MyType(44)) - - all.forEach(function (orig) { - t.test('encoding a custom obj encoded as ' + orig.data, function (t) { - const buf = encoder.encode(orig) - t.equal(buf.length, 18, 'must have the right length') - t.equal(buf.readUInt8(0), 0xd8, 'must have the fixext header') - t.equal(buf.readUInt8(1), 0x46, 'must include the custom type id') - t.equal(buf.readUInt32BE(2) + buf.readUInt32BE(6) + buf.readUInt32BE(10) + buf.readUInt32BE(14), orig.data, 'must decode correctly') - t.end() - }) - - t.test('decoding a custom obj encoded as ' + orig.data, function (t) { - const buf = Buffer.allocUnsafe(18) - buf[0] = 0xd8 - buf[1] = 0x46 - buf.writeUInt32BE(orig.data / 4, 2) - buf.writeUInt32BE(orig.data / 4, 6) - buf.writeUInt32BE(orig.data / 4, 10) - buf.writeUInt32BE(orig.data / 4, 14) - t.ok(encoder.decode(buf) instanceof MyType, 'must have the correct prototype') - t.deepEqual(encoder.decode(buf), orig, 'must decode correctly') - t.end() - }) - - t.test('mirror test with a custom obj containing ' + orig.data, function (t) { - t.deepEqual(encoder.decode(encoder.encode(orig)), orig, 'must stay the same') - t.end() - }) - }) - - t.test('decoding an incomplete 1 byte fixext data', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(2) - buf[0] = 0xd4 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.test('decoding an incomplete 2 byte fixext data', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(3) - buf[0] = 0xd5 - buf = bl().append(buf) - const origLength = buf.length - 
t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.test('decoding an incomplete 4 byte fixext data', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(5) - buf[0] = 0xd6 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.test('decoding an incomplete 8 byte fixext data', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(9) - buf[0] = 0xd7 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.test('decoding an incomplete 16 byte fixext data', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(17) - buf[0] = 0xd8 - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() - }) - - t.end() -}) diff --git a/node_modules/msgpack5/test/floats.js b/node_modules/msgpack5/test/floats.js deleted file mode 100644 index 540a3fb735..0000000000 --- a/node_modules/msgpack5/test/floats.js +++ /dev/null @@ -1,117 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -test('encoding/decoding 32-bits float numbers', function (t) { - const encoder = msgpack() - const float32 = [ - 1.5, - 0.15625, - -2.5 - ] - - const float64 = [ - Math.pow(2, 150), - 1.337, - 2.2 
- ] - - float64.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 9, 'must have 5 bytes') - t.equal(buf[0], 0xcb, 'must have the proper header') - - const dec = buf.readDoubleBE(1) - t.equal(dec, num, 'must decode correctly') - t.end() - }) - - t.test('decoding ' + num, function (t) { - const buf = Buffer.allocUnsafe(9) - buf[0] = 0xcb - buf.writeDoubleBE(num, 1) - - const dec = encoder.decode(buf) - t.equal(dec, num, 'must decode correctly') - t.end() - }) - - t.test('mirror test ' + num, function (t) { - const dec = encoder.decode(encoder.encode(num)) - t.equal(dec, num, 'must decode correctly') - t.end() - }) - }) - - float32.forEach(function (num) { - t.test('encoding ' + num, function (t) { - const buf = encoder.encode(num) - t.equal(buf.length, 5, 'must have 5 bytes') - t.equal(buf[0], 0xca, 'must have the proper header') - - const dec = buf.readFloatBE(1) - t.equal(dec, num, 'must decode correctly') - t.end() - }) - - t.test('forceFloat64 encoding ' + num, function (t) { - const enc = msgpack({ forceFloat64: true }) - const buf = enc.encode(num) - - t.equal(buf.length, 9, 'must have 9 bytes') - t.equal(buf[0], 0xcb, 'must have the proper header') - - const dec = buf.readDoubleBE(1) - t.equal(dec, num, 'must decode correctly') - t.end() - }) - - t.test('decoding ' + num, function (t) { - const buf = Buffer.allocUnsafe(5) - buf[0] = 0xca - buf.writeFloatBE(num, 1) - - const dec = encoder.decode(buf) - t.equal(dec, num, 'must decode correctly') - t.end() - }) - - t.test('mirror test ' + num, function (t) { - const dec = encoder.decode(encoder.encode(num)) - t.equal(dec, num, 'must decode correctly') - t.end() - }) - }) - - t.end() -}) - -test('decoding an incomplete 32-bits float numbers', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(4) - buf[0] = 0xca - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, 
encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) - -test('decoding an incomplete 64-bits float numbers', function (t) { - const encoder = msgpack() - let buf = Buffer.allocUnsafe(8) - buf[0] = 0xcb - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - encoder.decode(buf) - }, encoder.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/functions.js b/node_modules/msgpack5/test/functions.js deleted file mode 100644 index 4a50103777..0000000000 --- a/node_modules/msgpack5/test/functions.js +++ /dev/null @@ -1,19 +0,0 @@ -'use strict' - -const test = require('tape').test -const msgpack = require('../') -const noop = function () {} - -test('encode a function inside a map', function (t) { - const encoder = msgpack() - const expected = { - hello: 'world' - } - const toEncode = { - hello: 'world', - func: noop - } - - t.deepEqual(encoder.decode(encoder.encode(toEncode)), expected, 'remove the function from the map') - t.end() -}) diff --git a/node_modules/msgpack5/test/levelup-encoding.js b/node_modules/msgpack5/test/levelup-encoding.js deleted file mode 100644 index 6bcab775c6..0000000000 --- a/node_modules/msgpack5/test/levelup-encoding.js +++ /dev/null @@ -1,69 +0,0 @@ -'use strict' - -const test = require('tape').test -const level = require('memdb') -const msgpack = require('../') - -test('msgpack level encoding put', function (t) { - t.plan(4) - - const pack = msgpack() - const db = level({ - valueEncoding: pack - }) - const obj = { my: 'obj' } - - db.put('hello', obj, function (err) { - t.error(err, 'put has no errors') - db.get('hello', { valueEncoding: 'binary' }, function (err, buf) { - t.error(err, 'get has no error') - t.deepEqual(pack.decode(buf), obj) - db.close(function () { - t.pass('db closed') - }) - }) - }) 
-}) - -test('msgpack level encoding get', function (t) { - t.plan(4) - - const pack = msgpack() - const db = level({ - valueEncoding: pack - }) - const obj = { my: 'obj' } - const buf = pack.encode(obj) - - db.put('hello', buf, { valueEncoding: 'binary' }, function (err) { - t.error(err, 'putting has no errors') - db.get('hello', function (err, result) { - t.error(err, 'get has no error') - t.deepEqual(result, obj) - db.close(function () { - t.pass('db closed') - }) - }) - }) -}) - -test('msgpack level encoding mirror', function (t) { - t.plan(4) - - const pack = msgpack() - const db = level({ - valueEncoding: pack - }) - const obj = { my: 'obj' } - - db.put('hello', obj, function (err) { - t.error(err, 'putting has no errors') - db.get('hello', function (err, result) { - t.error(err, 'get has no error') - t.deepEqual(result, obj) - db.close(function () { - t.pass('db closed') - }) - }) - }) -}) diff --git a/node_modules/msgpack5/test/map-with-object-key.js b/node_modules/msgpack5/test/map-with-object-key.js deleted file mode 100644 index ad7c48eda8..0000000000 --- a/node_modules/msgpack5/test/map-with-object-key.js +++ /dev/null @@ -1,25 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -test('encode/decode map with multiple short buffers as both keys and values', function (t) { - const first = Buffer.from('first') - const second = Buffer.from('second') - const third = Buffer.from('third') - - const mapping = new Map().set(first, second) - .set(second, third) - .set(third, first) - - const pack = msgpack() - - const newMapping = pack.decode(pack.encode(mapping)) - - t.equals(newMapping.size, mapping.size) - t.deepEqual([...newMapping.keys()], [...mapping.keys()]) - t.deepEqual([...newMapping.values()], [...mapping.values()]) - - t.end() -}) diff --git a/node_modules/msgpack5/test/nested-containers.js b/node_modules/msgpack5/test/nested-containers.js deleted file mode 100644 
index 0ffcbd3674..0000000000 --- a/node_modules/msgpack5/test/nested-containers.js +++ /dev/null @@ -1,44 +0,0 @@ -'use strict' - -const test = require('tape').test -const msgpack = require('../') - -test('encode/decode nested containers (map/array)', function (t) { - const encoder = msgpack() - - function doEncodeDecode (value) { - return encoder.decode(encoder.encode(value)) - } - - function preserveTest (A, message = 'works') { - const B = doEncodeDecode(A) - t.deepEqual(A, B, message) - } - - preserveTest({ - hello: 'world', - digit: 111, - array: [1, 2, 3, 4, 'string', { hello: 'world' }] - }) - - preserveTest([ - [ - { - hello: 'world', - array: [1, 2, 3, 4, 'string', { hello: 'world' }] - }, - { - digit: 111 - } - ], - [ - { - hello: 'world', - digit: 111, - array: [1, 2, 3, 4, 'string', { hello: 'world' }] - } - ] - ]) - - t.end() -}) diff --git a/node_modules/msgpack5/test/null.js b/node_modules/msgpack5/test/null.js deleted file mode 100644 index 4e4de06596..0000000000 --- a/node_modules/msgpack5/test/null.js +++ /dev/null @@ -1,16 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -test('encode/decode null', function (t) { - const encoder = msgpack() - - t.equal(encoder.encode(null)[0], 0xc0, 'encode null as 0xc0') - t.equal(encoder.encode(null).length, 1, 'encode a buffer of length 1') - t.equal(encoder.decode(Buffer.from([0xc0])), null, 'decode 0xc0 as null') - t.equal(encoder.decode(encoder.encode(null)), null, 'mirror test null') - - t.end() -}) diff --git a/node_modules/msgpack5/test/numerictypeasserts.js b/node_modules/msgpack5/test/numerictypeasserts.js deleted file mode 100644 index 098e0d1e20..0000000000 --- a/node_modules/msgpack5/test/numerictypeasserts.js +++ /dev/null @@ -1,49 +0,0 @@ -'use strict' - -const test = require('tape').test -const msgpack = require('../') - -test('custom type registeration assertions', function (t) { - const encoder = msgpack() 
- - function Type0 (value) { - this.value = value - } - - function type0Encode (value) { - return new Type0(value) - } - - function type0Decode (type0) { - return type0.value - } - - function TypeNeg (value) { - this.value = value - } - - function typeNegEncode (value) { - return new TypeNeg(value) - } - - function typeNegDecode (typeneg) { - return typeneg.value - } - - t.doesNotThrow(function () { - encoder.register(0, Type0, type0Decode, type0Encode) - }, undefined, 'A type registered at 0 should not throw.') - t.throws(function () { - encoder.register(-1, TypeNeg, typeNegEncode, typeNegDecode) - }, undefined, 'A type registered as a negative value should throw') - - const encoded = encoder.encode(new Type0('hi')) - let decoded - t.equal(encoded.readUInt8(1), 0x0, 'must use the custom type assigned') - t.doesNotThrow(function () { - decoded = encoder.decode(encoded) - }, undefined, 'decoding custom 0 type should not throw') - t.equal(decoded instanceof Type0, true, 'must decode to custom type instance') - - t.end() -}) diff --git a/node_modules/msgpack5/test/object-prototype-poisoning.js b/node_modules/msgpack5/test/object-prototype-poisoning.js deleted file mode 100644 index 641b25f81a..0000000000 --- a/node_modules/msgpack5/test/object-prototype-poisoning.js +++ /dev/null @@ -1,49 +0,0 @@ -'use strict' - -const test = require('tape').test -const msgpack = require('../') - -test('decode throws when object has forbidden __proto__ property', function (t) { - const encoder = msgpack() - - const payload = { hello: 'world' } - Object.defineProperty(payload, '__proto__', { - value: { polluted: true }, - enumerable: true - }) - - const encoded = encoder.encode(payload) - - t.throws(() => encoder.decode(encoded), /Object contains forbidden prototype property/) - t.end() -}) - -test('decode ignores forbidden __proto__ property if protoAction is "ignore"', function (t) { - const encoder = msgpack({ protoAction: 'ignore' }) - - const payload = { hello: 'world' } - 
Object.defineProperty(payload, '__proto__', { - value: { polluted: true }, - enumerable: true - }) - - const decoded = encoder.decode(encoder.encode(payload)) - - t.equal(decoded.polluted, true) - t.end() -}) - -test('decode removes forbidden __proto__ property if protoAction is "remove"', function (t) { - const encoder = msgpack({ protoAction: 'remove' }) - - const payload = { hello: 'world' } - Object.defineProperty(payload, '__proto__', { - value: { polluted: true }, - enumerable: true - }) - - const decoded = encoder.decode(encoder.encode(payload)) - - t.equal(decoded.polluted, undefined) - t.end() -}) diff --git a/node_modules/msgpack5/test/object-with-arrays.js b/node_modules/msgpack5/test/object-with-arrays.js deleted file mode 100644 index d2aa429e82..0000000000 --- a/node_modules/msgpack5/test/object-with-arrays.js +++ /dev/null @@ -1,69 +0,0 @@ -'use strict' - -const test = require('tape').test -const msgpack = require('../') -const bl = require('bl') - -function build (size) { - const array = [] - let i - - for (i = 0; i < size; i++) { - array.push(42) - } - - return array -} - -test('decoding a map with multiple big arrays', function (t) { - const map = { - first: build(0xffff + 42), - second: build(0xffff + 42) - } - const pack = msgpack() - - t.deepEqual(pack.decode(pack.encode(map)), map) - t.end() -}) - -test('decoding a map with multiple big arrays. 
First one is incomplete', function (t) { - const array = build(0xffff + 42) - const map = { - first: array, - second: build(0xffff + 42) - } - const pack = msgpack() - - let buf = pack.encode(map) - // 1 (fixmap's header 0x82) + first key's length + 1 (first array's 0xdd) - const sizePosOfFirstArray = 1 + pack.encode('first').length + 1 - buf.writeUInt32BE(array.length + 10, sizePosOfFirstArray) // set first array's size bigger than its actual size - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - pack.decode(buf) - }, pack.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) - -test('decoding a map with multiple big arrays. Second one is incomplete', function (t) { - const array = build(0xffff + 42) - const map = { - first: array, - second: build(0xffff + 42) - } - const pack = msgpack() - - let buf = pack.encode(map) - // 1 (fixmap's header 0x82) + first key-value pair's length + second key's length + 1 (second array's 0xdd) - const sizePosOfSecondArray = 1 + pack.encode('first').length + pack.encode(array).length + pack.encode('second').length + 1 - buf.writeUInt32BE(array.length + 10, sizePosOfSecondArray) // set second array's size bigger than its actual size - buf = bl().append(buf) - const origLength = buf.length - t.throws(function () { - pack.decode(buf) - }, pack.IncompleteBufferError, 'must throw IncompleteBufferError') - t.equals(buf.length, origLength, 'must not consume any byte') - t.end() -}) diff --git a/node_modules/msgpack5/test/object-with-buffers.js b/node_modules/msgpack5/test/object-with-buffers.js deleted file mode 100644 index d1a3a6088b..0000000000 --- a/node_modules/msgpack5/test/object-with-buffers.js +++ /dev/null @@ -1,33 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const fs = require('fs') -const p = require('path') -const msgpack = require('../') - 
-test('encode/decode map with multiple short buffers', function (t) { - const map = { - first: Buffer.from('first'), - second: Buffer.from('second'), - third: Buffer.from('third') - } - const pack = msgpack() - - t.deepEqual(pack.decode(pack.encode(map)), map) - t.end() -}) - -if (process.title !== 'browser') { - test('encode/decode map with all files in this directory', function (t) { - const files = fs.readdirSync(__dirname) - const map = files.reduce(function (acc, file) { - acc[file] = fs.readFileSync(p.join(__dirname, file)) - return acc - }, {}) - const pack = msgpack() - - t.deepEqual(pack.decode(pack.encode(map)), map) - t.end() - }) -} diff --git a/node_modules/msgpack5/test/object-with-many-keys.js b/node_modules/msgpack5/test/object-with-many-keys.js deleted file mode 100644 index d7dc05fc38..0000000000 --- a/node_modules/msgpack5/test/object-with-many-keys.js +++ /dev/null @@ -1,71 +0,0 @@ -'use strict' - -const test = require('tape').test -const msgpack = require('../') - -test('encode/decode map with 10 keys', function (t) { - const map = {} - - for (let i = 0; i < 10; i++) { - map[i] = i - } - - const pack = msgpack() - - const encoded = pack.encode(map) - - // map16 byte - t.equal(encoded[0], 0x8A) - - t.deepEqual(pack.decode(encoded), map) - t.end() -}) - -test('encode/decode map with 10000 keys', function (t) { - const map = {} - - for (let i = 0; i < 10000; i++) { - map[i] = i - } - - const pack = msgpack() - - const encoded = pack.encode(map) - - // map16 byte - t.equal(encoded[0], 0xde) - - t.deepEqual(pack.decode(encoded), map) - t.end() -}) - -test('encode/decode map with 100000 keys', function (t) { - const map = {} - - for (let i = 0; i < 100000; i++) { - map[i] = i - } - - const pack = msgpack() - - const encoded = pack.encode(map) - - // map32 byte - t.equal(encoded[0], 0xdf) - - t.deepEqual(pack.decode(encoded), map) - t.end() -}) - -test('encode/decode map with 1000000 keys', function (t) { - const map = {} - - for (let i = 0; i < 
1000000; i++) { - map[i] = i - } - - const pack = msgpack() - - t.deepEqual(pack.decode(pack.encode(map)), map) - t.end() -}) diff --git a/node_modules/msgpack5/test/object-with-strings.js b/node_modules/msgpack5/test/object-with-strings.js deleted file mode 100644 index e17f3fdbd0..0000000000 --- a/node_modules/msgpack5/test/object-with-strings.js +++ /dev/null @@ -1,32 +0,0 @@ -'use strict' - -const test = require('tape').test -const fs = require('fs') -const p = require('path') -const msgpack = require('../') - -test('encode/decode map with multiple short buffers', function (t) { - const map = { - first: 'first', - second: 'second', - third: 'third' - } - const pack = msgpack() - - t.deepEqual(pack.decode(pack.encode(map)), map) - t.end() -}) - -if (process.title !== 'browser') { - test('encode/decode map with all files in this directory', function (t) { - const files = fs.readdirSync(__dirname) - const map = files.reduce(function (acc, file) { - acc[file] = fs.readFileSync(p.join(__dirname, file)).toString('utf8') - return acc - }, {}) - const pack = msgpack() - - t.deepEqual(pack.decode(pack.encode(map)), map) - t.end() - }) -} diff --git a/node_modules/msgpack5/test/prefer-map.js b/node_modules/msgpack5/test/prefer-map.js deleted file mode 100644 index d975687981..0000000000 --- a/node_modules/msgpack5/test/prefer-map.js +++ /dev/null @@ -1,71 +0,0 @@ -const test = require('tape').test -const msgpack = require('../') - -const map = new Map() - .set('a', 1) - .set('1', 'hello') - .set('world', 2) - .set('0', 'again') - .set('01', null) - -test('round-trip string-keyed Maps', function (t) { - const encoder = msgpack({ preferMap: true }) - - for (const input of [new Map(), map]) { - const result = encoder.decode(encoder.encode(input)) - t.assert(result instanceof Map) - t.deepEqual(result, input) - } - - t.end() -}) - -test('preserve iteration order of string-keyed Maps', function (t) { - const encoder = msgpack({ preferMap: true }) - const decoded = 
encoder.decode(encoder.encode(map)) - - t.deepEqual([...decoded.keys()], [...map.keys()]) - - t.end() -}) - -test('user can still encode objects as ext maps', function (t) { - const encoder = msgpack({ preferMap: true }) - const tag = 0x42 - - // Polyfill Object.fromEntries for node 10 - const fromEntries = Object.fromEntries || (iterable => { - const object = {} - for (const [property, value] of iterable) { - object[property] = value - } - return object - }) - - encoder.register( - tag, - Object, - obj => encoder.encode(new Map(Object.entries(obj))), - data => fromEntries(encoder.decode(data)) - ) - - const inputs = [ - {}, - new Map(), - { foo: 'bar' }, - new Map().set('foo', 'bar'), - new Map().set(null, null), - { 0: 'baz' }, - ['baz'] - ] - - for (const input of inputs) { - const buf = encoder.encode(input) - const result = encoder.decode(buf) - - t.deepEqual(result, input) - t.equal(Object.getPrototypeOf(result), Object.getPrototypeOf(input)) - } - - t.end() -}) diff --git a/node_modules/msgpack5/test/sparse-arrays.js b/node_modules/msgpack5/test/sparse-arrays.js deleted file mode 100644 index c0c1d1425c..0000000000 --- a/node_modules/msgpack5/test/sparse-arrays.js +++ /dev/null @@ -1,18 +0,0 @@ -'use strict' - -const test = require('tape').test -const msgpack = require('../') - -test('throws when encoding sparse arrays', function (t) { - const encoder = msgpack() - - t.deepEqual(encoder.decode(encoder.encode(new Array(0))), []) - t.throws(() => encoder.encode(new Array(1)), /Sparse arrays/) - t.throws(() => encoder.encode(new Array(100)), /Sparse arrays/) - - const sparse = [1, 2, 3, 4] - delete sparse[3] - t.throws(() => encoder.encode(sparse), /Sparse arrays/) - - t.end() -}) diff --git a/node_modules/msgpack5/test/streams.js b/node_modules/msgpack5/test/streams.js deleted file mode 100644 index f22c9bceb1..0000000000 --- a/node_modules/msgpack5/test/streams.js +++ /dev/null @@ -1,261 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer 
-const test = require('tape').test -const msgpack = require('../') -const BufferList = require('bl') - -test('must send an object through', function (t) { - t.plan(1) - - const pack = msgpack() - const encoder = pack.encoder() - const decoder = pack.decoder() - const data = { hello: 'world' } - - encoder.pipe(decoder) - - decoder.on('data', function (chunk) { - t.deepEqual(chunk, data) - }) - - encoder.end(data) -}) - -test('must send three objects through', function (t) { - const pack = msgpack() - const encoder = pack.encoder() - const decoder = pack.decoder() - const data = [ - { hello: 1 }, - { hello: 2 }, - { hello: 3 } - ] - - t.plan(data.length) - - decoder.on('data', function (chunk) { - t.deepEqual(chunk, data.shift()) - }) - - data.forEach(encoder.write.bind(encoder)) - - encoder.pipe(decoder) - - encoder.end() -}) - -test('end-to-end', function (t) { - const pack = msgpack() - const encoder = pack.encoder() - const decoder = pack.decoder() - const data = [ - { hello: 1 }, - { hello: 2 }, - { hello: 3 } - ] - - t.plan(data.length) - - decoder.on('data', function (chunk) { - t.deepEqual(chunk, data.shift()) - }) - - data.forEach(encoder.write.bind(encoder)) - - encoder.end() - - encoder.pipe(decoder) -}) - -test('encoding error wrapped', function (t) { - t.plan(1) - - const pack = msgpack() - const encoder = pack.encoder() - const data = new MyType() - - function MyType () { - } - - function mytypeEncode () { - throw new Error('muahha') - } - - function mytypeDecode () { - } - - pack.register(0x42, MyType, mytypeEncode, mytypeDecode) - - encoder.on('error', function (err) { - t.equal(err.message, 'muahha') - }) - - encoder.end(data) -}) - -test('decoding error wrapped', function (t) { - t.plan(1) - - const pack = msgpack() - const encoder = pack.encoder() - const decoder = pack.decoder() - const data = new MyType() - - function MyType () { - } - - function mytypeEncode () { - return Buffer.allocUnsafe(0) - } - - function mytypeDecode () { - throw new 
Error('muahha') - } - - pack.register(0x42, MyType, mytypeEncode, mytypeDecode) - - decoder.on('error', function (err) { - t.equal(err.message, 'muahha') - }) - - encoder.end(data) - - encoder.pipe(decoder) -}) - -test('decoding error wrapped', function (t) { - t.plan(1) - - const pack = msgpack() - const encoder = pack.encoder({ header: false }) - const decoder = pack.decoder({ header: false }) - const data = new MyType() - - function MyType () { - } - - function mytypeEncode () { - return Buffer.allocUnsafe(0) - } - - function mytypeDecode () { - throw new Error('muahha') - } - - pack.register(0x42, MyType, mytypeEncode, mytypeDecode) - - decoder.on('error', function (err) { - t.equal(err.message, 'muahha') - }) - - encoder.end(data) - - encoder.pipe(decoder) -}) - -test('concatenated buffers work', function (t) { - const pack = msgpack() - const encoder = pack.encoder() - const decoder = pack.decoder() - const data = [ - { hello: 1 }, - { hello: 2 }, - { hello: 3 } - ] - - t.plan(data.length) - - const bl = new BufferList() - encoder.on('data', bl.append.bind(bl)) - - data.forEach(encoder.write.bind(encoder)) - - decoder.on('data', function (d) { - t.deepEqual(d, data.shift()) - }) - - encoder.once('finish', function () { - const buf = bl.slice() - decoder.write(buf) - }) - - encoder.end() -}) - -test('nil processing works', function (t) { - t.plan(3) - - const pack = msgpack() - const decoder = pack.decoder({ wrap: true }) - let decodedItemIndex = 0 - - decoder.on('data', function (chunk) { - decodedItemIndex++ - t.deepEqual(chunk.value, decodedItemIndex === 1 ? 
null : false) - }) - - decoder.on('end', function () { - t.equal(decodedItemIndex, 2) - }) - - decoder.write(Buffer.from([0xc0, 0xc2])) - decoder.end() -}) - -test('encoder wrap mode works', function (t) { - t.plan(1) - - const pack = msgpack() - const encoder = pack.encoder({ wrap: true }) - const decoder = pack.decoder() - const data = { hello: 'world' } - const wrappedData = { value: data } - - encoder.pipe(decoder) - - decoder.on('data', function (chunk) { - t.deepEqual(chunk, data) - }) - - encoder.end(wrappedData) -}) - -test('encoder/decoder wrap mode must send an object through', function (t) { - t.plan(1) - - const pack = msgpack() - const encoder = pack.encoder({ wrap: true }) - const decoder = pack.decoder({ wrap: true }) - const data = { value: { hello: 'world' } } - - encoder.pipe(decoder) - - decoder.on('data', function (chunk) { - t.deepEqual(chunk, data) - }) - - encoder.end(data) -}) - -test('encoder pack null', function (t) { - t.plan(2) - const pack = msgpack() - const encoder = pack.encoder({ wrap: true }) - const decoder = pack.decoder({ wrap: true }) - - encoder.pipe(decoder) - - let decodedItemIndex = 0 - decoder.on('data', function (chunk) { - decodedItemIndex++ - t.deepEqual(chunk.value, null) - }) - - decoder.on('end', function () { - t.equal(decodedItemIndex, 1) - }) - - encoder.write({ value: null }) - encoder.end() -}) diff --git a/node_modules/msgpack5/test/timestamps.js b/node_modules/msgpack5/test/timestamps.js deleted file mode 100644 index aae5b5d366..0000000000 --- a/node_modules/msgpack5/test/timestamps.js +++ /dev/null @@ -1,116 +0,0 @@ -'use strict' - -const Buffer = require('safe-buffer').Buffer -const test = require('tape').test -const msgpack = require('../') - -test('timestamp disabling', function (t) { - const encoder = msgpack({ disableTimestampEncoding: true }) - const timestamps = [ - [new Date('2018-01-02T03:04:05.000000000Z'), [0x80]] - ] - - timestamps.forEach(function (testcase) { - const item = testcase[0] - const 
expected = testcase[1] - - t.test('encoding ' + item.toString(), function (t) { - const buf = encoder.encode(item).slice() - t.equal(buf.length, expected.length, 'must have ' + expected.length + ' bytes') - t.equal(buf[0], expected[0], 'Should return 0x80 ({}) by default') - t.end() - }) - }) - - t.end() -}) -test('encoding/decoding timestamp 64', function (t) { - const encoder = msgpack() - const timestamps = [ - [new Date('2018-01-02T03:04:05.000000000Z'), [0xd6, 0xff, 0x5a, 0x4a, 0xf6, 0xa5]], - [new Date('2038-01-19T03:14:08.000000000Z'), [0xd6, 0xff, 0x80, 0x00, 0x00, 0x00]], - [new Date('2038-01-19T03:14:07.999000000Z'), [0xd7, 0xff, 0xee, 0x2E, 0x1F, 0x00, 0x7f, 0xff, 0xff, 0xff]], - [new Date('2106-02-07T06:28:16.000000000Z'), [0xd7, 0xff, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00]], - [new Date('2018-01-02T03:04:05.678000000Z'), [0xd7, 0xff, 0xa1, 0xa5, 0xd6, 0x00, 0x5a, 0x4a, 0xf6, 0xa5]] - ] - - timestamps.forEach(function (testcase) { - const item = testcase[0] - const expected = testcase[1] - - t.test('encoding ' + item.toString(), function (t) { - const buf = encoder.encode(item).slice() - t.equal(buf.length, expected.length, 'must have ' + expected.length + ' bytes') - switch (expected.length) { - case 6: - t.equal(buf[0], 0xd6, 'must have the correct header') - break - case 10: - t.equal(buf[0], 0xd7, 'must have the correct header') - break - } - t.equal(buf.readInt8(1), -1, 'must have the correct type') // Signed - for (let j = 2; j < buf.length; j++) { - t.equal(buf[j], expected[j], 'byte ' + (j - 2) + ' match') - } - t.end() - }) - - t.test('decoding ' + item, function (t) { - const buf = Buffer.from(expected) - const dt = encoder.decode(buf) - t.equal(dt.toString(), item.toString(), 'must decode correctly\nDecoded:\t' + dt * 1 + '\nExp:\t' + item * 1) - t.end() - }) - - t.test('mirror test ' + item, function (t) { - t.equal(encoder.decode(encoder.encode(item)) * 1, item * 1, 'must stay the same') - t.end() - }) - }) - - t.end() -}) - 
-test('encoding/decoding timestamp 96', function (t) { - const encoder = msgpack() - const timestamps = [ - [new Date('0001-01-02T03:04:05.000000000Z'), [0xc7, 0x0c, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xf1, 0x88, 0x6f, 0x85, 0xa5]], - [new Date('1251-01-19T03:14:08.000000000Z'), [0xc7, 0x0c, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xfa, 0xb7, 0xb2, 0xdf, 0x00]], - [new Date('1526-01-19T03:14:07.999000000Z'), [0xc7, 0x0c, 0xff, 0x3b, 0x8b, 0x87, 0xc0, 0xff, 0xff, 0xff, 0xfc, 0xbc, 0xf4, 0x34, 0x7f]], - [new Date('1920-02-07T06:28:16.000000000Z'), [0xc7, 0x0c, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xa2, 0x23, 0xf0, 0x00]], - [new Date('1969-01-02T03:04:05.678000000Z'), [0xc7, 0x0c, 0xff, 0x28, 0x69, 0x75, 0x80, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x20, 0x49, 0x25]], - [new Date('2514-05-30T02:04:05.678000000Z'), [0xc7, 0x0c, 0xff, 0x28, 0x69, 0x75, 0x80, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x02, 0x95]] - ] - - timestamps.forEach(function (testcase) { - const item = testcase[0] - const expected = testcase[1] - - t.test('encoding ' + item.toString(), function (t) { - const buf = encoder.encode(item).slice() - t.equal(buf.length, expected.length, 'must have ' + expected.length + ' bytes') - t.equal(buf[0], 0xc7, 'must have the correct header') - t.equal(buf.readInt8(1), 12, 'must have the correct size') - t.equal(buf.readInt8(2), -1, 'must have the correct type') // Signed - for (let j = 3; j < buf.length; j++) { - t.equal(buf[j], expected[j], 'byte ' + (j - 3) + ' match') - } - t.end() - }) - - t.test('decoding ' + item, function (t) { - const buf = Buffer.from(expected) - const dt = encoder.decode(buf) - t.equal(dt.toString(), item.toString(), 'must decode correctly\nDecoded:\t' + dt * 1 + '\nExp:\t' + item * 1) - t.end() - }) - - t.test('mirror test ' + item, function (t) { - t.equal(encoder.decode(encoder.encode(item)) * 1, item * 1, 'must stay the same') - t.end() - }) - }) - - t.end() -}) diff --git 
a/node_modules/readable-stream/CONTRIBUTING.md b/node_modules/readable-stream/CONTRIBUTING.md deleted file mode 100644 index f478d58dca..0000000000 --- a/node_modules/readable-stream/CONTRIBUTING.md +++ /dev/null @@ -1,38 +0,0 @@ -# Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -* (a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -* (b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -* (c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -* (d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. - -## Moderation Policy - -The [Node.js Moderation Policy] applies to this WG. - -## Code of Conduct - -The [Node.js Code of Conduct][] applies to this WG. 
- -[Node.js Code of Conduct]: -https://github.com/nodejs/node/blob/master/CODE_OF_CONDUCT.md -[Node.js Moderation Policy]: -https://github.com/nodejs/TSC/blob/master/Moderation-Policy.md diff --git a/node_modules/readable-stream/GOVERNANCE.md b/node_modules/readable-stream/GOVERNANCE.md deleted file mode 100644 index 16ffb93f24..0000000000 --- a/node_modules/readable-stream/GOVERNANCE.md +++ /dev/null @@ -1,136 +0,0 @@ -### Streams Working Group - -The Node.js Streams is jointly governed by a Working Group -(WG) -that is responsible for high-level guidance of the project. - -The WG has final authority over this project including: - -* Technical direction -* Project governance and process (including this policy) -* Contribution policy -* GitHub repository hosting -* Conduct guidelines -* Maintaining the list of additional Collaborators - -For the current list of WG members, see the project -[README.md](./README.md#current-project-team-members). - -### Collaborators - -The readable-stream GitHub repository is -maintained by the WG and additional Collaborators who are added by the -WG on an ongoing basis. - -Individuals making significant and valuable contributions are made -Collaborators and given commit-access to the project. These -individuals are identified by the WG and their addition as -Collaborators is discussed during the WG meeting. - -_Note:_ If you make a significant contribution and are not considered -for commit-access log an issue or contact a WG member directly and it -will be brought up in the next WG meeting. - -Modifications of the contents of the readable-stream repository are -made on -a collaborative basis. Anybody with a GitHub account may propose a -modification via pull request and it will be considered by the project -Collaborators. All pull requests must be reviewed and accepted by a -Collaborator with sufficient expertise who is able to take full -responsibility for the change. 
In the case of pull requests proposed -by an existing Collaborator, an additional Collaborator is required -for sign-off. Consensus should be sought if additional Collaborators -participate and there is disagreement around a particular -modification. See _Consensus Seeking Process_ below for further detail -on the consensus model used for governance. - -Collaborators may opt to elevate significant or controversial -modifications, or modifications that have not found consensus to the -WG for discussion by assigning the ***WG-agenda*** tag to a pull -request or issue. The WG should serve as the final arbiter where -required. - -For the current list of Collaborators, see the project -[README.md](./README.md#members). - -### WG Membership - -WG seats are not time-limited. There is no fixed size of the WG. -However, the expected target is between 6 and 12, to ensure adequate -coverage of important areas of expertise, balanced with the ability to -make decisions efficiently. - -There is no specific set of requirements or qualifications for WG -membership beyond these rules. - -The WG may add additional members to the WG by unanimous consensus. - -A WG member may be removed from the WG by voluntary resignation, or by -unanimous consensus of all other WG members. - -Changes to WG membership should be posted in the agenda, and may be -suggested as any other agenda item (see "WG Meetings" below). - -If an addition or removal is proposed during a meeting, and the full -WG is not in attendance to participate, then the addition or removal -is added to the agenda for the subsequent meeting. This is to ensure -that all members are given the opportunity to participate in all -membership decisions. If a WG member is unable to attend a meeting -where a planned membership decision is being made, then their consent -is assumed. - -No more than 1/3 of the WG members may be affiliated with the same -employer. 
If removal or resignation of a WG member, or a change of -employment by a WG member, creates a situation where more than 1/3 of -the WG membership shares an employer, then the situation must be -immediately remedied by the resignation or removal of one or more WG -members affiliated with the over-represented employer(s). - -### WG Meetings - -The WG meets occasionally on a Google Hangout On Air. A designated moderator -approved by the WG runs the meeting. Each meeting should be -published to YouTube. - -Items are added to the WG agenda that are considered contentious or -are modifications of governance, contribution policy, WG membership, -or release process. - -The intention of the agenda is not to approve or review all patches; -that should happen continuously on GitHub and be handled by the larger -group of Collaborators. - -Any community member or contributor can ask that something be added to -the next meeting's agenda by logging a GitHub Issue. Any Collaborator, -WG member or the moderator can add the item to the agenda by adding -the ***WG-agenda*** tag to the issue. - -Prior to each WG meeting the moderator will share the Agenda with -members of the WG. WG members can add any items they like to the -agenda at the beginning of each meeting. The moderator and the WG -cannot veto or remove items. - -The WG may invite persons or representatives from certain projects to -participate in a non-voting capacity. - -The moderator is responsible for summarizing the discussion of each -agenda item and sends it as a pull request after the meeting. - -### Consensus Seeking Process - -The WG follows a -[Consensus -Seeking](http://en.wikipedia.org/wiki/Consensus-seeking_decision-making) -decision-making model. - -When an agenda item has appeared to reach a consensus the moderator -will ask "Does anyone object?" as a final call for dissent from the -consensus. 
- -If an agenda item cannot reach a consensus a WG member can call for -either a closing vote or a vote to table the issue to the next -meeting. The call for a vote must be seconded by a majority of the WG -or else the discussion will continue. Simple majority wins. - -Note that changes to WG membership require a majority consensus. See -"WG Membership" above. diff --git a/node_modules/readable-stream/LICENSE b/node_modules/readable-stream/LICENSE deleted file mode 100644 index 2873b3b2e5..0000000000 --- a/node_modules/readable-stream/LICENSE +++ /dev/null @@ -1,47 +0,0 @@ -Node.js is licensed for use as follows: - -""" -Copyright Node.js contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" - -This license applies to parts of Node.js originating from the -https://github.com/joyent/node repository: - -""" -Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" diff --git a/node_modules/readable-stream/README.md b/node_modules/readable-stream/README.md deleted file mode 100644 index 19117c1a05..0000000000 --- a/node_modules/readable-stream/README.md +++ /dev/null @@ -1,106 +0,0 @@ -# readable-stream - -***Node.js core streams for userland*** [![Build Status](https://travis-ci.com/nodejs/readable-stream.svg?branch=master)](https://travis-ci.com/nodejs/readable-stream) - - -[![NPM](https://nodei.co/npm/readable-stream.png?downloads=true&downloadRank=true)](https://nodei.co/npm/readable-stream/) -[![NPM](https://nodei.co/npm-dl/readable-stream.png?&months=6&height=3)](https://nodei.co/npm/readable-stream/) - - -[![Sauce Test Status](https://saucelabs.com/browser-matrix/readabe-stream.svg)](https://saucelabs.com/u/readabe-stream) - -```bash -npm install --save readable-stream -``` - -This package is a mirror of the streams implementations in Node.js. 
- -Full documentation may be found on the [Node.js website](https://nodejs.org/dist/v10.18.1/docs/api/stream.html). - -If you want to guarantee a stable streams base, regardless of what version of -Node you, or the users of your libraries are using, use **readable-stream** *only* and avoid the *"stream"* module in Node-core, for background see [this blogpost](http://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html). - -As of version 2.0.0 **readable-stream** uses semantic versioning. - -## Version 3.x.x - -v3.x.x of `readable-stream` is a cut from Node 10. This version supports Node 6, 8, and 10, as well as evergreen browsers, IE 11 and latest Safari. The breaking changes introduced by v3 are composed by the combined breaking changes in [Node v9](https://nodejs.org/en/blog/release/v9.0.0/) and [Node v10](https://nodejs.org/en/blog/release/v10.0.0/), as follows: - -1. Error codes: https://github.com/nodejs/node/pull/13310, - https://github.com/nodejs/node/pull/13291, - https://github.com/nodejs/node/pull/16589, - https://github.com/nodejs/node/pull/15042, - https://github.com/nodejs/node/pull/15665, - https://github.com/nodejs/readable-stream/pull/344 -2. 'readable' have precedence over flowing - https://github.com/nodejs/node/pull/18994 -3. make virtual methods errors consistent - https://github.com/nodejs/node/pull/18813 -4. updated streams error handling - https://github.com/nodejs/node/pull/18438 -5. writable.end should return this. - https://github.com/nodejs/node/pull/18780 -6. readable continues to read when push('') - https://github.com/nodejs/node/pull/18211 -7. add custom inspect to BufferList - https://github.com/nodejs/node/pull/17907 -8. always defer 'readable' with nextTick - https://github.com/nodejs/node/pull/17979 - -## Version 2.x.x -v2.x.x of `readable-stream` is a cut of the stream module from Node 8 (there have been no semver-major changes from Node 4 to 8). 
This version supports all Node.js versions from 0.8, as well as evergreen browsers and IE 10 & 11. - -### Big Thanks - -Cross-browser Testing Platform and Open Source <3 Provided by [Sauce Labs][sauce] - -# Usage - -You can swap your `require('stream')` with `require('readable-stream')` -without any changes, if you are just using one of the main classes and -functions. - -```js -const { - Readable, - Writable, - Transform, - Duplex, - pipeline, - finished -} = require('readable-stream') -```` - -Note that `require('stream')` will return `Stream`, while -`require('readable-stream')` will return `Readable`. We discourage using -whatever is exported directly, but rather use one of the properties as -shown in the example above. - -# Streams Working Group - -`readable-stream` is maintained by the Streams Working Group, which -oversees the development and maintenance of the Streams API within -Node.js. The responsibilities of the Streams Working Group include: - -* Addressing stream issues on the Node.js issue tracker. -* Authoring and editing stream documentation within the Node.js project. -* Reviewing changes to stream subclasses within the Node.js project. -* Redirecting changes to streams from the Node.js project to this - project. -* Assisting in the implementation of stream providers within Node.js. -* Recommending versions of `readable-stream` to be included in Node.js. -* Messaging about the future of streams to give the community advance - notice of changes. 
- - -## Team Members - -* **Calvin Metcalf** ([@calvinmetcalf](https://github.com/calvinmetcalf)) <calvin.metcalf@gmail.com> - - Release GPG key: F3EF5F62A87FC27A22E643F714CE4FF5015AA242 -* **Mathias Buus** ([@mafintosh](https://github.com/mafintosh)) <mathiasbuus@gmail.com> -* **Matteo Collina** ([@mcollina](https://github.com/mcollina)) <matteo.collina@gmail.com> - - Release GPG key: 3ABC01543F22DD2239285CDD818674489FBC127E -* **Irina Shestak** ([@lrlna](https://github.com/lrlna)) <shestak.irina@gmail.com> -* **Yoshua Wyuts** ([@yoshuawuyts](https://github.com/yoshuawuyts)) <yoshuawuyts@gmail.com> - -[sauce]: https://saucelabs.com diff --git a/node_modules/readable-stream/errors-browser.js b/node_modules/readable-stream/errors-browser.js deleted file mode 100644 index fb8e73e189..0000000000 --- a/node_modules/readable-stream/errors-browser.js +++ /dev/null @@ -1,127 +0,0 @@ -'use strict'; - -function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } - -var codes = {}; - -function createErrorType(code, message, Base) { - if (!Base) { - Base = Error; - } - - function getMessage(arg1, arg2, arg3) { - if (typeof message === 'string') { - return message; - } else { - return message(arg1, arg2, arg3); - } - } - - var NodeError = - /*#__PURE__*/ - function (_Base) { - _inheritsLoose(NodeError, _Base); - - function NodeError(arg1, arg2, arg3) { - return _Base.call(this, getMessage(arg1, arg2, arg3)) || this; - } - - return NodeError; - }(Base); - - NodeError.prototype.name = Base.name; - NodeError.prototype.code = code; - codes[code] = NodeError; -} // https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js - - -function oneOf(expected, thing) { - if (Array.isArray(expected)) { - var len = expected.length; - expected = expected.map(function (i) { - return String(i); - }); - - if (len > 2) { - return "one of ".concat(thing, " 
").concat(expected.slice(0, len - 1).join(', '), ", or ") + expected[len - 1]; - } else if (len === 2) { - return "one of ".concat(thing, " ").concat(expected[0], " or ").concat(expected[1]); - } else { - return "of ".concat(thing, " ").concat(expected[0]); - } - } else { - return "of ".concat(thing, " ").concat(String(expected)); - } -} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith - - -function startsWith(str, search, pos) { - return str.substr(!pos || pos < 0 ? 0 : +pos, search.length) === search; -} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith - - -function endsWith(str, search, this_len) { - if (this_len === undefined || this_len > str.length) { - this_len = str.length; - } - - return str.substring(this_len - search.length, this_len) === search; -} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes - - -function includes(str, search, start) { - if (typeof start !== 'number') { - start = 0; - } - - if (start + search.length > str.length) { - return false; - } else { - return str.indexOf(search, start) !== -1; - } -} - -createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) { - return 'The value "' + value + '" is invalid for option "' + name + '"'; -}, TypeError); -createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) { - // determiner: 'must be' or 'must not be' - var determiner; - - if (typeof expected === 'string' && startsWith(expected, 'not ')) { - determiner = 'must not be'; - expected = expected.replace(/^not /, ''); - } else { - determiner = 'must be'; - } - - var msg; - - if (endsWith(name, ' argument')) { - // For cases like 'first argument' - msg = "The ".concat(name, " ").concat(determiner, " ").concat(oneOf(expected, 'type')); - } else { - var type = includes(name, '.') ? 
'property' : 'argument'; - msg = "The \"".concat(name, "\" ").concat(type, " ").concat(determiner, " ").concat(oneOf(expected, 'type')); - } - - msg += ". Received type ".concat(typeof actual); - return msg; -}, TypeError); -createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF'); -createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) { - return 'The ' + name + ' method is not implemented'; -}); -createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close'); -createErrorType('ERR_STREAM_DESTROYED', function (name) { - return 'Cannot call ' + name + ' after a stream was destroyed'; -}); -createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times'); -createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable'); -createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end'); -createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError); -createErrorType('ERR_UNKNOWN_ENCODING', function (arg) { - return 'Unknown encoding: ' + arg; -}, TypeError); -createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event'); -module.exports.codes = codes; diff --git a/node_modules/readable-stream/errors.js b/node_modules/readable-stream/errors.js deleted file mode 100644 index 8471526d6e..0000000000 --- a/node_modules/readable-stream/errors.js +++ /dev/null @@ -1,116 +0,0 @@ -'use strict'; - -const codes = {}; - -function createErrorType(code, message, Base) { - if (!Base) { - Base = Error - } - - function getMessage (arg1, arg2, arg3) { - if (typeof message === 'string') { - return message - } else { - return message(arg1, arg2, arg3) - } - } - - class NodeError extends Base { - constructor (arg1, arg2, arg3) { - super(getMessage(arg1, arg2, arg3)); - } - } - - NodeError.prototype.name = Base.name; - NodeError.prototype.code = code; - - codes[code] = NodeError; -} - -// https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js -function oneOf(expected, thing) { - 
if (Array.isArray(expected)) { - const len = expected.length; - expected = expected.map((i) => String(i)); - if (len > 2) { - return `one of ${thing} ${expected.slice(0, len - 1).join(', ')}, or ` + - expected[len - 1]; - } else if (len === 2) { - return `one of ${thing} ${expected[0]} or ${expected[1]}`; - } else { - return `of ${thing} ${expected[0]}`; - } - } else { - return `of ${thing} ${String(expected)}`; - } -} - -// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith -function startsWith(str, search, pos) { - return str.substr(!pos || pos < 0 ? 0 : +pos, search.length) === search; -} - -// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith -function endsWith(str, search, this_len) { - if (this_len === undefined || this_len > str.length) { - this_len = str.length; - } - return str.substring(this_len - search.length, this_len) === search; -} - -// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes -function includes(str, search, start) { - if (typeof start !== 'number') { - start = 0; - } - - if (start + search.length > str.length) { - return false; - } else { - return str.indexOf(search, start) !== -1; - } -} - -createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) { - return 'The value "' + value + '" is invalid for option "' + name + '"' -}, TypeError); -createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) { - // determiner: 'must be' or 'must not be' - let determiner; - if (typeof expected === 'string' && startsWith(expected, 'not ')) { - determiner = 'must not be'; - expected = expected.replace(/^not /, ''); - } else { - determiner = 'must be'; - } - - let msg; - if (endsWith(name, ' argument')) { - // For cases like 'first argument' - msg = `The ${name} ${determiner} ${oneOf(expected, 'type')}`; - } else { - const type = includes(name, '.') ? 
'property' : 'argument'; - msg = `The "${name}" ${type} ${determiner} ${oneOf(expected, 'type')}`; - } - - msg += `. Received type ${typeof actual}`; - return msg; -}, TypeError); -createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF'); -createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) { - return 'The ' + name + ' method is not implemented' -}); -createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close'); -createErrorType('ERR_STREAM_DESTROYED', function (name) { - return 'Cannot call ' + name + ' after a stream was destroyed'; -}); -createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times'); -createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable'); -createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end'); -createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError); -createErrorType('ERR_UNKNOWN_ENCODING', function (arg) { - return 'Unknown encoding: ' + arg -}, TypeError); -createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event'); - -module.exports.codes = codes; diff --git a/node_modules/readable-stream/experimentalWarning.js b/node_modules/readable-stream/experimentalWarning.js deleted file mode 100644 index 78e841495b..0000000000 --- a/node_modules/readable-stream/experimentalWarning.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict' - -var experimentalWarnings = new Set(); - -function emitExperimentalWarning(feature) { - if (experimentalWarnings.has(feature)) return; - var msg = feature + ' is an experimental feature. This feature could ' + - 'change at any time'; - experimentalWarnings.add(feature); - process.emitWarning(msg, 'ExperimentalWarning'); -} - -function noop() {} - -module.exports.emitExperimentalWarning = process.emitWarning - ? 
emitExperimentalWarning - : noop; diff --git a/node_modules/readable-stream/lib/_stream_duplex.js b/node_modules/readable-stream/lib/_stream_duplex.js deleted file mode 100644 index 19abfa604d..0000000000 --- a/node_modules/readable-stream/lib/_stream_duplex.js +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. - -// a duplex stream is just a stream that is both readable and writable. -// Since JS doesn't have multiple prototypal inheritance, this class -// prototypally inherits from Readable, and then parasitically from -// Writable. 
- -'use strict'; - -/**/ -var objectKeys = Object.keys || function (obj) { - var keys = []; - for (var key in obj) keys.push(key); - return keys; -}; -/**/ - -module.exports = Duplex; -var Readable = require('./_stream_readable'); -var Writable = require('./_stream_writable'); -require('inherits')(Duplex, Readable); -{ - // Allow the keys array to be GC'ed. - var keys = objectKeys(Writable.prototype); - for (var v = 0; v < keys.length; v++) { - var method = keys[v]; - if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method]; - } -} -function Duplex(options) { - if (!(this instanceof Duplex)) return new Duplex(options); - Readable.call(this, options); - Writable.call(this, options); - this.allowHalfOpen = true; - if (options) { - if (options.readable === false) this.readable = false; - if (options.writable === false) this.writable = false; - if (options.allowHalfOpen === false) { - this.allowHalfOpen = false; - this.once('end', onend); - } - } -} -Object.defineProperty(Duplex.prototype, 'writableHighWaterMark', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - return this._writableState.highWaterMark; - } -}); -Object.defineProperty(Duplex.prototype, 'writableBuffer', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - return this._writableState && this._writableState.getBuffer(); - } -}); -Object.defineProperty(Duplex.prototype, 'writableLength', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - return this._writableState.length; - } -}); - -// the no-half-open enforcer -function onend() { - // If the writable side ended, then we're ok. 
- if (this._writableState.ended) return; - - // no more data can be written. - // But allow more writes to happen in this tick. - process.nextTick(onEndNT, this); -} -function onEndNT(self) { - self.end(); -} -Object.defineProperty(Duplex.prototype, 'destroyed', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - if (this._readableState === undefined || this._writableState === undefined) { - return false; - } - return this._readableState.destroyed && this._writableState.destroyed; - }, - set: function set(value) { - // we ignore the value if the stream - // has not been initialized yet - if (this._readableState === undefined || this._writableState === undefined) { - return; - } - - // backward compatibility, the user is explicitly - // managing destroyed - this._readableState.destroyed = value; - this._writableState.destroyed = value; - } -}); \ No newline at end of file diff --git a/node_modules/readable-stream/lib/_stream_passthrough.js b/node_modules/readable-stream/lib/_stream_passthrough.js deleted file mode 100644 index 24a6bdde29..0000000000 --- a/node_modules/readable-stream/lib/_stream_passthrough.js +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. - -// a passthrough stream. -// basically just the most minimal sort of Transform stream. -// Every written chunk gets output as-is. - -'use strict'; - -module.exports = PassThrough; -var Transform = require('./_stream_transform'); -require('inherits')(PassThrough, Transform); -function PassThrough(options) { - if (!(this instanceof PassThrough)) return new PassThrough(options); - Transform.call(this, options); -} -PassThrough.prototype._transform = function (chunk, encoding, cb) { - cb(null, chunk); -}; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/_stream_readable.js b/node_modules/readable-stream/lib/_stream_readable.js deleted file mode 100644 index df1f608d53..0000000000 --- a/node_modules/readable-stream/lib/_stream_readable.js +++ /dev/null @@ -1,1027 +0,0 @@ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. - -'use strict'; - -module.exports = Readable; - -/**/ -var Duplex; -/**/ - -Readable.ReadableState = ReadableState; - -/**/ -var EE = require('events').EventEmitter; -var EElistenerCount = function EElistenerCount(emitter, type) { - return emitter.listeners(type).length; -}; -/**/ - -/**/ -var Stream = require('./internal/streams/stream'); -/**/ - -var Buffer = require('buffer').Buffer; -var OurUint8Array = (typeof global !== 'undefined' ? global : typeof window !== 'undefined' ? window : typeof self !== 'undefined' ? 
self : {}).Uint8Array || function () {}; -function _uint8ArrayToBuffer(chunk) { - return Buffer.from(chunk); -} -function _isUint8Array(obj) { - return Buffer.isBuffer(obj) || obj instanceof OurUint8Array; -} - -/**/ -var debugUtil = require('util'); -var debug; -if (debugUtil && debugUtil.debuglog) { - debug = debugUtil.debuglog('stream'); -} else { - debug = function debug() {}; -} -/**/ - -var BufferList = require('./internal/streams/buffer_list'); -var destroyImpl = require('./internal/streams/destroy'); -var _require = require('./internal/streams/state'), - getHighWaterMark = _require.getHighWaterMark; -var _require$codes = require('../errors').codes, - ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE, - ERR_STREAM_PUSH_AFTER_EOF = _require$codes.ERR_STREAM_PUSH_AFTER_EOF, - ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED, - ERR_STREAM_UNSHIFT_AFTER_END_EVENT = _require$codes.ERR_STREAM_UNSHIFT_AFTER_END_EVENT; - -// Lazy loaded to improve the startup performance. -var StringDecoder; -var createReadableStreamAsyncIterator; -var from; -require('inherits')(Readable, Stream); -var errorOrDestroy = destroyImpl.errorOrDestroy; -var kProxyEvents = ['error', 'close', 'destroy', 'pause', 'resume']; -function prependListener(emitter, event, fn) { - // Sadly this is not cacheable as some libraries bundle their own - // event emitter implementation with them. - if (typeof emitter.prependListener === 'function') return emitter.prependListener(event, fn); - - // This is a hack to make sure that our error handler is attached before any - // userland ones. NEVER DO THIS. This is here only because this code needs - // to continue to work with older versions of Node.js that do not include - // the prependListener() method. The goal is to eventually remove this hack. 
- if (!emitter._events || !emitter._events[event]) emitter.on(event, fn);else if (Array.isArray(emitter._events[event])) emitter._events[event].unshift(fn);else emitter._events[event] = [fn, emitter._events[event]]; -} -function ReadableState(options, stream, isDuplex) { - Duplex = Duplex || require('./_stream_duplex'); - options = options || {}; - - // Duplex streams are both readable and writable, but share - // the same options object. - // However, some cases require setting options to different - // values for the readable and the writable sides of the duplex stream. - // These options can be provided separately as readableXXX and writableXXX. - if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; - - // object stream flag. Used to make read(n) ignore n and to - // make all the buffer merging and length checks go away - this.objectMode = !!options.objectMode; - if (isDuplex) this.objectMode = this.objectMode || !!options.readableObjectMode; - - // the point at which it stops calling _read() to fill the buffer - // Note: 0 is a valid value, means "don't call _read preemptively ever" - this.highWaterMark = getHighWaterMark(this, options, 'readableHighWaterMark', isDuplex); - - // A linked list is used to store data chunks instead of an array because the - // linked list can remove elements from the beginning faster than - // array.shift() - this.buffer = new BufferList(); - this.length = 0; - this.pipes = null; - this.pipesCount = 0; - this.flowing = null; - this.ended = false; - this.endEmitted = false; - this.reading = false; - - // a flag to be able to tell if the event 'readable'/'data' is emitted - // immediately, or on a later tick. We set this to true at first, because - // any actions that shouldn't happen until "later" should generally also - // not happen before the first read call. - this.sync = true; - - // whenever we return null, then we set a flag to say - // that we're awaiting a 'readable' event emission. 
- this.needReadable = false; - this.emittedReadable = false; - this.readableListening = false; - this.resumeScheduled = false; - this.paused = true; - - // Should close be emitted on destroy. Defaults to true. - this.emitClose = options.emitClose !== false; - - // Should .destroy() be called after 'end' (and potentially 'finish') - this.autoDestroy = !!options.autoDestroy; - - // has it been destroyed - this.destroyed = false; - - // Crypto is kind of old and crusty. Historically, its default string - // encoding is 'binary' so we have to make this configurable. - // Everything else in the universe uses 'utf8', though. - this.defaultEncoding = options.defaultEncoding || 'utf8'; - - // the number of writers that are awaiting a drain event in .pipe()s - this.awaitDrain = 0; - - // if true, a maybeReadMore has been scheduled - this.readingMore = false; - this.decoder = null; - this.encoding = null; - if (options.encoding) { - if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder; - this.decoder = new StringDecoder(options.encoding); - this.encoding = options.encoding; - } -} -function Readable(options) { - Duplex = Duplex || require('./_stream_duplex'); - if (!(this instanceof Readable)) return new Readable(options); - - // Checking for a Stream.Duplex instance is faster here instead of inside - // the ReadableState constructor, at least with V8 6.5 - var isDuplex = this instanceof Duplex; - this._readableState = new ReadableState(options, this, isDuplex); - - // legacy - this.readable = true; - if (options) { - if (typeof options.read === 'function') this._read = options.read; - if (typeof options.destroy === 'function') this._destroy = options.destroy; - } - Stream.call(this); -} -Object.defineProperty(Readable.prototype, 'destroyed', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - if (this._readableState === 
undefined) { - return false; - } - return this._readableState.destroyed; - }, - set: function set(value) { - // we ignore the value if the stream - // has not been initialized yet - if (!this._readableState) { - return; - } - - // backward compatibility, the user is explicitly - // managing destroyed - this._readableState.destroyed = value; - } -}); -Readable.prototype.destroy = destroyImpl.destroy; -Readable.prototype._undestroy = destroyImpl.undestroy; -Readable.prototype._destroy = function (err, cb) { - cb(err); -}; - -// Manually shove something into the read() buffer. -// This returns true if the highWaterMark has not been hit yet, -// similar to how Writable.write() returns true if you should -// write() some more. -Readable.prototype.push = function (chunk, encoding) { - var state = this._readableState; - var skipChunkCheck; - if (!state.objectMode) { - if (typeof chunk === 'string') { - encoding = encoding || state.defaultEncoding; - if (encoding !== state.encoding) { - chunk = Buffer.from(chunk, encoding); - encoding = ''; - } - skipChunkCheck = true; - } - } else { - skipChunkCheck = true; - } - return readableAddChunk(this, chunk, encoding, false, skipChunkCheck); -}; - -// Unshift should *always* be something directly out of read() -Readable.prototype.unshift = function (chunk) { - return readableAddChunk(this, chunk, null, true, false); -}; -function readableAddChunk(stream, chunk, encoding, addToFront, skipChunkCheck) { - debug('readableAddChunk', chunk); - var state = stream._readableState; - if (chunk === null) { - state.reading = false; - onEofChunk(stream, state); - } else { - var er; - if (!skipChunkCheck) er = chunkInvalid(state, chunk); - if (er) { - errorOrDestroy(stream, er); - } else if (state.objectMode || chunk && chunk.length > 0) { - if (typeof chunk !== 'string' && !state.objectMode && Object.getPrototypeOf(chunk) !== Buffer.prototype) { - chunk = _uint8ArrayToBuffer(chunk); - } - if (addToFront) { - if (state.endEmitted) 
errorOrDestroy(stream, new ERR_STREAM_UNSHIFT_AFTER_END_EVENT());else addChunk(stream, state, chunk, true); - } else if (state.ended) { - errorOrDestroy(stream, new ERR_STREAM_PUSH_AFTER_EOF()); - } else if (state.destroyed) { - return false; - } else { - state.reading = false; - if (state.decoder && !encoding) { - chunk = state.decoder.write(chunk); - if (state.objectMode || chunk.length !== 0) addChunk(stream, state, chunk, false);else maybeReadMore(stream, state); - } else { - addChunk(stream, state, chunk, false); - } - } - } else if (!addToFront) { - state.reading = false; - maybeReadMore(stream, state); - } - } - - // We can push more data if we are below the highWaterMark. - // Also, if we have no data yet, we can stand some more bytes. - // This is to work around cases where hwm=0, such as the repl. - return !state.ended && (state.length < state.highWaterMark || state.length === 0); -} -function addChunk(stream, state, chunk, addToFront) { - if (state.flowing && state.length === 0 && !state.sync) { - state.awaitDrain = 0; - stream.emit('data', chunk); - } else { - // update the buffer info. - state.length += state.objectMode ? 1 : chunk.length; - if (addToFront) state.buffer.unshift(chunk);else state.buffer.push(chunk); - if (state.needReadable) emitReadable(stream); - } - maybeReadMore(stream, state); -} -function chunkInvalid(state, chunk) { - var er; - if (!_isUint8Array(chunk) && typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) { - er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer', 'Uint8Array'], chunk); - } - return er; -} -Readable.prototype.isPaused = function () { - return this._readableState.flowing === false; -}; - -// backwards compatibility. 
-Readable.prototype.setEncoding = function (enc) { - if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder; - var decoder = new StringDecoder(enc); - this._readableState.decoder = decoder; - // If setEncoding(null), decoder.encoding equals utf8 - this._readableState.encoding = this._readableState.decoder.encoding; - - // Iterate over current buffer to convert already stored Buffers: - var p = this._readableState.buffer.head; - var content = ''; - while (p !== null) { - content += decoder.write(p.data); - p = p.next; - } - this._readableState.buffer.clear(); - if (content !== '') this._readableState.buffer.push(content); - this._readableState.length = content.length; - return this; -}; - -// Don't raise the hwm > 1GB -var MAX_HWM = 0x40000000; -function computeNewHighWaterMark(n) { - if (n >= MAX_HWM) { - // TODO(ronag): Throw ERR_VALUE_OUT_OF_RANGE. - n = MAX_HWM; - } else { - // Get the next highest power of 2 to prevent increasing hwm excessively in - // tiny amounts - n--; - n |= n >>> 1; - n |= n >>> 2; - n |= n >>> 4; - n |= n >>> 8; - n |= n >>> 16; - n++; - } - return n; -} - -// This function is designed to be inlinable, so please take care when making -// changes to the function body. -function howMuchToRead(n, state) { - if (n <= 0 || state.length === 0 && state.ended) return 0; - if (state.objectMode) return 1; - if (n !== n) { - // Only flow one buffer at a time - if (state.flowing && state.length) return state.buffer.head.data.length;else return state.length; - } - // If we're asking for more than the current hwm, then raise the hwm. - if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n); - if (n <= state.length) return n; - // Don't have enough - if (!state.ended) { - state.needReadable = true; - return 0; - } - return state.length; -} - -// you can override either this method, or the async _read(n) below. 
-Readable.prototype.read = function (n) { - debug('read', n); - n = parseInt(n, 10); - var state = this._readableState; - var nOrig = n; - if (n !== 0) state.emittedReadable = false; - - // if we're doing read(0) to trigger a readable event, but we - // already have a bunch of data in the buffer, then just trigger - // the 'readable' event and move on. - if (n === 0 && state.needReadable && ((state.highWaterMark !== 0 ? state.length >= state.highWaterMark : state.length > 0) || state.ended)) { - debug('read: emitReadable', state.length, state.ended); - if (state.length === 0 && state.ended) endReadable(this);else emitReadable(this); - return null; - } - n = howMuchToRead(n, state); - - // if we've ended, and we're now clear, then finish it up. - if (n === 0 && state.ended) { - if (state.length === 0) endReadable(this); - return null; - } - - // All the actual chunk generation logic needs to be - // *below* the call to _read. The reason is that in certain - // synthetic stream cases, such as passthrough streams, _read - // may be a completely synchronous operation which may change - // the state of the read buffer, providing enough data when - // before there was *not* enough. - // - // So, the steps are: - // 1. Figure out what the state of things will be after we do - // a read from the buffer. - // - // 2. If that resulting state will trigger a _read, then call _read. - // Note that this may be asynchronous, or synchronous. Yes, it is - // deeply ugly to write APIs this way, but that still doesn't mean - // that the Readable class should behave improperly, as streams are - // designed to be sync/async agnostic. - // Take note if the _read call is sync or async (ie, if the read call - // has returned yet), so that we know whether or not it's safe to emit - // 'readable' etc. - // - // 3. Actually pull the requested chunks out of the buffer and return. - - // if we need a readable event, then we need to do some reading. 
- var doRead = state.needReadable; - debug('need readable', doRead); - - // if we currently have less than the highWaterMark, then also read some - if (state.length === 0 || state.length - n < state.highWaterMark) { - doRead = true; - debug('length less than watermark', doRead); - } - - // however, if we've ended, then there's no point, and if we're already - // reading, then it's unnecessary. - if (state.ended || state.reading) { - doRead = false; - debug('reading or ended', doRead); - } else if (doRead) { - debug('do read'); - state.reading = true; - state.sync = true; - // if the length is currently zero, then we *need* a readable event. - if (state.length === 0) state.needReadable = true; - // call internal read method - this._read(state.highWaterMark); - state.sync = false; - // If _read pushed data synchronously, then `reading` will be false, - // and we need to re-evaluate how much data we can return to the user. - if (!state.reading) n = howMuchToRead(nOrig, state); - } - var ret; - if (n > 0) ret = fromList(n, state);else ret = null; - if (ret === null) { - state.needReadable = state.length <= state.highWaterMark; - n = 0; - } else { - state.length -= n; - state.awaitDrain = 0; - } - if (state.length === 0) { - // If we have nothing in the buffer, then we want to know - // as soon as we *do* get something into the buffer. - if (!state.ended) state.needReadable = true; - - // If we tried to read() past the EOF, then emit end on the next tick. - if (nOrig !== n && state.ended) endReadable(this); - } - if (ret !== null) this.emit('data', ret); - return ret; -}; -function onEofChunk(stream, state) { - debug('onEofChunk'); - if (state.ended) return; - if (state.decoder) { - var chunk = state.decoder.end(); - if (chunk && chunk.length) { - state.buffer.push(chunk); - state.length += state.objectMode ? 1 : chunk.length; - } - } - state.ended = true; - if (state.sync) { - // if we are sync, wait until next tick to emit the data. 
- // Otherwise we risk emitting data in the flow() - // the readable code triggers during a read() call - emitReadable(stream); - } else { - // emit 'readable' now to make sure it gets picked up. - state.needReadable = false; - if (!state.emittedReadable) { - state.emittedReadable = true; - emitReadable_(stream); - } - } -} - -// Don't emit readable right away in sync mode, because this can trigger -// another read() call => stack overflow. This way, it might trigger -// a nextTick recursion warning, but that's not so bad. -function emitReadable(stream) { - var state = stream._readableState; - debug('emitReadable', state.needReadable, state.emittedReadable); - state.needReadable = false; - if (!state.emittedReadable) { - debug('emitReadable', state.flowing); - state.emittedReadable = true; - process.nextTick(emitReadable_, stream); - } -} -function emitReadable_(stream) { - var state = stream._readableState; - debug('emitReadable_', state.destroyed, state.length, state.ended); - if (!state.destroyed && (state.length || state.ended)) { - stream.emit('readable'); - state.emittedReadable = false; - } - - // The stream needs another readable event if - // 1. It is not flowing, as the flow mechanism will take - // care of it. - // 2. It is not ended. - // 3. It is below the highWaterMark, so we can schedule - // another readable later. - state.needReadable = !state.flowing && !state.ended && state.length <= state.highWaterMark; - flow(stream); -} - -// at this point, the user has presumably seen the 'readable' event, -// and called read() to consume some data. that may have triggered -// in turn another _read(n) call, in which case reading = true if -// it's in progress. -// However, if we're not ended, or reading, and the length < hwm, -// then go ahead and try to read some more preemptively. 
-function maybeReadMore(stream, state) { - if (!state.readingMore) { - state.readingMore = true; - process.nextTick(maybeReadMore_, stream, state); - } -} -function maybeReadMore_(stream, state) { - // Attempt to read more data if we should. - // - // The conditions for reading more data are (one of): - // - Not enough data buffered (state.length < state.highWaterMark). The loop - // is responsible for filling the buffer with enough data if such data - // is available. If highWaterMark is 0 and we are not in the flowing mode - // we should _not_ attempt to buffer any extra data. We'll get more data - // when the stream consumer calls read() instead. - // - No data in the buffer, and the stream is in flowing mode. In this mode - // the loop below is responsible for ensuring read() is called. Failing to - // call read here would abort the flow and there's no other mechanism for - // continuing the flow if the stream consumer has just subscribed to the - // 'data' event. - // - // In addition to the above conditions to keep reading data, the following - // conditions prevent the data from being read: - // - The stream has ended (state.ended). - // - There is already a pending 'read' operation (state.reading). This is a - // case where the the stream has called the implementation defined _read() - // method, but they are processing the call asynchronously and have _not_ - // called push() with new data. In this case we skip performing more - // read()s. The execution ends in this method again after the _read() ends - // up calling push() with more data. - while (!state.reading && !state.ended && (state.length < state.highWaterMark || state.flowing && state.length === 0)) { - var len = state.length; - debug('maybeReadMore read 0'); - stream.read(0); - if (len === state.length) - // didn't get any data, stop spinning. - break; - } - state.readingMore = false; -} - -// abstract method. to be overridden in specific implementation classes. 
-// call cb(er, data) where data is <= n in length. -// for virtual (non-string, non-buffer) streams, "length" is somewhat -// arbitrary, and perhaps not very meaningful. -Readable.prototype._read = function (n) { - errorOrDestroy(this, new ERR_METHOD_NOT_IMPLEMENTED('_read()')); -}; -Readable.prototype.pipe = function (dest, pipeOpts) { - var src = this; - var state = this._readableState; - switch (state.pipesCount) { - case 0: - state.pipes = dest; - break; - case 1: - state.pipes = [state.pipes, dest]; - break; - default: - state.pipes.push(dest); - break; - } - state.pipesCount += 1; - debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts); - var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr; - var endFn = doEnd ? onend : unpipe; - if (state.endEmitted) process.nextTick(endFn);else src.once('end', endFn); - dest.on('unpipe', onunpipe); - function onunpipe(readable, unpipeInfo) { - debug('onunpipe'); - if (readable === src) { - if (unpipeInfo && unpipeInfo.hasUnpiped === false) { - unpipeInfo.hasUnpiped = true; - cleanup(); - } - } - } - function onend() { - debug('onend'); - dest.end(); - } - - // when the dest drains, it reduces the awaitDrain counter - // on the source. This would be more elegant with a .once() - // handler in flow(), but adding and removing repeatedly is - // too slow. 
- var ondrain = pipeOnDrain(src); - dest.on('drain', ondrain); - var cleanedUp = false; - function cleanup() { - debug('cleanup'); - // cleanup event handlers once the pipe is broken - dest.removeListener('close', onclose); - dest.removeListener('finish', onfinish); - dest.removeListener('drain', ondrain); - dest.removeListener('error', onerror); - dest.removeListener('unpipe', onunpipe); - src.removeListener('end', onend); - src.removeListener('end', unpipe); - src.removeListener('data', ondata); - cleanedUp = true; - - // if the reader is waiting for a drain event from this - // specific writer, then it would cause it to never start - // flowing again. - // So, if this is awaiting a drain, then we just call it now. - // If we don't know, then assume that we are waiting for one. - if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain(); - } - src.on('data', ondata); - function ondata(chunk) { - debug('ondata'); - var ret = dest.write(chunk); - debug('dest.write', ret); - if (ret === false) { - // If the user unpiped during `dest.write()`, it is possible - // to get stuck in a permanently paused state if that write - // also returned false. - // => Check whether `dest` is still a piping destination. - if ((state.pipesCount === 1 && state.pipes === dest || state.pipesCount > 1 && indexOf(state.pipes, dest) !== -1) && !cleanedUp) { - debug('false write response, pause', state.awaitDrain); - state.awaitDrain++; - } - src.pause(); - } - } - - // if the dest has an error, then stop piping into it. - // however, don't suppress the throwing behavior for this. - function onerror(er) { - debug('onerror', er); - unpipe(); - dest.removeListener('error', onerror); - if (EElistenerCount(dest, 'error') === 0) errorOrDestroy(dest, er); - } - - // Make sure our error handler is attached before userland ones. - prependListener(dest, 'error', onerror); - - // Both close and finish should trigger unpipe, but only once. 
- function onclose() { - dest.removeListener('finish', onfinish); - unpipe(); - } - dest.once('close', onclose); - function onfinish() { - debug('onfinish'); - dest.removeListener('close', onclose); - unpipe(); - } - dest.once('finish', onfinish); - function unpipe() { - debug('unpipe'); - src.unpipe(dest); - } - - // tell the dest that it's being piped to - dest.emit('pipe', src); - - // start the flow if it hasn't been started already. - if (!state.flowing) { - debug('pipe resume'); - src.resume(); - } - return dest; -}; -function pipeOnDrain(src) { - return function pipeOnDrainFunctionResult() { - var state = src._readableState; - debug('pipeOnDrain', state.awaitDrain); - if (state.awaitDrain) state.awaitDrain--; - if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) { - state.flowing = true; - flow(src); - } - }; -} -Readable.prototype.unpipe = function (dest) { - var state = this._readableState; - var unpipeInfo = { - hasUnpiped: false - }; - - // if we're not piping anywhere, then do nothing. - if (state.pipesCount === 0) return this; - - // just one destination. most common case. - if (state.pipesCount === 1) { - // passed in one, but it's not the right one. - if (dest && dest !== state.pipes) return this; - if (!dest) dest = state.pipes; - - // got a match. - state.pipes = null; - state.pipesCount = 0; - state.flowing = false; - if (dest) dest.emit('unpipe', this, unpipeInfo); - return this; - } - - // slow case. multiple pipe destinations. - - if (!dest) { - // remove all. - var dests = state.pipes; - var len = state.pipesCount; - state.pipes = null; - state.pipesCount = 0; - state.flowing = false; - for (var i = 0; i < len; i++) dests[i].emit('unpipe', this, { - hasUnpiped: false - }); - return this; - } - - // try to find the right one. 
- var index = indexOf(state.pipes, dest); - if (index === -1) return this; - state.pipes.splice(index, 1); - state.pipesCount -= 1; - if (state.pipesCount === 1) state.pipes = state.pipes[0]; - dest.emit('unpipe', this, unpipeInfo); - return this; -}; - -// set up data events if they are asked for -// Ensure readable listeners eventually get something -Readable.prototype.on = function (ev, fn) { - var res = Stream.prototype.on.call(this, ev, fn); - var state = this._readableState; - if (ev === 'data') { - // update readableListening so that resume() may be a no-op - // a few lines down. This is needed to support once('readable'). - state.readableListening = this.listenerCount('readable') > 0; - - // Try start flowing on next tick if stream isn't explicitly paused - if (state.flowing !== false) this.resume(); - } else if (ev === 'readable') { - if (!state.endEmitted && !state.readableListening) { - state.readableListening = state.needReadable = true; - state.flowing = false; - state.emittedReadable = false; - debug('on readable', state.length, state.reading); - if (state.length) { - emitReadable(this); - } else if (!state.reading) { - process.nextTick(nReadingNextTick, this); - } - } - } - return res; -}; -Readable.prototype.addListener = Readable.prototype.on; -Readable.prototype.removeListener = function (ev, fn) { - var res = Stream.prototype.removeListener.call(this, ev, fn); - if (ev === 'readable') { - // We need to check if there is someone still listening to - // readable and reset the state. However this needs to happen - // after readable has been emitted but before I/O (nextTick) to - // support once('readable', fn) cycles. This means that calling - // resume within the same tick will have no - // effect. 
- process.nextTick(updateReadableListening, this); - } - return res; -}; -Readable.prototype.removeAllListeners = function (ev) { - var res = Stream.prototype.removeAllListeners.apply(this, arguments); - if (ev === 'readable' || ev === undefined) { - // We need to check if there is someone still listening to - // readable and reset the state. However this needs to happen - // after readable has been emitted but before I/O (nextTick) to - // support once('readable', fn) cycles. This means that calling - // resume within the same tick will have no - // effect. - process.nextTick(updateReadableListening, this); - } - return res; -}; -function updateReadableListening(self) { - var state = self._readableState; - state.readableListening = self.listenerCount('readable') > 0; - if (state.resumeScheduled && !state.paused) { - // flowing needs to be set to true now, otherwise - // the upcoming resume will not flow. - state.flowing = true; - - // crude way to check if we should resume - } else if (self.listenerCount('data') > 0) { - self.resume(); - } -} -function nReadingNextTick(self) { - debug('readable nexttick read 0'); - self.read(0); -} - -// pause() and resume() are remnants of the legacy readable stream API -// If the user uses them, then switch into old mode. 
-Readable.prototype.resume = function () { - var state = this._readableState; - if (!state.flowing) { - debug('resume'); - // we flow only if there is no one listening - // for readable, but we still have to call - // resume() - state.flowing = !state.readableListening; - resume(this, state); - } - state.paused = false; - return this; -}; -function resume(stream, state) { - if (!state.resumeScheduled) { - state.resumeScheduled = true; - process.nextTick(resume_, stream, state); - } -} -function resume_(stream, state) { - debug('resume', state.reading); - if (!state.reading) { - stream.read(0); - } - state.resumeScheduled = false; - stream.emit('resume'); - flow(stream); - if (state.flowing && !state.reading) stream.read(0); -} -Readable.prototype.pause = function () { - debug('call pause flowing=%j', this._readableState.flowing); - if (this._readableState.flowing !== false) { - debug('pause'); - this._readableState.flowing = false; - this.emit('pause'); - } - this._readableState.paused = true; - return this; -}; -function flow(stream) { - var state = stream._readableState; - debug('flow', state.flowing); - while (state.flowing && stream.read() !== null); -} - -// wrap an old-style stream as the async data source. -// This is *not* part of the readable stream interface. -// It is an ugly unfortunate mess of history. 
-Readable.prototype.wrap = function (stream) { - var _this = this; - var state = this._readableState; - var paused = false; - stream.on('end', function () { - debug('wrapped end'); - if (state.decoder && !state.ended) { - var chunk = state.decoder.end(); - if (chunk && chunk.length) _this.push(chunk); - } - _this.push(null); - }); - stream.on('data', function (chunk) { - debug('wrapped data'); - if (state.decoder) chunk = state.decoder.write(chunk); - - // don't skip over falsy values in objectMode - if (state.objectMode && (chunk === null || chunk === undefined)) return;else if (!state.objectMode && (!chunk || !chunk.length)) return; - var ret = _this.push(chunk); - if (!ret) { - paused = true; - stream.pause(); - } - }); - - // proxy all the other methods. - // important when wrapping filters and duplexes. - for (var i in stream) { - if (this[i] === undefined && typeof stream[i] === 'function') { - this[i] = function methodWrap(method) { - return function methodWrapReturnFunction() { - return stream[method].apply(stream, arguments); - }; - }(i); - } - } - - // proxy certain important events. - for (var n = 0; n < kProxyEvents.length; n++) { - stream.on(kProxyEvents[n], this.emit.bind(this, kProxyEvents[n])); - } - - // when we try to consume some more bytes, simply unpause the - // underlying stream. 
- this._read = function (n) { - debug('wrapped _read', n); - if (paused) { - paused = false; - stream.resume(); - } - }; - return this; -}; -if (typeof Symbol === 'function') { - Readable.prototype[Symbol.asyncIterator] = function () { - if (createReadableStreamAsyncIterator === undefined) { - createReadableStreamAsyncIterator = require('./internal/streams/async_iterator'); - } - return createReadableStreamAsyncIterator(this); - }; -} -Object.defineProperty(Readable.prototype, 'readableHighWaterMark', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - return this._readableState.highWaterMark; - } -}); -Object.defineProperty(Readable.prototype, 'readableBuffer', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - return this._readableState && this._readableState.buffer; - } -}); -Object.defineProperty(Readable.prototype, 'readableFlowing', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - return this._readableState.flowing; - }, - set: function set(state) { - if (this._readableState) { - this._readableState.flowing = state; - } - } -}); - -// exposed for testing purposes only. -Readable._fromList = fromList; -Object.defineProperty(Readable.prototype, 'readableLength', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - return this._readableState.length; - } -}); - -// Pluck off n bytes from an array of buffers. -// Length is the combined lengths of all the buffers in the list. 
-// This function is designed to be inlinable, so please take care when making -// changes to the function body. -function fromList(n, state) { - // nothing buffered - if (state.length === 0) return null; - var ret; - if (state.objectMode) ret = state.buffer.shift();else if (!n || n >= state.length) { - // read it all, truncate the list - if (state.decoder) ret = state.buffer.join('');else if (state.buffer.length === 1) ret = state.buffer.first();else ret = state.buffer.concat(state.length); - state.buffer.clear(); - } else { - // read part of list - ret = state.buffer.consume(n, state.decoder); - } - return ret; -} -function endReadable(stream) { - var state = stream._readableState; - debug('endReadable', state.endEmitted); - if (!state.endEmitted) { - state.ended = true; - process.nextTick(endReadableNT, state, stream); - } -} -function endReadableNT(state, stream) { - debug('endReadableNT', state.endEmitted, state.length); - - // Check that we didn't get one last unshift. - if (!state.endEmitted && state.length === 0) { - state.endEmitted = true; - stream.readable = false; - stream.emit('end'); - if (state.autoDestroy) { - // In case of duplex streams we need a way to detect - // if the writable side is ready for autoDestroy as well - var wState = stream._writableState; - if (!wState || wState.autoDestroy && wState.finished) { - stream.destroy(); - } - } - } -} -if (typeof Symbol === 'function') { - Readable.from = function (iterable, opts) { - if (from === undefined) { - from = require('./internal/streams/from'); - } - return from(Readable, iterable, opts); - }; -} -function indexOf(xs, x) { - for (var i = 0, l = xs.length; i < l; i++) { - if (xs[i] === x) return i; - } - return -1; -} \ No newline at end of file diff --git a/node_modules/readable-stream/lib/_stream_transform.js b/node_modules/readable-stream/lib/_stream_transform.js deleted file mode 100644 index 1ccb7157be..0000000000 --- a/node_modules/readable-stream/lib/_stream_transform.js +++ /dev/null 
@@ -1,190 +0,0 @@ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. - -// a transform stream is a readable/writable stream where you do -// something with the data. Sometimes it's called a "filter", -// but that's not a great name for it, since that implies a thing where -// some bits pass through, and others are simply ignored. (That would -// be a valid example of a transform, of course.) -// -// While the output is causally related to the input, it's not a -// necessarily symmetric or synchronous transformation. For example, -// a zlib stream might take multiple plain-text writes(), and then -// emit a single compressed chunk some time in the future. -// -// Here's how this works: -// -// The Transform stream has all the aspects of the readable and writable -// stream classes. 
When you write(chunk), that calls _write(chunk,cb) -// internally, and returns false if there's a lot of pending writes -// buffered up. When you call read(), that calls _read(n) until -// there's enough pending readable data buffered up. -// -// In a transform stream, the written data is placed in a buffer. When -// _read(n) is called, it transforms the queued up data, calling the -// buffered _write cb's as it consumes chunks. If consuming a single -// written chunk would result in multiple output chunks, then the first -// outputted bit calls the readcb, and subsequent chunks just go into -// the read buffer, and will cause it to emit 'readable' if necessary. -// -// This way, back-pressure is actually determined by the reading side, -// since _read has to be called to start processing a new chunk. However, -// a pathological inflate type of transform can cause excessive buffering -// here. For example, imagine a stream where every byte of input is -// interpreted as an integer from 0-255, and then results in that many -// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in -// 1kb of data being output. In this case, you could write a very small -// amount of input, and end up with a very large amount of output. In -// such a pathological inflating mechanism, there'd be no way to tell -// the system to stop doing the transform. A single 4MB write could -// cause the system to run out of memory. -// -// However, even in such a pathological case, only a single written chunk -// would be consumed, and then the rest would wait (un-transformed) until -// the results of the previous transformed chunk were consumed. 
- -'use strict'; - -module.exports = Transform; -var _require$codes = require('../errors').codes, - ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED, - ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK, - ERR_TRANSFORM_ALREADY_TRANSFORMING = _require$codes.ERR_TRANSFORM_ALREADY_TRANSFORMING, - ERR_TRANSFORM_WITH_LENGTH_0 = _require$codes.ERR_TRANSFORM_WITH_LENGTH_0; -var Duplex = require('./_stream_duplex'); -require('inherits')(Transform, Duplex); -function afterTransform(er, data) { - var ts = this._transformState; - ts.transforming = false; - var cb = ts.writecb; - if (cb === null) { - return this.emit('error', new ERR_MULTIPLE_CALLBACK()); - } - ts.writechunk = null; - ts.writecb = null; - if (data != null) - // single equals check for both `null` and `undefined` - this.push(data); - cb(er); - var rs = this._readableState; - rs.reading = false; - if (rs.needReadable || rs.length < rs.highWaterMark) { - this._read(rs.highWaterMark); - } -} -function Transform(options) { - if (!(this instanceof Transform)) return new Transform(options); - Duplex.call(this, options); - this._transformState = { - afterTransform: afterTransform.bind(this), - needTransform: false, - transforming: false, - writecb: null, - writechunk: null, - writeencoding: null - }; - - // start out asking for a readable event once data is transformed. - this._readableState.needReadable = true; - - // we have implemented the _read method, and done the other things - // that Readable wants before the first _read call, so unset the - // sync guard flag. - this._readableState.sync = false; - if (options) { - if (typeof options.transform === 'function') this._transform = options.transform; - if (typeof options.flush === 'function') this._flush = options.flush; - } - - // When the writable side finishes, then flush out anything remaining. 
- this.on('prefinish', prefinish); -} -function prefinish() { - var _this = this; - if (typeof this._flush === 'function' && !this._readableState.destroyed) { - this._flush(function (er, data) { - done(_this, er, data); - }); - } else { - done(this, null, null); - } -} -Transform.prototype.push = function (chunk, encoding) { - this._transformState.needTransform = false; - return Duplex.prototype.push.call(this, chunk, encoding); -}; - -// This is the part where you do stuff! -// override this function in implementation classes. -// 'chunk' is an input chunk. -// -// Call `push(newChunk)` to pass along transformed output -// to the readable side. You may call 'push' zero or more times. -// -// Call `cb(err)` when you are done with this chunk. If you pass -// an error, then that'll put the hurt on the whole operation. If you -// never call cb(), then you'll never get another chunk. -Transform.prototype._transform = function (chunk, encoding, cb) { - cb(new ERR_METHOD_NOT_IMPLEMENTED('_transform()')); -}; -Transform.prototype._write = function (chunk, encoding, cb) { - var ts = this._transformState; - ts.writecb = cb; - ts.writechunk = chunk; - ts.writeencoding = encoding; - if (!ts.transforming) { - var rs = this._readableState; - if (ts.needTransform || rs.needReadable || rs.length < rs.highWaterMark) this._read(rs.highWaterMark); - } -}; - -// Doesn't matter what the args are here. -// _transform does all the work. -// That we got here means that the readable side wants more data. -Transform.prototype._read = function (n) { - var ts = this._transformState; - if (ts.writechunk !== null && !ts.transforming) { - ts.transforming = true; - this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform); - } else { - // mark that we need a transform, so that any data that comes in - // will get processed, now that we've asked for it. 
- ts.needTransform = true; - } -}; -Transform.prototype._destroy = function (err, cb) { - Duplex.prototype._destroy.call(this, err, function (err2) { - cb(err2); - }); -}; -function done(stream, er, data) { - if (er) return stream.emit('error', er); - if (data != null) - // single equals check for both `null` and `undefined` - stream.push(data); - - // TODO(BridgeAR): Write a test for these two error cases - // if there's nothing in the write buffer, then that means - // that nothing more will ever be provided - if (stream._writableState.length) throw new ERR_TRANSFORM_WITH_LENGTH_0(); - if (stream._transformState.transforming) throw new ERR_TRANSFORM_ALREADY_TRANSFORMING(); - return stream.push(null); -} \ No newline at end of file diff --git a/node_modules/readable-stream/lib/_stream_writable.js b/node_modules/readable-stream/lib/_stream_writable.js deleted file mode 100644 index 292415e23a..0000000000 --- a/node_modules/readable-stream/lib/_stream_writable.js +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. - -// A bit simpler than readable streams. -// Implement an async ._write(chunk, encoding, cb), and it'll handle all -// the drain event emission and buffering. - -'use strict'; - -module.exports = Writable; - -/* */ -function WriteReq(chunk, encoding, cb) { - this.chunk = chunk; - this.encoding = encoding; - this.callback = cb; - this.next = null; -} - -// It seems a linked list but it is not -// there will be only 2 of these for each stream -function CorkedRequest(state) { - var _this = this; - this.next = null; - this.entry = null; - this.finish = function () { - onCorkedFinish(_this, state); - }; -} -/* */ - -/**/ -var Duplex; -/**/ - -Writable.WritableState = WritableState; - -/**/ -var internalUtil = { - deprecate: require('util-deprecate') -}; -/**/ - -/**/ -var Stream = require('./internal/streams/stream'); -/**/ - -var Buffer = require('buffer').Buffer; -var OurUint8Array = (typeof global !== 'undefined' ? global : typeof window !== 'undefined' ? window : typeof self !== 'undefined' ? 
self : {}).Uint8Array || function () {}; -function _uint8ArrayToBuffer(chunk) { - return Buffer.from(chunk); -} -function _isUint8Array(obj) { - return Buffer.isBuffer(obj) || obj instanceof OurUint8Array; -} -var destroyImpl = require('./internal/streams/destroy'); -var _require = require('./internal/streams/state'), - getHighWaterMark = _require.getHighWaterMark; -var _require$codes = require('../errors').codes, - ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE, - ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED, - ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK, - ERR_STREAM_CANNOT_PIPE = _require$codes.ERR_STREAM_CANNOT_PIPE, - ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED, - ERR_STREAM_NULL_VALUES = _require$codes.ERR_STREAM_NULL_VALUES, - ERR_STREAM_WRITE_AFTER_END = _require$codes.ERR_STREAM_WRITE_AFTER_END, - ERR_UNKNOWN_ENCODING = _require$codes.ERR_UNKNOWN_ENCODING; -var errorOrDestroy = destroyImpl.errorOrDestroy; -require('inherits')(Writable, Stream); -function nop() {} -function WritableState(options, stream, isDuplex) { - Duplex = Duplex || require('./_stream_duplex'); - options = options || {}; - - // Duplex streams are both readable and writable, but share - // the same options object. - // However, some cases require setting options to different - // values for the readable and the writable sides of the duplex stream, - // e.g. options.readableObjectMode vs. options.writableObjectMode, etc. - if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; - - // object stream flag to indicate whether or not this stream - // contains buffers or objects. 
- this.objectMode = !!options.objectMode; - if (isDuplex) this.objectMode = this.objectMode || !!options.writableObjectMode; - - // the point at which write() starts returning false - // Note: 0 is a valid value, means that we always return false if - // the entire buffer is not flushed immediately on write() - this.highWaterMark = getHighWaterMark(this, options, 'writableHighWaterMark', isDuplex); - - // if _final has been called - this.finalCalled = false; - - // drain event flag. - this.needDrain = false; - // at the start of calling end() - this.ending = false; - // when end() has been called, and returned - this.ended = false; - // when 'finish' is emitted - this.finished = false; - - // has it been destroyed - this.destroyed = false; - - // should we decode strings into buffers before passing to _write? - // this is here so that some node-core streams can optimize string - // handling at a lower level. - var noDecode = options.decodeStrings === false; - this.decodeStrings = !noDecode; - - // Crypto is kind of old and crusty. Historically, its default string - // encoding is 'binary' so we have to make this configurable. - // Everything else in the universe uses 'utf8', though. - this.defaultEncoding = options.defaultEncoding || 'utf8'; - - // not an actual buffer we keep track of, but a measurement - // of how much we're waiting to get pushed to some underlying - // socket or file. - this.length = 0; - - // a flag to see when we're in the middle of a write. - this.writing = false; - - // when true all writes will be buffered until .uncork() call - this.corked = 0; - - // a flag to be able to tell if the onwrite cb is called immediately, - // or on a later tick. We set this to true at first, because any - // actions that shouldn't happen until "later" should generally also - // not happen before the first write call. 
- this.sync = true; - - // a flag to know if we're processing previously buffered items, which - // may call the _write() callback in the same tick, so that we don't - // end up in an overlapped onwrite situation. - this.bufferProcessing = false; - - // the callback that's passed to _write(chunk,cb) - this.onwrite = function (er) { - onwrite(stream, er); - }; - - // the callback that the user supplies to write(chunk,encoding,cb) - this.writecb = null; - - // the amount that is being written when _write is called. - this.writelen = 0; - this.bufferedRequest = null; - this.lastBufferedRequest = null; - - // number of pending user-supplied write callbacks - // this must be 0 before 'finish' can be emitted - this.pendingcb = 0; - - // emit prefinish if the only thing we're waiting for is _write cbs - // This is relevant for synchronous Transform streams - this.prefinished = false; - - // True if the error was already emitted and should not be thrown again - this.errorEmitted = false; - - // Should close be emitted on destroy. Defaults to true. - this.emitClose = options.emitClose !== false; - - // Should .destroy() be called after 'finish' (and potentially 'end') - this.autoDestroy = !!options.autoDestroy; - - // count buffered requests - this.bufferedRequestCount = 0; - - // allocate the first CorkedRequest, there is always - // one allocated and free to use, and we maintain at most two - this.corkedRequestsFree = new CorkedRequest(this); -} -WritableState.prototype.getBuffer = function getBuffer() { - var current = this.bufferedRequest; - var out = []; - while (current) { - out.push(current); - current = current.next; - } - return out; -}; -(function () { - try { - Object.defineProperty(WritableState.prototype, 'buffer', { - get: internalUtil.deprecate(function writableStateBufferGetter() { - return this.getBuffer(); - }, '_writableState.buffer is deprecated. 
Use _writableState.getBuffer ' + 'instead.', 'DEP0003') - }); - } catch (_) {} -})(); - -// Test _writableState for inheritance to account for Duplex streams, -// whose prototype chain only points to Readable. -var realHasInstance; -if (typeof Symbol === 'function' && Symbol.hasInstance && typeof Function.prototype[Symbol.hasInstance] === 'function') { - realHasInstance = Function.prototype[Symbol.hasInstance]; - Object.defineProperty(Writable, Symbol.hasInstance, { - value: function value(object) { - if (realHasInstance.call(this, object)) return true; - if (this !== Writable) return false; - return object && object._writableState instanceof WritableState; - } - }); -} else { - realHasInstance = function realHasInstance(object) { - return object instanceof this; - }; -} -function Writable(options) { - Duplex = Duplex || require('./_stream_duplex'); - - // Writable ctor is applied to Duplexes, too. - // `realHasInstance` is necessary because using plain `instanceof` - // would return false, as no `_writableState` property is attached. - - // Trying to use the custom `instanceof` for Writable here will also break the - // Node.js LazyTransform implementation, which has a non-trivial getter for - // `_writableState` that would lead to infinite recursion. - - // Checking for a Stream.Duplex instance is faster here instead of inside - // the WritableState constructor, at least with V8 6.5 - var isDuplex = this instanceof Duplex; - if (!isDuplex && !realHasInstance.call(Writable, this)) return new Writable(options); - this._writableState = new WritableState(options, this, isDuplex); - - // legacy. 
- this.writable = true; - if (options) { - if (typeof options.write === 'function') this._write = options.write; - if (typeof options.writev === 'function') this._writev = options.writev; - if (typeof options.destroy === 'function') this._destroy = options.destroy; - if (typeof options.final === 'function') this._final = options.final; - } - Stream.call(this); -} - -// Otherwise people can pipe Writable streams, which is just wrong. -Writable.prototype.pipe = function () { - errorOrDestroy(this, new ERR_STREAM_CANNOT_PIPE()); -}; -function writeAfterEnd(stream, cb) { - var er = new ERR_STREAM_WRITE_AFTER_END(); - // TODO: defer error events consistently everywhere, not just the cb - errorOrDestroy(stream, er); - process.nextTick(cb, er); -} - -// Checks that a user-supplied chunk is valid, especially for the particular -// mode the stream is in. Currently this means that `null` is never accepted -// and undefined/non-string values are only allowed in object mode. -function validChunk(stream, state, chunk, cb) { - var er; - if (chunk === null) { - er = new ERR_STREAM_NULL_VALUES(); - } else if (typeof chunk !== 'string' && !state.objectMode) { - er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer'], chunk); - } - if (er) { - errorOrDestroy(stream, er); - process.nextTick(cb, er); - return false; - } - return true; -} -Writable.prototype.write = function (chunk, encoding, cb) { - var state = this._writableState; - var ret = false; - var isBuf = !state.objectMode && _isUint8Array(chunk); - if (isBuf && !Buffer.isBuffer(chunk)) { - chunk = _uint8ArrayToBuffer(chunk); - } - if (typeof encoding === 'function') { - cb = encoding; - encoding = null; - } - if (isBuf) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding; - if (typeof cb !== 'function') cb = nop; - if (state.ending) writeAfterEnd(this, cb);else if (isBuf || validChunk(this, state, chunk, cb)) { - state.pendingcb++; - ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb); - } - 
return ret; -}; -Writable.prototype.cork = function () { - this._writableState.corked++; -}; -Writable.prototype.uncork = function () { - var state = this._writableState; - if (state.corked) { - state.corked--; - if (!state.writing && !state.corked && !state.bufferProcessing && state.bufferedRequest) clearBuffer(this, state); - } -}; -Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) { - // node::ParseEncoding() requires lower case. - if (typeof encoding === 'string') encoding = encoding.toLowerCase(); - if (!(['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'].indexOf((encoding + '').toLowerCase()) > -1)) throw new ERR_UNKNOWN_ENCODING(encoding); - this._writableState.defaultEncoding = encoding; - return this; -}; -Object.defineProperty(Writable.prototype, 'writableBuffer', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - return this._writableState && this._writableState.getBuffer(); - } -}); -function decodeChunk(state, chunk, encoding) { - if (!state.objectMode && state.decodeStrings !== false && typeof chunk === 'string') { - chunk = Buffer.from(chunk, encoding); - } - return chunk; -} -Object.defineProperty(Writable.prototype, 'writableHighWaterMark', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - return this._writableState.highWaterMark; - } -}); - -// if we're already writing something, then just put this -// in the queue, and wait our turn. Otherwise, call _write -// If we return false, then we need a drain event, so set that flag. 
-function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) { - if (!isBuf) { - var newChunk = decodeChunk(state, chunk, encoding); - if (chunk !== newChunk) { - isBuf = true; - encoding = 'buffer'; - chunk = newChunk; - } - } - var len = state.objectMode ? 1 : chunk.length; - state.length += len; - var ret = state.length < state.highWaterMark; - // we must ensure that previous needDrain will not be reset to false. - if (!ret) state.needDrain = true; - if (state.writing || state.corked) { - var last = state.lastBufferedRequest; - state.lastBufferedRequest = { - chunk: chunk, - encoding: encoding, - isBuf: isBuf, - callback: cb, - next: null - }; - if (last) { - last.next = state.lastBufferedRequest; - } else { - state.bufferedRequest = state.lastBufferedRequest; - } - state.bufferedRequestCount += 1; - } else { - doWrite(stream, state, false, len, chunk, encoding, cb); - } - return ret; -} -function doWrite(stream, state, writev, len, chunk, encoding, cb) { - state.writelen = len; - state.writecb = cb; - state.writing = true; - state.sync = true; - if (state.destroyed) state.onwrite(new ERR_STREAM_DESTROYED('write'));else if (writev) stream._writev(chunk, state.onwrite);else stream._write(chunk, encoding, state.onwrite); - state.sync = false; -} -function onwriteError(stream, state, sync, er, cb) { - --state.pendingcb; - if (sync) { - // defer the callback if we are being called synchronously - // to avoid piling up things on the stack - process.nextTick(cb, er); - // this can emit finish, and it will always happen - // after error - process.nextTick(finishMaybe, stream, state); - stream._writableState.errorEmitted = true; - errorOrDestroy(stream, er); - } else { - // the caller expect this to happen before if - // it is async - cb(er); - stream._writableState.errorEmitted = true; - errorOrDestroy(stream, er); - // this can emit finish, but finish must - // always follow error - finishMaybe(stream, state); - } -} -function onwriteStateUpdate(state) { - 
state.writing = false; - state.writecb = null; - state.length -= state.writelen; - state.writelen = 0; -} -function onwrite(stream, er) { - var state = stream._writableState; - var sync = state.sync; - var cb = state.writecb; - if (typeof cb !== 'function') throw new ERR_MULTIPLE_CALLBACK(); - onwriteStateUpdate(state); - if (er) onwriteError(stream, state, sync, er, cb);else { - // Check if we're actually ready to finish, but don't emit yet - var finished = needFinish(state) || stream.destroyed; - if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) { - clearBuffer(stream, state); - } - if (sync) { - process.nextTick(afterWrite, stream, state, finished, cb); - } else { - afterWrite(stream, state, finished, cb); - } - } -} -function afterWrite(stream, state, finished, cb) { - if (!finished) onwriteDrain(stream, state); - state.pendingcb--; - cb(); - finishMaybe(stream, state); -} - -// Must force callback to be called on nextTick, so that we don't -// emit 'drain' before the write() consumer gets the 'false' return -// value, and has a chance to attach a 'drain' listener. 
-function onwriteDrain(stream, state) { - if (state.length === 0 && state.needDrain) { - state.needDrain = false; - stream.emit('drain'); - } -} - -// if there's something in the buffer waiting, then process it -function clearBuffer(stream, state) { - state.bufferProcessing = true; - var entry = state.bufferedRequest; - if (stream._writev && entry && entry.next) { - // Fast case, write everything using _writev() - var l = state.bufferedRequestCount; - var buffer = new Array(l); - var holder = state.corkedRequestsFree; - holder.entry = entry; - var count = 0; - var allBuffers = true; - while (entry) { - buffer[count] = entry; - if (!entry.isBuf) allBuffers = false; - entry = entry.next; - count += 1; - } - buffer.allBuffers = allBuffers; - doWrite(stream, state, true, state.length, buffer, '', holder.finish); - - // doWrite is almost always async, defer these to save a bit of time - // as the hot path ends with doWrite - state.pendingcb++; - state.lastBufferedRequest = null; - if (holder.next) { - state.corkedRequestsFree = holder.next; - holder.next = null; - } else { - state.corkedRequestsFree = new CorkedRequest(state); - } - state.bufferedRequestCount = 0; - } else { - // Slow case, write chunks one-by-one - while (entry) { - var chunk = entry.chunk; - var encoding = entry.encoding; - var cb = entry.callback; - var len = state.objectMode ? 1 : chunk.length; - doWrite(stream, state, false, len, chunk, encoding, cb); - entry = entry.next; - state.bufferedRequestCount--; - // if we didn't call the onwrite immediately, then - // it means that we need to wait until it does. - // also, that means that the chunk and cb are currently - // being processed, so move the buffer counter past them. 
- if (state.writing) { - break; - } - } - if (entry === null) state.lastBufferedRequest = null; - } - state.bufferedRequest = entry; - state.bufferProcessing = false; -} -Writable.prototype._write = function (chunk, encoding, cb) { - cb(new ERR_METHOD_NOT_IMPLEMENTED('_write()')); -}; -Writable.prototype._writev = null; -Writable.prototype.end = function (chunk, encoding, cb) { - var state = this._writableState; - if (typeof chunk === 'function') { - cb = chunk; - chunk = null; - encoding = null; - } else if (typeof encoding === 'function') { - cb = encoding; - encoding = null; - } - if (chunk !== null && chunk !== undefined) this.write(chunk, encoding); - - // .end() fully uncorks - if (state.corked) { - state.corked = 1; - this.uncork(); - } - - // ignore unnecessary end() calls. - if (!state.ending) endWritable(this, state, cb); - return this; -}; -Object.defineProperty(Writable.prototype, 'writableLength', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - return this._writableState.length; - } -}); -function needFinish(state) { - return state.ending && state.length === 0 && state.bufferedRequest === null && !state.finished && !state.writing; -} -function callFinal(stream, state) { - stream._final(function (err) { - state.pendingcb--; - if (err) { - errorOrDestroy(stream, err); - } - state.prefinished = true; - stream.emit('prefinish'); - finishMaybe(stream, state); - }); -} -function prefinish(stream, state) { - if (!state.prefinished && !state.finalCalled) { - if (typeof stream._final === 'function' && !state.destroyed) { - state.pendingcb++; - state.finalCalled = true; - process.nextTick(callFinal, stream, state); - } else { - state.prefinished = true; - stream.emit('prefinish'); - } - } -} -function finishMaybe(stream, state) { - var need = needFinish(state); - if (need) { - prefinish(stream, state); - if (state.pendingcb === 
0) { - state.finished = true; - stream.emit('finish'); - if (state.autoDestroy) { - // In case of duplex streams we need a way to detect - // if the readable side is ready for autoDestroy as well - var rState = stream._readableState; - if (!rState || rState.autoDestroy && rState.endEmitted) { - stream.destroy(); - } - } - } - } - return need; -} -function endWritable(stream, state, cb) { - state.ending = true; - finishMaybe(stream, state); - if (cb) { - if (state.finished) process.nextTick(cb);else stream.once('finish', cb); - } - state.ended = true; - stream.writable = false; -} -function onCorkedFinish(corkReq, state, err) { - var entry = corkReq.entry; - corkReq.entry = null; - while (entry) { - var cb = entry.callback; - state.pendingcb--; - cb(err); - entry = entry.next; - } - - // reuse the free corkReq. - state.corkedRequestsFree.next = corkReq; -} -Object.defineProperty(Writable.prototype, 'destroyed', { - // making it explicit this property is not enumerable - // because otherwise some prototype manipulation in - // userland will fail - enumerable: false, - get: function get() { - if (this._writableState === undefined) { - return false; - } - return this._writableState.destroyed; - }, - set: function set(value) { - // we ignore the value if the stream - // has not been initialized yet - if (!this._writableState) { - return; - } - - // backward compatibility, the user is explicitly - // managing destroyed - this._writableState.destroyed = value; - } -}); -Writable.prototype.destroy = destroyImpl.destroy; -Writable.prototype._undestroy = destroyImpl.undestroy; -Writable.prototype._destroy = function (err, cb) { - cb(err); -}; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/async_iterator.js b/node_modules/readable-stream/lib/internal/streams/async_iterator.js deleted file mode 100644 index 742c5a4674..0000000000 --- a/node_modules/readable-stream/lib/internal/streams/async_iterator.js +++ /dev/null @@ -1,180 +0,0 @@ 
-'use strict'; - -var _Object$setPrototypeO; -function _defineProperty(obj, key, value) { key = _toPropertyKey(key); if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } -function _toPropertyKey(arg) { var key = _toPrimitive(arg, "string"); return typeof key === "symbol" ? key : String(key); } -function _toPrimitive(input, hint) { if (typeof input !== "object" || input === null) return input; var prim = input[Symbol.toPrimitive]; if (prim !== undefined) { var res = prim.call(input, hint || "default"); if (typeof res !== "object") return res; throw new TypeError("@@toPrimitive must return a primitive value."); } return (hint === "string" ? String : Number)(input); } -var finished = require('./end-of-stream'); -var kLastResolve = Symbol('lastResolve'); -var kLastReject = Symbol('lastReject'); -var kError = Symbol('error'); -var kEnded = Symbol('ended'); -var kLastPromise = Symbol('lastPromise'); -var kHandlePromise = Symbol('handlePromise'); -var kStream = Symbol('stream'); -function createIterResult(value, done) { - return { - value: value, - done: done - }; -} -function readAndResolve(iter) { - var resolve = iter[kLastResolve]; - if (resolve !== null) { - var data = iter[kStream].read(); - // we defer if data is null - // we can be expecting either 'end' or - // 'error' - if (data !== null) { - iter[kLastPromise] = null; - iter[kLastResolve] = null; - iter[kLastReject] = null; - resolve(createIterResult(data, false)); - } - } -} -function onReadable(iter) { - // we wait for the next tick, because it might - // emit an error with process.nextTick - process.nextTick(readAndResolve, iter); -} -function wrapForNext(lastPromise, iter) { - return function (resolve, reject) { - lastPromise.then(function () { - if (iter[kEnded]) { - resolve(createIterResult(undefined, true)); - return; - } - iter[kHandlePromise](resolve, reject); - }, reject); - }; -} -var 
AsyncIteratorPrototype = Object.getPrototypeOf(function () {}); -var ReadableStreamAsyncIteratorPrototype = Object.setPrototypeOf((_Object$setPrototypeO = { - get stream() { - return this[kStream]; - }, - next: function next() { - var _this = this; - // if we have detected an error in the meanwhile - // reject straight away - var error = this[kError]; - if (error !== null) { - return Promise.reject(error); - } - if (this[kEnded]) { - return Promise.resolve(createIterResult(undefined, true)); - } - if (this[kStream].destroyed) { - // We need to defer via nextTick because if .destroy(err) is - // called, the error will be emitted via nextTick, and - // we cannot guarantee that there is no error lingering around - // waiting to be emitted. - return new Promise(function (resolve, reject) { - process.nextTick(function () { - if (_this[kError]) { - reject(_this[kError]); - } else { - resolve(createIterResult(undefined, true)); - } - }); - }); - } - - // if we have multiple next() calls - // we will wait for the previous Promise to finish - // this logic is optimized to support for await loops, - // where next() is only called once at a time - var lastPromise = this[kLastPromise]; - var promise; - if (lastPromise) { - promise = new Promise(wrapForNext(lastPromise, this)); - } else { - // fast path needed to support multiple this.push() - // without triggering the next() queue - var data = this[kStream].read(); - if (data !== null) { - return Promise.resolve(createIterResult(data, false)); - } - promise = new Promise(this[kHandlePromise]); - } - this[kLastPromise] = promise; - return promise; - } -}, _defineProperty(_Object$setPrototypeO, Symbol.asyncIterator, function () { - return this; -}), _defineProperty(_Object$setPrototypeO, "return", function _return() { - var _this2 = this; - // destroy(err, cb) is a private API - // we can guarantee we have that here, because we control the - // Readable class this is attached to - return new Promise(function (resolve, reject) { 
- _this2[kStream].destroy(null, function (err) { - if (err) { - reject(err); - return; - } - resolve(createIterResult(undefined, true)); - }); - }); -}), _Object$setPrototypeO), AsyncIteratorPrototype); -var createReadableStreamAsyncIterator = function createReadableStreamAsyncIterator(stream) { - var _Object$create; - var iterator = Object.create(ReadableStreamAsyncIteratorPrototype, (_Object$create = {}, _defineProperty(_Object$create, kStream, { - value: stream, - writable: true - }), _defineProperty(_Object$create, kLastResolve, { - value: null, - writable: true - }), _defineProperty(_Object$create, kLastReject, { - value: null, - writable: true - }), _defineProperty(_Object$create, kError, { - value: null, - writable: true - }), _defineProperty(_Object$create, kEnded, { - value: stream._readableState.endEmitted, - writable: true - }), _defineProperty(_Object$create, kHandlePromise, { - value: function value(resolve, reject) { - var data = iterator[kStream].read(); - if (data) { - iterator[kLastPromise] = null; - iterator[kLastResolve] = null; - iterator[kLastReject] = null; - resolve(createIterResult(data, false)); - } else { - iterator[kLastResolve] = resolve; - iterator[kLastReject] = reject; - } - }, - writable: true - }), _Object$create)); - iterator[kLastPromise] = null; - finished(stream, function (err) { - if (err && err.code !== 'ERR_STREAM_PREMATURE_CLOSE') { - var reject = iterator[kLastReject]; - // reject if we are waiting for data in the Promise - // returned by next() and store the error - if (reject !== null) { - iterator[kLastPromise] = null; - iterator[kLastResolve] = null; - iterator[kLastReject] = null; - reject(err); - } - iterator[kError] = err; - return; - } - var resolve = iterator[kLastResolve]; - if (resolve !== null) { - iterator[kLastPromise] = null; - iterator[kLastResolve] = null; - iterator[kLastReject] = null; - resolve(createIterResult(undefined, true)); - } - iterator[kEnded] = true; - }); - stream.on('readable', 
onReadable.bind(null, iterator)); - return iterator; -}; -module.exports = createReadableStreamAsyncIterator; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/buffer_list.js b/node_modules/readable-stream/lib/internal/streams/buffer_list.js deleted file mode 100644 index 69bda497d3..0000000000 --- a/node_modules/readable-stream/lib/internal/streams/buffer_list.js +++ /dev/null @@ -1,183 +0,0 @@ -'use strict'; - -function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); enumerableOnly && (symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; })), keys.push.apply(keys, symbols); } return keys; } -function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = null != arguments[i] ? arguments[i] : {}; i % 2 ? ownKeys(Object(source), !0).forEach(function (key) { _defineProperty(target, key, source[key]); }) : Object.getOwnPropertyDescriptors ? 
Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } return target; } -function _defineProperty(obj, key, value) { key = _toPropertyKey(key); if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } -function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, _toPropertyKey(descriptor.key), descriptor); } } -function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); Object.defineProperty(Constructor, "prototype", { writable: false }); return Constructor; } -function _toPropertyKey(arg) { var key = _toPrimitive(arg, "string"); return typeof key === "symbol" ? key : String(key); } -function _toPrimitive(input, hint) { if (typeof input !== "object" || input === null) return input; var prim = input[Symbol.toPrimitive]; if (prim !== undefined) { var res = prim.call(input, hint || "default"); if (typeof res !== "object") return res; throw new TypeError("@@toPrimitive must return a primitive value."); } return (hint === "string" ? 
String : Number)(input); } -var _require = require('buffer'), - Buffer = _require.Buffer; -var _require2 = require('util'), - inspect = _require2.inspect; -var custom = inspect && inspect.custom || 'inspect'; -function copyBuffer(src, target, offset) { - Buffer.prototype.copy.call(src, target, offset); -} -module.exports = /*#__PURE__*/function () { - function BufferList() { - _classCallCheck(this, BufferList); - this.head = null; - this.tail = null; - this.length = 0; - } - _createClass(BufferList, [{ - key: "push", - value: function push(v) { - var entry = { - data: v, - next: null - }; - if (this.length > 0) this.tail.next = entry;else this.head = entry; - this.tail = entry; - ++this.length; - } - }, { - key: "unshift", - value: function unshift(v) { - var entry = { - data: v, - next: this.head - }; - if (this.length === 0) this.tail = entry; - this.head = entry; - ++this.length; - } - }, { - key: "shift", - value: function shift() { - if (this.length === 0) return; - var ret = this.head.data; - if (this.length === 1) this.head = this.tail = null;else this.head = this.head.next; - --this.length; - return ret; - } - }, { - key: "clear", - value: function clear() { - this.head = this.tail = null; - this.length = 0; - } - }, { - key: "join", - value: function join(s) { - if (this.length === 0) return ''; - var p = this.head; - var ret = '' + p.data; - while (p = p.next) ret += s + p.data; - return ret; - } - }, { - key: "concat", - value: function concat(n) { - if (this.length === 0) return Buffer.alloc(0); - var ret = Buffer.allocUnsafe(n >>> 0); - var p = this.head; - var i = 0; - while (p) { - copyBuffer(p.data, ret, i); - i += p.data.length; - p = p.next; - } - return ret; - } - - // Consumes a specified amount of bytes or characters from the buffered data. - }, { - key: "consume", - value: function consume(n, hasStrings) { - var ret; - if (n < this.head.data.length) { - // `slice` is the same for buffers and strings. 
- ret = this.head.data.slice(0, n); - this.head.data = this.head.data.slice(n); - } else if (n === this.head.data.length) { - // First chunk is a perfect match. - ret = this.shift(); - } else { - // Result spans more than one buffer. - ret = hasStrings ? this._getString(n) : this._getBuffer(n); - } - return ret; - } - }, { - key: "first", - value: function first() { - return this.head.data; - } - - // Consumes a specified amount of characters from the buffered data. - }, { - key: "_getString", - value: function _getString(n) { - var p = this.head; - var c = 1; - var ret = p.data; - n -= ret.length; - while (p = p.next) { - var str = p.data; - var nb = n > str.length ? str.length : n; - if (nb === str.length) ret += str;else ret += str.slice(0, n); - n -= nb; - if (n === 0) { - if (nb === str.length) { - ++c; - if (p.next) this.head = p.next;else this.head = this.tail = null; - } else { - this.head = p; - p.data = str.slice(nb); - } - break; - } - ++c; - } - this.length -= c; - return ret; - } - - // Consumes a specified amount of bytes from the buffered data. - }, { - key: "_getBuffer", - value: function _getBuffer(n) { - var ret = Buffer.allocUnsafe(n); - var p = this.head; - var c = 1; - p.data.copy(ret); - n -= p.data.length; - while (p = p.next) { - var buf = p.data; - var nb = n > buf.length ? buf.length : n; - buf.copy(ret, ret.length - n, 0, nb); - n -= nb; - if (n === 0) { - if (nb === buf.length) { - ++c; - if (p.next) this.head = p.next;else this.head = this.tail = null; - } else { - this.head = p; - p.data = buf.slice(nb); - } - break; - } - ++c; - } - this.length -= c; - return ret; - } - - // Make sure the linked list only shows the minimal necessary information. - }, { - key: custom, - value: function value(_, options) { - return inspect(this, _objectSpread(_objectSpread({}, options), {}, { - // Only inspect one level. - depth: 0, - // It should not recurse. 
- customInspect: false - })); - } - }]); - return BufferList; -}(); \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/destroy.js b/node_modules/readable-stream/lib/internal/streams/destroy.js deleted file mode 100644 index 31a17c4dc4..0000000000 --- a/node_modules/readable-stream/lib/internal/streams/destroy.js +++ /dev/null @@ -1,96 +0,0 @@ -'use strict'; - -// undocumented cb() API, needed for core, not for public API -function destroy(err, cb) { - var _this = this; - var readableDestroyed = this._readableState && this._readableState.destroyed; - var writableDestroyed = this._writableState && this._writableState.destroyed; - if (readableDestroyed || writableDestroyed) { - if (cb) { - cb(err); - } else if (err) { - if (!this._writableState) { - process.nextTick(emitErrorNT, this, err); - } else if (!this._writableState.errorEmitted) { - this._writableState.errorEmitted = true; - process.nextTick(emitErrorNT, this, err); - } - } - return this; - } - - // we set destroyed to true before firing error callbacks in order - // to make it re-entrance safe in case destroy() is called within callbacks - - if (this._readableState) { - this._readableState.destroyed = true; - } - - // if this is a duplex stream mark the writable part as destroyed as well - if (this._writableState) { - this._writableState.destroyed = true; - } - this._destroy(err || null, function (err) { - if (!cb && err) { - if (!_this._writableState) { - process.nextTick(emitErrorAndCloseNT, _this, err); - } else if (!_this._writableState.errorEmitted) { - _this._writableState.errorEmitted = true; - process.nextTick(emitErrorAndCloseNT, _this, err); - } else { - process.nextTick(emitCloseNT, _this); - } - } else if (cb) { - process.nextTick(emitCloseNT, _this); - cb(err); - } else { - process.nextTick(emitCloseNT, _this); - } - }); - return this; -} -function emitErrorAndCloseNT(self, err) { - emitErrorNT(self, err); - emitCloseNT(self); -} -function 
emitCloseNT(self) { - if (self._writableState && !self._writableState.emitClose) return; - if (self._readableState && !self._readableState.emitClose) return; - self.emit('close'); -} -function undestroy() { - if (this._readableState) { - this._readableState.destroyed = false; - this._readableState.reading = false; - this._readableState.ended = false; - this._readableState.endEmitted = false; - } - if (this._writableState) { - this._writableState.destroyed = false; - this._writableState.ended = false; - this._writableState.ending = false; - this._writableState.finalCalled = false; - this._writableState.prefinished = false; - this._writableState.finished = false; - this._writableState.errorEmitted = false; - } -} -function emitErrorNT(self, err) { - self.emit('error', err); -} -function errorOrDestroy(stream, err) { - // We have tests that rely on errors being emitted - // in the same tick, so changing this is semver major. - // For now when you opt-in to autoDestroy we allow - // the error to be emitted nextTick. In a future - // semver major update we should change the default to this. - - var rState = stream._readableState; - var wState = stream._writableState; - if (rState && rState.autoDestroy || wState && wState.autoDestroy) stream.destroy(err);else stream.emit('error', err); -} -module.exports = { - destroy: destroy, - undestroy: undestroy, - errorOrDestroy: errorOrDestroy -}; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/end-of-stream.js b/node_modules/readable-stream/lib/internal/streams/end-of-stream.js deleted file mode 100644 index 59c671b5af..0000000000 --- a/node_modules/readable-stream/lib/internal/streams/end-of-stream.js +++ /dev/null @@ -1,86 +0,0 @@ -// Ported from https://github.com/mafintosh/end-of-stream with -// permission from the author, Mathias Buus (@mafintosh). 
- -'use strict'; - -var ERR_STREAM_PREMATURE_CLOSE = require('../../../errors').codes.ERR_STREAM_PREMATURE_CLOSE; -function once(callback) { - var called = false; - return function () { - if (called) return; - called = true; - for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { - args[_key] = arguments[_key]; - } - callback.apply(this, args); - }; -} -function noop() {} -function isRequest(stream) { - return stream.setHeader && typeof stream.abort === 'function'; -} -function eos(stream, opts, callback) { - if (typeof opts === 'function') return eos(stream, null, opts); - if (!opts) opts = {}; - callback = once(callback || noop); - var readable = opts.readable || opts.readable !== false && stream.readable; - var writable = opts.writable || opts.writable !== false && stream.writable; - var onlegacyfinish = function onlegacyfinish() { - if (!stream.writable) onfinish(); - }; - var writableEnded = stream._writableState && stream._writableState.finished; - var onfinish = function onfinish() { - writable = false; - writableEnded = true; - if (!readable) callback.call(stream); - }; - var readableEnded = stream._readableState && stream._readableState.endEmitted; - var onend = function onend() { - readable = false; - readableEnded = true; - if (!writable) callback.call(stream); - }; - var onerror = function onerror(err) { - callback.call(stream, err); - }; - var onclose = function onclose() { - var err; - if (readable && !readableEnded) { - if (!stream._readableState || !stream._readableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE(); - return callback.call(stream, err); - } - if (writable && !writableEnded) { - if (!stream._writableState || !stream._writableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE(); - return callback.call(stream, err); - } - }; - var onrequest = function onrequest() { - stream.req.on('finish', onfinish); - }; - if (isRequest(stream)) { - stream.on('complete', onfinish); - stream.on('abort', onclose); - 
if (stream.req) onrequest();else stream.on('request', onrequest); - } else if (writable && !stream._writableState) { - // legacy streams - stream.on('end', onlegacyfinish); - stream.on('close', onlegacyfinish); - } - stream.on('end', onend); - stream.on('finish', onfinish); - if (opts.error !== false) stream.on('error', onerror); - stream.on('close', onclose); - return function () { - stream.removeListener('complete', onfinish); - stream.removeListener('abort', onclose); - stream.removeListener('request', onrequest); - if (stream.req) stream.req.removeListener('finish', onfinish); - stream.removeListener('end', onlegacyfinish); - stream.removeListener('close', onlegacyfinish); - stream.removeListener('finish', onfinish); - stream.removeListener('end', onend); - stream.removeListener('error', onerror); - stream.removeListener('close', onclose); - }; -} -module.exports = eos; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/from-browser.js b/node_modules/readable-stream/lib/internal/streams/from-browser.js deleted file mode 100644 index a4ce56f3c9..0000000000 --- a/node_modules/readable-stream/lib/internal/streams/from-browser.js +++ /dev/null @@ -1,3 +0,0 @@ -module.exports = function () { - throw new Error('Readable.from is not available in the browser') -}; diff --git a/node_modules/readable-stream/lib/internal/streams/from.js b/node_modules/readable-stream/lib/internal/streams/from.js deleted file mode 100644 index 0a34ee92e3..0000000000 --- a/node_modules/readable-stream/lib/internal/streams/from.js +++ /dev/null @@ -1,52 +0,0 @@ -'use strict'; - -function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } -function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new 
Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } -function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); enumerableOnly && (symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; })), keys.push.apply(keys, symbols); } return keys; } -function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = null != arguments[i] ? arguments[i] : {}; i % 2 ? ownKeys(Object(source), !0).forEach(function (key) { _defineProperty(target, key, source[key]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } return target; } -function _defineProperty(obj, key, value) { key = _toPropertyKey(key); if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } -function _toPropertyKey(arg) { var key = _toPrimitive(arg, "string"); return typeof key === "symbol" ? key : String(key); } -function _toPrimitive(input, hint) { if (typeof input !== "object" || input === null) return input; var prim = input[Symbol.toPrimitive]; if (prim !== undefined) { var res = prim.call(input, hint || "default"); if (typeof res !== "object") return res; throw new TypeError("@@toPrimitive must return a primitive value."); } return (hint === "string" ? 
String : Number)(input); } -var ERR_INVALID_ARG_TYPE = require('../../../errors').codes.ERR_INVALID_ARG_TYPE; -function from(Readable, iterable, opts) { - var iterator; - if (iterable && typeof iterable.next === 'function') { - iterator = iterable; - } else if (iterable && iterable[Symbol.asyncIterator]) iterator = iterable[Symbol.asyncIterator]();else if (iterable && iterable[Symbol.iterator]) iterator = iterable[Symbol.iterator]();else throw new ERR_INVALID_ARG_TYPE('iterable', ['Iterable'], iterable); - var readable = new Readable(_objectSpread({ - objectMode: true - }, opts)); - // Reading boolean to protect against _read - // being called before last iteration completion. - var reading = false; - readable._read = function () { - if (!reading) { - reading = true; - next(); - } - }; - function next() { - return _next2.apply(this, arguments); - } - function _next2() { - _next2 = _asyncToGenerator(function* () { - try { - var _yield$iterator$next = yield iterator.next(), - value = _yield$iterator$next.value, - done = _yield$iterator$next.done; - if (done) { - readable.push(null); - } else if (readable.push(yield value)) { - next(); - } else { - reading = false; - } - } catch (err) { - readable.destroy(err); - } - }); - return _next2.apply(this, arguments); - } - return readable; -} -module.exports = from; diff --git a/node_modules/readable-stream/lib/internal/streams/pipeline.js b/node_modules/readable-stream/lib/internal/streams/pipeline.js deleted file mode 100644 index e6f39241f9..0000000000 --- a/node_modules/readable-stream/lib/internal/streams/pipeline.js +++ /dev/null @@ -1,86 +0,0 @@ -// Ported from https://github.com/mafintosh/pump with -// permission from the author, Mathias Buus (@mafintosh). 
- -'use strict'; - -var eos; -function once(callback) { - var called = false; - return function () { - if (called) return; - called = true; - callback.apply(void 0, arguments); - }; -} -var _require$codes = require('../../../errors').codes, - ERR_MISSING_ARGS = _require$codes.ERR_MISSING_ARGS, - ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED; -function noop(err) { - // Rethrow the error if it exists to avoid swallowing it - if (err) throw err; -} -function isRequest(stream) { - return stream.setHeader && typeof stream.abort === 'function'; -} -function destroyer(stream, reading, writing, callback) { - callback = once(callback); - var closed = false; - stream.on('close', function () { - closed = true; - }); - if (eos === undefined) eos = require('./end-of-stream'); - eos(stream, { - readable: reading, - writable: writing - }, function (err) { - if (err) return callback(err); - closed = true; - callback(); - }); - var destroyed = false; - return function (err) { - if (closed) return; - if (destroyed) return; - destroyed = true; - - // request.destroy just do .end - .abort is what we want - if (isRequest(stream)) return stream.abort(); - if (typeof stream.destroy === 'function') return stream.destroy(); - callback(err || new ERR_STREAM_DESTROYED('pipe')); - }; -} -function call(fn) { - fn(); -} -function pipe(from, to) { - return from.pipe(to); -} -function popCallback(streams) { - if (!streams.length) return noop; - if (typeof streams[streams.length - 1] !== 'function') return noop; - return streams.pop(); -} -function pipeline() { - for (var _len = arguments.length, streams = new Array(_len), _key = 0; _key < _len; _key++) { - streams[_key] = arguments[_key]; - } - var callback = popCallback(streams); - if (Array.isArray(streams[0])) streams = streams[0]; - if (streams.length < 2) { - throw new ERR_MISSING_ARGS('streams'); - } - var error; - var destroys = streams.map(function (stream, i) { - var reading = i < streams.length - 1; - var writing = i > 0; - 
return destroyer(stream, reading, writing, function (err) { - if (!error) error = err; - if (err) destroys.forEach(call); - if (reading) return; - destroys.forEach(call); - callback(error); - }); - }); - return streams.reduce(pipe); -} -module.exports = pipeline; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/state.js b/node_modules/readable-stream/lib/internal/streams/state.js deleted file mode 100644 index 3fbf8927e0..0000000000 --- a/node_modules/readable-stream/lib/internal/streams/state.js +++ /dev/null @@ -1,22 +0,0 @@ -'use strict'; - -var ERR_INVALID_OPT_VALUE = require('../../../errors').codes.ERR_INVALID_OPT_VALUE; -function highWaterMarkFrom(options, isDuplex, duplexKey) { - return options.highWaterMark != null ? options.highWaterMark : isDuplex ? options[duplexKey] : null; -} -function getHighWaterMark(state, options, duplexKey, isDuplex) { - var hwm = highWaterMarkFrom(options, isDuplex, duplexKey); - if (hwm != null) { - if (!(isFinite(hwm) && Math.floor(hwm) === hwm) || hwm < 0) { - var name = isDuplex ? duplexKey : 'highWaterMark'; - throw new ERR_INVALID_OPT_VALUE(name, hwm); - } - return Math.floor(hwm); - } - - // Default value - return state.objectMode ? 
16 : 16 * 1024; -} -module.exports = { - getHighWaterMark: getHighWaterMark -}; \ No newline at end of file diff --git a/node_modules/readable-stream/lib/internal/streams/stream-browser.js b/node_modules/readable-stream/lib/internal/streams/stream-browser.js deleted file mode 100644 index 9332a3fdae..0000000000 --- a/node_modules/readable-stream/lib/internal/streams/stream-browser.js +++ /dev/null @@ -1 +0,0 @@ -module.exports = require('events').EventEmitter; diff --git a/node_modules/readable-stream/lib/internal/streams/stream.js b/node_modules/readable-stream/lib/internal/streams/stream.js deleted file mode 100644 index ce2ad5b6ee..0000000000 --- a/node_modules/readable-stream/lib/internal/streams/stream.js +++ /dev/null @@ -1 +0,0 @@ -module.exports = require('stream'); diff --git a/node_modules/readable-stream/package.json b/node_modules/readable-stream/package.json deleted file mode 100644 index ade59e71aa..0000000000 --- a/node_modules/readable-stream/package.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "name": "readable-stream", - "version": "3.6.2", - "description": "Streams3, a user-land copy of the stream library from Node.js", - "main": "readable.js", - "engines": { - "node": ">= 6" - }, - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "devDependencies": { - "@babel/cli": "^7.2.0", - "@babel/core": "^7.2.0", - "@babel/polyfill": "^7.0.0", - "@babel/preset-env": "^7.2.0", - "airtap": "0.0.9", - "assert": "^1.4.0", - "bl": "^2.0.0", - "deep-strict-equal": "^0.2.0", - "events.once": "^2.0.2", - "glob": "^7.1.2", - "gunzip-maybe": "^1.4.1", - "hyperquest": "^2.1.3", - "lolex": "^2.6.0", - "nyc": "^11.0.0", - "pump": "^3.0.0", - "rimraf": "^2.6.2", - "tap": "^12.0.0", - "tape": "^4.9.0", - "tar-fs": "^1.16.2", - "util-promisify": "^2.1.0" - }, - "scripts": { - "test": "tap -J --no-esm test/parallel/*.js test/ours/*.js", - "ci": "TAP=1 tap --no-esm test/parallel/*.js test/ours/*.js | tee test.tap", - 
"test-browsers": "airtap --sauce-connect --loopback airtap.local -- test/browser.js", - "test-browser-local": "airtap --open --local -- test/browser.js", - "cover": "nyc npm test", - "report": "nyc report --reporter=lcov", - "update-browser-errors": "babel -o errors-browser.js errors.js" - }, - "repository": { - "type": "git", - "url": "git://github.com/nodejs/readable-stream" - }, - "keywords": [ - "readable", - "stream", - "pipe" - ], - "browser": { - "util": false, - "worker_threads": false, - "./errors": "./errors-browser.js", - "./readable.js": "./readable-browser.js", - "./lib/internal/streams/from.js": "./lib/internal/streams/from-browser.js", - "./lib/internal/streams/stream.js": "./lib/internal/streams/stream-browser.js" - }, - "nyc": { - "include": [ - "lib/**.js" - ] - }, - "license": "MIT" -} diff --git a/node_modules/readable-stream/readable-browser.js b/node_modules/readable-stream/readable-browser.js deleted file mode 100644 index adbf60de83..0000000000 --- a/node_modules/readable-stream/readable-browser.js +++ /dev/null @@ -1,9 +0,0 @@ -exports = module.exports = require('./lib/_stream_readable.js'); -exports.Stream = exports; -exports.Readable = exports; -exports.Writable = require('./lib/_stream_writable.js'); -exports.Duplex = require('./lib/_stream_duplex.js'); -exports.Transform = require('./lib/_stream_transform.js'); -exports.PassThrough = require('./lib/_stream_passthrough.js'); -exports.finished = require('./lib/internal/streams/end-of-stream.js'); -exports.pipeline = require('./lib/internal/streams/pipeline.js'); diff --git a/node_modules/readable-stream/readable.js b/node_modules/readable-stream/readable.js deleted file mode 100644 index 9e0ca120de..0000000000 --- a/node_modules/readable-stream/readable.js +++ /dev/null @@ -1,16 +0,0 @@ -var Stream = require('stream'); -if (process.env.READABLE_STREAM === 'disable' && Stream) { - module.exports = Stream.Readable; - Object.assign(module.exports, Stream); - module.exports.Stream = Stream; 
-} else { - exports = module.exports = require('./lib/_stream_readable.js'); - exports.Stream = Stream || exports; - exports.Readable = exports; - exports.Writable = require('./lib/_stream_writable.js'); - exports.Duplex = require('./lib/_stream_duplex.js'); - exports.Transform = require('./lib/_stream_transform.js'); - exports.PassThrough = require('./lib/_stream_passthrough.js'); - exports.finished = require('./lib/internal/streams/end-of-stream.js'); - exports.pipeline = require('./lib/internal/streams/pipeline.js'); -} diff --git a/node_modules/safe-buffer/LICENSE b/node_modules/safe-buffer/LICENSE deleted file mode 100644 index 0c068ceecb..0000000000 --- a/node_modules/safe-buffer/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) Feross Aboukhadijeh - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/node_modules/safe-buffer/README.md b/node_modules/safe-buffer/README.md deleted file mode 100644 index e9a81afd04..0000000000 --- a/node_modules/safe-buffer/README.md +++ /dev/null @@ -1,584 +0,0 @@ -# safe-buffer [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url] [![javascript style guide][standard-image]][standard-url] - -[travis-image]: https://img.shields.io/travis/feross/safe-buffer/master.svg -[travis-url]: https://travis-ci.org/feross/safe-buffer -[npm-image]: https://img.shields.io/npm/v/safe-buffer.svg -[npm-url]: https://npmjs.org/package/safe-buffer -[downloads-image]: https://img.shields.io/npm/dm/safe-buffer.svg -[downloads-url]: https://npmjs.org/package/safe-buffer -[standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg -[standard-url]: https://standardjs.com - -#### Safer Node.js Buffer API - -**Use the new Node.js Buffer APIs (`Buffer.from`, `Buffer.alloc`, -`Buffer.allocUnsafe`, `Buffer.allocUnsafeSlow`) in all versions of Node.js.** - -**Uses the built-in implementation when available.** - -## install - -``` -npm install safe-buffer -``` - -## usage - -The goal of this package is to provide a safe replacement for the node.js `Buffer`. - -It's a drop-in replacement for `Buffer`. 
You can use it by adding one `require` line to -the top of your node.js modules: - -```js -var Buffer = require('safe-buffer').Buffer - -// Existing buffer code will continue to work without issues: - -new Buffer('hey', 'utf8') -new Buffer([1, 2, 3], 'utf8') -new Buffer(obj) -new Buffer(16) // create an uninitialized buffer (potentially unsafe) - -// But you can use these new explicit APIs to make clear what you want: - -Buffer.from('hey', 'utf8') // convert from many types to a Buffer -Buffer.alloc(16) // create a zero-filled buffer (safe) -Buffer.allocUnsafe(16) // create an uninitialized buffer (potentially unsafe) -``` - -## api - -### Class Method: Buffer.from(array) - - -* `array` {Array} - -Allocates a new `Buffer` using an `array` of octets. - -```js -const buf = Buffer.from([0x62,0x75,0x66,0x66,0x65,0x72]); - // creates a new Buffer containing ASCII bytes - // ['b','u','f','f','e','r'] -``` - -A `TypeError` will be thrown if `array` is not an `Array`. - -### Class Method: Buffer.from(arrayBuffer[, byteOffset[, length]]) - - -* `arrayBuffer` {ArrayBuffer} The `.buffer` property of a `TypedArray` or - a `new ArrayBuffer()` -* `byteOffset` {Number} Default: `0` -* `length` {Number} Default: `arrayBuffer.length - byteOffset` - -When passed a reference to the `.buffer` property of a `TypedArray` instance, -the newly created `Buffer` will share the same allocated memory as the -TypedArray. - -```js -const arr = new Uint16Array(2); -arr[0] = 5000; -arr[1] = 4000; - -const buf = Buffer.from(arr.buffer); // shares the memory with arr; - -console.log(buf); - // Prints: - -// changing the TypedArray changes the Buffer also -arr[1] = 6000; - -console.log(buf); - // Prints: -``` - -The optional `byteOffset` and `length` arguments specify a memory range within -the `arrayBuffer` that will be shared by the `Buffer`. 
- -```js -const ab = new ArrayBuffer(10); -const buf = Buffer.from(ab, 0, 2); -console.log(buf.length); - // Prints: 2 -``` - -A `TypeError` will be thrown if `arrayBuffer` is not an `ArrayBuffer`. - -### Class Method: Buffer.from(buffer) - - -* `buffer` {Buffer} - -Copies the passed `buffer` data onto a new `Buffer` instance. - -```js -const buf1 = Buffer.from('buffer'); -const buf2 = Buffer.from(buf1); - -buf1[0] = 0x61; -console.log(buf1.toString()); - // 'auffer' -console.log(buf2.toString()); - // 'buffer' (copy is not changed) -``` - -A `TypeError` will be thrown if `buffer` is not a `Buffer`. - -### Class Method: Buffer.from(str[, encoding]) - - -* `str` {String} String to encode. -* `encoding` {String} Encoding to use, Default: `'utf8'` - -Creates a new `Buffer` containing the given JavaScript string `str`. If -provided, the `encoding` parameter identifies the character encoding. -If not provided, `encoding` defaults to `'utf8'`. - -```js -const buf1 = Buffer.from('this is a tést'); -console.log(buf1.toString()); - // prints: this is a tést -console.log(buf1.toString('ascii')); - // prints: this is a tC)st - -const buf2 = Buffer.from('7468697320697320612074c3a97374', 'hex'); -console.log(buf2.toString()); - // prints: this is a tést -``` - -A `TypeError` will be thrown if `str` is not a string. - -### Class Method: Buffer.alloc(size[, fill[, encoding]]) - - -* `size` {Number} -* `fill` {Value} Default: `undefined` -* `encoding` {String} Default: `utf8` - -Allocates a new `Buffer` of `size` bytes. If `fill` is `undefined`, the -`Buffer` will be *zero-filled*. - -```js -const buf = Buffer.alloc(5); -console.log(buf); - // -``` - -The `size` must be less than or equal to the value of -`require('buffer').kMaxLength` (on 64-bit architectures, `kMaxLength` is -`(2^31)-1`). Otherwise, a [`RangeError`][] is thrown. A zero-length Buffer will -be created if a `size` less than or equal to 0 is specified. 
- -If `fill` is specified, the allocated `Buffer` will be initialized by calling -`buf.fill(fill)`. See [`buf.fill()`][] for more information. - -```js -const buf = Buffer.alloc(5, 'a'); -console.log(buf); - // -``` - -If both `fill` and `encoding` are specified, the allocated `Buffer` will be -initialized by calling `buf.fill(fill, encoding)`. For example: - -```js -const buf = Buffer.alloc(11, 'aGVsbG8gd29ybGQ=', 'base64'); -console.log(buf); - // -``` - -Calling `Buffer.alloc(size)` can be significantly slower than the alternative -`Buffer.allocUnsafe(size)` but ensures that the newly created `Buffer` instance -contents will *never contain sensitive data*. - -A `TypeError` will be thrown if `size` is not a number. - -### Class Method: Buffer.allocUnsafe(size) - - -* `size` {Number} - -Allocates a new *non-zero-filled* `Buffer` of `size` bytes. The `size` must -be less than or equal to the value of `require('buffer').kMaxLength` (on 64-bit -architectures, `kMaxLength` is `(2^31)-1`). Otherwise, a [`RangeError`][] is -thrown. A zero-length Buffer will be created if a `size` less than or equal to -0 is specified. - -The underlying memory for `Buffer` instances created in this way is *not -initialized*. The contents of the newly created `Buffer` are unknown and -*may contain sensitive data*. Use [`buf.fill(0)`][] to initialize such -`Buffer` instances to zeroes. - -```js -const buf = Buffer.allocUnsafe(5); -console.log(buf); - // - // (octets will be different, every time) -buf.fill(0); -console.log(buf); - // -``` - -A `TypeError` will be thrown if `size` is not a number. - -Note that the `Buffer` module pre-allocates an internal `Buffer` instance of -size `Buffer.poolSize` that is used as a pool for the fast allocation of new -`Buffer` instances created using `Buffer.allocUnsafe(size)` (and the deprecated -`new Buffer(size)` constructor) only when `size` is less than or equal to -`Buffer.poolSize >> 1` (floor of `Buffer.poolSize` divided by two). 
The default -value of `Buffer.poolSize` is `8192` but can be modified. - -Use of this pre-allocated internal memory pool is a key difference between -calling `Buffer.alloc(size, fill)` vs. `Buffer.allocUnsafe(size).fill(fill)`. -Specifically, `Buffer.alloc(size, fill)` will *never* use the internal Buffer -pool, while `Buffer.allocUnsafe(size).fill(fill)` *will* use the internal -Buffer pool if `size` is less than or equal to half `Buffer.poolSize`. The -difference is subtle but can be important when an application requires the -additional performance that `Buffer.allocUnsafe(size)` provides. - -### Class Method: Buffer.allocUnsafeSlow(size) - - -* `size` {Number} - -Allocates a new *non-zero-filled* and non-pooled `Buffer` of `size` bytes. The -`size` must be less than or equal to the value of -`require('buffer').kMaxLength` (on 64-bit architectures, `kMaxLength` is -`(2^31)-1`). Otherwise, a [`RangeError`][] is thrown. A zero-length Buffer will -be created if a `size` less than or equal to 0 is specified. - -The underlying memory for `Buffer` instances created in this way is *not -initialized*. The contents of the newly created `Buffer` are unknown and -*may contain sensitive data*. Use [`buf.fill(0)`][] to initialize such -`Buffer` instances to zeroes. - -When using `Buffer.allocUnsafe()` to allocate new `Buffer` instances, -allocations under 4KB are, by default, sliced from a single pre-allocated -`Buffer`. This allows applications to avoid the garbage collection overhead of -creating many individually allocated Buffers. This approach improves both -performance and memory usage by eliminating the need to track and cleanup as -many `Persistent` objects. - -However, in the case where a developer may need to retain a small chunk of -memory from a pool for an indeterminate amount of time, it may be appropriate -to create an un-pooled Buffer instance using `Buffer.allocUnsafeSlow()` then -copy out the relevant bits. 
- -```js -// need to keep around a few small chunks of memory -const store = []; - -socket.on('readable', () => { - const data = socket.read(); - // allocate for retained data - const sb = Buffer.allocUnsafeSlow(10); - // copy the data into the new allocation - data.copy(sb, 0, 0, 10); - store.push(sb); -}); -``` - -Use of `Buffer.allocUnsafeSlow()` should be used only as a last resort *after* -a developer has observed undue memory retention in their applications. - -A `TypeError` will be thrown if `size` is not a number. - -### All the Rest - -The rest of the `Buffer` API is exactly the same as in node.js. -[See the docs](https://nodejs.org/api/buffer.html). - - -## Related links - -- [Node.js issue: Buffer(number) is unsafe](https://github.com/nodejs/node/issues/4660) -- [Node.js Enhancement Proposal: Buffer.from/Buffer.alloc/Buffer.zalloc/Buffer() soft-deprecate](https://github.com/nodejs/node-eps/pull/4) - -## Why is `Buffer` unsafe? - -Today, the node.js `Buffer` constructor is overloaded to handle many different argument -types like `String`, `Array`, `Object`, `TypedArrayView` (`Uint8Array`, etc.), -`ArrayBuffer`, and also `Number`. - -The API is optimized for convenience: you can throw any type at it, and it will try to do -what you want. - -Because the Buffer constructor is so powerful, you often see code like this: - -```js -// Convert UTF-8 strings to hex -function toHex (str) { - return new Buffer(str).toString('hex') -} -``` - -***But what happens if `toHex` is called with a `Number` argument?*** - -### Remote Memory Disclosure - -If an attacker can make your program call the `Buffer` constructor with a `Number` -argument, then they can make it allocate uninitialized memory from the node.js process. -This could potentially disclose TLS private keys, user data, or database passwords. - -When the `Buffer` constructor is passed a `Number` argument, it returns an -**UNINITIALIZED** block of memory of the specified `size`. 
When you create a `Buffer` like -this, you **MUST** overwrite the contents before returning it to the user. - -From the [node.js docs](https://nodejs.org/api/buffer.html#buffer_new_buffer_size): - -> `new Buffer(size)` -> -> - `size` Number -> -> The underlying memory for `Buffer` instances created in this way is not initialized. -> **The contents of a newly created `Buffer` are unknown and could contain sensitive -> data.** Use `buf.fill(0)` to initialize a Buffer to zeroes. - -(Emphasis our own.) - -Whenever the programmer intended to create an uninitialized `Buffer` you often see code -like this: - -```js -var buf = new Buffer(16) - -// Immediately overwrite the uninitialized buffer with data from another buffer -for (var i = 0; i < buf.length; i++) { - buf[i] = otherBuf[i] -} -``` - - -### Would this ever be a problem in real code? - -Yes. It's surprisingly common to forget to check the type of your variables in a -dynamically-typed language like JavaScript. - -Usually the consequences of assuming the wrong type is that your program crashes with an -uncaught exception. But the failure mode for forgetting to check the type of arguments to -the `Buffer` constructor is more catastrophic. - -Here's an example of a vulnerable service that takes a JSON payload and converts it to -hex: - -```js -// Take a JSON payload {str: "some string"} and convert it to hex -var server = http.createServer(function (req, res) { - var data = '' - req.setEncoding('utf8') - req.on('data', function (chunk) { - data += chunk - }) - req.on('end', function () { - var body = JSON.parse(data) - res.end(new Buffer(body.str).toString('hex')) - }) -}) - -server.listen(8080) -``` - -In this example, an http client just has to send: - -```json -{ - "str": 1000 -} -``` - -and it will get back 1,000 bytes of uninitialized memory from the server. - -This is a very serious bug. 
It's similar in severity to the -[the Heartbleed bug](http://heartbleed.com/) that allowed disclosure of OpenSSL process -memory by remote attackers. - - -### Which real-world packages were vulnerable? - -#### [`bittorrent-dht`](https://www.npmjs.com/package/bittorrent-dht) - -[Mathias Buus](https://github.com/mafintosh) and I -([Feross Aboukhadijeh](http://feross.org/)) found this issue in one of our own packages, -[`bittorrent-dht`](https://www.npmjs.com/package/bittorrent-dht). The bug would allow -anyone on the internet to send a series of messages to a user of `bittorrent-dht` and get -them to reveal 20 bytes at a time of uninitialized memory from the node.js process. - -Here's -[the commit](https://github.com/feross/bittorrent-dht/commit/6c7da04025d5633699800a99ec3fbadf70ad35b8) -that fixed it. We released a new fixed version, created a -[Node Security Project disclosure](https://nodesecurity.io/advisories/68), and deprecated all -vulnerable versions on npm so users will get a warning to upgrade to a newer version. - -#### [`ws`](https://www.npmjs.com/package/ws) - -That got us wondering if there were other vulnerable packages. Sure enough, within a short -period of time, we found the same issue in [`ws`](https://www.npmjs.com/package/ws), the -most popular WebSocket implementation in node.js. - -If certain APIs were called with `Number` parameters instead of `String` or `Buffer` as -expected, then uninitialized server memory would be disclosed to the remote peer. - -These were the vulnerable methods: - -```js -socket.send(number) -socket.ping(number) -socket.pong(number) -``` - -Here's a vulnerable socket server with some echo functionality: - -```js -server.on('connection', function (socket) { - socket.on('message', function (message) { - message = JSON.parse(message) - if (message.type === 'echo') { - socket.send(message.data) // send back the user's message - } - }) -}) -``` - -`socket.send(number)` called on the server, will disclose server memory. 
- -Here's [the release](https://github.com/websockets/ws/releases/tag/1.0.1) where the issue -was fixed, with a more detailed explanation. Props to -[Arnout Kazemier](https://github.com/3rd-Eden) for the quick fix. Here's the -[Node Security Project disclosure](https://nodesecurity.io/advisories/67). - - -### What's the solution? - -It's important that node.js offers a fast way to get memory otherwise performance-critical -applications would needlessly get a lot slower. - -But we need a better way to *signal our intent* as programmers. **When we want -uninitialized memory, we should request it explicitly.** - -Sensitive functionality should not be packed into a developer-friendly API that loosely -accepts many different types. This type of API encourages the lazy practice of passing -variables in without checking the type very carefully. - -#### A new API: `Buffer.allocUnsafe(number)` - -The functionality of creating buffers with uninitialized memory should be part of another -API. We propose `Buffer.allocUnsafe(number)`. This way, it's not part of an API that -frequently gets user input of all sorts of different types passed into it. - -```js -var buf = Buffer.allocUnsafe(16) // careful, uninitialized memory! - -// Immediately overwrite the uninitialized buffer with data from another buffer -for (var i = 0; i < buf.length; i++) { - buf[i] = otherBuf[i] -} -``` - - -### How do we fix node.js core? - -We sent [a PR to node.js core](https://github.com/nodejs/node/pull/4514) (merged as -`semver-major`) which defends against one case: - -```js -var str = 16 -new Buffer(str, 'utf8') -``` - -In this situation, it's implied that the programmer intended the first argument to be a -string, since they passed an encoding as a second argument. Today, node.js will allocate -uninitialized memory in the case of `new Buffer(number, encoding)`, which is probably not -what the programmer intended. 
- -But this is only a partial solution, since if the programmer does `new Buffer(variable)` -(without an `encoding` parameter) there's no way to know what they intended. If `variable` -is sometimes a number, then uninitialized memory will sometimes be returned. - -### What's the real long-term fix? - -We could deprecate and remove `new Buffer(number)` and use `Buffer.allocUnsafe(number)` when -we need uninitialized memory. But that would break 1000s of packages. - -~~We believe the best solution is to:~~ - -~~1. Change `new Buffer(number)` to return safe, zeroed-out memory~~ - -~~2. Create a new API for creating uninitialized Buffers. We propose: `Buffer.allocUnsafe(number)`~~ - -#### Update - -We now support adding three new APIs: - -- `Buffer.from(value)` - convert from any type to a buffer -- `Buffer.alloc(size)` - create a zero-filled buffer -- `Buffer.allocUnsafe(size)` - create an uninitialized buffer with given size - -This solves the core problem that affected `ws` and `bittorrent-dht` which is -`Buffer(variable)` getting tricked into taking a number argument. - -This way, existing code continues working and the impact on the npm ecosystem will be -minimal. Over time, npm maintainers can migrate performance-critical code to use -`Buffer.allocUnsafe(number)` instead of `new Buffer(number)`. - - -### Conclusion - -We think there's a serious design issue with the `Buffer` API as it exists today. It -promotes insecure software by putting high-risk functionality into a convenient API -with friendly "developer ergonomics". - -This wasn't merely a theoretical exercise because we found the issue in some of the -most popular npm packages. - -Fortunately, there's an easy fix that can be applied today. Use `safe-buffer` in place of -`buffer`. - -```js -var Buffer = require('safe-buffer').Buffer -``` - -Eventually, we hope that node.js core can switch to this new, safer behavior. We believe -the impact on the ecosystem would be minimal since it's not a breaking change. 
-Well-maintained, popular packages would be updated to use `Buffer.alloc` quickly, while -older, insecure packages would magically become safe from this attack vector. - - -## links - -- [Node.js PR: buffer: throw if both length and enc are passed](https://github.com/nodejs/node/pull/4514) -- [Node Security Project disclosure for `ws`](https://nodesecurity.io/advisories/67) -- [Node Security Project disclosure for`bittorrent-dht`](https://nodesecurity.io/advisories/68) - - -## credit - -The original issues in `bittorrent-dht` -([disclosure](https://nodesecurity.io/advisories/68)) and -`ws` ([disclosure](https://nodesecurity.io/advisories/67)) were discovered by -[Mathias Buus](https://github.com/mafintosh) and -[Feross Aboukhadijeh](http://feross.org/). - -Thanks to [Adam Baldwin](https://github.com/evilpacket) for helping disclose these issues -and for his work running the [Node Security Project](https://nodesecurity.io/). - -Thanks to [John Hiesey](https://github.com/jhiesey) for proofreading this README and -auditing the code. - - -## license - -MIT. 
Copyright (C) [Feross Aboukhadijeh](http://feross.org) diff --git a/node_modules/safe-buffer/index.d.ts b/node_modules/safe-buffer/index.d.ts deleted file mode 100644 index e9fed809a5..0000000000 --- a/node_modules/safe-buffer/index.d.ts +++ /dev/null @@ -1,187 +0,0 @@ -declare module "safe-buffer" { - export class Buffer { - length: number - write(string: string, offset?: number, length?: number, encoding?: string): number; - toString(encoding?: string, start?: number, end?: number): string; - toJSON(): { type: 'Buffer', data: any[] }; - equals(otherBuffer: Buffer): boolean; - compare(otherBuffer: Buffer, targetStart?: number, targetEnd?: number, sourceStart?: number, sourceEnd?: number): number; - copy(targetBuffer: Buffer, targetStart?: number, sourceStart?: number, sourceEnd?: number): number; - slice(start?: number, end?: number): Buffer; - writeUIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; - writeUIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; - writeIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; - writeIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; - readUIntLE(offset: number, byteLength: number, noAssert?: boolean): number; - readUIntBE(offset: number, byteLength: number, noAssert?: boolean): number; - readIntLE(offset: number, byteLength: number, noAssert?: boolean): number; - readIntBE(offset: number, byteLength: number, noAssert?: boolean): number; - readUInt8(offset: number, noAssert?: boolean): number; - readUInt16LE(offset: number, noAssert?: boolean): number; - readUInt16BE(offset: number, noAssert?: boolean): number; - readUInt32LE(offset: number, noAssert?: boolean): number; - readUInt32BE(offset: number, noAssert?: boolean): number; - readInt8(offset: number, noAssert?: boolean): number; - readInt16LE(offset: number, noAssert?: boolean): number; - readInt16BE(offset: number, noAssert?: 
boolean): number; - readInt32LE(offset: number, noAssert?: boolean): number; - readInt32BE(offset: number, noAssert?: boolean): number; - readFloatLE(offset: number, noAssert?: boolean): number; - readFloatBE(offset: number, noAssert?: boolean): number; - readDoubleLE(offset: number, noAssert?: boolean): number; - readDoubleBE(offset: number, noAssert?: boolean): number; - swap16(): Buffer; - swap32(): Buffer; - swap64(): Buffer; - writeUInt8(value: number, offset: number, noAssert?: boolean): number; - writeUInt16LE(value: number, offset: number, noAssert?: boolean): number; - writeUInt16BE(value: number, offset: number, noAssert?: boolean): number; - writeUInt32LE(value: number, offset: number, noAssert?: boolean): number; - writeUInt32BE(value: number, offset: number, noAssert?: boolean): number; - writeInt8(value: number, offset: number, noAssert?: boolean): number; - writeInt16LE(value: number, offset: number, noAssert?: boolean): number; - writeInt16BE(value: number, offset: number, noAssert?: boolean): number; - writeInt32LE(value: number, offset: number, noAssert?: boolean): number; - writeInt32BE(value: number, offset: number, noAssert?: boolean): number; - writeFloatLE(value: number, offset: number, noAssert?: boolean): number; - writeFloatBE(value: number, offset: number, noAssert?: boolean): number; - writeDoubleLE(value: number, offset: number, noAssert?: boolean): number; - writeDoubleBE(value: number, offset: number, noAssert?: boolean): number; - fill(value: any, offset?: number, end?: number): this; - indexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number; - lastIndexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number; - includes(value: string | number | Buffer, byteOffset?: number, encoding?: string): boolean; - - /** - * Allocates a new buffer containing the given {str}. - * - * @param str String to store in buffer. - * @param encoding encoding to use, optional. 
Default is 'utf8' - */ - constructor (str: string, encoding?: string); - /** - * Allocates a new buffer of {size} octets. - * - * @param size count of octets to allocate. - */ - constructor (size: number); - /** - * Allocates a new buffer containing the given {array} of octets. - * - * @param array The octets to store. - */ - constructor (array: Uint8Array); - /** - * Produces a Buffer backed by the same allocated memory as - * the given {ArrayBuffer}. - * - * - * @param arrayBuffer The ArrayBuffer with which to share memory. - */ - constructor (arrayBuffer: ArrayBuffer); - /** - * Allocates a new buffer containing the given {array} of octets. - * - * @param array The octets to store. - */ - constructor (array: any[]); - /** - * Copies the passed {buffer} data onto a new {Buffer} instance. - * - * @param buffer The buffer to copy. - */ - constructor (buffer: Buffer); - prototype: Buffer; - /** - * Allocates a new Buffer using an {array} of octets. - * - * @param array - */ - static from(array: any[]): Buffer; - /** - * When passed a reference to the .buffer property of a TypedArray instance, - * the newly created Buffer will share the same allocated memory as the TypedArray. - * The optional {byteOffset} and {length} arguments specify a memory range - * within the {arrayBuffer} that will be shared by the Buffer. - * - * @param arrayBuffer The .buffer property of a TypedArray or a new ArrayBuffer() - * @param byteOffset - * @param length - */ - static from(arrayBuffer: ArrayBuffer, byteOffset?: number, length?: number): Buffer; - /** - * Copies the passed {buffer} data onto a new Buffer instance. - * - * @param buffer - */ - static from(buffer: Buffer): Buffer; - /** - * Creates a new Buffer containing the given JavaScript string {str}. - * If provided, the {encoding} parameter identifies the character encoding. - * If not provided, {encoding} defaults to 'utf8'. 
- * - * @param str - */ - static from(str: string, encoding?: string): Buffer; - /** - * Returns true if {obj} is a Buffer - * - * @param obj object to test. - */ - static isBuffer(obj: any): obj is Buffer; - /** - * Returns true if {encoding} is a valid encoding argument. - * Valid string encodings in Node 0.12: 'ascii'|'utf8'|'utf16le'|'ucs2'(alias of 'utf16le')|'base64'|'binary'(deprecated)|'hex' - * - * @param encoding string to test. - */ - static isEncoding(encoding: string): boolean; - /** - * Gives the actual byte length of a string. encoding defaults to 'utf8'. - * This is not the same as String.prototype.length since that returns the number of characters in a string. - * - * @param string string to test. - * @param encoding encoding used to evaluate (defaults to 'utf8') - */ - static byteLength(string: string, encoding?: string): number; - /** - * Returns a buffer which is the result of concatenating all the buffers in the list together. - * - * If the list has no items, or if the totalLength is 0, then it returns a zero-length buffer. - * If the list has exactly one item, then the first item of the list is returned. - * If the list has more than one item, then a new Buffer is created. - * - * @param list An array of Buffer objects to concatenate - * @param totalLength Total length of the buffers when concatenated. - * If totalLength is not provided, it is read from the buffers in the list. However, this adds an additional loop to the function, so it is faster to provide the length explicitly. - */ - static concat(list: Buffer[], totalLength?: number): Buffer; - /** - * The same as buf1.compare(buf2). - */ - static compare(buf1: Buffer, buf2: Buffer): number; - /** - * Allocates a new buffer of {size} octets. - * - * @param size count of octets to allocate. - * @param fill if specified, buffer will be initialized by calling buf.fill(fill). - * If parameter is omitted, buffer will be filled with zeros. 
- * @param encoding encoding used for call to buf.fill while initalizing - */ - static alloc(size: number, fill?: string | Buffer | number, encoding?: string): Buffer; - /** - * Allocates a new buffer of {size} octets, leaving memory not initialized, so the contents - * of the newly created Buffer are unknown and may contain sensitive data. - * - * @param size count of octets to allocate - */ - static allocUnsafe(size: number): Buffer; - /** - * Allocates a new non-pooled buffer of {size} octets, leaving memory not initialized, so the contents - * of the newly created Buffer are unknown and may contain sensitive data. - * - * @param size count of octets to allocate - */ - static allocUnsafeSlow(size: number): Buffer; - } -} \ No newline at end of file diff --git a/node_modules/safe-buffer/index.js b/node_modules/safe-buffer/index.js deleted file mode 100644 index f8d3ec9885..0000000000 --- a/node_modules/safe-buffer/index.js +++ /dev/null @@ -1,65 +0,0 @@ -/*! safe-buffer. MIT License. Feross Aboukhadijeh */ -/* eslint-disable node/no-deprecated-api */ -var buffer = require('buffer') -var Buffer = buffer.Buffer - -// alternative to using Object.keys for old browsers -function copyProps (src, dst) { - for (var key in src) { - dst[key] = src[key] - } -} -if (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) { - module.exports = buffer -} else { - // Copy properties from require('buffer') - copyProps(buffer, exports) - exports.Buffer = SafeBuffer -} - -function SafeBuffer (arg, encodingOrOffset, length) { - return Buffer(arg, encodingOrOffset, length) -} - -SafeBuffer.prototype = Object.create(Buffer.prototype) - -// Copy static methods from Buffer -copyProps(Buffer, SafeBuffer) - -SafeBuffer.from = function (arg, encodingOrOffset, length) { - if (typeof arg === 'number') { - throw new TypeError('Argument must not be a number') - } - return Buffer(arg, encodingOrOffset, length) -} - -SafeBuffer.alloc = function (size, fill, encoding) { - if 
(typeof size !== 'number') { - throw new TypeError('Argument must be a number') - } - var buf = Buffer(size) - if (fill !== undefined) { - if (typeof encoding === 'string') { - buf.fill(fill, encoding) - } else { - buf.fill(fill) - } - } else { - buf.fill(0) - } - return buf -} - -SafeBuffer.allocUnsafe = function (size) { - if (typeof size !== 'number') { - throw new TypeError('Argument must be a number') - } - return Buffer(size) -} - -SafeBuffer.allocUnsafeSlow = function (size) { - if (typeof size !== 'number') { - throw new TypeError('Argument must be a number') - } - return buffer.SlowBuffer(size) -} diff --git a/node_modules/safe-buffer/package.json b/node_modules/safe-buffer/package.json deleted file mode 100644 index f2869e2564..0000000000 --- a/node_modules/safe-buffer/package.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "safe-buffer", - "description": "Safer Node.js Buffer API", - "version": "5.2.1", - "author": { - "name": "Feross Aboukhadijeh", - "email": "feross@feross.org", - "url": "https://feross.org" - }, - "bugs": { - "url": "https://github.com/feross/safe-buffer/issues" - }, - "devDependencies": { - "standard": "*", - "tape": "^5.0.0" - }, - "homepage": "https://github.com/feross/safe-buffer", - "keywords": [ - "buffer", - "buffer allocate", - "node security", - "safe", - "safe-buffer", - "security", - "uninitialized" - ], - "license": "MIT", - "main": "index.js", - "types": "index.d.ts", - "repository": { - "type": "git", - "url": "git://github.com/feross/safe-buffer.git" - }, - "scripts": { - "test": "standard && tape test/*.js" - }, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] -} diff --git a/node_modules/string_decoder/LICENSE b/node_modules/string_decoder/LICENSE deleted file mode 100644 index 778edb2073..0000000000 --- 
a/node_modules/string_decoder/LICENSE +++ /dev/null @@ -1,48 +0,0 @@ -Node.js is licensed for use as follows: - -""" -Copyright Node.js contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" - -This license applies to parts of Node.js originating from the -https://github.com/joyent/node repository: - -""" -Copyright Joyent, Inc. and other Node contributors. All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" - diff --git a/node_modules/string_decoder/README.md b/node_modules/string_decoder/README.md deleted file mode 100644 index 5fd58315ed..0000000000 --- a/node_modules/string_decoder/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# string_decoder - -***Node-core v8.9.4 string_decoder for userland*** - - -[![NPM](https://nodei.co/npm/string_decoder.png?downloads=true&downloadRank=true)](https://nodei.co/npm/string_decoder/) -[![NPM](https://nodei.co/npm-dl/string_decoder.png?&months=6&height=3)](https://nodei.co/npm/string_decoder/) - - -```bash -npm install --save string_decoder -``` - -***Node-core string_decoder for userland*** - -This package is a mirror of the string_decoder implementation in Node-core. - -Full documentation may be found on the [Node.js website](https://nodejs.org/dist/v8.9.4/docs/api/). - -As of version 1.0.0 **string_decoder** uses semantic versioning. - -## Previous versions - -Previous version numbers match the versions found in Node core, e.g. 0.10.24 matches Node 0.10.24, likewise 0.11.10 matches Node 0.11.10. - -## Update - -The *build/* directory contains a build script that will scrape the source from the [nodejs/node](https://github.com/nodejs/node) repo given a specific Node version. - -## Streams Working Group - -`string_decoder` is maintained by the Streams Working Group, which -oversees the development and maintenance of the Streams API within -Node.js. The responsibilities of the Streams Working Group include: - -* Addressing stream issues on the Node.js issue tracker. 
-* Authoring and editing stream documentation within the Node.js project. -* Reviewing changes to stream subclasses within the Node.js project. -* Redirecting changes to streams from the Node.js project to this - project. -* Assisting in the implementation of stream providers within Node.js. -* Recommending versions of `readable-stream` to be included in Node.js. -* Messaging about the future of streams to give the community advance - notice of changes. - -See [readable-stream](https://github.com/nodejs/readable-stream) for -more details. diff --git a/node_modules/string_decoder/lib/string_decoder.js b/node_modules/string_decoder/lib/string_decoder.js deleted file mode 100644 index 2e89e63f79..0000000000 --- a/node_modules/string_decoder/lib/string_decoder.js +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -'use strict'; - -/**/ - -var Buffer = require('safe-buffer').Buffer; -/**/ - -var isEncoding = Buffer.isEncoding || function (encoding) { - encoding = '' + encoding; - switch (encoding && encoding.toLowerCase()) { - case 'hex':case 'utf8':case 'utf-8':case 'ascii':case 'binary':case 'base64':case 'ucs2':case 'ucs-2':case 'utf16le':case 'utf-16le':case 'raw': - return true; - default: - return false; - } -}; - -function _normalizeEncoding(enc) { - if (!enc) return 'utf8'; - var retried; - while (true) { - switch (enc) { - case 'utf8': - case 'utf-8': - return 'utf8'; - case 'ucs2': - case 'ucs-2': - case 'utf16le': - case 'utf-16le': - return 'utf16le'; - case 'latin1': - case 'binary': - return 'latin1'; - case 'base64': - case 'ascii': - case 'hex': - return enc; - default: - if (retried) return; // undefined - enc = ('' + enc).toLowerCase(); - retried = true; - } - } -}; - -// Do not cache `Buffer.isEncoding` when checking encoding names as some -// modules monkey-patch it to support additional encodings -function normalizeEncoding(enc) { - var nenc = _normalizeEncoding(enc); - if (typeof nenc !== 'string' && (Buffer.isEncoding === isEncoding || !isEncoding(enc))) throw new Error('Unknown encoding: ' + enc); - return nenc || enc; -} - -// StringDecoder provides an interface for efficiently splitting a series of -// buffers into a series of JS strings without breaking apart multi-byte -// characters. 
-exports.StringDecoder = StringDecoder; -function StringDecoder(encoding) { - this.encoding = normalizeEncoding(encoding); - var nb; - switch (this.encoding) { - case 'utf16le': - this.text = utf16Text; - this.end = utf16End; - nb = 4; - break; - case 'utf8': - this.fillLast = utf8FillLast; - nb = 4; - break; - case 'base64': - this.text = base64Text; - this.end = base64End; - nb = 3; - break; - default: - this.write = simpleWrite; - this.end = simpleEnd; - return; - } - this.lastNeed = 0; - this.lastTotal = 0; - this.lastChar = Buffer.allocUnsafe(nb); -} - -StringDecoder.prototype.write = function (buf) { - if (buf.length === 0) return ''; - var r; - var i; - if (this.lastNeed) { - r = this.fillLast(buf); - if (r === undefined) return ''; - i = this.lastNeed; - this.lastNeed = 0; - } else { - i = 0; - } - if (i < buf.length) return r ? r + this.text(buf, i) : this.text(buf, i); - return r || ''; -}; - -StringDecoder.prototype.end = utf8End; - -// Returns only complete characters in a Buffer -StringDecoder.prototype.text = utf8Text; - -// Attempts to complete a partial non-UTF-8 character using bytes from a Buffer -StringDecoder.prototype.fillLast = function (buf) { - if (this.lastNeed <= buf.length) { - buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, this.lastNeed); - return this.lastChar.toString(this.encoding, 0, this.lastTotal); - } - buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, buf.length); - this.lastNeed -= buf.length; -}; - -// Checks the type of a UTF-8 byte, whether it's ASCII, a leading byte, or a -// continuation byte. If an invalid byte is detected, -2 is returned. -function utf8CheckByte(byte) { - if (byte <= 0x7F) return 0;else if (byte >> 5 === 0x06) return 2;else if (byte >> 4 === 0x0E) return 3;else if (byte >> 3 === 0x1E) return 4; - return byte >> 6 === 0x02 ? -1 : -2; -} - -// Checks at most 3 bytes at the end of a Buffer in order to detect an -// incomplete multi-byte UTF-8 character. 
The total number of bytes (2, 3, or 4) -// needed to complete the UTF-8 character (if applicable) are returned. -function utf8CheckIncomplete(self, buf, i) { - var j = buf.length - 1; - if (j < i) return 0; - var nb = utf8CheckByte(buf[j]); - if (nb >= 0) { - if (nb > 0) self.lastNeed = nb - 1; - return nb; - } - if (--j < i || nb === -2) return 0; - nb = utf8CheckByte(buf[j]); - if (nb >= 0) { - if (nb > 0) self.lastNeed = nb - 2; - return nb; - } - if (--j < i || nb === -2) return 0; - nb = utf8CheckByte(buf[j]); - if (nb >= 0) { - if (nb > 0) { - if (nb === 2) nb = 0;else self.lastNeed = nb - 3; - } - return nb; - } - return 0; -} - -// Validates as many continuation bytes for a multi-byte UTF-8 character as -// needed or are available. If we see a non-continuation byte where we expect -// one, we "replace" the validated continuation bytes we've seen so far with -// a single UTF-8 replacement character ('\ufffd'), to match v8's UTF-8 decoding -// behavior. The continuation byte check is included three times in the case -// where all of the continuation bytes for a character exist in the same buffer. -// It is also done this way as a slight performance increase instead of using a -// loop. -function utf8CheckExtraBytes(self, buf, p) { - if ((buf[0] & 0xC0) !== 0x80) { - self.lastNeed = 0; - return '\ufffd'; - } - if (self.lastNeed > 1 && buf.length > 1) { - if ((buf[1] & 0xC0) !== 0x80) { - self.lastNeed = 1; - return '\ufffd'; - } - if (self.lastNeed > 2 && buf.length > 2) { - if ((buf[2] & 0xC0) !== 0x80) { - self.lastNeed = 2; - return '\ufffd'; - } - } - } -} - -// Attempts to complete a multi-byte UTF-8 character using bytes from a Buffer. 
-function utf8FillLast(buf) { - var p = this.lastTotal - this.lastNeed; - var r = utf8CheckExtraBytes(this, buf, p); - if (r !== undefined) return r; - if (this.lastNeed <= buf.length) { - buf.copy(this.lastChar, p, 0, this.lastNeed); - return this.lastChar.toString(this.encoding, 0, this.lastTotal); - } - buf.copy(this.lastChar, p, 0, buf.length); - this.lastNeed -= buf.length; -} - -// Returns all complete UTF-8 characters in a Buffer. If the Buffer ended on a -// partial character, the character's bytes are buffered until the required -// number of bytes are available. -function utf8Text(buf, i) { - var total = utf8CheckIncomplete(this, buf, i); - if (!this.lastNeed) return buf.toString('utf8', i); - this.lastTotal = total; - var end = buf.length - (total - this.lastNeed); - buf.copy(this.lastChar, 0, end); - return buf.toString('utf8', i, end); -} - -// For UTF-8, a replacement character is added when ending on a partial -// character. -function utf8End(buf) { - var r = buf && buf.length ? this.write(buf) : ''; - if (this.lastNeed) return r + '\ufffd'; - return r; -} - -// UTF-16LE typically needs two bytes per character, but even if we have an even -// number of bytes available, we need to check if we end on a leading/high -// surrogate. In that case, we need to wait for the next two bytes in order to -// decode the last character properly. 
-function utf16Text(buf, i) { - if ((buf.length - i) % 2 === 0) { - var r = buf.toString('utf16le', i); - if (r) { - var c = r.charCodeAt(r.length - 1); - if (c >= 0xD800 && c <= 0xDBFF) { - this.lastNeed = 2; - this.lastTotal = 4; - this.lastChar[0] = buf[buf.length - 2]; - this.lastChar[1] = buf[buf.length - 1]; - return r.slice(0, -1); - } - } - return r; - } - this.lastNeed = 1; - this.lastTotal = 2; - this.lastChar[0] = buf[buf.length - 1]; - return buf.toString('utf16le', i, buf.length - 1); -} - -// For UTF-16LE we do not explicitly append special replacement characters if we -// end on a partial character, we simply let v8 handle that. -function utf16End(buf) { - var r = buf && buf.length ? this.write(buf) : ''; - if (this.lastNeed) { - var end = this.lastTotal - this.lastNeed; - return r + this.lastChar.toString('utf16le', 0, end); - } - return r; -} - -function base64Text(buf, i) { - var n = (buf.length - i) % 3; - if (n === 0) return buf.toString('base64', i); - this.lastNeed = 3 - n; - this.lastTotal = 3; - if (n === 1) { - this.lastChar[0] = buf[buf.length - 1]; - } else { - this.lastChar[0] = buf[buf.length - 2]; - this.lastChar[1] = buf[buf.length - 1]; - } - return buf.toString('base64', i, buf.length - n); -} - -function base64End(buf) { - var r = buf && buf.length ? this.write(buf) : ''; - if (this.lastNeed) return r + this.lastChar.toString('base64', 0, 3 - this.lastNeed); - return r; -} - -// Pass bytes on through for single-byte encodings (e.g. ascii, latin1, hex) -function simpleWrite(buf) { - return buf.toString(this.encoding); -} - -function simpleEnd(buf) { - return buf && buf.length ? 
this.write(buf) : ''; -} \ No newline at end of file diff --git a/node_modules/string_decoder/package.json b/node_modules/string_decoder/package.json deleted file mode 100644 index b2bb141160..0000000000 --- a/node_modules/string_decoder/package.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "name": "string_decoder", - "version": "1.3.0", - "description": "The string_decoder module from Node core", - "main": "lib/string_decoder.js", - "files": [ - "lib" - ], - "dependencies": { - "safe-buffer": "~5.2.0" - }, - "devDependencies": { - "babel-polyfill": "^6.23.0", - "core-util-is": "^1.0.2", - "inherits": "^2.0.3", - "tap": "~0.4.8" - }, - "scripts": { - "test": "tap test/parallel/*.js && node test/verify-dependencies", - "ci": "tap test/parallel/*.js test/ours/*.js --tap | tee test.tap && node test/verify-dependencies.js" - }, - "repository": { - "type": "git", - "url": "git://github.com/nodejs/string_decoder.git" - }, - "homepage": "https://github.com/nodejs/string_decoder", - "keywords": [ - "string", - "decoder", - "browser", - "browserify" - ], - "license": "MIT" -} diff --git a/node_modules/util-deprecate/History.md b/node_modules/util-deprecate/History.md deleted file mode 100644 index acc8675372..0000000000 --- a/node_modules/util-deprecate/History.md +++ /dev/null @@ -1,16 +0,0 @@ - -1.0.2 / 2015-10-07 -================== - - * use try/catch when checking `localStorage` (#3, @kumavis) - -1.0.1 / 2014-11-25 -================== - - * browser: use `console.warn()` for deprecation calls - * browser: more jsdocs - -1.0.0 / 2014-04-30 -================== - - * initial commit diff --git a/node_modules/util-deprecate/LICENSE b/node_modules/util-deprecate/LICENSE deleted file mode 100644 index 6a60e8c225..0000000000 --- a/node_modules/util-deprecate/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -(The MIT License) - -Copyright (c) 2014 Nathan Rajlich - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files 
(the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. diff --git a/node_modules/util-deprecate/README.md b/node_modules/util-deprecate/README.md deleted file mode 100644 index 75622fa7c2..0000000000 --- a/node_modules/util-deprecate/README.md +++ /dev/null @@ -1,53 +0,0 @@ -util-deprecate -============== -### The Node.js `util.deprecate()` function with browser support - -In Node.js, this module simply re-exports the `util.deprecate()` function. - -In the web browser (i.e. via browserify), a browser-specific implementation -of the `util.deprecate()` function is used. - - -## API - -A `deprecate()` function is the only thing exposed by this module. 
- -``` javascript -// setup: -exports.foo = deprecate(foo, 'foo() is deprecated, use bar() instead'); - - -// users see: -foo(); -// foo() is deprecated, use bar() instead -foo(); -foo(); -``` - - -## License - -(The MIT License) - -Copyright (c) 2014 Nathan Rajlich - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. diff --git a/node_modules/util-deprecate/browser.js b/node_modules/util-deprecate/browser.js deleted file mode 100644 index 549ae2f065..0000000000 --- a/node_modules/util-deprecate/browser.js +++ /dev/null @@ -1,67 +0,0 @@ - -/** - * Module exports. - */ - -module.exports = deprecate; - -/** - * Mark that a method should not be used. - * Returns a modified function which warns once by default. - * - * If `localStorage.noDeprecation = true` is set, then it is a no-op. - * - * If `localStorage.throwDeprecation = true` is set, then deprecated functions - * will throw an Error when invoked. 
- * - * If `localStorage.traceDeprecation = true` is set, then deprecated functions - * will invoke `console.trace()` instead of `console.error()`. - * - * @param {Function} fn - the function to deprecate - * @param {String} msg - the string to print to the console when `fn` is invoked - * @returns {Function} a new "deprecated" version of `fn` - * @api public - */ - -function deprecate (fn, msg) { - if (config('noDeprecation')) { - return fn; - } - - var warned = false; - function deprecated() { - if (!warned) { - if (config('throwDeprecation')) { - throw new Error(msg); - } else if (config('traceDeprecation')) { - console.trace(msg); - } else { - console.warn(msg); - } - warned = true; - } - return fn.apply(this, arguments); - } - - return deprecated; -} - -/** - * Checks `localStorage` for boolean values for the given `name`. - * - * @param {String} name - * @returns {Boolean} - * @api private - */ - -function config (name) { - // accessing global.localStorage can trigger a DOMException in sandboxed iframes - try { - if (!global.localStorage) return false; - } catch (_) { - return false; - } - var val = global.localStorage[name]; - if (null == val) return false; - return String(val).toLowerCase() === 'true'; -} diff --git a/node_modules/util-deprecate/node.js b/node_modules/util-deprecate/node.js deleted file mode 100644 index 5e6fcff5dd..0000000000 --- a/node_modules/util-deprecate/node.js +++ /dev/null @@ -1,6 +0,0 @@ - -/** - * For Node.js, simply re-export the core `util.deprecate` function. 
- */ - -module.exports = require('util').deprecate; diff --git a/node_modules/util-deprecate/package.json b/node_modules/util-deprecate/package.json deleted file mode 100644 index 2e79f89a90..0000000000 --- a/node_modules/util-deprecate/package.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "name": "util-deprecate", - "version": "1.0.2", - "description": "The Node.js `util.deprecate()` function with browser support", - "main": "node.js", - "browser": "browser.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1" - }, - "repository": { - "type": "git", - "url": "git://github.com/TooTallNate/util-deprecate.git" - }, - "keywords": [ - "util", - "deprecate", - "browserify", - "browser", - "node" - ], - "author": "Nathan Rajlich (http://n8.io/)", - "license": "MIT", - "bugs": { - "url": "https://github.com/TooTallNate/util-deprecate/issues" - }, - "homepage": "https://github.com/TooTallNate/util-deprecate" -} From c8f472fa195a40f470d1b8e94a51915680198d64 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 31 Jul 2024 11:49:38 +0800 Subject: [PATCH 31/38] remove pacakge.json Signed-off-by: Future-Outlier --- package-lock.json | 154 ---------------------------------------------- package.json | 5 -- 2 files changed, 159 deletions(-) delete mode 100644 package-lock.json delete mode 100644 package.json diff --git a/package-lock.json b/package-lock.json deleted file mode 100644 index 8455638b1a..0000000000 --- a/package-lock.json +++ /dev/null @@ -1,154 +0,0 @@ -{ - "name": "flytekit", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "dependencies": { - "msgpack5": "^6.0.2" - } - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - 
"url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/bl": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-5.1.0.tgz", - "integrity": "sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ==", - "license": "MIT", - "dependencies": { - "buffer": "^6.0.3", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, - "node_modules/buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "BSD-3-Clause" - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "license": "ISC" - }, - "node_modules/msgpack5": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/msgpack5/-/msgpack5-6.0.2.tgz", - "integrity": 
"sha512-kBSpECAWslrciRF3jy6HkMckNa14j3VZwNUUe1ONO/yihs19MskiFnsWXm0Q0aPkDYDBRFvTKkEuEDY+HVxBvQ==", - "license": "MIT", - "dependencies": { - "bl": "^5.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.0.0", - "safe-buffer": "^5.1.2" - } - }, - "node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "license": "MIT" - } - } -} diff --git a/package.json b/package.json deleted file mode 100644 index 6b25f8f14e..0000000000 --- a/package.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "dependencies": { - "msgpack5": "^6.0.2" - } 
-} From 4b08fbef0b126983e9cb0c83c883002820dfbfe3 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 31 Jul 2024 14:14:16 +0800 Subject: [PATCH 32/38] lint Signed-off-by: Future-Outlier --- plugins/flytekit-snowflake/setup.py | 2 +- .../structured_dataset/test_snowflake.py | 48 +++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 tests/flytekit/unit/types/structured_dataset/test_snowflake.py diff --git a/plugins/flytekit-snowflake/setup.py b/plugins/flytekit-snowflake/setup.py index c2c42044a9..ec1d6e0158 100644 --- a/plugins/flytekit-snowflake/setup.py +++ b/plugins/flytekit-snowflake/setup.py @@ -4,7 +4,7 @@ microlib_name = f"flytekitplugins-{PLUGIN_NAME}" -plugin_requires = ["flytekit>1.10.7", "snowflake-connector-python>=3.11.0"] +plugin_requires = ["flytekit>1.13.1", "snowflake-connector-python>=3.11.0"] __version__ = "0.0.0+develop" diff --git a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py new file mode 100644 index 0000000000..b4c1f9476b --- /dev/null +++ b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py @@ -0,0 +1,48 @@ +import mock +import pandas as pd +import pytest +from typing_extensions import Annotated + +from flytekit import StructuredDataset, kwtypes, task, workflow +import sys + + + +@mock.patch("flytekit.types.structured.snowflake.get_private_key", return_value="pb") +@mock.patch("snowflake.connector.connect") +@pytest.mark.skipif("pandas" not in sys.modules, reason="Pandas is not installed.") +@pytest.mark.asyncio +async def test_sf_wf(mock_connect, mock_get_private_key): + pd_df = pd.DataFrame({"Name": ["Tom", "Joseph"], "Age": [20, 22]}) + my_cols = kwtypes(Name=str, Age=int) + + @task + def gen_df() -> Annotated[pd.DataFrame, my_cols, "parquet"]: + return pd_df + + @task + def t1(df: pd.DataFrame) -> Annotated[StructuredDataset, my_cols]: + return StructuredDataset( + dataframe=df, + 
uri="snowflake://dummy_user:dummy_account/dummy_warehouse/dummy_database/dummy_schema/dummy_table" + ) + + @task + def t2(sd: Annotated[StructuredDataset, my_cols]) -> pd.DataFrame: + return sd.open(pd.DataFrame).all() + + @workflow + def wf() -> pd.DataFrame: + df = gen_df() + sd = t1(df=df) + return t2(sd=sd) + + class mock_dataframe: + def to_dataframe(self): + return pd.DataFrame({"Name": ["Tom", "Joseph"], "Age": [20, 22]}) + + mock_connect_instance = mock_connect.return_value + mock_coursor_instance = mock_connect_instance.cursor.return_value + mock_coursor_instance.fetch_pandas_all.return_value = mock_dataframe().to_dataframe() + + assert wf().equals(pd_df) From de6ce1a49f4108f9164812bb3fb422dd748334b2 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 31 Jul 2024 14:14:50 +0800 Subject: [PATCH 33/38] add snowflake-connector-python Signed-off-by: Future-Outlier --- dev-requirements.in | 1 + 1 file changed, 1 insertion(+) diff --git a/dev-requirements.in b/dev-requirements.in index 2c91767a01..f9765969ca 100644 --- a/dev-requirements.in +++ b/dev-requirements.in @@ -16,6 +16,7 @@ pre-commit codespell google-cloud-bigquery google-cloud-bigquery-storage +snowflake-connector-python IPython keyrings.alt setuptools_scm From 58a1106b1fcc7b7865fd2dc24848c6413242e472 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 31 Jul 2024 14:19:30 +0800 Subject: [PATCH 34/38] fix test_snowflake Signed-off-by: Future-Outlier --- .../flytekit/unit/types/structured_dataset/test_snowflake.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py index b4c1f9476b..d0d3cf6cc6 100644 --- a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py +++ b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py @@ -1,10 +1,10 @@ import mock -import pandas as pd import pytest from typing_extensions import Annotated +import 
sys from flytekit import StructuredDataset, kwtypes, task, workflow -import sys + @@ -13,6 +13,7 @@ @pytest.mark.skipif("pandas" not in sys.modules, reason="Pandas is not installed.") @pytest.mark.asyncio async def test_sf_wf(mock_connect, mock_get_private_key): + import pandas as pd pd_df = pd.DataFrame({"Name": ["Tom", "Joseph"], "Age": [20, 22]}) my_cols = kwtypes(Name=str, Age=int) From 4aa241177256d440a0babd127f70128287c34b49 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 31 Jul 2024 14:28:58 +0800 Subject: [PATCH 35/38] Try to fix tests Signed-off-by: Future-Outlier --- .../unit/types/structured_dataset/test_snowflake.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py index d0d3cf6cc6..137bcbe3af 100644 --- a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py +++ b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py @@ -5,14 +5,10 @@ from flytekit import StructuredDataset, kwtypes, task, workflow - - - +@pytest.mark.skipif("pandas" not in sys.modules, reason="Pandas is not installed.") @mock.patch("flytekit.types.structured.snowflake.get_private_key", return_value="pb") @mock.patch("snowflake.connector.connect") -@pytest.mark.skipif("pandas" not in sys.modules, reason="Pandas is not installed.") -@pytest.mark.asyncio -async def test_sf_wf(mock_connect, mock_get_private_key): +def test_sf_wf(mock_connect, mock_get_private_key): import pandas as pd pd_df = pd.DataFrame({"Name": ["Tom", "Joseph"], "Age": [20, 22]}) my_cols = kwtypes(Name=str, Age=int) @@ -25,7 +21,7 @@ def gen_df() -> Annotated[pd.DataFrame, my_cols, "parquet"]: def t1(df: pd.DataFrame) -> Annotated[StructuredDataset, my_cols]: return StructuredDataset( dataframe=df, - uri="snowflake://dummy_user:dummy_account/dummy_warehouse/dummy_database/dummy_schema/dummy_table" + 
uri="snowflake://dummy_user/dummy_account/COMPUTE_WH/FLYTEAGENT/PUBLIC/TEST" ) @task From 31e57c85dfb15153fc7af52b058fbcc84342d179 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 31 Jul 2024 14:56:46 +0800 Subject: [PATCH 36/38] fix tests Signed-off-by: Future-Outlier --- .../types/structured_dataset/test_snowflake.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py index 137bcbe3af..87f457929f 100644 --- a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py +++ b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py @@ -5,7 +5,21 @@ from flytekit import StructuredDataset, kwtypes, task, workflow +try: + import numpy as np + numpy_installed = True +except ImportError: + numpy_installed = False + +skip_if_wrong_numpy_version = pytest.mark.skipif( + not numpy_installed or np.__version__ > '1.26.4', + reason="Test skipped because either NumPy is not installed or the installed version is greater than 1.26.4. " + "Ensure that NumPy is installed and the version is <= 1.26.4, as required by the Snowflake connector." 
+ +) + @pytest.mark.skipif("pandas" not in sys.modules, reason="Pandas is not installed.") +@skip_if_wrong_numpy_version @mock.patch("flytekit.types.structured.snowflake.get_private_key", return_value="pb") @mock.patch("snowflake.connector.connect") def test_sf_wf(mock_connect, mock_get_private_key): From 4a9e936eef7120e1c0cb5180f3b71a50b325adde Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 31 Jul 2024 15:43:23 +0800 Subject: [PATCH 37/38] Try Fix snowflake Import Signed-off-by: Future-Outlier --- tests/flytekit/unit/types/structured_dataset/test_snowflake.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py index 87f457929f..1eb4887527 100644 --- a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py +++ b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py @@ -24,6 +24,9 @@ @mock.patch("snowflake.connector.connect") def test_sf_wf(mock_connect, mock_get_private_key): import pandas as pd + from snowflake import connector as sc + import flytekit + pd_df = pd.DataFrame({"Name": ["Tom", "Joseph"], "Age": [20, 22]}) my_cols = kwtypes(Name=str, Age=int) From 1dd36b28fbccd9cfe7e6256917eba0809d39f7d1 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 31 Jul 2024 16:01:33 +0800 Subject: [PATCH 38/38] snowflake test passed Signed-off-by: Future-Outlier --- .../unit/types/structured_dataset/test_snowflake.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py index 1eb4887527..ab85f9e013 100644 --- a/tests/flytekit/unit/types/structured_dataset/test_snowflake.py +++ b/tests/flytekit/unit/types/structured_dataset/test_snowflake.py @@ -24,8 +24,16 @@ @mock.patch("snowflake.connector.connect") def test_sf_wf(mock_connect, mock_get_private_key): import pandas 
as pd - from snowflake import connector as sc - import flytekit + from flytekit.lazy_import.lazy_module import is_imported + from flytekit.types.structured import register_snowflake_handlers + from flytekit.types.structured.structured_dataset import DuplicateHandlerError + + if is_imported("snowflake.connector"): + try: + register_snowflake_handlers() + except DuplicateHandlerError: + pass + pd_df = pd.DataFrame({"Name": ["Tom", "Joseph"], "Age": [20, 22]}) my_cols = kwtypes(Name=str, Age=int)