diff --git a/CHANGELOG.md b/CHANGELOG.md index 7fc0588..9c9f3c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ ## 2.9.0 [unreleased] +### Bug Fixes +1. [#123](https://github.com/influxdata/influxdb-client-ruby/pull/123): Duplicate columns warning shows in improper situations + ## 2.8.0 [2022-10-27] ### Features diff --git a/lib/influxdb2/client/client.rb b/lib/influxdb2/client/client.rb index b19925b..177cee6 100644 --- a/lib/influxdb2/client/client.rb +++ b/lib/influxdb2/client/client.rb @@ -51,7 +51,6 @@ class Client # @option options [Logger] :logger Logger used for logging. Disable logging by set to false. # @option options [bool] :debugging Enable debugging for HTTP request/response. # @option options [Hash] :tags Default tags which will be added to each point written by api. - # the body line-protocol def initialize(url, token, options = nil) @auto_closeable = [] @options = options ? options.dup : {} diff --git a/lib/influxdb2/client/flux_csv_parser.rb b/lib/influxdb2/client/flux_csv_parser.rb index 317b691..eca8e78 100644 --- a/lib/influxdb2/client/flux_csv_parser.rb +++ b/lib/influxdb2/client/flux_csv_parser.rb @@ -188,7 +188,7 @@ def _add_column_names_and_tags(table, csv) i += 1 end - duplicates = table.columns.group_by { :label }.select { |_k, v| v.size > 1 } + duplicates = table.columns.group_by(&:label).select { |_k, v| v.size > 1 } warning = "The response contains columns with duplicated names: #{duplicates.keys.join(', ')} You should use the 'FluxRecord.row to access your data instead of 'FluxRecord.values' hash." 
diff --git a/test/influxdb/flux_csv_parser_test.rb b/test/influxdb/flux_csv_parser_test.rb index ce69264..413a308 100644 --- a/test/influxdb/flux_csv_parser_test.rb +++ b/test/influxdb/flux_csv_parser_test.rb @@ -516,14 +516,44 @@ def test_parse_duplicate_column_names ,,0,2022-09-13T06:14:40.469404272Z,2022-09-13T06:24:40.469404272Z,2022-09-13T06:24:39.299Z,my_measurement,Prague,25.3 ,,0,2022-09-13T06:14:40.469404272Z,2022-09-13T06:24:40.469404272Z,2022-09-13T06:24:40.454Z,my_measurement,Prague,25.3' - tables = InfluxDB2::FluxCsvParser.new(data, stream: false, response_mode: InfluxDB2::FluxResponseMode::ONLY_NAMES) - .parse - .tables - assert_equal 1, tables.size - assert_equal 8, tables[0].columns.size - assert_equal 3, tables[0].records.size - assert_equal 7, tables[0].records[0].values.size - assert_equal 8, tables[0].records[0].row.size - assert_equal 25.3, tables[0].records[0].row[7] + out, = capture_io do + tables = InfluxDB2::FluxCsvParser.new(data, stream: false, response_mode: InfluxDB2::FluxResponseMode::ONLY_NAMES) + .parse + .tables + + assert_equal 1, tables.size + assert_equal 8, tables[0].columns.size + assert_equal 3, tables[0].records.size + assert_equal 7, tables[0].records[0].values.size + assert_equal 8, tables[0].records[0].row.size + assert_equal 25.3, tables[0].records[0].row[7] + end + + assert_match 'The response contains columns with duplicated names: result', out + end + + def test_parse_without_duplicates + data = '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,string,double +#group,false,false,true,true,false,true,true,false +#default,_result,,,,,,, +,result,table,_start,_stop,_time,_measurement,location,result2 +,,0,2022-09-13T06:14:40.469404272Z,2022-09-13T06:24:40.469404272Z,2022-09-13T06:24:33.746Z,my_measurement,Prague,25.3 +,,0,2022-09-13T06:14:40.469404272Z,2022-09-13T06:24:40.469404272Z,2022-09-13T06:24:39.299Z,my_measurement,Prague,25.3
+,,0,2022-09-13T06:14:40.469404272Z,2022-09-13T06:24:40.469404272Z,2022-09-13T06:24:40.454Z,my_measurement,Prague,25.3' + + out, = capture_io do + tables = InfluxDB2::FluxCsvParser.new(data, stream: false, response_mode: InfluxDB2::FluxResponseMode::ONLY_NAMES) + .parse + .tables + + assert_equal 1, tables.size + assert_equal 8, tables[0].columns.size + assert_equal 3, tables[0].records.size + assert_equal 8, tables[0].records[0].values.size + assert_equal 8, tables[0].records[0].row.size + assert_equal 25.3, tables[0].records[0].row[7] + end + + assert_equal '', out end end