diff --git a/doc/source/api.rst b/doc/source/api.rst
index f6dfd5cfaf0e7..5e5b84e0e80b2 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -78,17 +78,11 @@ SQL
.. autosummary::
:toctree: generated/
-
+
+ read_sql_table
+ read_sql_query
read_sql
-.. currentmodule:: pandas.io.sql
-
-.. autosummary::
- :toctree: generated/
-
- read_frame
- write_frame
-
Google BigQuery
~~~~~~~~~~~~~~~
.. currentmodule:: pandas.io.gbq
diff --git a/doc/source/io.rst b/doc/source/io.rst
index b6bb5718e37f9..891ad3ca1cf85 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3113,11 +3113,22 @@ DB-API `__.
See also some :ref:`cookbook examples ` for some advanced strategies.
The key functions are:
-:func:`~pandas.io.sql.to_sql`
-:func:`~pandas.io.sql.read_sql`
-:func:`~pandas.io.sql.read_table`
+.. autosummary::
+ :toctree: generated/
+
+ read_sql_table
+ read_sql_query
+ read_sql
+ DataFrame.to_sql
+.. note::
+
+   The function :func:`~pandas.read_sql` is a convenience wrapper around
+   :func:`~pandas.read_sql_table` and :func:`~pandas.read_sql_query` (and is
+   kept for backward compatibility). It will delegate to the specific function
+   depending on the provided input (database table name or SQL query).
+
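+   For example, a minimal sketch of the delegation (assuming the SQLAlchemy
+   ``engine`` and the ``data`` table created below):
+
+   .. code-block:: python
+
+      pd.read_sql('data', engine)                # table name -> read_sql_table
+      pd.read_sql('SELECT * FROM data', engine)  # SQL query  -> read_sql_query
+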
In the following example, we use the `SQLite `__ SQL database
engine. You can use a temporary SQLite database where data are stored in
"memory".
@@ -3129,7 +3140,7 @@ connecting to.
For more information on :func:`create_engine` and the URI formatting, see the examples
below and the SQLAlchemy `documentation `__
-.. code-block:: python
+.. ipython:: python
from sqlalchemy import create_engine
from pandas.io import sql
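+
+For reference, a few example connection URIs (a sketch; each dialect prefix
+assumes the corresponding DBAPI driver is installed):
+
+.. code-block:: python
+
+   engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase')
+   engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo')
+   engine = create_engine('sqlite:///foo.db')  # SQLite file in the current directory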
@@ -3140,8 +3151,7 @@ Writing DataFrames
~~~~~~~~~~~~~~~~~~
Assuming the following data is in a DataFrame ``data``, we can insert it into
-the database using :func:`~pandas.io.sql.to_sql`.
-
+the database using :func:`~pandas.DataFrame.to_sql`.
+-----+------------+-------+-------+-------+
| id | Date | Col_1 | Col_2 | Col_3 |
@@ -3154,13 +3164,6 @@ the database using :func:`~pandas.io.sql.to_sql`.
+-----+------------+-------+-------+-------+
-.. ipython:: python
- :suppress:
-
- from sqlalchemy import create_engine
- from pandas.io import sql
- engine = create_engine('sqlite:///:memory:')
-
.. ipython:: python
:suppress:
@@ -3171,44 +3174,47 @@ the database using :func:`~pandas.io.sql.to_sql`.
(63, datetime.datetime(2010,10,20), 'Z', 5.73, True)]
data = DataFrame(d, columns=c)
- sql.to_sql(data, 'data', engine)
+
+.. ipython:: python
+
+ data.to_sql('data', engine)
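+
+The ``if_exists`` argument controls what happens when the table already exists
+('fail' by default; 'replace' and 'append' are also accepted). A minimal
+sketch:
+
+.. code-block:: python
+
+   data.to_sql('data', engine, if_exists='append')
+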
Reading Tables
~~~~~~~~~~~~~~
-:func:`~pandas.io.sql.read_table` will read a databse table given the
+:func:`~pandas.read_sql_table` will read a database table given the
table name and optionally a subset of columns to read.
.. note::
- In order to use :func:`~pandas.io.sql.read_table`, you **must** have the
+ In order to use :func:`~pandas.read_sql_table`, you **must** have the
SQLAlchemy optional dependency installed.
.. ipython:: python
- sql.read_table('data', engine)
+ pd.read_sql_table('data', engine)
You can also specify the name of the column as the DataFrame index,
and specify a subset of columns to be read.
.. ipython:: python
- sql.read_table('data', engine, index_col='id')
- sql.read_table('data', engine, columns=['Col_1', 'Col_2'])
+ pd.read_sql_table('data', engine, index_col='id')
+ pd.read_sql_table('data', engine, columns=['Col_1', 'Col_2'])
And you can explicitly force columns to be parsed as dates:
.. ipython:: python
- sql.read_table('data', engine, parse_dates=['Date'])
+ pd.read_sql_table('data', engine, parse_dates=['Date'])
If needed you can explicitly specify a format string, or a dict of arguments
-to pass to :func:`pandas.tseries.tools.to_datetime`.
+to pass to :func:`pandas.to_datetime`:
.. code-block:: python
- sql.read_table('data', engine, parse_dates={'Date': '%Y-%m-%d'})
- sql.read_table('data', engine, parse_dates={'Date': {'format': '%Y-%m-%d %H:%M:%S'}})
+ pd.read_sql_table('data', engine, parse_dates={'Date': '%Y-%m-%d'})
+ pd.read_sql_table('data', engine, parse_dates={'Date': {'format': '%Y-%m-%d %H:%M:%S'}})
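+
+The arg-dict form also covers integer timestamps, e.g. passing a ``unit``
+through to :func:`pandas.to_datetime` (a sketch; ``IntDateCol`` is a
+hypothetical column storing epoch seconds):
+
+.. code-block:: python
+
+   pd.read_sql_table('data', engine, parse_dates={'IntDateCol': {'unit': 's'}})
+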
You can check if a table exists using :func:`~pandas.io.sql.has_table`
@@ -3219,20 +3225,20 @@ instantiated directly for more manual control over the SQL interaction.
Querying
~~~~~~~~
-You can query using raw SQL in the :func:`~pandas.io.sql.read_sql` function.
+You can query using raw SQL in the :func:`~pandas.read_sql_query` function.
In this case you must use the SQL variant appropriate for your database.
When using SQLAlchemy, you can also pass SQLAlchemy Expression language constructs,
which are database-agnostic.
.. ipython:: python
- sql.read_sql('SELECT * FROM data', engine)
+ pd.read_sql_query('SELECT * FROM data', engine)
Of course, you can specify a more "complex" query.
.. ipython:: python
- sql.read_frame("SELECT id, Col_1, Col_2 FROM data WHERE id = 42;", engine)
+ pd.read_sql_query("SELECT id, Col_1, Col_2 FROM data WHERE id = 42;", engine)
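+
+Queries can also be parameterized via the ``params`` argument (a sketch; the
+placeholder style, ``?`` here, depends on the driver in use):
+
+.. code-block:: python
+
+   pd.read_sql_query('SELECT * FROM data WHERE id = ?;', engine, params=[42])
+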
You can also run a plain query without creating a dataframe with
@@ -3290,7 +3296,7 @@ you are using.
.. code-block:: python
- sql.to_sql(data, 'data', cnx, flavor='sqlite')
+ data.to_sql('data', cnx, flavor='sqlite')
sql.read_sql("SELECT * FROM data", cnx, flavor='sqlite')
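+
+A minimal sketch of creating such a fallback DBAPI2 connection with the
+standard library ``sqlite3`` module:
+
+.. code-block:: python
+
+   import sqlite3
+   cnx = sqlite3.connect(':memory:')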
diff --git a/pandas/io/api.py b/pandas/io/api.py
index cf3615cd822cd..5fa8c7ef60074 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -8,7 +8,7 @@
from pandas.io.pytables import HDFStore, Term, get_store, read_hdf
from pandas.io.json import read_json
from pandas.io.html import read_html
-from pandas.io.sql import read_sql
+from pandas.io.sql import read_sql, read_sql_table, read_sql_query
from pandas.io.stata import read_stata
from pandas.io.pickle import read_pickle, to_pickle
from pandas.io.packers import read_msgpack, to_msgpack
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 5052f057871b0..bed4c2da61c59 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -176,14 +176,66 @@ def uquery(sql, con, cur=None, params=None, engine=None, flavor='sqlite'):
#------------------------------------------------------------------------------
# Read and write to DataFrames
+def read_sql_table(table_name, con, meta=None, index_col=None,
+ coerce_float=True, parse_dates=None, columns=None):
+ """Read SQL database table into a DataFrame.
+
+ Given a table name and an SQLAlchemy engine, returns a DataFrame.
+ This function does not support DBAPI connections.
+
+ Parameters
+ ----------
+ table_name : string
+ Name of SQL table in database
+ con : SQLAlchemy engine
+ Legacy mode not supported
+ meta : SQLAlchemy meta, optional
+ If omitted MetaData is reflected from engine
+ index_col : string, optional
+ Column to set as index
+ coerce_float : boolean, default True
+        Attempt to convert values of non-string, non-numeric objects (like
+        decimal.Decimal) to floating point. Can result in loss of precision.
+ parse_dates : list or dict
+ - List of column names to parse as dates
+ - Dict of ``{column_name: format string}`` where format string is
+ strftime compatible in case of parsing string times or is one of
+ (D, s, ns, ms, us) in case of parsing integer timestamps
+ - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
+          to the keyword arguments of :func:`pandas.to_datetime`.
+          Especially useful with databases without native datetime support,
+          such as SQLite.
+ columns : list
+ List of column names to select from sql table
+
+ Returns
+ -------
+ DataFrame
+
+ See also
+ --------
+ read_sql_query : Read SQL query into a DataFrame.
+ read_sql
+
-def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True,
- params=None, parse_dates=None):
"""
- Returns a DataFrame corresponding to the result set of the query
- string.
+ pandas_sql = PandasSQLAlchemy(con, meta=meta)
+ table = pandas_sql.read_table(
+ table_name, index_col=index_col, coerce_float=coerce_float,
+ parse_dates=parse_dates, columns=columns)
+
+ if table is not None:
+ return table
+ else:
+        raise ValueError("Table %s not found" % table_name)
+
+
+def read_sql_query(sql, con, index_col=None, flavor='sqlite',
+ coerce_float=True, params=None, parse_dates=None):
+ """Read SQL query into a DataFrame.
- Optionally provide an `index_col` parameter to use one of the
+ Returns a DataFrame corresponding to the result set of the query
+ string. Optionally provide an `index_col` parameter to use one of the
columns as the index; otherwise a default integer index will be used.
Parameters
@@ -221,15 +273,83 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True,
See also
--------
- read_table
+ read_sql_table : Read SQL database table into a DataFrame
+ read_sql
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
- return pandas_sql.read_sql(sql,
- index_col=index_col,
- params=params,
- coerce_float=coerce_float,
- parse_dates=parse_dates)
+ return pandas_sql.read_sql(
+ sql, index_col=index_col, params=params, coerce_float=coerce_float,
+ parse_dates=parse_dates)
+
+
+def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True,
+ params=None, parse_dates=None, columns=None):
+ """
+ Read SQL query or database table into a DataFrame.
+
+ Parameters
+ ----------
+ sql : string
+ SQL query to be executed or database table name.
+ con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
+ Using SQLAlchemy makes it possible to use any DB supported by that
+ library.
+        If a DBAPI2 object is given, a supported SQL flavor must also be
+        provided.
+    index_col : string, optional
+        Column name to set as index for the returned DataFrame.
+    flavor : string, {'sqlite', 'mysql'}
+        The flavor of SQL to use. Ignored when using an SQLAlchemy engine.
+        Required when using a DBAPI2 connection.
+ coerce_float : boolean, default True
+        Attempt to convert values of non-string, non-numeric objects (like
+        decimal.Decimal) to floating point, useful for SQL result sets.
+    cur : deprecated, cursor is obtained from connection
+ params : list, tuple or dict, optional
+ List of parameters to pass to execute method.
+ parse_dates : list or dict
+ - List of column names to parse as dates
+ - Dict of ``{column_name: format string}`` where format string is
+ strftime compatible in case of parsing string times or is one of
+ (D, s, ns, ms, us) in case of parsing integer timestamps
+ - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
+          to the keyword arguments of :func:`pandas.to_datetime`.
+          Especially useful with databases without native datetime support,
+          such as SQLite.
+ columns : list
+ List of column names to select from sql table
+
+ Returns
+ -------
+ DataFrame
+
+ Notes
+ -----
+    This function is a convenience wrapper around ``read_sql_table`` and
+    ``read_sql_query`` (and is kept for backward compatibility). It will
+    delegate to the specific function depending on the provided input
+    (database table name or SQL query).
+
+ See also
+ --------
+ read_sql_table : Read SQL database table into a DataFrame
+ read_sql_query : Read SQL query into a DataFrame
+
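+    Examples
+    --------
+    A minimal sketch of the delegation (assuming an SQLAlchemy ``engine`` and
+    an existing table named ``'data'``):
+
+    >>> read_sql('data', engine)                # doctest: +SKIP
+    >>> read_sql('SELECT * FROM data', engine)  # doctest: +SKIP
+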
+ """
+ pandas_sql = pandasSQL_builder(con, flavor=flavor)
+
+ if pandas_sql.has_table(sql):
+ if isinstance(pandas_sql, PandasSQLLegacy):
+ raise ValueError("Reading a table with read_sql is not supported "
+ "for a DBAPI2 connection. Use an SQLAlchemy "
+ "engine or specify an sql query")
+ return pandas_sql.read_table(
+ sql, index_col=index_col, coerce_float=coerce_float,
+ parse_dates=parse_dates, columns=columns)
+ else:
+ return pandas_sql.read_sql(
+ sql, index_col=index_col, params=params, coerce_float=coerce_float,
+ parse_dates=parse_dates)
def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
@@ -296,59 +416,6 @@ def has_table(table_name, con, meta=None, flavor='sqlite'):
return pandas_sql.has_table(table_name)
-def read_table(table_name, con, meta=None, index_col=None, coerce_float=True,
- parse_dates=None, columns=None):
- """Given a table name and SQLAlchemy engine, return a DataFrame.
-
- Type convertions will be done automatically.
-
- Parameters
- ----------
- table_name : string
- Name of SQL table in database
- con : SQLAlchemy engine
- Legacy mode not supported
- meta : SQLAlchemy meta, optional
- If omitted MetaData is reflected from engine
- index_col : string or sequence of strings, optional
- Column(s) to set as index.
- coerce_float : boolean, default True
- Attempt to convert values to non-string, non-numeric objects (like
- decimal.Decimal) to floating point. Can result in loss of Precision.
- parse_dates : list or dict
- - List of column names to parse as dates
- - Dict of ``{column_name: format string}`` where format string is
- strftime compatible in case of parsing string times or is one of
- (D, s, ns, ms, us) in case of parsing integer timestamps
- - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
- to the keyword arguments of :func:`pandas.to_datetime`
- Especially useful with databases without native Datetime support,
- such as SQLite
- columns : list, optional
- List of column names to select from sql table
-
- Returns
- -------
- DataFrame
-
- See also
- --------
- read_sql
-
- """
- pandas_sql = PandasSQLAlchemy(con, meta=meta)
- table = pandas_sql.read_table(table_name,
- index_col=index_col,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- columns=columns)
-
- if table is not None:
- return table
- else:
- raise ValueError("Table %s not found" % table_name, con)
-
-
def pandasSQL_builder(con, flavor=None, meta=None):
"""
Convenience function to return the correct PandasSQL subclass based on the
@@ -667,6 +734,13 @@ def uquery(self, *args, **kwargs):
result = self.execute(*args, **kwargs)
return result.rowcount
+ def read_table(self, table_name, index_col=None, coerce_float=True,
+ parse_dates=None, columns=None):
+
+ table = PandasSQLTable(table_name, self, index=index_col)
+ return table.read(coerce_float=coerce_float,
+ parse_dates=parse_dates, columns=columns)
+
def read_sql(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None):
args = _convert_params(sql, params)
@@ -705,13 +779,6 @@ def has_table(self, name):
def get_table(self, table_name):
return self.meta.tables.get(table_name)
- def read_table(self, table_name, index_col=None, coerce_float=True,
- parse_dates=None, columns=None):
-
- table = PandasSQLTable(table_name, self, index=index_col)
- return table.read(coerce_float=coerce_float,
- parse_dates=parse_dates, columns=columns)
-
def drop_table(self, table_name):
if self.engine.has_table(table_name):
self.get_table(table_name).drop()
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index f05f6fe3c1d14..83978a0e0b8f7 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -306,7 +306,7 @@ def setUp(self):
self._load_raw_sql()
def test_read_sql_iris(self):
- iris_frame = sql.read_sql(
+ iris_frame = sql.read_sql_query(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
@@ -364,8 +364,8 @@ def test_to_sql_append(self):
def test_to_sql_series(self):
s = Series(np.arange(5, dtype='int64'), name='series')
sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False)
- s2 = sql.read_sql("SELECT * FROM test_series", self.conn,
- flavor='sqlite')
+ s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn,
+ flavor='sqlite')
tm.assert_frame_equal(s.to_frame(), s2)
def test_to_sql_panel(self):
@@ -384,7 +384,7 @@ def test_legacy_write_frame(self):
def test_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
con=self.conn, flavor='sqlite')
- result = sql.read_sql(
+ result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn,
flavor='sqlite')
@@ -412,35 +412,33 @@ def test_tquery(self):
def test_date_parsing(self):
""" Test date parsing in read_sql """
# No Parsing
- df = sql.read_sql(
- "SELECT * FROM types_test_data", self.conn, flavor='sqlite')
+ df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
+ flavor='sqlite')
self.assertFalse(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
- df = sql.read_sql("SELECT * FROM types_test_data",
- self.conn, flavor='sqlite', parse_dates=['DateCol'])
+ df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
+ flavor='sqlite', parse_dates=['DateCol'])
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
- df = sql.read_sql("SELECT * FROM types_test_data", self.conn,
- flavor='sqlite',
- parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
+ df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
+ flavor='sqlite',
+ parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
- df = sql.read_sql("SELECT * FROM types_test_data",
- self.conn, flavor='sqlite',
- parse_dates=['IntDateCol'])
+ df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
+ flavor='sqlite', parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
- df = sql.read_sql("SELECT * FROM types_test_data",
- self.conn, flavor='sqlite',
- parse_dates={'IntDateCol': 's'})
+ df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
+ flavor='sqlite', parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
@@ -448,10 +446,10 @@ def test_date_parsing(self):
def test_date_and_index(self):
""" Test case where same column appears in parse_date and index_col"""
- df = sql.read_sql("SELECT * FROM types_test_data",
- self.conn, flavor='sqlite',
- parse_dates=['DateCol', 'IntDateCol'],
- index_col='DateCol')
+ df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
+ flavor='sqlite', index_col='DateCol',
+ parse_dates=['DateCol', 'IntDateCol'])
+
self.assertTrue(
issubclass(df.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
@@ -465,13 +463,13 @@ def test_to_sql_index_label(self):
# no index name, defaults to 'index'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
- frame = sql.read_sql('SELECT * FROM test_index_label', self.conn)
+ frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
- frame = sql.read_sql('SELECT * FROM test_index_label', self.conn)
+ frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
@@ -479,14 +477,14 @@ def test_to_sql_index_label(self):
temp_frame.index.name = 'index_name'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
- frame = sql.read_sql('SELECT * FROM test_index_label', self.conn)
+ frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index_name',
"Index name not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
- frame = sql.read_sql('SELECT * FROM test_index_label', self.conn)
+ frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
@@ -496,14 +494,14 @@ def test_to_sql_index_label_multiindex(self):
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
- frame = sql.read_sql('SELECT * FROM test_index_label', self.conn)
+ frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'level_0')
self.assertEqual(frame.columns[1], 'level_1')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['A', 'B'])
- frame = sql.read_sql('SELECT * FROM test_index_label', self.conn)
+ frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Specified index_labels not written to database")
@@ -511,14 +509,14 @@ def test_to_sql_index_label_multiindex(self):
temp_frame.index.names = ['A', 'B']
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
- frame = sql.read_sql('SELECT * FROM test_index_label', self.conn)
+ frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Index names not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['C', 'D'])
- frame = sql.read_sql('SELECT * FROM test_index_label', self.conn)
+ frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'],
"Specified index_labels not written to database")
@@ -545,7 +543,7 @@ def test_read_table_columns(self):
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
cols = ['A', 'B']
- result = sql.read_table('test_frame', self.conn, columns=cols)
+ result = sql.read_sql_table('test_frame', self.conn, columns=cols)
self.assertEqual(result.columns.tolist(), cols,
"Columns not correctly selected")
@@ -553,21 +551,34 @@ def test_read_table_index_col(self):
# test index_col argument in read_sql_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
- result = sql.read_table('test_frame', self.conn, index_col="index")
+ result = sql.read_sql_table('test_frame', self.conn, index_col="index")
self.assertEqual(result.index.names, ["index"],
"index_col not correctly set")
- result = sql.read_table('test_frame', self.conn, index_col=["A", "B"])
+ result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
- result = sql.read_table('test_frame', self.conn, index_col=["A", "B"],
+ result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"],
columns=["C", "D"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
self.assertEqual(result.columns.tolist(), ["C", "D"],
"columns not set correctly whith index_col")
+ def test_read_sql_delegate(self):
+ iris_frame1 = sql.read_sql_query(
+ "SELECT * FROM iris", self.conn)
+ iris_frame2 = sql.read_sql(
+ "SELECT * FROM iris", self.conn)
+ tm.assert_frame_equal(iris_frame1, iris_frame2,
+                              "read_sql and read_sql_query do not give the"
+                              " same result with a query")
+
+ iris_frame1 = sql.read_sql_table('iris', self.conn)
+ iris_frame2 = sql.read_sql('iris', self.conn)
+ tm.assert_frame_equal(iris_frame1, iris_frame2)
+
class TestSQLLegacyApi(_TestSQLApi):
@@ -612,7 +623,7 @@ def test_sql_open_close(self):
conn.close()
conn = self.connect(name)
- result = sql.read_sql(
+ result = sql.read_sql_query(
"SELECT * FROM test_frame2_legacy;",
conn,
flavor="sqlite",
@@ -622,6 +633,18 @@ def test_sql_open_close(self):
tm.assert_frame_equal(self.test_frame2, result)
+ def test_read_sql_delegate(self):
+ iris_frame1 = sql.read_sql_query(
+ "SELECT * FROM iris", self.conn, flavor=self.flavor)
+ iris_frame2 = sql.read_sql(
+ "SELECT * FROM iris", self.conn, flavor=self.flavor)
+ tm.assert_frame_equal(iris_frame1, iris_frame2,
+                              "read_sql and read_sql_query do not give the"
+                              " same result with a query")
+
+ self.assertRaises(ValueError, sql.read_sql, 'iris', self.conn,
+ flavor=self.flavor)
+
class _TestSQLAlchemy(PandasSQLTest):
"""
@@ -686,21 +709,21 @@ def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
- iris_frame = sql.read_table("iris", con=self.conn)
+ iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
- iris_frame = sql.read_table(
+ iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
- ValueError, sql.read_table, "this_doesnt_exist", con=self.conn)
+ ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
- df = sql.read_table("types_test_data", self.conn)
+ df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
@@ -717,7 +740,7 @@ def test_default_type_conversion(self):
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
- df = sql.read_table("types_test_data", self.conn)
+ df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
@@ -726,34 +749,34 @@ def test_default_date_load(self):
def test_date_parsing(self):
# No Parsing
- df = sql.read_table("types_test_data", self.conn)
+ df = sql.read_sql_table("types_test_data", self.conn)
- df = sql.read_table(
+ df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['DateCol'])
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
- df = sql.read_table(
+ df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
- df = sql.read_table("types_test_data", self.conn, parse_dates={
+ df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
- df = sql.read_table(
+ df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
- df = sql.read_table(
+ df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
- df = sql.read_table(
+ df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
@@ -766,7 +789,7 @@ def test_mixed_dtype_insert(self):
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
- df2 = sql.read_table("test_read_write", self.conn)
+ df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
@@ -794,7 +817,7 @@ def setUp(self):
self._load_test1_data()
def test_default_type_conversion(self):
- df = sql.read_table("types_test_data", self.conn)
+ df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
@@ -812,7 +835,7 @@ def test_default_type_conversion(self):
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
- df = sql.read_table("types_test_data", self.conn)
+ df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64),
@@ -971,7 +994,7 @@ def tearDown(self):
self.conn.execute('DROP TABLE %s' % table[0])
def test_default_type_conversion(self):
- df = sql.read_table("types_test_data", self.conn)
+ df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")