diff --git a/.travis.yml b/.travis.yml
index a203fc1..3914ee6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,7 @@
 language: erlang
 otp_release:
+  - 18.1
   - 17.0
   - R16B03-1
   - R16B03
diff --git a/common_test/pgsql_create.sql b/common_test/pgsql_create.sql
index 8fc244f..f28a8c6 100644
--- a/common_test/pgsql_create.sql
+++ b/common_test/pgsql_create.sql
@@ -1,8 +1,20 @@
-CREATE USER itest;
-CREATE DATABASE itest OWNER itest;
-GRANT ALL PRIVILEGES ON DATABASE itest TO itest;
+CREATE USER itest_sqerl1 WITH ENCRYPTED PASSWORD 'itest_sqerl1';
+CREATE USER itest_sqerl2 WITH ENCRYPTED PASSWORD 'itest_sqerl2';
+
+CREATE DATABASE itest_sqerl1 OWNER itest_sqerl1;
+CREATE DATABASE itest_sqerl2 OWNER itest_sqerl2;
+
+\c itest_sqerl2;
+GRANT ALL PRIVILEGES ON DATABASE itest_sqerl2 TO itest_sqerl2;
+CREATE TABLE only_in_itest_sqerl2_db ( name VARCHAR(128));
+GRANT ALL PRIVILEGES ON TABLE only_in_itest_sqerl2_db TO itest_sqerl2;
+
+\c itest_sqerl1;
+GRANT ALL PRIVILEGES ON DATABASE itest_sqerl1 TO itest_sqerl1;
+CREATE TABLE only_in_itest_sqerl1_db ( name VARCHAR(128));
+GRANT ALL PRIVILEGES ON TABLE only_in_itest_sqerl1_db TO itest_sqerl1;
-\c itest;
 CREATE SEQUENCE users_id_sequence;
 
 /* Create test tables */
 CREATE TABLE users (
@@ -15,8 +27,8 @@ CREATE TABLE users (
     created timestamp
 );
 
-GRANT ALL PRIVILEGES ON TABLE users TO itest;
-GRANT ALL PRIVILEGES ON SEQUENCE users_id_sequence TO itest;
+GRANT ALL PRIVILEGES ON TABLE users TO itest_sqerl1;
+GRANT ALL PRIVILEGES ON SEQUENCE users_id_sequence TO itest_sqerl1;
 
 CREATE TABLE nodes (
     id char(32) PRIMARY KEY,
@@ -30,7 +42,7 @@ CREATE TABLE nodes (
     updated_at timestamp NOT NULL
 );
 
-GRANT ALL PRIVILEGES ON TABLE nodes TO itest;
+GRANT ALL PRIVILEGES ON TABLE nodes TO itest_sqerl1;
 
 CREATE OR REPLACE FUNCTION insert_users(varchar[],
                                         varchar[],
@@ -57,7 +69,7 @@ CREATE TABLE uuids (
     id uuid UNIQUE NOT NULL
 );
 
-GRANT ALL PRIVILEGES ON TABLE uuids TO itest;
+GRANT ALL PRIVILEGES ON TABLE uuids TO itest_sqerl1;
 
 CREATE OR REPLACE FUNCTION insert_ids(uuid[])
 RETURNS VOID AS
@@ -73,4 +85,3 @@ END;
 $$
 LANGUAGE plpgsql;
 
-
diff --git a/common_test/pgsql_test_buddy.erl b/common_test/pgsql_test_buddy.erl
index db62b32..0aadfca 100644
--- a/common_test/pgsql_test_buddy.erl
+++ b/common_test/pgsql_test_buddy.erl
@@ -6,8 +6,15 @@
 -compile([export_all]).
 
 -define(USER, string:strip(os:cmd("echo $USER"), right, $\n)).
+% NOTE - do not merge. Keeping the local-user psql commands because kitchen isn't syncing
+% up my local changes, and spending time debugging secondary issues (or completely cycling
+% the kitchen boxes for each test) isn't something I'm inclined to do at the moment.
 -define(DB_PIPE_CMD, "psql -q -d postgres -h localhost -p 5432 -U " ++ ?USER ++ " -f - < ~s").
 -define(DB_CMD, "psql -h localhost -p 5432 -U " ++ ?USER ++ " -d postgres -w -c '~s'").
+%-define(DB_PIPE_CMD, "sudo -u postgres psql -q -d postgres -f - < ~s").
+%-define(DB_CMD, "psql -d postgres -w -c '~s'").
+% END NOTE
+
 -define(POOL_NAME, sqerl).
 -define(POOLER_TIMEOUT, 500).
 
@@ -17,40 +24,29 @@ clean() ->
     [ os:cmd(io_lib:format(?DB_CMD, [Cmd])) || Cmd <- [
-        "drop database if exists itest",
-        "drop user if exists itest"
+        "drop database if exists itest_sqerl1",
+        "drop database if exists itest_sqerl2",
+        "drop user if exists itest_sqerl1",
+        "drop user if exists itest_sqerl2"
     ]].
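Taken together, the helpers in this module (clean/0 above, plus create/1, setup_env/0 and teardown_env/0 just below) are meant to be driven from a common_test suite. A minimal sketch of that wiring using the standard CT callbacks; the exact suite code in this repo may differ:

    %% Sketch: per-testcase setup/teardown built on pgsql_test_buddy.
    init_per_testcase(_TestCase, Config) ->
        pgsql_test_buddy:clean(),          %% drop any leftover itest_sqerl* users/databases
        pgsql_test_buddy:create(Config),   %% reload common_test/pgsql_create.sql
        pgsql_test_buddy:setup_env(),      %% set sqerl/pooler env and start the apps
        Config.

    end_per_testcase(_TestCase, _Config) ->
        pgsql_test_buddy:teardown_env(),
        ok.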
-create(Config) ->
+config_file(Config, File) ->
     Dir = lists:reverse(filename:split(?config(data_dir, Config))),
     {_, Good} = lists:split(1, Dir),
-    File = filename:join(lists:reverse(Good) ++ ["pgsql_create.sql"]),
-    ct:pal("File: ~s", [File]),
-    os:cmd(io_lib:format(?DB_PIPE_CMD, [File])).
+    filename:join(lists:reverse(Good) ++ [File]).
+
+create(Config) ->
+    File1 = config_file(Config, "pgsql_create.sql"),
+    ct:pal("Results of loading ~s", [File1]),
+    ct:pal(os:cmd(io_lib:format(?DB_PIPE_CMD, [File1]))).
 
 setup_env() ->
     application:stop(sasl),
-    Info = db_config(),
-    ok = application:set_env(sqerl, db_driver_mod, sqerl_pgsql_client),
-    ok = application:set_env(sqerl, db_host, ?GET_ARG(host, Info)),
-    ok = application:set_env(sqerl, db_port, ?GET_ARG(port, Info)),
-    ok = application:set_env(sqerl, db_user, "itest"),
-    ok = application:set_env(sqerl, db_pass, "itest"),
-    ok = application:set_env(sqerl, db_name, ?GET_ARG(db, Info)),
-    ok = application:set_env(sqerl, idle_check, 10000),
-    ok = application:set_env(sqerl, pooler_timeout, ?POOLER_TIMEOUT),
-    %% we could also call it like this:
-    %% {prepared_statements, statements()},
-    %% {prepared_statements, "itest/statements_pgsql.conf"},
-    ok = application:set_env(sqerl, prepared_statements, {obj_user, '#statements', []}),
-    ColumnTransforms = [{<<"created">>,
-                         fun sqerl_transformers:convert_YMDHMS_tuple_to_datetime/1}],
-    ok = application:set_env(sqerl, column_transforms, ColumnTransforms),
-    PoolConfig = [{name, ?POOL_NAME},
-                  {max_count, ?MAX_POOL_COUNT},
-                  {init_count, 1},
-                  {start_mfa, {sqerl_client, start_link, []}}],
-    ok = application:set_env(pooler, pools, [PoolConfig]),
+    % By default we set up multiple pools - this gives de facto verification
+    % that nothing gets broken in existing code (or tests) when the pool name is not
+    % specified.
+    %
+    setup_config(),
 
     Apps = [crypto, asn1, public_key, ssl, epgsql, pooler],
     [ application:start(A) || A <- Apps ],
@@ -59,16 +55,58 @@ setup_env() ->
     ?assert(lists:member(Status, [ok, {error, {already_started, sqerl}}])),
     ok.
 
+set_env_for(Key, EnvEntries) ->
+    [ application:set_env(Key, Entry, Value) || {Entry, Value} <- EnvEntries ].
+
 teardown_env() ->
     Apps = lists:reverse([crypto, asn1, public_key, ssl, epgsql, pooler, sqerl]),
     [application:stop(A) || A <- Apps].
 
-db_config() ->
-    [
-     {host, "localhost"},
-     {port, 5432},
-     {db, "itest"}
-    ].
+setup_config() ->
+    Pool1Extras = [{column_transforms, [{<<"created">>, fun sqerl_transformers:convert_YMDHMS_tuple_to_datetime/1}]},
+                   {prepared_statements, {obj_user, '#statements', []}}],
+    Pool2Extras = [{column_transforms, []}, {prepared_statements, none}],
+    EnvCfg = config([
+                     {other, "itest_sqerl2", Pool2Extras},
+                     {sqerl, "itest_sqerl1", Pool1Extras}
+                    ]),
+    set_env_for(sqerl, ?config(sqerl, EnvCfg)),
+    set_env_for(pooler, ?config(pooler, EnvCfg)),
+    ct:pal("Environment configuration: ~p", [[{sqerl, application:get_all_env(sqerl)},
+                                              {pooler, application:get_all_env(pooler)}]]).
+
+config(DBInfo) ->
+    [{sqerl, [
+              {ip_mode, [ipv4]},
+              {db_driver_mod, sqerl_pgsql_client},
+              {pooler_timeout, ?POOLER_TIMEOUT},
+              {databases, [ sqerl_db_config(Id, Name, Extras) || {Id, Name, Extras} <- DBInfo ]}
+             ]},
+     {pooler, [
+               {pools, [ pool_config(Id) || {Id, _, _} <- DBInfo ]}
+               %{metrics_module, folsom_metrics}
+              ]
+     }].
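The environment that setup_config/0 and config/1 build up is easier to see written out as a static sys.config. The sketch below mirrors the test values; credentials are the test placeholders and the pool sizes stand in for ?MAX_POOL_COUNT, so treat it as illustrative rather than authoritative:

    %% sys.config sketch: two sqerl databases, one pooler pool per database id.
    [{sqerl,  [{ip_mode, [ipv4]},
               {db_driver_mod, sqerl_pgsql_client},
               {pooler_timeout, 500},
               {databases, [{sqerl, [{db_host, "127.0.0.1"},
                                     {db_port, 5432},
                                     {db_user, "itest_sqerl1"},
                                     {db_pass, "itest_sqerl1"},
                                     {db_name, "itest_sqerl1"},
                                     {db_timeout, 5000},
                                     {idle_check, 1000},
                                     {prepared_statements, {obj_user, '#statements', []}},
                                     %% the test adds a convert_YMDHMS_tuple_to_datetime fun
                                     %% at runtime; funs cannot live in sys.config
                                     {column_transforms, []}]},
                            {other, [{db_host, "127.0.0.1"},
                                     {db_port, 5432},
                                     {db_user, "itest_sqerl2"},
                                     {db_pass, "itest_sqerl2"},
                                     {db_name, "itest_sqerl2"},
                                     {db_timeout, 5000},
                                     {idle_check, 1000},
                                     {prepared_statements, none},
                                     {column_transforms, []}]}]}]},
     {pooler, [{pools, [[{name, sqerl},
                         {max_count, 5},   %% stands in for ?MAX_POOL_COUNT
                         {init_count, 1},
                         {start_mfa, {sqerl_client, start_link, [{pool, sqerl}]}},
                         {queue_max, 5}],
                        [{name, other},
                         {max_count, 5},   %% stands in for ?MAX_POOL_COUNT
                         {init_count, 1},
                         {start_mfa, {sqerl_client, start_link, [{pool, other}]}},
                         {queue_max, 5}]]}]}].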
+
+sqerl_db_config(Id, TheName, Extras) ->
+    {Id, lists:flatten([{db_host, "127.0.0.1"},
+                        {db_port, 5432},
+                        {db_user, TheName},
+                        {db_pass, TheName},
+                        {db_name, TheName},
+                        {db_timeout, 5000},
+                        {idle_check, 1000},
+                        {id, Id}, % debugging
+                        Extras])
+    }.
+
+pool_config(Id) ->
+    [{name, Id},
+     {max_count, ?MAX_POOL_COUNT},
+     {init_count, 1},
+     {start_mfa, {sqerl_client, start_link, [{pool, Id}]}},
+     {queue_max, 5}].
 
 kill_pool() -> kill_pool(?MAX_POOL_COUNT).
 kill_pool(1) ->
@@ -76,3 +114,10 @@ kill_pool(1) ->
 kill_pool(X) ->
     pooler:take_member(?POOL_NAME, ?POOLER_TIMEOUT),
     kill_pool(X - 1).
+
+get_user() ->
+    case os:getenv("PG_USER") of
+        false -> os:getenv("USER");
+        User -> User
+    end.
diff --git a/common_test/sqerl_integration_SUITE.erl b/common_test/sqerl_integration_SUITE.erl
index f9b318c..4119912 100644
--- a/common_test/sqerl_integration_SUITE.erl
+++ b/common_test/sqerl_integration_SUITE.erl
@@ -14,7 +14,7 @@
 all() -> [pool_overflow, insert_data, insert_returning, select_data,
           select_data_as_record, select_first_number_zero, delete_data,
           update_datablob, select_boolean, update_created,
-          select_simple, adhoc_select, adhoc_insert, insert_select_gzip_data, array_test,
+          select_simple, select_simple_multipool_, adhoc_select, adhoc_insert, insert_select_gzip_data, array_test,
          select_timeout, execute_timeout].
 
 init_per_testcase(_, Config) ->
@@ -133,12 +133,18 @@ select_simple(Config) ->
     {ok, Count} = sqerl:execute(SqlCount),
     [[{<<"num_users">>, 5}]] = Count,
+
     %% select_simple_with_parameters() ->
     Sql = <<"select id from users where last_name = $1">>,
     {ok, Rows} = sqerl:execute(Sql, ["Smith"]),
     ExpectedRows = [[{<<"id">>,1}]],
     ?assertEqual(ExpectedRows, Rows).
 
+select_simple_multipool_(_Config) ->
+    ?assertMatch({ok, _}, sqerl:execute_with(sqerl:make_context(other), <<"SELECT COUNT(*) FROM only_in_itest_sqerl2_db">>)),
+    ?assertMatch({ok, _}, sqerl:execute_with(sqerl:make_context(sqerl), <<"SELECT COUNT(*) FROM only_in_itest_sqerl1_db">>)).
+
 adhoc_select(Config) ->
     insert_data(Config),
     insert_returning(Config),
@@ -361,6 +367,15 @@ adhoc_select(Config) ->
         ],
         ?assertEqual(ExpectedRows, Rows)
     end(),
+    %% adhoc_select_limit
+    fun() ->
+        {ok, Rows} = sqerl:adhoc_select_with(sqerl:make_context(sqerl), [<<"id">>], <<"users">>, all, [{order_by, [<<"id">>]}, {limit, 2}]),
+        ExpectedRows = [
+            [{<<"id">>, 1}],
+            [{<<"id">>, 2}]
+        ],
+        ?assertEqual(ExpectedRows, Rows)
+    end(),
 
     %% adhoc_select_offset
     fun() ->
@@ -380,7 +395,7 @@ adhoc_insert_delete_test(Table, Columns, Data, BatchSize) ->
     Values = [Value || [Value|_] <- Data],
     Where = {Field, in, Values},
     {ok, Rows} = sqerl:adhoc_select(Columns, Table, Where),
-    {ReturnedColumns, ReturnedData} = sqerl:extract_insert_data(Rows),
+    {ReturnedColumns, ReturnedData} = sqerl_core:extract_insert_data(Rows),
     %% clean up before asserts
     {ok, DeleteCount} = sqerl:adhoc_delete(Table, Where),
     %% now verify...
diff --git a/include/sqerl.hrl b/include/sqerl.hrl
index b22b0fb..0417540 100644
--- a/include/sqerl.hrl
+++ b/include/sqerl.hrl
@@ -31,8 +31,14 @@
     {ok, integer(), sqerl_rows()} |
     {error, atom() | tuple()}.
 
+-record(sqerl_ctx, { pool :: atom() }).
+-type sqerl_ctx() :: #sqerl_ctx{}.
+
 -ifdef(namespaced_types).
 -type sqerl_dict() :: dict:dict().
 -else.
 -type sqerl_dict() :: dict().
 -endif.
+
+-define(SQERL_DEFAULT_BATCH_SIZE, 100).
+-define(SQERL_ADHOC_INSERT_STMT_ATOM, '__adhoc_insert').
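With the #sqerl_ctx{} record and the batch-insert macros now living in sqerl.hrl, callers can target a specific pool through the new *_with API added in src/sqerl.erl below. A minimal usage sketch; the pool names follow the test configuration above, and obj_user_by_lname is a hypothetical prepared-statement name:

    %% Build a context per configured pool and route calls through it.
    DefaultCtx = sqerl:make_context(sqerl),   %% the default pool
    OtherCtx   = sqerl:make_context(other),   %% the second pool from the test config

    %% Context calls mirror the original API with the context prepended.
    {ok, _} = sqerl:execute_with(OtherCtx, <<"SELECT COUNT(*) FROM only_in_itest_sqerl2_db">>),
    {ok, _} = sqerl:select_with(DefaultCtx, obj_user_by_lname, ["Smith"]),

    %% The original, context-free API is unchanged; it is equivalent to
    %% calling the *_with variant with make_context(sqerl).
    {ok, _} = sqerl:execute(<<"SELECT 1">>).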
diff --git a/src/sqerl.erl b/src/sqerl.erl
index 96d7e79..497371b 100644
--- a/src/sqerl.erl
+++ b/src/sqerl.erl
@@ -1,8 +1,9 @@
 %% -*- erlang-indent-level: 4;indent-tabs-mode: nil; fill-column: 92 -*-
 %% ex: ts=4 sw=4 et
-%% @author Seth Falcon
-%% @author Mark Anderson
-%% Copyright 2011-2012 Opscode, Inc. All Rights Reserved.
+%% @author Seth Falcon
+%% @author Mark Anderson
+%% @author Marc Paradise
+%% Copyright 2011-2015 Chef Software, Inc.
 %%
 %% This file is provided to you under the Apache License,
 %% Version 2.0 (the "License"); you may not use this file
@@ -22,9 +23,9 @@
 -module(sqerl).
 
--export([checkout/0,
-         checkin/1,
-         with_db/1,
+%% Original API exports. These will execute
+%% all queries against the default `sqerl` pool.
+-export([
          select/2,
          select/3,
          select/4,
@@ -38,48 +39,37 @@
          adhoc_insert/2,
          adhoc_insert/3,
          adhoc_insert/4,
-         extract_insert_data/1,
-         adhoc_delete/2]).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("sqerl.hrl").
-
--define(MAX_RETRIES, 5).
-
-%% See http://www.postgresql.org/docs/current/static/errcodes-appendix.html
--define(PGSQL_ERROR_CODES, [{<<"23505">>, conflict}, {<<"23503">>, foreign_key}]).
-
-checkout() ->
-    pooler:take_member(sqerl, envy:get(sqerl, pooler_timeout, 0, integer)).
-
-checkin(Connection) ->
-    pooler:return_member(sqerl, Connection).
-
-with_db(Call) ->
-    with_db(Call, ?MAX_RETRIES).
-
-with_db(_Call, 0) ->
-    {error, no_connections};
-with_db(Call, Retries) ->
-    case checkout() of
-        error_no_members ->
-            {error, no_connections};
-        Cn when is_pid(Cn) ->
-            %% We don't need a try/catch around Call(Cn) because pooler links both the
-            %% connection and the process that has the connection checked out (this
-            %% process). So a crash here will not leak a connection.
-            case Call(Cn) of
-                {error, closed} ->
-                    %% Closing the connection will cause the process
-                    %% to shutdown. pooler will get notified and
-                    %% remove the connection from the pool.
-                    sqerl_client:close(Cn),
-                    with_db(Call, Retries - 1);
-                Result ->
-                    checkin(Cn),
-                    Result
-            end
-    end.
+         adhoc_delete/2
+        ]).
+
+%% Context-based API functions which currently
+%% allow multipool support. These functions are identical
+%% to their counterparts above, but each accepts a context as the
+%% first argument.
+-export([make_context/1,
+         select_with/3,
+         select_with/4,
+         select_with/5,
+         statement_with/3,
+         statement_with/4,
+         statement_with/5,
+         execute_with/3,
+         execute_with/2,
+         adhoc_select_with/4,
+         adhoc_select_with/5,
+         adhoc_insert_with/3,
+         adhoc_insert_with/4,
+         adhoc_insert_with/5,
+         adhoc_delete_with/3]).
+
+-include("sqerl.hrl").
+-define(DEFAULT_POOL, sqerl).
+
+%% TODO: consider accepting a proplist (or map if we go 17+) here so that
+%% we can extend it without requiring people to change everywhere
+%% they reference it...
+make_context(Pool) ->
+    #sqerl_ctx{pool = Pool}.
 
 select(StmtName, StmtArgs) ->
     select(StmtName, StmtArgs, identity, []).
@@ -90,16 +80,22 @@ select(StmtName, StmtArgs, XformName) ->
     select(StmtName, StmtArgs, XformName, []).
 
 select(StmtName, StmtArgs, XformName, XformArgs) ->
-    case execute_statement(StmtName, StmtArgs, XformName, XformArgs) of
-        {ok, []} ->
-            {ok, none};
-        {ok, Results} ->
-            {ok, Results};
-        {ok, Count, Results} ->
-            {ok, Count, Results};
-        {error, Reason} ->
-            parse_error(Reason)
-    end.
+    select_with(make_context(?DEFAULT_POOL), StmtName, StmtArgs, XformName, XformArgs).
+
+%% @doc As select/2, with a context added.
+select_with(#sqerl_ctx{} = Context, StmtName, StmtArgs) ->
+    select_with(Context, StmtName, StmtArgs, identity, []).
+
+%% @doc As select/3, with a context added.
+select_with(#sqerl_ctx{} = Context, StmtName, StmtArgs, {XformName, XformArgs}) ->
+    select_with(Context, StmtName, StmtArgs, XformName, XformArgs);
+select_with(#sqerl_ctx{} = Context, StmtName, StmtArgs, XformName) ->
+    select_with(Context, StmtName, StmtArgs, XformName, []).
+
+%% @doc As select/4, with a context added.
+select_with(#sqerl_ctx{} = Context, StmtName, StmtArgs, XformName, XformArgs) ->
+    Results = sqerl_core:execute_statement(Context, StmtName, StmtArgs, XformName, XformArgs),
+    sqerl_core:parse_select_results(Results).
 
 statement(StmtName, StmtArgs) ->
     statement(StmtName, StmtArgs, identity, []).
@@ -108,33 +104,19 @@ statement(StmtName, StmtArgs, XformName) ->
     statement(StmtName, StmtArgs, XformName, []).
 
 statement(StmtName, StmtArgs, XformName, XformArgs) ->
-    case execute_statement(StmtName, StmtArgs, XformName, XformArgs) of
-        {ok, 0} ->
-            {ok, none};
-        {ok, N} when is_number(N) ->
-            {ok, N};
-        % response from execute of raw sql
-        {ok, {N, Rows}} when is_number(N) ->
-            {ok, N, Rows};
-        {ok, N, Rows} when is_number(N) ->
-            {ok, N, Rows};
-        {error, Reason} ->
-            parse_error(Reason)
-    end.
-
-execute_statement(StmtName, StmtArgs, XformName, XformArgs) ->
-    case execute(StmtName, StmtArgs) of
-        {ok, Results} ->
-            Xformer = erlang:apply(sqerl_transformers, XformName, XformArgs),
-            Xformer(Results);
-        {ok, Count, Results} ->
-            %% we'll get here for an INSERT ... RETURNING query
-            Xformer = erlang:apply(sqerl_transformers, XformName, XformArgs),
-            {ok, XResult} = Xformer(Results),
-            {ok, Count, XResult};
-        Other ->
-            Other
-    end.
+    statement_with(make_context(?DEFAULT_POOL), StmtName, StmtArgs, XformName, XformArgs).
+
+statement_with(#sqerl_ctx{} = Context, StmtName, StmtArgs) ->
+    statement_with(Context, StmtName, StmtArgs, identity, []).
+
+statement_with(#sqerl_ctx{} = Context, StmtName, StmtArgs, XformName) ->
+    statement_with(Context, StmtName, StmtArgs, XformName, []).
+
+statement_with(#sqerl_ctx{} = Context, StmtName, StmtArgs, XformName, XformArgs) ->
+    Results = sqerl_core:execute_statement(Context, StmtName, StmtArgs, XformName, XformArgs),
+    sqerl_core:parse_statement_results(Results).
 
 %% @doc Execute query or statement with no parameters.
 %% See execute/2 for return info.
@@ -160,8 +142,19 @@ execute(QueryOrStatement) ->
 %%
 -spec execute(sqerl_query(), [] | [term()]) -> sqerl_results().
 execute(QueryOrStatement, Parameters) ->
-    F = fun(Cn) -> sqerl_client:execute(Cn, QueryOrStatement, Parameters) end,
-    with_db(F).
+    execute_with(make_context(?DEFAULT_POOL), QueryOrStatement, Parameters).
+
+%% @doc As execute/1, adds a sqerl_ctx() as its first argument.
+%% See execute/1 for return info.
+-spec execute_with(sqerl_ctx(), sqerl_query()) -> sqerl_results().
+execute_with(#sqerl_ctx{} = Context, QueryOrStatement) ->
+    sqerl_core:execute(Context, QueryOrStatement, []).
+
+%% @doc As execute/2, adds a sqerl_ctx() as its first argument.
+-spec execute_with(sqerl_ctx(), sqerl_query(), [] | [term()]) -> sqerl_results().
+execute_with(#sqerl_ctx{} = Context, QueryOrStatement, Parameters) ->
+    sqerl_core:execute(Context, QueryOrStatement, Parameters).
 
 
 %% @doc Execute an adhoc select query.
@@ -207,17 +200,32 @@ adhoc_select(Columns, Table, Where) ->
 %% that uses several clauses.
 -spec adhoc_select([binary() | string()], binary() | string(), atom() | tuple(), [] | [atom() | tuple()]) -> sqerl_results().
 adhoc_select(Columns, Table, Where, Clauses) ->
-    {SQL, Values} = sqerl_adhoc:select(Columns, Table,
-                                       [{where, Where}|Clauses], param_style()),
-    execute(SQL, Values).
+    adhoc_select_with(make_context(?DEFAULT_POOL), Columns, Table, Where, Clauses).
+
+%% @doc As adhoc_select/3, adds a #sqerl_ctx as the first argument.
+%% @see adhoc_select/3
+-spec adhoc_select_with(sqerl_ctx(), [binary() | string()], binary() | string(), atom() | tuple()) -> sqerl_results().
+adhoc_select_with(#sqerl_ctx{} = Context, Columns, Table, Where) ->
+    adhoc_select_with(Context, Columns, Table, Where, []).
 
--define(SQERL_DEFAULT_BATCH_SIZE, 100).
-%% Backend DBs limit what we can name a statement.
-%% No upper case, no $...
--define(SQERL_ADHOC_INSERT_STMT_ATOM, '__adhoc_insert').
+%% @doc As adhoc_select/4, adds a #sqerl_ctx as the first argument.
+%% @see adhoc_select/4
+-spec adhoc_select_with(sqerl_ctx(), [binary() | string()], binary() | string(), atom() | tuple(), [] | [atom() | tuple()]) -> sqerl_results().
+adhoc_select_with(#sqerl_ctx{} = Context, Columns, Table, Where, Clauses) ->
+    {SQL, Values} = sqerl_adhoc:select(Columns,
+                                       Table,
+                                       [{where, Where}|Clauses],
+                                       sqerl_client:sql_parameter_style()),
+    sqerl_core:execute(Context, SQL, Values).
 
 %% @doc Insert Rows into Table with default batch size.
 %% @see adhoc_insert/3.
 adhoc_insert(Table, Rows) ->
     adhoc_insert(Table, Rows, ?SQERL_DEFAULT_BATCH_SIZE).
@@ -238,7 +246,7 @@ adhoc_insert(Table, Rows) ->
 %% @see adhoc_insert/4.
 adhoc_insert(Table, Rows, BatchSize) ->
     %% reformat Rows to desired format
-    {Columns, RowsValues} = extract_insert_data(Rows),
+    {Columns, RowsValues} = sqerl_core:extract_insert_data(Rows),
     adhoc_insert(Table, Columns, RowsValues, BatchSize).
 
 %% @doc Insert records defined by {Columns, RowsValues}
@@ -260,101 +268,34 @@ adhoc_insert(Table, Rows, BatchSize) ->
 %% {ok, 2}
 %% '''
 %%
-adhoc_insert(_Table, _Columns, [], _BatchSize) ->
-    %% empty list of rows means nothing to do
+adhoc_insert(Table, Columns, RowsValues, BatchSize) ->
+    adhoc_insert_with(make_context(?DEFAULT_POOL), Table, Columns, RowsValues, BatchSize).
+
+%% @doc As adhoc_insert/2, adds a #sqerl_ctx as the first argument.
+%% @see adhoc_insert/2.
+adhoc_insert_with(#sqerl_ctx{} = Context, Table, Rows) ->
+    adhoc_insert_with(Context, Table, Rows, ?SQERL_DEFAULT_BATCH_SIZE).
+
+%% @doc As adhoc_insert/3, adds a #sqerl_ctx as the first argument.
+%% @see adhoc_insert/3.
+adhoc_insert_with(#sqerl_ctx{} = Context, Table, Rows, BatchSize) ->
+    %% reformat Rows to desired format
+    {Columns, RowsValues} = sqerl_core:extract_insert_data(Rows),
+    adhoc_insert_with(Context, Table, Columns, RowsValues, BatchSize).
+
+%% @doc As adhoc_insert/4, adds a #sqerl_ctx as the first argument.
+%% @see adhoc_insert/4
+adhoc_insert_with(_Context, _Table, _Columns, [], _BatchSize) ->
     {ok, 0};
-adhoc_insert(Table, Columns, RowsValues, BatchSize) when BatchSize > 0 ->
+adhoc_insert_with(#sqerl_ctx{} = Context, Table, Columns, RowsValues, BatchSize) when BatchSize > 0 ->
     NumRows = length(RowsValues),
     %% Avoid the case where NumRows < BatchSize
     EffectiveBatchSize = erlang:min(NumRows, BatchSize),
-    bulk_insert(Table, Columns, RowsValues, NumRows, EffectiveBatchSize).
+    bulk_insert_with(Context, Table, Columns, RowsValues, NumRows, EffectiveBatchSize).
 
-%% @doc Bulk insert rows. Returns {ok, InsertedCount}.
-bulk_insert(Table, Columns, RowsValues, NumRows, BatchSize) when NumRows >= BatchSize ->
-    Inserter = make_batch_inserter(Table, Columns, RowsValues, NumRows, BatchSize),
-    with_db(Inserter).
-
-%% @doc Returns a function to call via with_db/1.
-%%
-%% Function prepares an insert statement, inserts all the batches and
-%% remaining rows, unprepares the statement, and returns
-%% {ok, InsertedCount}.
-%%
-%% We need to use this approach because preparing, inserting,
-%% unpreparing must all be done against the same DB connection.
-%%
-make_batch_inserter(Table, Columns, RowsValues, NumRows, BatchSize) ->
-    SQL = sqerl_adhoc:insert(Table, Columns, BatchSize, param_style()),
-    fun(Cn) ->
-        ok = sqerl_client:prepare(Cn, ?SQERL_ADHOC_INSERT_STMT_ATOM, SQL),
-        try
-            insert_batches(Cn, ?SQERL_ADHOC_INSERT_STMT_ATOM,
-                           Table, Columns, RowsValues, NumRows, BatchSize)
-        after
-            sqerl_client:unprepare(Cn, ?SQERL_ADHOC_INSERT_STMT_ATOM)
-        end
-    end.
-
-%% @doc Insert data with insert statement already prepared.
-insert_batches(Cn, StmtName, Table, Columns, RowsValues, NumRows, BatchSize) ->
-    insert_batches(Cn, StmtName, Table, Columns, RowsValues, NumRows, BatchSize, 0).
-
-%% @doc Tail-recursive function iterates over batches and inserts them.
-%% Also inserts the remaining rows (if any) in one shot.
-%% Returns {ok, InsertedCount}.
-insert_batches(Cn, StmtName, Table, Columns, RowsValues, NumRows, BatchSize, CountSoFar)
-  when NumRows >= BatchSize ->
-    {RowsValuesToInsert, Rest} = lists:split(BatchSize, RowsValues),
-    {ok, Count} = insert_oneshot(Cn, StmtName, RowsValuesToInsert),
-    insert_batches(Cn, StmtName, Table, Columns, Rest, NumRows - Count, BatchSize, CountSoFar + Count);
-insert_batches(Cn, _StmtName, Table, Columns, RowsValues, _NumRows, _BatchSize, CountSoFar) ->
-    %% We have fewer rows than fit in a batch, so we'll do a one-shot insert for those.
-    {ok, InsertCount} = adhoc_insert_oneshot(Cn, Table, Columns, RowsValues),
-    {ok, CountSoFar + InsertCount}.
-
-%% @doc Insert all given rows in one shot.
-%% Creates one SQL statement to insert all the rows at once,
-%% then executes.
-%% Returns {ok, InsertedCount}.
-adhoc_insert_oneshot(_Cn, _Table, _Columns, []) ->
-    %% 0 rows means nothing to do!
-    {ok, 0};
-adhoc_insert_oneshot(Cn, Table, Columns, RowsValues) ->
-    SQL = sqerl_adhoc:insert(Table, Columns, length(RowsValues), param_style()),
-    insert_oneshot(Cn, SQL, RowsValues).
-
-%% @doc Insert all rows at once using given
-%% prepared statement or SQL.
-%% Returns {ok, InsertCount}.
-insert_oneshot(_Cn, _StmtOrSQL, []) ->
-    %% 0 rows means nothing to do!
-    {ok, 0};
-insert_oneshot(Cn, StmtOrSQL, RowsValues) ->
-    %% Need to flatten list of row data (list of lists)
-    %% to a flat list of parameters to the query
-    Parameters = lists:flatten(RowsValues),
-    sqerl_client:execute(Cn, StmtOrSQL, Parameters).
-
-%% @doc Extract insert data from Rows.
-%%
-%% Assumes all rows have the same format.
-%% Returns {Columns, RowsValues}.
-%%
-%% ```
-%% 1> extract_insert_data([
-%%        [{<<"id">>, 1}, {<<"name">>, <<"Joe">>}],
-%%        [{<<"id">>, 2}, {<<"name">>, <<"Jeff">>}],
-%%    ]).
-%% {[<<"id">>,<<"name">>],[[1,<<"Joe">>],[2,<<"Jeff">>]]}
-%% '''
-%%
--spec extract_insert_data(sqerl_rows()) -> {[binary()], [[term()]]}.
-extract_insert_data([]) ->
-    {[], []};
-extract_insert_data(Rows) ->
-    Columns = [C || {C, _V} <- hd(Rows)],
-    RowsValues = [[V || {_C, V} <- Row] || Row <- Rows],
-    {Columns, RowsValues}.
+%% @doc Bulk insert rows using the provided context. Returns {ok, InsertedCount}.
+bulk_insert_with(#sqerl_ctx{} = Context, Table, Columns, RowsValues, NumRows, BatchSize) when NumRows >= BatchSize ->
+    sqerl_core:bulk_insert(Context, Table, Columns, RowsValues, NumRows, BatchSize).
 
 
 %% @doc Adhoc delete.
@@ -363,70 +304,13 @@ extract_insert_data(Rows) ->
 %% Uses the same Where specifications as adhoc_select/3.
 %% Returns {ok, Count} or {error, ErrorInfo}.
 %%
 -spec adhoc_delete(binary(), term()) -> {ok, integer()} | {error, any()}.
 adhoc_delete(Table, Where) ->
-    {SQL, Values} = sqerl_adhoc:delete(Table, Where, param_style()),
-    execute(SQL, Values).
+    adhoc_delete_with(make_context(?DEFAULT_POOL), Table, Where).
 
-%% The following illustrates how we could also implement adhoc update
-%% if ever desired.
-%%
-%% %@doc Adhoc update.
-%% Updates records matching Where specifications with
-%% fields and values in given Row.
+%% @doc Adhoc delete.
 %% Uses the same Where specifications as adhoc_select/3.
 %% Returns {ok, Count} or {error, ErrorInfo}.
 %%
-%%-spec adhoc_update(binary(), list(), term()) -> {ok, integer()} | {error, any()}.
-%%adhoc_update(Table, Row, Where) ->
-%%    {SQL, Values} = sqerl_adhoc:update(Table, Row, Where, param_style()),
-%%    execute(SQL, Values).
-
-
-%% @doc Shortcut for sqerl_client:parameter_style()
--spec param_style() -> atom().
-param_style() -> sqerl_client:sql_parameter_style().
-
-
-%% @doc Utility for generating specific message tuples from database-specific error
-%% messages. The 1-argument form determines which database is being used by querying
-%% Sqerl's configuration at runtime, while the 2-argument form takes the database type as a
-%% parameter directly.
--spec parse_error(
-        {term(), term()} |               %% MySQL error
-        {error, {error, error, _, _, _}} %% PostgreSQL error
-       ) -> sqerl_error().
-parse_error(Reason) ->
-    parse_error(pgsql, Reason).
-
--spec parse_error(pgsql,
-                  'no_connections' |
-                  {'error', 'error', _, _, _} |
-                  {'error', {'error', _, _, _, _}}) -> sqerl_error().
-parse_error(_DbType, no_connections) ->
-    {error, no_connections};
-parse_error(_DbType, {error, Reason} = Error) when is_atom(Reason) ->
-    Error;
-parse_error(pgsql, {error, error, Code, Message, _Extra}) ->
-    do_parse_error({Code, Message}, ?PGSQL_ERROR_CODES);
-parse_error(pgsql, {error,  % error from sqerl
-                    {error, % error record marker from epgsql
-                     _Severity,
-                     Code, Message, _Extra}}) ->
-    do_parse_error({Code, Message}, ?PGSQL_ERROR_CODES);
-parse_error(_, Error) ->
-    case Error of
-        {error, _} ->
-            Error;
-        _ ->
-            {error, Error}
-    end.
-
-do_parse_error({Code, Message}, CodeList) ->
-    case lists:keyfind(Code, 1, CodeList) of
-        {_, ErrorType} ->
-            {ErrorType, Message};
-        false ->
-            %% People of the Future!
-            %% For Postgres, sqerl_pgsql_errors:translate_code is available
-            %% turning Postgres codes to human-readable tuples
-            {error, {Code, Message}}
-    end.
+-spec adhoc_delete_with(sqerl_ctx(), binary(), term()) -> {ok, integer()} | {error, any()}.
+adhoc_delete_with(#sqerl_ctx{} = Context, Table, Where) ->
+    {SQL, Values} = sqerl_adhoc:delete(Table, Where, sqerl_client:sql_parameter_style()),
+    sqerl_core:execute(Context, SQL, Values).
diff --git a/src/sqerl_client.erl b/src/sqerl_client.erl
index 39587a9..8c44567 100644
--- a/src/sqerl_client.erl
+++ b/src/sqerl_client.erl
@@ -1,8 +1,9 @@
 %% -*- erlang-indent-level: 4;indent-tabs-mode: nil; fill-column: 92 -*-
 %% ex: ts=4 sw=4 et
 %% @author Kevin Smith
+%% @author Marc Paradise
 %% @doc Abstraction around interacting with SQL databases
-%% Copyright 2011-2012 Opscode, Inc. All Rights Reserved.
+%% Copyright 2011-2015 Chef Software, Inc.
 %%
 %% This file is provided to you under the Apache License,
 %% Version 2.0 (the "License"); you may not use this file
@@ -25,24 +26,6 @@
 
 -behaviour(gen_server).
 
--define(LOG_STATEMENT(Name, Args), case envy:get(sqerl, log_statements, ok, boolean) of
-                                       ok ->
-                                           ok;
-                                       false ->
-                                           ok;
-                                       true ->
-                                           error_logger:info_msg("(~p) Executing statement ~p with args ~p~n", [self(), Name, Args])
-                                   end).
-
--define(LOG_RESULT(Result), case envy:get(sqerl, log_statements, ok, boolean) of
-                                ok ->
-                                    ok;
-                                false ->
-                                    ok;
-                                true ->
-                                    error_logger:info_msg("(~p) Result: ~p~n", [self(), Result])
-                            end).
-
 %% API
 -export([start_link/0,
          start_link/1,
@@ -67,6 +50,7 @@
 
 -record(state, {cb_mod,
                 cb_state,
+                pool :: atom(),
                 timeout = 5000 :: pos_integer()}).
 
 %% behavior callback definitions
@@ -114,35 +98,40 @@ close(Cn) ->
 
 start_link() ->
-    gen_server:start_link(?MODULE, [], []).
-start_link(DbType) ->
-    gen_server:start_link(?MODULE, [DbType], []).
-
-init([]) ->
-    init(drivermod());
-init(CallbackMod) ->
-    IdleCheck = envy:get(sqerl, idle_check, 1000, non_neg_integer),
-
-    Statements = read_statements_from_config(),
-
-    %% The ip_mode key in the sqerl clause determines if we parse db_host as IPv4 or IPv6
-    Config = [{host, envy_parse:host_to_ip(sqerl, db_host)},
-              {port, envy:get(sqerl, db_port, pos_integer)},
-              {user, envy:get(sqerl, db_user, string)},
-              {pass, envy:get(sqerl, db_pass, string)},
-              {db, envy:get(sqerl, db_name, string)},
-              {timeout, envy:get(sqerl, db_timeout, 5000, pos_integer)},
+    gen_server:start_link(?MODULE, {sqerl, none}, []).
+
+start_link({pool, Pool}) ->
+    gen_server:start_link(?MODULE, {Pool, none}, []);
+start_link(CB) ->
+    gen_server:start_link(?MODULE, {sqerl, CB}, []).
+
+init({Pool, none}) ->
+    init({Pool, drivermod()});
+init({Pool, CallbackMod}) ->
+    Cfg = pool_config(Pool),
+    Statements = read_statements_from_config(Pool),
+    IdleCheck = envy:proplist_get(idle_check, non_neg_integer, Cfg, 1000),
+    Host = envy:proplist_get(db_host, string, Cfg),
+    Config = [{host, envy_parse:parse_host_to_ip(sqerl, Host)},
+              {port, envy:proplist_get(db_port, pos_integer, Cfg)},
+              {user, envy:proplist_get(db_user, string, Cfg)},
+              {pass, envy:proplist_get(db_pass, string, Cfg)},
+              {db, envy:proplist_get(db_name, string, Cfg)},
+              {timeout, envy:proplist_get(db_timeout, pos_integer, Cfg, 1000)},
               {idle_check, IdleCheck},
               {prepared_statements, Statements},
-              {column_transforms, envy:get(sqerl, column_transforms, list)}],
+              {column_transforms, envy:proplist_get(column_transforms, list, Cfg, [])}
+             ],
     case CallbackMod:init(Config) of
         {ok, CallbackState} ->
-            {ok, #state{cb_mod=CallbackMod, cb_state=CallbackState,
+            {ok, #state{pool=Pool,
+                        cb_mod=CallbackMod, cb_state=CallbackState,
                         timeout=IdleCheck}, IdleCheck};
         Error ->
            {stop, Error}
    end.
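Since init/1 now pulls everything from pool_config(Pool) (defined near the bottom of this module), and that helper falls back to application:get_all_env(sqerl) when no `databases` key is configured, an existing flat single-pool configuration should keep working unchanged. A sketch of that legacy shape, with placeholder credentials:

    %% Legacy single-pool sqerl configuration (no `databases` key); still honored.
    [{sqerl, [{db_driver_mod, sqerl_pgsql_client},
              {db_host, "127.0.0.1"},
              {db_port, 5432},
              {db_user, "itest"},
              {db_pass, "itest"},
              {db_name, "itest"},
              {db_timeout, 5000},
              {idle_check, 10000},
              {pooler_timeout, 500},
              {prepared_statements, {obj_user, '#statements', []}},
              {column_transforms, []}]}].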
+
 handle_call({Call, QueryOrStatementName, Args}, From, State) ->
     exec_driver({Call, QueryOrStatementName, Args}, From, State);
 handle_call(close, _From, State) ->
@@ -173,19 +162,25 @@ code_change(_OldVsn, State, _Extra) ->
 
 %% @doc Call DB driver process
 exec_driver({Call, QueryOrName, Args}, _From, #state{cb_mod=CBMod, cb_state=CBState,
                                                      timeout=Timeout}=State) ->
-    ?LOG_STATEMENT(QueryOrName, Args),
+    % Don't keep the logging flag in state, so that it can easily be
+    % switched on for debugging.
+    % TODO - logging per pool?
+    LogIt = envy:get(sqerl, log_statements, false, boolean),
+    maybe_log(LogIt, QueryOrName, Args),
     {Result, NewCBState} = apply(CBMod, Call, [QueryOrName, Args, CBState]),
-    ?LOG_RESULT(Result),
+    maybe_log(LogIt, Result),
     {reply, Result, State#state{cb_state=NewCBState}, Timeout}.
 
-
 %% @doc Prepared statements can be provided as a list of `{atom(), binary()}' tuples, as a
 %% path to a file that can be consulted for such tuples, or as `{M, F, A}' such that
 %% `apply(M, F, A)' returns the statement tuples.
--spec read_statements([{atom(), term()}]
+-spec read_statements(none
+                      | [{atom(), term()}]
                       | string()
                       | {atom(), atom(), list()}) -> [{atom(), binary()}].
+read_statements(none) ->
+    [];
 read_statements({Mod, Fun, Args}) ->
     apply(Mod, Fun, Args);
 read_statements(L = [{Label, SQL}|_T]) when is_atom(Label) andalso is_binary(SQL) ->
@@ -215,8 +210,8 @@ sql_parameter_style() ->
 %% translates.
 -spec drivermod() -> atom().
 drivermod() ->
-    case envy:get(sqerl, db_driver_mod, undefined, atom) of
-        undefined ->
+    case envy:get(sqerl, db_driver_mod, unknown, atom) of
+        unknown ->
             case envy:get(sqerl, db_type, sqerl_pgsql_client, atom) of
                 pgsql ->
                     %% default pgsql driver mod
@@ -241,14 +236,8 @@ drivermod() ->
             log_and_error({invalid_application_config, sqerl, db_driver_mod, Error})
     end.
 
-%% Helper function to report and error
-
-log_and_error(Msg) ->
-    error_logger:error_report(Msg),
-    error(Msg).
-
-read_statements_from_config() ->
-    StatementSource = envy:get(sqerl, prepared_statements, any),
+read_statements_from_config(Pool) ->
+    StatementSource = envy:proplist_get(prepared_statements, any, pool_config(Pool)),
     try
         read_statements(StatementSource)
     catch
@@ -257,3 +246,30 @@ read_statements_from_config() ->
             error_logger:error_report(Msg),
             error(Msg)
     end.
+
+%% Helper function to report an error
+log_and_error(Msg) ->
+    error_logger:error_report(Msg),
+    error(Msg).
+
+maybe_log(true, QueryOrName, Args) ->
+    error_logger:info_msg("(~p) Executing statement ~p with args ~p~n", [self(), QueryOrName, Args]);
+maybe_log(_, _, _) ->
+    ok.
+
+maybe_log(true, Results) ->
+    error_logger:info_msg("(~p) Result: ~p~n", [self(), Results]);
+maybe_log(_, _) ->
+    ok.
+
+pool_config(Pool) ->
+    % Support backward compatibility for configurations that do not specify
+    % 'databases'.
+    case envy:get(sqerl, databases, none, list) of
+        none ->
+            application:get_all_env(sqerl);
+        Databases ->
+            envy:proplist_get(Pool, list, Databases)
+    end.
diff --git a/src/sqerl_core.erl b/src/sqerl_core.erl
new file mode 100644
index 0000000..0b4beac
--- /dev/null
+++ b/src/sqerl_core.erl
@@ -0,0 +1,260 @@
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil; fill-column: 92 -*-
+%% ex: ts=4 sw=4 et
+%% @author Marc Paradise
+%% @author Seth Falcon
+%% @author Mark Anderson
+%% Copyright 2011-2016 Chef Software, Inc.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +-module(sqerl_core). + +-include("sqerl.hrl"). +-define(MAX_RETRIES, 5). + +%% See http://www.postgresql.org/docs/current/static/errcodes-appendix.html +-define(PGSQL_ERROR_CODES, [{<<"23505">>, conflict}, {<<"23503">>, foreign_key}]). + +-export([checkout/1, + checkin/2, + with_db/2, + with_db/3, + execute/3, + execute_statement/5, + bulk_insert/6, + parse_error/1, + parse_statement_results/1, + parse_select_results/1, + extract_insert_data/1]). + +checkout(#sqerl_ctx{pool = Pool}) -> + pooler:take_member(Pool, envy:get(sqerl, pooler_timeout, 0, integer)). + +checkin(#sqerl_ctx{pool = Pool}, Connection) -> + pooler:return_member(Pool, Connection). + +with_db(Context, Call) -> + with_db(Context, Call, ?MAX_RETRIES). + +with_db(_Context, _Call, 0) -> + {error, no_connections}; +with_db(#sqerl_ctx{} = Context, Call, Retries) -> + case checkout(Context) of + error_no_members -> + {error, no_connections}; + Cn when is_pid(Cn) -> + %% We don't need a try/catch around Call(Cn) because pooler links both the + %% connection and the process that has the connection checked out (this + %% process). So a crash here will not leak a connection. + case Call(Cn) of + {error, closed} -> + %% Closing the connection will cause the process + %% to shutdown. pooler will get notified and + %% remove the connection from the pool. + sqerl_client:close(Cn), + with_db(Call, Retries - 1); + Result -> + checkin(Context, Cn), + Result + end + end. + +execute_statement(Context, StmtName, StmtArgs, XformName, XformArgs) -> + case execute(Context, StmtName, StmtArgs) of + {ok, Results} -> + Xformer = erlang:apply(sqerl_transformers, XformName, XformArgs), + Xformer(Results); + {ok, Count, Results} -> + %% we'll get here for an INSERT ... RETURNING query + Xformer = erlang:apply(sqerl_transformers, XformName, XformArgs), + {ok, XResult} = Xformer(Results), + {ok, Count, XResult}; + Other -> + Other + end. + +%% @doc Execute query or statement with parameters. +%% ``` +%% Returns: +%% - {ok, Result} +%% - {error, ErrorInfo} +%% +%% Result depends on the query being executed, and can be +%% - Rows +%% - Count +%% +%% Row is a proplist-like array, e.g. [{<<"id">>, 1}, {<<"name">>, <<"John">>}] +%% +%% Note that both a simple query and a prepared statement can take +%% parameters. +%% ''' +%% +-spec execute(sqerl_ctx(), sqerl_query(), [] | [term()]) -> sqerl_results(). +execute(Context, QueryOrStatement, Parameters) -> + F = fun(Cn) -> sqerl_client:execute(Cn, QueryOrStatement, Parameters) end, + with_db(Context, F). + +bulk_insert(Context, Table, Columns, RowsValues, NumRows, BatchSize) when NumRows >= BatchSize -> + Inserter = make_batch_inserter(Table, Columns, RowsValues, NumRows, BatchSize), + with_db(Context, Inserter). + +%% @doc Returns a function to call via sqerl_core:with_db/1. +%% +%% Function prepares an insert statement, inserts all the batches and +%% remaining rows, unprepares the statement, and returns +%% {ok, InsertedCount}. +%% +%% We need to use this approach because preparing, inserting, +%% unpreparing must all be done against the same DB connection. 
+%%
+make_batch_inserter(Table, Columns, RowsValues, NumRows, BatchSize) ->
+    ParamStyle = sqerl_client:sql_parameter_style(),
+    SQL = sqerl_adhoc:insert(Table, Columns, BatchSize, ParamStyle),
+    fun(Cn) ->
+        ok = sqerl_client:prepare(Cn, ?SQERL_ADHOC_INSERT_STMT_ATOM, SQL),
+        try
+            insert_batches(Cn, ?SQERL_ADHOC_INSERT_STMT_ATOM,
+                           Table, Columns, RowsValues, NumRows, BatchSize, ParamStyle)
+        after
+            sqerl_client:unprepare(Cn, ?SQERL_ADHOC_INSERT_STMT_ATOM)
+        end
+    end.
+
+%% @doc Insert data with insert statement already prepared.
+insert_batches(Cn, StmtName, Table, Columns, RowsValues, NumRows, BatchSize, ParamStyle) ->
+    insert_batches(Cn, StmtName, Table, Columns, RowsValues, NumRows, BatchSize, 0, ParamStyle).
+
+%% @doc Tail-recursive function iterates over batches and inserts them.
+%% Also inserts the remaining rows (if any) in one shot.
+%% Returns {ok, InsertedCount}.
+insert_batches(Cn, StmtName, Table, Columns, RowsValues, NumRows, BatchSize, CountSoFar, ParamStyle)
+  when NumRows >= BatchSize ->
+    {RowsValuesToInsert, Rest} = lists:split(BatchSize, RowsValues),
+    {ok, Count} = insert_oneshot(Cn, StmtName, RowsValuesToInsert),
+    insert_batches(Cn, StmtName, Table, Columns, Rest, NumRows - Count, BatchSize, CountSoFar + Count, ParamStyle);
+insert_batches(Cn, _StmtName, Table, Columns, RowsValues, _NumRows, _BatchSize, CountSoFar, ParamStyle) ->
+    %% We have fewer rows than fit in a batch, so we'll do a one-shot insert for those.
+    {ok, InsertCount} = adhoc_insert_oneshot(Cn, Table, Columns, RowsValues, ParamStyle),
+    {ok, CountSoFar + InsertCount}.
+
+%% @doc Insert all given rows in one shot.
+%% Creates one SQL statement to insert all the rows at once,
+%% then executes.
+%% Returns {ok, InsertedCount}.
+adhoc_insert_oneshot(_Cn, _Table, _Columns, [], _ParamStyle) ->
+    %% 0 rows means nothing to do!
+    {ok, 0};
+adhoc_insert_oneshot(Cn, Table, Columns, RowsValues, ParamStyle) ->
+    SQL = sqerl_adhoc:insert(Table, Columns, length(RowsValues), ParamStyle),
+    insert_oneshot(Cn, SQL, RowsValues).
+
+%% @doc Insert all rows at once using the given
+%% prepared statement or SQL.
+%% Returns {ok, InsertCount}.
+insert_oneshot(_Cn, _StmtOrSQL, []) ->
+    %% 0 rows means nothing to do!
+    {ok, 0};
+insert_oneshot(Cn, StmtOrSQL, RowsValues) ->
+    %% Need to flatten list of row data (list of lists)
+    %% to a flat list of parameters to the query
+    Parameters = lists:flatten(RowsValues),
+    sqerl_client:execute(Cn, StmtOrSQL, Parameters).
+
+%% @doc Extract insert data from Rows.
+%%
+%% Assumes all rows have the same format.
+%% Returns {Columns, RowsValues}.
+%%
+%% ```
+%% 1> extract_insert_data([
+%%        [{<<"id">>, 1}, {<<"name">>, <<"Joe">>}],
+%%        [{<<"id">>, 2}, {<<"name">>, <<"Jeff">>}]
+%%    ]).
+%% {[<<"id">>,<<"name">>],[[1,<<"Joe">>],[2,<<"Jeff">>]]}
+%% '''
+%%
+-spec extract_insert_data(sqerl_rows()) -> {[binary()], [[term()]]}.
+extract_insert_data([]) ->
+    {[], []};
+extract_insert_data(Rows) ->
+    Columns = [C || {C, _V} <- hd(Rows)],
+    RowsValues = [[V || {_C, V} <- Row] || Row <- Rows],
+    {Columns, RowsValues}.
+
+parse_select_results({ok, []}) ->
+    {ok, none};
+parse_select_results({ok, Results}) ->
+    {ok, Results};
+parse_select_results({ok, Count, Results}) ->
+    {ok, Count, Results};
+parse_select_results({error, Reason}) ->
+    parse_error(Reason).
+
+parse_statement_results({ok, 0}) ->
+    {ok, none};
+parse_statement_results({ok, N}) when is_number(N) ->
+    {ok, N};
+parse_statement_results({ok, {N, Rows}}) when is_number(N) ->
+    {ok, N, Rows};
+parse_statement_results({ok, N, Rows}) when is_number(N) ->
+    {ok, N, Rows};
+parse_statement_results({error, Reason}) ->
+    parse_error(Reason).
+
+%% @doc Utility for generating specific message tuples from database-specific error
+%% messages. The 1-argument form determines which database is being used by querying
+%% Sqerl's configuration at runtime, while the 2-argument form takes the database type as a
+%% parameter directly.
+-spec parse_error(
+        {term(), term()} |               %% MySQL error
+        {error, {error, error, _, _, _}} %% PostgreSQL error
+       ) -> sqerl_error().
+parse_error(Reason) ->
+    parse_error(pgsql, Reason).
+
+-spec parse_error(pgsql,
+                  'no_connections' |
+                  {'error', 'error', _, _, _} |
+                  {'error', {'error', _, _, _, _}}) -> sqerl_error().
+parse_error(_DbType, no_connections) ->
+    {error, no_connections};
+parse_error(_DbType, {error, Reason} = Error) when is_atom(Reason) ->
+    Error;
+parse_error(pgsql, {error, error, Code, Message, _Extra}) ->
+    do_parse_error({Code, Message}, ?PGSQL_ERROR_CODES);
+parse_error(pgsql, {error,  % error from sqerl
+                    {error, % error record marker from epgsql
+                     _Severity,
+                     Code, Message, _Extra}}) ->
+    do_parse_error({Code, Message}, ?PGSQL_ERROR_CODES);
+parse_error(_, Error) ->
+    case Error of
+        {error, _} ->
+            Error;
+        _ ->
+            {error, Error}
+    end.
+
+do_parse_error({Code, Message}, CodeList) ->
+    case lists:keyfind(Code, 1, CodeList) of
+        {_, ErrorType} ->
+            {ErrorType, Message};
+        false ->
+            %% People of the Future!
+            %% For Postgres, sqerl_pgsql_errors:translate_code is available
+            %% for turning Postgres codes into human-readable tuples
+            {error, {Code, Message}}
+    end.
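To make the error translation above concrete, here is roughly how an epgsql error surfaces through parse_error/1; the error tuples are trimmed to the fields that matter and the messages are illustrative:

    %% A unique-constraint violation (code 23505) maps to a {conflict, Message} tuple.
    {conflict, Msg1} =
        sqerl_core:parse_error({error, {error, error, <<"23505">>,
                                        <<"duplicate key value violates unique constraint">>, []}}),

    %% Codes not listed in ?PGSQL_ERROR_CODES pass through as {error, {Code, Message}}.
    {error, {<<"42P01">>, Msg2}} =
        sqerl_core:parse_error({error, {error, error, <<"42P01">>,
                                        <<"relation \"missing_table\" does not exist">>, []}}).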