Commit

Initial commit

pawurb committed Dec 29, 2023
0 parents commit 7994b7c
Showing 78 changed files with 2,120 additions and 0 deletions.
37 changes: 37 additions & 0 deletions .github/workflows/ci.yml
@@ -0,0 +1,37 @@
name: Rust

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  build:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v3
    - name: Run PostgreSQL 14
      run: |
        docker run --env POSTGRES_USER=postgres \
          --env POSTGRES_DB=ruby-pg-extras-test \
          --env POSTGRES_PASSWORD=secret \
          -d -p 5432:5432 postgres:14.6-alpine \
          postgres -c shared_preload_libraries=pg_stat_statements
    - name: Setup Rust
      uses: actions-rs/toolchain@v1
      with:
        toolchain: stable
        override: true
    - name: Check
      uses: actions-rs/cargo@v1
      with:
        command: check
    - name: Test
      uses: actions-rs/cargo@v1
      with:
        command: test
6 changes: 6 additions & 0 deletions .gitignore
@@ -0,0 +1,6 @@
.tool-versions
docker-compose.yml
Cargo.lock
target/


12 changes: 12 additions & 0 deletions Cargo.toml
@@ -0,0 +1,12 @@
[package]
name = "rust-pg-extras"
version = "0.1.0"
edition = "2021"

[dependencies]
postgres = "0.19.7"
rust_decimal = { version = "1.32", features = ["db-postgres"] }
rust_decimal_macros = "1.33"
prettytable-rs = "^0.10"
pg_interval = "0.4.2"

12 changes: 12 additions & 0 deletions docker-compose.yml.sample
@@ -0,0 +1,12 @@
version: '3'

services:
  postgres:
    image: postgres:14.3-alpine
    command: postgres -c shared_preload_libraries=pg_stat_statements
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: rust_pg_extras
      POSTGRES_PASSWORD: secret
    ports:
      - '5432:5432'
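
For local development, the sample above can be copied to the git-ignored docker-compose.yml and started before running the tests. A minimal sketch, assuming Docker Compose is available; the connection URL is an assumption built from the credentials in the sample and matches the DATABASE_URL variable that src/main.rs reads:

cp docker-compose.yml.sample docker-compose.yml
docker compose up -d
export DATABASE_URL=postgres://postgres:secret@localhost:5432/rust_pg_extras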
1 change: 1 addition & 0 deletions live_tests.sh
@@ -0,0 +1 @@
fswatch -or src | xargs -n1 -I{} sh -c "cargo test -- --nocapture"
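
This watcher re-runs cargo test whenever anything under src changes. A usage sketch, assuming fswatch is installed and the database from the compose sketch above is running with DATABASE_URL exported as shown there:

sh live_tests.sh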
294 changes: 294 additions & 0 deletions src/main.rs
@@ -0,0 +1,294 @@
use postgres::{Client, NoTls, Row};
use std::{env, fs};
mod structs;
use structs::all_locks::AllLocks;
use structs::bloat::Bloat;
use structs::blocking::Blocking;
use structs::buffercache_stats::BuffercacheStats;
use structs::buffercache_usage::BuffercacheUsage;
use structs::cache_hit::CacheHit;
use structs::calls::Calls;
use structs::connections::Connections;
use structs::db_settings::DbSetting;
use structs::duplicate_indexes::DuplicateIndexes;
use structs::extensions::Extensions;
use structs::index_cache_hit::IndexCacheHit;
use structs::index_scans::IndexScans;
use structs::index_size::IndexSize;
use structs::index_usage::IndexUsage;
use structs::indexes::Indexes;
use structs::locks::Locks;
use structs::long_running_queries::LongRunningQueries;
use structs::mandelbrot::Mandelbrot;
use structs::null_indexes::NullIndexes;
use structs::outliers::Outliers;
use structs::records_rank::RecordsRank;
use structs::seq_scans::SeqScans;
use structs::shared::{get_default_schema, Tabular};
use structs::ssl_used::SslUsed;
use structs::table_cache_hit::TableCacheHit;
use structs::table_index_scans::TableIndexScans;
use structs::table_indexes_size::TableIndexesSize;
use structs::table_size::TableSize;
use structs::tables::Tables;
use structs::total_index_size::TotalIndexSize;
use structs::total_table_size::TotalTableSize;
use structs::unused_indexes::UnusedIndexes;
use structs::vacuum_stats::VacuumStats;

#[macro_use]
extern crate prettytable;
use prettytable::Table;

// Prints a Vec of query results as an ASCII table.
pub fn render_table<T: Tabular>(items: Vec<T>) {
    let mut table = Table::new();
    table.add_row(T::headers());

    for item in items {
        table.add_row(item.to_row());
    }
    table.printstd();
}

pub fn bloat() -> Vec<Bloat> {
    let query = read_file(Bloat::FILE_NAME);
    get_rows(&query).iter().map(Bloat::new).collect()
}

pub fn blocking(limit: Option<String>) -> Vec<Blocking> {
    let limit = limit.unwrap_or("10".to_string());
    let query = read_file(Blocking::FILE_NAME).replace("%{limit}", limit.as_str());
    get_rows(&query).iter().map(Blocking::new).collect()
}

pub fn calls(limit: Option<String>) -> Vec<Calls> {
    let limit = limit.unwrap_or("10".to_string());
    let query = read_file("calls").replace("%{limit}", limit.as_str());
    get_rows(&query).iter().map(Calls::new).collect()
}

pub fn extensions() -> Vec<Extensions> {
    let query = read_file(Extensions::FILE_NAME);
    get_rows(&query).iter().map(Extensions::new).collect()
}

pub fn table_cache_hit() -> Vec<TableCacheHit> {
    let query = read_file(TableCacheHit::FILE_NAME);
    get_rows(&query).iter().map(TableCacheHit::new).collect()
}

pub fn tables(schema: Option<String>) -> Vec<Tables> {
    let schema_name = schema.unwrap_or(get_default_schema());
    let query = read_file(Tables::FILE_NAME).replace("%{schema}", &schema_name);
    get_rows(&query).iter().map(Tables::new).collect()
}

pub fn index_cache_hit(schema: Option<String>) -> Vec<IndexCacheHit> {
    let schema_name = schema.unwrap_or(get_default_schema());
    let query = read_file(IndexCacheHit::FILE_NAME).replace("%{schema}", &schema_name);
    get_rows(&query).iter().map(IndexCacheHit::new).collect()
}

pub fn indexes() -> Vec<Indexes> {
    let query = read_file(Indexes::FILE_NAME);
    get_rows(&query).iter().map(Indexes::new).collect()
}

pub fn index_size() -> Vec<IndexSize> {
    let query = read_file(IndexSize::FILE_NAME);
    let rows: Vec<Row> = connection()
        .query(&query, &[])
        .unwrap_or_else(|_| Vec::new());
    rows.iter().map(IndexSize::new).collect()
}

pub fn index_usage(schema: Option<String>) -> Vec<IndexUsage> {
    let schema_name = schema.unwrap_or(get_default_schema());
    let query = read_file(IndexUsage::FILE_NAME).replace("%{schema}", &schema_name);
    get_rows(&query).iter().map(IndexUsage::new).collect()
}

pub fn index_scans(schema: Option<String>) -> Vec<IndexScans> {
    let schema_name = schema.unwrap_or(get_default_schema());
    let query = read_file(IndexScans::FILE_NAME).replace("%{schema}", &schema_name);
    get_rows(&query).iter().map(IndexScans::new).collect()
}

pub fn null_indexes(min_relation_size_mb: Option<String>) -> Vec<NullIndexes> {
    let min_relation_size_mb = min_relation_size_mb.unwrap_or("0".to_string());
    let query = read_file(NullIndexes::FILE_NAME)
        .replace("%{min_relation_size_mb}", &min_relation_size_mb);
    get_rows(&query).iter().map(NullIndexes::new).collect()
}

pub fn locks() -> Vec<Locks> {
    let query = read_file(Locks::FILE_NAME);
    get_rows(&query).iter().map(Locks::new).collect()
}

pub fn all_locks() -> Vec<AllLocks> {
    let query = read_file(AllLocks::FILE_NAME);
    get_rows(&query).iter().map(AllLocks::new).collect()
}

pub fn long_running_queries() -> Vec<LongRunningQueries> {
    let query = read_file(LongRunningQueries::FILE_NAME);
    get_rows(&query)
        .iter()
        .map(LongRunningQueries::new)
        .collect()
}

pub fn mandelbrot() -> Vec<Mandelbrot> {
    let query = read_file(Mandelbrot::FILE_NAME);
    get_rows(&query).iter().map(Mandelbrot::new).collect()
}

pub fn outliers() -> Vec<Outliers> {
    let query = read_file(Outliers::FILE_NAME);
    get_rows(&query).iter().map(Outliers::new).collect()
}

pub fn records_rank(schema: Option<String>) -> Vec<RecordsRank> {
    let schema_name = schema.unwrap_or(get_default_schema());
    let query = read_file(RecordsRank::FILE_NAME).replace("%{schema}", schema_name.as_str());
    get_rows(&query).iter().map(RecordsRank::new).collect()
}

pub fn seq_scans(schema: Option<String>) -> Vec<SeqScans> {
    let schema_name = schema.unwrap_or(get_default_schema());
    let query = read_file(SeqScans::FILE_NAME).replace("%{schema}", schema_name.as_str());
    get_rows(&query).iter().map(SeqScans::new).collect()
}

pub fn table_index_scans(schema: Option<String>) -> Vec<TableIndexScans> {
    let schema_name = schema.unwrap_or(get_default_schema());
    let query = read_file(TableIndexScans::FILE_NAME).replace("%{schema}", schema_name.as_str());
    get_rows(&query).iter().map(TableIndexScans::new).collect()
}

pub fn table_indexes_size(schema: Option<String>) -> Vec<TableIndexesSize> {
    let schema_name = schema.unwrap_or(get_default_schema());
    let query = read_file(TableIndexesSize::FILE_NAME).replace("%{schema}", schema_name.as_str());
    get_rows(&query).iter().map(TableIndexesSize::new).collect()
}

pub fn table_size() -> Vec<TableSize> {
    let query = read_file(TableSize::FILE_NAME);
    get_rows(&query).iter().map(TableSize::new).collect()
}

pub fn total_index_size() -> Vec<TotalIndexSize> {
    let query = read_file(TotalIndexSize::FILE_NAME);
    get_rows(&query).iter().map(TotalIndexSize::new).collect()
}

pub fn total_table_size() -> Vec<TotalTableSize> {
    let query = read_file(TotalTableSize::FILE_NAME);
    get_rows(&query).iter().map(TotalTableSize::new).collect()
}

pub fn unused_indexes(schema: Option<String>) -> Vec<UnusedIndexes> {
    let schema_name = schema.unwrap_or(get_default_schema());
    let query = read_file(UnusedIndexes::FILE_NAME).replace("%{schema}", schema_name.as_str());
    get_rows(&query).iter().map(UnusedIndexes::new).collect()
}

pub fn duplicate_indexes() -> Vec<DuplicateIndexes> {
    let query = read_file(DuplicateIndexes::FILE_NAME);
    get_rows(&query).iter().map(DuplicateIndexes::new).collect()
}

pub fn vacuum_stats() -> Vec<VacuumStats> {
    let query = read_file(VacuumStats::FILE_NAME);
    get_rows(&query).iter().map(VacuumStats::new).collect()
}

pub fn buffercache_stats() -> Vec<BuffercacheStats> {
    let query = read_file(BuffercacheStats::FILE_NAME);
    get_rows(&query).iter().map(BuffercacheStats::new).collect()
}

pub fn buffercache_usage() -> Vec<BuffercacheUsage> {
    let query = read_file(BuffercacheUsage::FILE_NAME);
    get_rows(&query).iter().map(BuffercacheUsage::new).collect()
}

pub fn ssl_used() -> Vec<SslUsed> {
    let query = read_file(SslUsed::FILE_NAME);
    get_rows(&query).iter().map(SslUsed::new).collect()
}

pub fn connections() -> Vec<Connections> {
    let query = read_file(Connections::FILE_NAME);
    get_rows(&query).iter().map(Connections::new).collect()
}

pub fn cache_hit(schema: Option<String>) -> Vec<CacheHit> {
    let schema_name = schema.unwrap_or(get_default_schema());
    let query = read_file(CacheHit::FILE_NAME).replace("%{schema}", schema_name.as_str());
    get_rows(&query).iter().map(CacheHit::new).collect()
}

pub fn db_settings() -> Vec<DbSetting> {
    let query = read_file("db_settings");
    get_rows(&query).iter().map(DbSetting::new).collect()
}

// Reads a SQL query template from the src/queries directory.
fn read_file(filename: &str) -> String {
    let contents = fs::read_to_string(format!("src/queries/{}.sql", filename))
        .expect(format!("Error reading the '{}' file", filename).as_str());
    contents
}

// Executes a query against the configured database, returning no rows on error.
fn get_rows(query: &str) -> Vec<Row> {
    connection()
        .query(query, &[])
        .unwrap_or_else(|_| Vec::new())
}

// Opens a connection using the DATABASE_URL environment variable.
fn connection() -> Client {
    let database_url = env::var("DATABASE_URL").expect("$DATABASE_URL is not set");
    Client::connect(&database_url, NoTls).unwrap()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn it_works() {
        render_table(cache_hit(None));
        render_table(bloat());
        render_table(blocking(None));
        render_table(calls(None));
        render_table(extensions());
        render_table(table_cache_hit());
        render_table(tables(None));
        render_table(index_cache_hit(None));
        render_table(indexes());
        render_table(index_size());
        render_table(index_usage(None));
        render_table(index_scans(None));
        render_table(null_indexes(None));
        render_table(locks());
        render_table(all_locks());
        render_table(long_running_queries());
        render_table(mandelbrot());
        render_table(outliers());
        render_table(records_rank(None));
        render_table(seq_scans(None));
        render_table(table_index_scans(None));
        render_table(table_indexes_size(None));
        render_table(table_size());
        render_table(total_index_size());
        render_table(total_table_size());
        render_table(unused_indexes(None));
        render_table(duplicate_indexes());
        render_table(vacuum_stats());
        render_table(buffercache_stats());
        render_table(buffercache_usage());
        render_table(ssl_used());
        render_table(connections());
    }
}
17 changes: 17 additions & 0 deletions src/queries/all_locks.sql
@@ -0,0 +1,17 @@
/* Queries with active locks */

SELECT
  pg_stat_activity.pid,
  pg_class.relname,
  pg_locks.transactionid::text,
  pg_locks.granted,
  pg_locks.mode,
  pg_stat_activity.query AS query_snippet,
  age(now(), pg_stat_activity.query_start) AS "age",
  pg_stat_activity.application_name AS application
FROM pg_stat_activity, pg_locks
LEFT OUTER JOIN pg_class ON (pg_locks.relation = pg_class.oid)
WHERE pg_stat_activity.query <> '<insufficient privilege>'
  AND pg_locks.pid = pg_stat_activity.pid
  AND pg_stat_activity.pid <> pg_backend_pid()
ORDER BY query_start LIMIT 20;