# Install: python-psycopg2 (Debian/Ubuntu)
# Run with python2 ./test_undo_worker_local_balancing.py
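#
# The test recreates a set of databases, opens one session per database, and
# then generates a stream of small transactions (each creating an empty table)
# at a target rate, committing or rolling back each one according to
# commit_ratio.  With commit_ratio = 0.0 every transaction is rolled back,
# which is presumably what exercises the per-database balancing of undo
# workers that the script name refers to.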

import psycopg2 as pg
import random
import time

def make_conn(dbname):
  return pg.connect("dbname=" + dbname)

def run_test(dbs, txs_per_sec, commit_ratio, runtime):
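  # dbs:          number of databases to (re)create, with one session each
  # txs_per_sec:  target rate at which new transactions are started
  # commit_ratio: probability that a given transaction commits (otherwise it
  #               is rolled back)
  # runtime:      how long to keep generating transactions, in seconds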

  # first, create a control connection that we'll use to create databases
  conn = make_conn("postgres")
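  # CREATE/DROP DATABASE can't run inside a transaction block, so autocommit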
  conn.set_isolation_level(pg.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
  cursor = conn.cursor()

  # recreate all the databases
  for n in range(dbs):
    cursor.execute("drop database if exists db%d" % n)
    cursor.execute("create database db%d" % n)

  # next, open a separate session to each database
  conns = [make_conn("db%d" % n) for n in range(dbs)]
  cursors = [conn.cursor() for conn in conns]

  # set up interesting GUCs in each session
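  # rollback_overflow_size appears to come from the undo worker patch set;
  # setting it to 0kB presumably forces every rollback to be handed off to a
  # background undo worker rather than being performed by the session itself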
  for cursor in cursors:
    cursor.execute("set rollback_overflow_size = '0kB'")
    cursor.connection.commit()

  # now do random work at the requested rate until our time runs out
  start = time.time()
  finish_at = start + runtime
  txs = 0
  table_number = 0
  now = start
  while now < finish_at:

    # choose a random session, and run a transaction
    cursor = random.choice(cursors)
    cursor.execute("create table t%d ()" % table_number)
    # decide whether to commit or roll back
    if random.uniform(0.0, 1.0) < commit_ratio:
      cursor.connection.commit()
    else:
      cursor.connection.rollback()
    table_number += 1

    # wait until it's time to start the next transaction
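    # next_tx_at is an absolute schedule computed from 'start', so timing
    # error doesn't accumulate from one transaction to the next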
    txs += 1
    next_tx_at = (txs / float(txs_per_sec)) + start
    if next_tx_at < now:
      print "can't run transactions fast enough, not sleeping"
    else:
      time.sleep(next_tx_at - now)
    now = time.time()

if __name__ == "__main__":
  dbs = 4 # number of databases (and sessions)
  txs_per_sec = 1.0 # target transaction rate; 100.0 makes a heavier load
  commit_ratio = 0.0 # 0.0 = always roll back, 1.0 = always commit
  runtime = 120 # how long to run for, in seconds
  run_test(dbs, txs_per_sec, commit_ratio, runtime)
