class Sequel::Postgres::Dataset
Constants
- BindArgumentMethods
- PREPARED_ARG_PLACEHOLDER
- PreparedStatementMethods
Public Instance Methods
# Modules to include in datasets when using bound variables.
def bound_variable_modules
  [BindArgumentMethods]
end
# Fetch the rows for the given SQL, yielding each row as a hash.
# Delegates to a cursor-based fetch when the dataset was configured
# via use_cursor (the :cursor option is set).
def fetch_rows(sql)
  if @opts[:cursor]
    cursor_fetch_rows(sql){|h| yield h}
  else
    execute(sql) do |res|
      yield_hash_rows(res, fetch_rows_set_cols(res)){|h| yield h}
    end
  end
end
Use a cursor for paging.
# Use a cursor for paging. Without a block, returns an Enumerator;
# with a block, iterates over the rows using use_cursor.
def paged_each(opts=OPTS, &block)
  return enum_for(:paged_each, opts) unless block_given?
  use_cursor(opts).each(&block)
end
PostgreSQL uses $N for placeholders instead of ?, so use a $ as the placeholder.
# PostgreSQL uses $N for placeholders instead of ?, so use a $
# as the placeholder.
def prepared_arg_placeholder
  PREPARED_ARG_PLACEHOLDER
end
# Modules to include in datasets when using prepared statements.
def prepared_statement_modules
  [PreparedStatementMethods]
end
Uses a cursor for fetching records, instead of fetching the entire result set at once. Note this uses a transaction around the cursor usage by default and can be changed using `hold: true` as described below. Cursors can be used to process large datasets without holding all rows in memory (which is what the underlying drivers may do by default). Options:
- :cursor_name
-
The name assigned to the cursor (default 'sequel_cursor'). Nested cursors require different names.
- :hold
-
Declare the cursor WITH HOLD and don't use transaction around the cursor usage.
- :rows_per_fetch
-
The number of rows per fetch (default 1000). Higher numbers result in fewer queries but greater memory use.
Usage:
DB[:huge_table].use_cursor.each{|row| p row}
DB[:huge_table].use_cursor(rows_per_fetch: 10000).each{|row| p row}
DB[:huge_table].use_cursor(cursor_name: 'my_cursor').each{|row| p row}
This is untested with the prepared statement/bound variable support, and unlikely to work with either.
# Return a clone of the dataset that fetches records using a cursor
# instead of loading the entire result set at once.  Options:
#
# :cursor_name :: The name assigned to the cursor (default
#                 'sequel_cursor').  Nested cursors require
#                 different names.
# :hold :: Declare the cursor WITH HOLD and don't use a transaction
#          around the cursor usage.
# :rows_per_fetch :: The number of rows per fetch (default 1000).
#                    Higher numbers result in fewer queries but
#                    greater memory use.
def use_cursor(opts=OPTS)
  cursor_opts = {:rows_per_fetch=>1000}
  clone(:cursor=>cursor_opts.merge!(opts))
end
Replace the WHERE clause with one that uses CURRENT OF with the given cursor name (or the default cursor name). This allows you to update a large dataset by updating individual rows while processing the dataset via a cursor:
DB[:huge_table].use_cursor(rows_per_fetch: 1).each do |row|
  DB[:huge_table].where_current_of.update(column: ruby_method(row))
end
# Replace the WHERE clause with one that uses CURRENT OF with the
# given cursor name (or the default cursor name), so an UPDATE/DELETE
# affects the row most recently fetched from that cursor.
def where_current_of(cursor_name='sequel_cursor')
  current_of = Sequel.lit(['CURRENT OF '], Sequel.identifier(cursor_name))
  clone(:where=>current_of)
end
Private Instance Methods
Generate and execute a procedure call.
# Generate and execute a CALL statement for the named procedure with
# the given arguments, returning the first row of the result (if any).
def call_procedure(name, args)
  # Start from a mutable, unfrozen string so the append helpers can
  # build the SQL in place.
  sql = String.new("CALL ")
  identifier_append(sql, name)
  literal_append(sql, args)
  with_sql_first(sql)
end
Use a cursor to fetch groups of records at a time, yielding them to the block.
# File lib/sequel/adapters/postgres.rb 725 def cursor_fetch_rows(sql) 726 server_opts = {:server=>@opts[:server] || :read_only} 727 cursor = @opts[:cursor] 728 hold = cursor[:hold] 729 cursor_name = quote_identifier(cursor[:cursor_name] || 'sequel_cursor') 730 rows_per_fetch = cursor[:rows_per_fetch].to_i 731 732 db.public_send(*(hold ? [:synchronize, server_opts[:server]] : [:transaction, server_opts])) do 733 begin 734 execute_ddl("DECLARE #{cursor_name} NO SCROLL CURSOR WITH#{'OUT' unless hold} HOLD FOR #{sql}", server_opts) 735 rows_per_fetch = 1000 if rows_per_fetch <= 0 736 fetch_sql = "FETCH FORWARD #{rows_per_fetch} FROM #{cursor_name}" 737 cols = nil 738 # Load columns only in the first fetch, so subsequent fetches are faster 739 execute(fetch_sql) do |res| 740 cols = fetch_rows_set_cols(res) 741 yield_hash_rows(res, cols){|h| yield h} 742 return if res.ntuples < rows_per_fetch 743 end 744 while true 745 execute(fetch_sql) do |res| 746 yield_hash_rows(res, cols){|h| yield h} 747 return if res.ntuples < rows_per_fetch 748 end 749 end 750 rescue Exception => e 751 raise 752 ensure 753 begin 754 execute_ddl("CLOSE #{cursor_name}", server_opts) 755 rescue 756 raise e if e 757 raise 758 end 759 end 760 end 761 end
Set the columns based on the result set, and return the array of [type conversion proc, column name symbol] pairs, one per field.
# Set the columns based on the result set, and return the array of
# [type conversion proc, column name symbol] pairs, one per field.
def fetch_rows_set_cols(res)
  procs = db.conversion_procs
  cols = Array.new(res.nfields) do |i|
    [procs[res.ftype(i)], output_identifier(res.fname(i))]
  end
  self.columns = cols.map{|c| c[1]}
  cols
end
Use the driver's escape_bytea
# Append a literal blob value to the SQL string, using the driver's
# escape_bytea on a checked-out connection.
def literal_blob_append(sql, v)
  escaped = db.synchronize(@opts[:server]){|c| c.escape_bytea(v)}
  sql << "'" << escaped << "'"
end
Use the driver's escape_string
# Append a literal string value to the SQL string, using the driver's
# escape_string on a checked-out connection.
def literal_string_append(sql, v)
  escaped = db.synchronize(@opts[:server]){|c| c.escape_string(v)}
  sql << "'" << escaped << "'"
end
For each row in the result set, yield a hash with column name symbol keys and typecasted values.
# For each row in the result set, yield a hash with column name symbol
# keys and typecast values.  cols is an array of
# [type conversion proc, column name symbol] pairs; a nil proc (or a
# nil value) leaves the raw value untouched.
def yield_hash_rows(res, cols)
  field_count = cols.length
  res.ntuples.times do |row|
    record = {}
    field_count.times do |field|
      conv, key = cols[field]
      raw = res.getvalue(row, field)
      record[key] = (raw && conv) ? conv.call(raw) : raw
    end
    yield record
  end
end