Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions docs/reference/config.md
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,8 @@ The `gen` mapping supports the following keys:
- `emit_all_enum_values`:
- If true, emit a function per enum type
that returns all valid enum values.
- `emit_query_batch`:
- If true, generate a `QueryBatch` type with `Queue*` methods that batch multiple different queries into a single round-trip. Uses pgx v5's `QueuedQuery` callback API. Only supported with `sql_package: pgx/v5`. Defaults to `false`.
- `emit_sql_as_comment`:
- If true, emits the SQL statement as a code-block comment above the generated function, appending to any existing comments. Defaults to `false`.
- `build_tags`:
Expand All @@ -179,6 +181,8 @@ The `gen` mapping supports the following keys:
- If `true`, sqlc won't generate table and enum structs that aren't used in queries for a given package. Defaults to `false`.
- `output_batch_file_name`:
- Customize the name of the batch file. Defaults to `batch.go`.
- `output_query_batch_file_name`:
- Customize the name of the query batch file. Defaults to `query_batch.sql.go`.
- `output_db_file_name`:
- Customize the name of the db file. Defaults to `db.go`.
- `output_models_file_name`:
Expand Down Expand Up @@ -448,6 +452,8 @@ Each mapping in the `packages` collection has the following keys:
- `emit_all_enum_values`:
- If true, emit a function per enum type
that returns all valid enum values.
- `emit_query_batch`:
- If true, generate a `QueryBatch` type with `Queue*` methods that batch multiple different queries into a single round-trip. Uses pgx v5's `QueuedQuery` callback API. Only supported with `sql_package: pgx/v5`. Defaults to `false`.
- `build_tags`:
- If set, add a `//go:build <build_tags>` directive at the beginning of each generated Go file.
- `json_tags_case_style`:
Expand All @@ -456,6 +462,8 @@ Each mapping in the `packages` collection has the following keys:
- If `true`, sqlc won't generate table and enum structs that aren't used in queries for a given package. Defaults to `false`.
- `output_batch_file_name`:
- Customize the name of the batch file. Defaults to `batch.go`.
- `output_query_batch_file_name`:
- Customize the name of the query batch file. Defaults to `query_batch.sql.go`.
- `output_db_file_name`:
- Customize the name of the db file. Defaults to `db.go`.
- `output_models_file_name`:
Expand Down
68 changes: 68 additions & 0 deletions docs/reference/query-annotations.md
Original file line number Diff line number Diff line change
Expand Up @@ -223,6 +223,74 @@ func (b *CreateBookBatchResults) Close() error {
}
```

## `emit_query_batch` (batching different queries)

The `:batchexec`, `:batchmany`, and `:batchone` annotations above batch the
**same query** with different parameters. If you need to batch **different
queries** into a single round-trip, use the `emit_query_batch` configuration
option instead.

When `emit_query_batch` is enabled, sqlc generates a `QueryBatch` type that
uses pgx v5's `QueuedQuery` callback API. Each regular query (`:one`, `:many`,
`:exec`, `:execrows`, `:execresult`) gets a `Queue*` method on `QueryBatch`.
All queued queries are sent in a single round-trip when `ExecuteBatch` is
called.

__NOTE: This option only works with PostgreSQL using the `pgx/v5` driver and outputting Go code.__

```yaml
# sqlc.yaml
version: "2"
sql:
- engine: "postgresql"
schema: "schema.sql"
queries: "query.sql"
gen:
go:
package: "db"
out: "db"
sql_package: "pgx/v5"
emit_query_batch: true
```

```sql
-- name: GetUser :one
SELECT * FROM users WHERE id = $1;

-- name: ListUsers :many
SELECT * FROM users ORDER BY id;

-- name: UpdateUser :exec
UPDATE users SET name = $1 WHERE id = $2;
```

```go
// Generated QueryBatch API:
batch := db.NewQueryBatch()

batch.QueueGetUser(userID, func(user db.User, found bool) error {
if !found {
return nil // no row matched
}
fmt.Println(user.Name)
return nil
})

batch.QueueListUsers(func(users []db.User) error {
fmt.Println("found", len(users), "users")
return nil
})

batch.QueueUpdateUser(db.UpdateUserParams{Name: "Alice", ID: 1})

// Send all queries in one round-trip:
err := queries.ExecuteBatch(ctx, batch)
```

The `QueryBatch.Batch` field is exported so you can mix generated `Queue*`
calls with custom pgx batch operations on the same `pgx.Batch`. This feature
can be used alongside `:batch*` annotations in the same package.

## `:copyfrom`

__NOTE: This command is driver and package specific, see [how to insert](../howto/insert.md#using-copyfrom)
Expand Down
22 changes: 19 additions & 3 deletions internal/codegen/golang/gen.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ type tmplCtx struct {
EmitAllEnumValues bool
UsesCopyFrom bool
UsesBatch bool
EmitQueryBatch bool
OmitSqlcVersion bool
BuildTags string
WrapErrors bool
Expand Down Expand Up @@ -182,7 +183,8 @@ func generate(req *plugin.GenerateRequest, options *opts.Options, enums []Enum,
EmitEnumValidMethod: options.EmitEnumValidMethod,
EmitAllEnumValues: options.EmitAllEnumValues,
UsesCopyFrom: usesCopyFrom(queries),
UsesBatch: usesBatch(queries),
UsesBatch: usesBatch(queries) || options.EmitQueryBatch,
EmitQueryBatch: options.EmitQueryBatch,
SQLDriver: parseDriver(options.SqlPackage),
Q: "`",
Package: options.Package,
Expand All @@ -205,10 +207,14 @@ func generate(req *plugin.GenerateRequest, options *opts.Options, enums []Enum,
tctx.SQLDriver = opts.SQLDriverGoSQLDriverMySQL
}

if tctx.UsesBatch && !tctx.SQLDriver.IsPGX() {
if usesBatch(queries) && !tctx.SQLDriver.IsPGX() {
return nil, errors.New(":batch* commands are only supported by pgx")
}

if options.EmitQueryBatch && tctx.SQLDriver != opts.SQLDriverPGXV5 {
return nil, errors.New("emit_query_batch is only supported by pgx/v5")
}

funcMap := template.FuncMap{
"lowerTitle": sdk.LowerTitle,
"comment": sdk.DoubleSlashComment,
Expand Down Expand Up @@ -289,6 +295,11 @@ func generate(req *plugin.GenerateRequest, options *opts.Options, enums []Enum,
batchFileName = options.OutputBatchFileName
}

queryBatchFileName := "query_batch.sql.go"
if options.OutputQueryBatchFileName != "" {
queryBatchFileName = options.OutputQueryBatchFileName
}

if err := execute(dbFileName, "dbFile"); err != nil {
return nil, err
}
Expand All @@ -305,11 +316,16 @@ func generate(req *plugin.GenerateRequest, options *opts.Options, enums []Enum,
return nil, err
}
}
if tctx.UsesBatch {
if usesBatch(queries) {
if err := execute(batchFileName, "batchFile"); err != nil {
return nil, err
}
}
if tctx.EmitQueryBatch {
if err := execute(queryBatchFileName, "queryBatchFile"); err != nil {
return nil, err
}
}

files := map[string]struct{}{}
for _, gq := range queries {
Expand Down
65 changes: 65 additions & 0 deletions internal/codegen/golang/imports.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,10 @@ func (i *importer) Imports(filename string) [][]ImportSpec {
if i.Options.OutputBatchFileName != "" {
batchFileName = i.Options.OutputBatchFileName
}
queryBatchFileName := "query_batch.sql.go"
if i.Options.OutputQueryBatchFileName != "" {
queryBatchFileName = i.Options.OutputQueryBatchFileName
}

switch filename {
case dbFileName:
Expand All @@ -113,6 +117,8 @@ func (i *importer) Imports(filename string) [][]ImportSpec {
return mergeImports(i.copyfromImports())
case batchFileName:
return mergeImports(i.batchImports())
case queryBatchFileName:
return mergeImports(i.queryBatchImports())
default:
return mergeImports(i.queryImports(filename))
}
Expand Down Expand Up @@ -506,6 +512,65 @@ func hasPrefixIgnoringSliceAndPointerPrefix(s, prefix string) bool {
return strings.HasPrefix(trimmedS, trimmedPrefix)
}

// queryBatchImports computes the import set for the generated query batch
// file. Only plain queries (:one, :many, :exec, :execrows, :execresult) get
// Queue* methods there; :copyfrom and :batch* queries are emitted by their
// own files and are excluded.
func (i *importer) queryBatchImports() fileImports {
	var (
		regular    []Query
		needErrors bool // any :one query → errors.Is(err, pgx.ErrNoRows)
		needPgconn bool // any :execrows/:execresult → pgconn.CommandTag callback
	)
	for _, q := range i.Queries {
		if q.Cmd == metadata.CmdCopyFrom || usesBatch([]Query{q}) {
			continue
		}
		regular = append(regular, q)
		switch q.Cmd {
		case metadata.CmdOne:
			needErrors = true
		case metadata.CmdExecRows, metadata.CmdExecResult:
			// metadata.CmdExecLastId is unsupported in Postgres.
			needPgconn = true
		}
	}

	std, pkg := buildImports(i.Options, regular, queryBatchUsesType(regular))
	if needErrors {
		std["errors"] = struct{}{}
	}
	if needPgconn {
		pkg[ImportSpec{Path: "github.com/jackc/pgx/v5/pgconn"}] = struct{}{}
	}

	// ExecuteBatch always needs context, and pgx.Batch/pgx.Rows always
	// need the pgx/v5 package itself.
	std["context"] = struct{}{}
	pkg[ImportSpec{Path: "github.com/jackc/pgx/v5"}] = struct{}{}
	return sortedImports(std, pkg)
}

// queryBatchUsesType returns a predicate reporting whether a type name is
// referenced directly in the generated query batch file. Struct field types
// are skipped because struct definitions live in query.sql.go, not
// query_batch.sql.go; the batch file refers to those structs by name only.
func queryBatchUsesType(queries []Query) func(string) bool {
	return func(name string) bool {
		for _, q := range queries {
			// Non-struct return types appear verbatim in Queue* signatures.
			if q.hasRetType() && !q.Ret.EmitStruct() &&
				hasPrefixIgnoringSliceAndPointerPrefix(q.Ret.Type(), name) {
				return true
			}
			// Struct args show up only as the struct's name in the
			// signature, so their field types are never referenced here.
			if q.Arg.EmitStruct() {
				continue
			}
			for _, pair := range q.Arg.Pairs() {
				if hasPrefixIgnoringSliceAndPointerPrefix(pair.Type, name) {
					return true
				}
			}
		}
		return false
	}
}

func replaceConflictedArg(imports [][]ImportSpec, queries []Query) []Query {
m := make(map[string]struct{})
for _, is := range imports {
Expand Down
2 changes: 2 additions & 0 deletions internal/codegen/golang/opts/options.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ type Options struct {
OmitUnusedStructs bool `json:"omit_unused_structs,omitempty" yaml:"omit_unused_structs"`
BuildTags string `json:"build_tags,omitempty" yaml:"build_tags"`
Initialisms *[]string `json:"initialisms,omitempty" yaml:"initialisms"`
EmitQueryBatch bool `json:"emit_query_batch,omitempty" yaml:"emit_query_batch"`
OutputQueryBatchFileName string `json:"output_query_batch_file_name,omitempty" yaml:"output_query_batch_file_name"`

InitialismsMap map[string]struct{} `json:"-" yaml:"-"`
}
Expand Down
103 changes: 103 additions & 0 deletions internal/codegen/golang/templates/pgx/queryBatchCode.tmpl
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
{{define "queryBatchCodePgx"}}

// QueryBatch allows queuing multiple queries to be executed in a single
// round-trip using pgx v5's QueuedQuery callback pattern. Each Queue* method
// calls pgx.Batch.Queue and registers a result callback (QueryRow, Query, or
// Exec) that is invoked when ExecuteBatch processes the batch results.
// For :exec queries, no callback is needed - errors propagate via ExecuteBatch.
//
// The Batch field is exported to allow interoperability: callers can mix
// generated Queue* calls with custom pgx batch operations on the same
// underlying pgx.Batch.
type QueryBatch struct {
Batch *pgx.Batch
}

// NewQueryBatch creates a new QueryBatch.
func NewQueryBatch() *QueryBatch {
return &QueryBatch{
Batch: &pgx.Batch{},
}
}

// ExecuteBatch sends all queued queries and closes the batch.
func (q *Queries) ExecuteBatch(ctx context.Context, {{if $.EmitMethodsWithDBArgument}}db DBTX, {{end}}batch *QueryBatch) error {
return {{if $.EmitMethodsWithDBArgument}}db{{else}}q.db{{end}}.SendBatch(ctx, batch.Batch).Close()
}

{{/* One Queue* wrapper is emitted per plain query; :copyfrom and :batch* commands are excluded because they are generated by their own templates. Trailing "-" trims each comment's own newline so generated output is unchanged. */ -}}
{{range .GoQueries}}
{{if and (ne .Cmd ":copyfrom") (ne (hasPrefix .Cmd ":batch") true)}}
{{/* :one — QueryRow callback; pgx.ErrNoRows is mapped to found=false instead of an error. */ -}}
{{if eq .Cmd ":one"}}
// Queue{{.MethodName}} queues {{.MethodName}} for batch execution.
// The callback fn is called when ExecuteBatch is called. The second parameter
// is false if the row was not found (no error is returned in this case).
func (b *QueryBatch) Queue{{.MethodName}}({{.Arg.Pair}}{{if .Arg.Pair}}, {{end}}fn func({{.Ret.DefineType}}, bool) error) {
b.Batch.Queue({{.ConstantName}}, {{.Arg.Params}}).QueryRow(func(row pgx.Row) error {
var {{.Ret.Name}} {{.Ret.Type}}
err := row.Scan({{.Ret.Scan}})
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return fn({{.Ret.ReturnName}}, false)
}
return err
}
return fn({{.Ret.ReturnName}}, true)
})
}
{{end}}

{{/* :many — Query callback that scans every row into a slice before invoking fn. */ -}}
{{if eq .Cmd ":many"}}
// Queue{{.MethodName}} queues {{.MethodName}} for batch execution.
// The callback fn is called with the results when ExecuteBatch is called.
func (b *QueryBatch) Queue{{.MethodName}}({{.Arg.Pair}}{{if .Arg.Pair}}, {{end}}fn func([]{{.Ret.DefineType}}) error) {
b.Batch.Queue({{.ConstantName}}, {{.Arg.Params}}).Query(func(rows pgx.Rows) error {
defer rows.Close()
{{- if $.EmitEmptySlices}}
items := []{{.Ret.DefineType}}{}
{{else}}
var items []{{.Ret.DefineType}}
{{end -}}
for rows.Next() {
var {{.Ret.Name}} {{.Ret.Type}}
if err := rows.Scan({{.Ret.Scan}}); err != nil {
return err
}
items = append(items, {{.Ret.ReturnName}})
}
if err := rows.Err(); err != nil {
return err
}
return fn(items)
})
}
{{end}}

{{/* :exec — fire-and-forget; any error surfaces from ExecuteBatch's Close. */ -}}
{{if eq .Cmd ":exec"}}
// Queue{{.MethodName}} queues {{.MethodName}} for batch execution.
func (b *QueryBatch) Queue{{.MethodName}}({{.Arg.Pair}}) {
b.Batch.Queue({{.ConstantName}}, {{.Arg.Params}})
}
{{end}}

{{/* :execrows — Exec callback reporting the affected-row count. */ -}}
{{if eq .Cmd ":execrows"}}
// Queue{{.MethodName}} queues {{.MethodName}} for batch execution.
// The callback fn is called with the number of rows affected when ExecuteBatch is called.
func (b *QueryBatch) Queue{{.MethodName}}({{.Arg.Pair}}{{if .Arg.Pair}}, {{end}}fn func(int64) error) {
b.Batch.Queue({{.ConstantName}}, {{.Arg.Params}}).Exec(func(ct pgconn.CommandTag) error {
return fn(ct.RowsAffected())
})
}
{{end}}

{{/* :execresult — Exec callback handing the raw pgconn.CommandTag to fn. */ -}}
{{if eq .Cmd ":execresult"}}
// Queue{{.MethodName}} queues {{.MethodName}} for batch execution.
// The callback fn is called with the command tag when ExecuteBatch is called.
func (b *QueryBatch) Queue{{.MethodName}}({{.Arg.Pair}}{{if .Arg.Pair}}, {{end}}fn func(pgconn.CommandTag) error) {
b.Batch.Queue({{.ConstantName}}, {{.Arg.Params}}).Exec(func(ct pgconn.CommandTag) error {
return fn(ct)
})
}
{{end}}
{{end}}
{{end}}
{{end}}
Loading
Loading