
Commit

Merge branch 'main' into release-please--branches--main--components--spanner
rahul2393 committed May 9, 2022
2 parents 7a0d8b4 + b500120 commit 2bfd208
Showing 13 changed files with 208 additions and 63 deletions.
2 changes: 1 addition & 1 deletion bigquery/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
"bigquery": "1.31.0"
"bigquery": "1.32.0"
}
8 changes: 8 additions & 0 deletions bigquery/CHANGES.md
@@ -1,5 +1,13 @@
# Changes

## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/bigquery/v1.31.0...bigquery/v1.32.0) (2022-05-06)


### Features

* **bigquery:** add interval support ([#5907](https://github.com/googleapis/google-cloud-go/issues/5907)) ([9e979c9](https://github.com/googleapis/google-cloud-go/commit/9e979c9718df1de440d440e4c3e20bb3cb8c5aa1))
* **bigquery:** expose connections and schema autodetect modifier ([#5739](https://github.com/googleapis/google-cloud-go/issues/5739)) ([c72e34f](https://github.com/googleapis/google-cloud-go/commit/c72e34fd79990eedaa56ed9e5121ab1a7fc4e2da))

## [1.31.0](https://github.com/googleapis/google-cloud-go/compare/bigquery/v1.30.2...bigquery/v1.31.0) (2022-04-12)


2 changes: 1 addition & 1 deletion bigquery/internal/version.go
@@ -15,4 +15,4 @@
package internal

// Version is the current tagged release of the library.
const Version = "1.31.0"
const Version = "1.32.0"
2 changes: 1 addition & 1 deletion compute/metadata/metadata.go
@@ -16,7 +16,7 @@
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
// as documented at https://cloud.google.com/compute/docs/metadata/overview.
package metadata // import "cloud.google.com/go/compute/metadata"

import (
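For context, a minimal sketch of how this package is typically used on a Compute Engine VM; OnGCE and ProjectID are existing helpers in the package, and the surrounding program is illustrative only:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE reports whether the process is running in an environment with a
	// reachable GCE metadata server.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE; metadata server unavailable")
	}
	// ProjectID queries the metadata server for the VM's project ID.
	projectID, err := metadata.ProjectID()
	if err != nil {
		log.Fatalf("metadata.ProjectID: %v", err)
	}
	fmt.Println("project:", projectID)
}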
43 changes: 38 additions & 5 deletions datastore/query.go
@@ -38,6 +38,9 @@ const (
equal
greaterEq
greaterThan
in
notIn
notEqual

keyFieldName = "__key__"
)
@@ -48,6 +51,9 @@ var operatorToProto = map[operator]pb.PropertyFilter_Operator{
equal: pb.PropertyFilter_EQUAL,
greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL,
greaterThan: pb.PropertyFilter_GREATER_THAN,
in: pb.PropertyFilter_IN,
notIn: pb.PropertyFilter_NOT_IN,
notEqual: pb.PropertyFilter_NOT_EQUAL,
}

// filter is a conditional filter on query results.
@@ -171,25 +177,46 @@ func (q *Query) Transaction(t *Transaction) *Query {
}

// Filter returns a derivative query with a field-based filter.
//
// Deprecated: Use the FilterField method instead, which supports the same
// set of operations (and more).
//
// The filterStr argument must be a field name followed by optional space,
// followed by an operator, one of ">", "<", ">=", "<=", or "=".
// followed by an operator, one of ">", "<", ">=", "<=", "=", and "!=".
// Fields are compared against the provided value using the operator.
// Multiple filters are AND'ed together.
// Field names which contain spaces, quote marks, or operator characters
// should be passed as quoted Go string literals as returned by strconv.Quote
// or the fmt package's %q verb.
func (q *Query) Filter(filterStr string, value interface{}) *Query {
q = q.clone()
// TODO( #5977 ): Add better string parsing (or something)
filterStr = strings.TrimSpace(filterStr)
if filterStr == "" {
q.err = fmt.Errorf("datastore: invalid filter %q", filterStr)
return q
}
f := strings.TrimRight(filterStr, " ><=!")
op := strings.TrimSpace(filterStr[len(f):])
return q.FilterField(f, op, value)
}

// FilterField returns a derivative query with a field-based filter.
// The operation parameter takes the following strings: ">", "<", ">=", "<=",
// "=", "!=", "in", and "not-in".
// Fields are compared against the provided value using the operator.
// Multiple filters are AND'ed together.
// Field names which contain spaces, quote marks, or operator characters
// should be passed as quoted Go string literals as returned by strconv.Quote
// or the fmt package's %q verb.
func (q *Query) FilterField(fieldName, operator string, value interface{}) *Query {
q = q.clone()

f := filter{
FieldName: strings.TrimRight(filterStr, " ><=!"),
FieldName: fieldName,
Value: value,
}
switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {

switch o := strings.TrimSpace(operator); o {
case "<=":
f.Op = lessEq
case ">=":
@@ -200,8 +227,14 @@ func (q *Query) Filter(filterStr string, value interface{}) *Query {
f.Op = greaterThan
case "=":
f.Op = equal
case "in":
f.Op = in
case "not-in":
f.Op = notIn
case "!=":
f.Op = notEqual
default:
q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
q.err = fmt.Errorf("datastore: invalid operator %q in filter", operator)
return q
}
var err error
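As a usage sketch of the new FilterField method next to the now-deprecated Filter; the entity kind, field names, and project ID below are illustrative assumptions, not part of this change:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/datastore"
)

// Task is a placeholder entity type for this sketch.
type Task struct {
	Priority int
	Done     bool
}

func main() {
	ctx := context.Background()
	// "my-project" is a placeholder project ID.
	client, err := datastore.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatalf("datastore.NewClient: %v", err)
	}
	defer client.Close()

	// Deprecated form: the operator is embedded in the filter string.
	_ = datastore.NewQuery("Task").Filter("Priority >=", 4)

	// New form: the field name and operator are passed separately. The new
	// operators "!=", "in", and "not-in" are also accepted here, e.g.:
	// datastore.NewQuery("Task").FilterField("Done", "!=", true)
	q := datastore.NewQuery("Task").FilterField("Priority", ">=", 4)

	var tasks []Task
	if _, err := client.GetAll(ctx, q, &tasks); err != nil {
		log.Fatalf("GetAll: %v", err)
	}
	log.Printf("matched %d tasks", len(tasks))
}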
158 changes: 109 additions & 49 deletions datastore/query_test.go
@@ -391,57 +391,61 @@ func TestQueriesAreImmutable(t *testing.T) {
}
}

func TestFilterParser(t *testing.T) {
testCases := []struct {
filterStr string
wantOK bool
wantFieldName string
wantOp operator
}{
// Supported ops.
{"x<", true, "x", lessThan},
{"x <", true, "x", lessThan},
{"x <", true, "x", lessThan},
{" x < ", true, "x", lessThan},
{"x <=", true, "x", lessEq},
{"x =", true, "x", equal},
{"x >=", true, "x", greaterEq},
{"x >", true, "x", greaterThan},
{"in >", true, "in", greaterThan},
{"in>", true, "in", greaterThan},
// Valid but (currently) unsupported ops.
{"x!=", false, "", 0},
{"x !=", false, "", 0},
{" x != ", false, "", 0},
{"x IN", false, "", 0},
{"x in", false, "", 0},
// Invalid ops.
{"x EQ", false, "", 0},
{"x lt", false, "", 0},
{"x <>", false, "", 0},
{"x >>", false, "", 0},
{"x ==", false, "", 0},
{"x =<", false, "", 0},
{"x =>", false, "", 0},
{"x !", false, "", 0},
{"x ", false, "", 0},
{"x", false, "", 0},
// Quoted and interesting field names.
{"x > y =", true, "x > y", equal},
{"` x ` =", true, " x ", equal},
{`" x " =`, true, " x ", equal},
{`" \"x " =`, true, ` "x `, equal},
{`" x =`, false, "", 0},
{`" x ="`, false, "", 0},
{"` x \" =", false, "", 0},
type testFilterCase struct {
filterStr string
fieldName string
operator string
wantOp operator
wantFieldName string
}

var (
// Supported ops in both Filter and FilterField.
filterTestCases = []testFilterCase{
{"x<", "x", "<", lessThan, "x"},
{"x <", "x", "<", lessThan, "x"},
{"x <", "x", "<", lessThan, "x"},
{" x < ", "x", "<", lessThan, "x"},
{"x <=", "x", "<=", lessEq, "x"},
{"x =", "x", "=", equal, "x"},
{"x >=", "x", ">=", greaterEq, "x"},
{"x >", "x", ">", greaterThan, "x"},
{"in >", "in", ">", greaterThan, "in"},
{"in>", "in", ">", greaterThan, "in"},
{"x!=", "x", "!=", notEqual, "x"},
{"x !=", "x", "!=", notEqual, "x"},
{" x != ", "x", "!=", notEqual, "x"},
}
for _, tc := range testCases {
// Supported in FilterField only.
filterFieldTestCases = []testFilterCase{
{"x in", "x", "in", in, "x"},
{"x not-in", "x", "not-in", notIn, "x"},
{"ins in", "ins", "in", in, "ins"},
{"in not-in", "in", "not-in", notIn, "in"},
}
// Operators not supported in either filter method
filterUnsupported = []testFilterCase{
{"x IN", "x", "IN", 0, ""},
{"x NOT-IN", "x", "NOT-IN", 0, ""},
{"x EQ", "x", "EQ", 0, ""},
{"x lt", "x", "lt", 0, ""},
{"x <>", "x", "<>", 0, ""},
{"x >>", "x", ">>", 0, ""},
{"x ==", "x", "==", 0, ""},
{"x =<", "x", "=<", 0, ""},
{"x =>", "x", "=>", 0, ""},
{"x !", "x", "!", 0, ""},
{"x ", "x", "", 0, ""},
{"x", "x", "", 0, ""},
}
)

func TestFilterParser(t *testing.T) {
// Success cases
for _, tc := range filterTestCases {
q := NewQuery("foo").Filter(tc.filterStr, 42)
if ok := q.err == nil; ok != tc.wantOK {
t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK)
continue
}
if !tc.wantOK {
if q.err != nil {
t.Errorf("%q: error=%v", tc.filterStr, q.err)
continue
}
if len(q.filter) != 1 {
@@ -454,6 +458,62 @@ func TestFilterParser(t *testing.T) {
continue
}
}
// Failure cases
failureTestCases := append(filterFieldTestCases, filterUnsupported...)
for _, tc := range failureTestCases {
q := NewQuery("foo").Filter(tc.filterStr, 42)
if q.err == nil {
t.Errorf("%q: should have thrown error", tc.filterStr)
}
}
}

func TestFilterField(t *testing.T) {
successTestCases := append(filterTestCases, filterFieldTestCases...)
for _, tc := range successTestCases {
q := NewQuery("foo").FilterField(tc.fieldName, tc.operator, 42)
if q.err != nil {
t.Errorf("%q %q: error: %v", tc.fieldName, tc.operator, q.err)
continue
}
if len(q.filter) != 1 {
t.Errorf("%q: len=%d, want %d", tc.fieldName, len(q.filter), 1)
continue
}
got, want := q.filter[0], filter{tc.fieldName, tc.wantOp, 42}
if got != want {
t.Errorf("%q %q: got %v, want %v", tc.fieldName, tc.operator, got, want)
continue
}
}
for _, tc := range filterUnsupported {
q := NewQuery("foo").Filter(tc.filterStr, 42)
if q.err == nil {
t.Errorf("%q: should have thrown error", tc.filterStr)
}
}
}

func TestUnquote(t *testing.T) {
testCases := []struct {
input string
want string
}{
{`" x "`, ` x `},
{`"\" \\\"x \""`, `" \"x "`},
}

for _, tc := range testCases {
got, err := unquote(tc.input)

if err != nil {
t.Errorf("error parsing field name: %v", err)
}

if got != tc.want {
t.Errorf("field name parsing error: \nwant %v,\ngot %v", tc.want, got)
}
}
}

func TestNamespaceQuery(t *testing.T) {
10 changes: 8 additions & 2 deletions pubsub/topic.go
@@ -19,6 +19,7 @@ import (
"errors"
"fmt"
"log"
"math"
"runtime"
"strings"
"sync"
@@ -552,6 +553,7 @@ func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult {
}
err := t.scheduler.Add(msg.OrderingKey, &bundledMessage{msg, r, msgSize}, msgSize)
if err != nil {
fmt.Printf("got err: %v\n", err)
t.scheduler.Pause(msg.OrderingKey)
ipubsub.SetPublishResult(r, "", err)
}
@@ -632,8 +634,12 @@ func (t *Topic) initBundler() {
if t.PublishSettings.FlowControlSettings.MaxOutstandingBytes > 0 {
b := t.PublishSettings.FlowControlSettings.MaxOutstandingBytes
fcs.MaxOutstandingBytes = b
// If MaxOutstandingBytes is set, override BufferedByteLimit.
t.PublishSettings.BufferedByteLimit = b

// If MaxOutstandingBytes is set, disable BufferedByteLimit by setting it to maxint.
// This is because there's no way to set "unlimited" for BufferedByteLimit,
// and simply setting it to MaxOutstandingBytes occasionally leads to issues where
// BufferedByteLimit is reached even though there are resources available.
t.PublishSettings.BufferedByteLimit = math.MaxInt64
}
if t.PublishSettings.FlowControlSettings.MaxOutstandingMessages > 0 {
fcs.MaxOutstandingMessages = t.PublishSettings.FlowControlSettings.MaxOutstandingMessages
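To illustrate the setting being adjusted here, a sketch of publisher flow control configuration; with this change, setting MaxOutstandingBytes effectively disables BufferedByteLimit instead of mirroring it. The project and topic IDs are placeholders:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	// "my-project" and "my-topic" are placeholder IDs.
	client, err := pubsub.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatalf("pubsub.NewClient: %v", err)
	}
	defer client.Close()

	t := client.Topic("my-topic")
	// Limit the publisher to 10 MiB of outstanding data and block Publish
	// calls once the limit is reached.
	t.PublishSettings.FlowControlSettings = pubsub.FlowControlSettings{
		MaxOutstandingBytes:    10 * 1024 * 1024,
		MaxOutstandingMessages: 100,
		LimitExceededBehavior:  pubsub.FlowControlBlock,
	}

	res := t.Publish(ctx, &pubsub.Message{Data: []byte("hello")})
	if _, err := res.Get(ctx); err != nil {
		log.Fatalf("Publish: %v", err)
	}
}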
9 changes: 5 additions & 4 deletions pubsub/topic_test.go
@@ -554,10 +554,9 @@ func TestPublishFlowControl_Block(t *testing.T) {
publishSingleMessage(ctx, topic, "AA")
publishSingleMessage(ctx, topic, "AA")

// Sendinga third message blocks because the messages are outstanding
var publish3Completed, response3Sent sync.WaitGroup
// Sending a third message blocks because the messages are outstanding.
var publish3Completed sync.WaitGroup
publish3Completed.Add(1)
response3Sent.Add(1)
go func() {
publishSingleMessage(ctx, topic, "AAAAAA")
publish3Completed.Done()
@@ -569,6 +568,8 @@ func TestPublishFlowControl_Block(t *testing.T) {
sendResponse2.Done()
}()

// Sending a fourth message blocks because although only one message has been sent,
// the third message claimed the tokens for outstanding bytes.
var publish4Completed sync.WaitGroup
publish4Completed.Add(1)

@@ -580,7 +581,7 @@

publish3Completed.Wait()
addSingleResponse(srv, "3")
response3Sent.Done()
addSingleResponse(srv, "4")

publish4Completed.Wait()
}
18 changes: 18 additions & 0 deletions spanner/spansql/parser.go
@@ -2975,6 +2975,11 @@ func (p *parser) parseLit() (Expr, *parseError) {
p.back()
return p.parseTimestampLit()
}
case tok.caseEqual("JSON"):
if p.sniffTokenType(stringToken) {
p.back()
return p.parseJSONLit()
}
}

// TODO: struct literals
@@ -3140,6 +3145,19 @@ func (p *parser) parseTimestampLit() (TimestampLiteral, *parseError) {
return TimestampLiteral{}, p.errorf("invalid timestamp literal %q", s)
}

func (p *parser) parseJSONLit() (JSONLiteral, *parseError) {
if err := p.expect("JSON"); err != nil {
return JSONLiteral{}, err
}
s, err := p.parseStringLit()
if err != nil {
return JSONLiteral{}, err
}
// It is not guaranteed that the returned JSONLiteral is a valid JSON document;
// validation is skipped so that SQL generated from an invalid JSONLiteral (e.g. JSONLiteral("")) can still be parsed.
return JSONLiteral(s), nil
}

func (p *parser) parseStringLit() (StringLiteral, *parseError) {
tok := p.next()
if tok.err != nil {
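A small sketch of the new JSON literal support from the client side; it assumes that, like the other spansql literal types, JSONLiteral satisfies the Expr interface and can be rendered back to SQL via SQL():

package main

import (
	"fmt"

	"cloud.google.com/go/spanner/spansql"
)

func main() {
	// As noted in parseJSONLit above, the parser does not validate that the
	// literal is well-formed JSON, so both of these are accepted.
	valid := spansql.JSONLiteral(`{"a": 1}`)
	empty := spansql.JSONLiteral("")

	// Assumption: JSONLiteral renders as a JSON '...' literal, matching the
	// form exercised in the test case below (JSON '{"a": 1}').
	fmt.Println(valid.SQL())
	fmt.Println(empty.SQL())
}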
3 changes: 3 additions & 0 deletions spanner/spansql/parser_test.go
@@ -427,6 +427,9 @@ func TestParseExpr(t *testing.T) {
{`[1, 2, 3]`, Array{IntegerLiteral(1), IntegerLiteral(2), IntegerLiteral(3)}},
{`['x', 'y', 'xy']`, Array{StringLiteral("x"), StringLiteral("y"), StringLiteral("xy")}},
{`ARRAY[1, 2, 3]`, Array{IntegerLiteral(1), IntegerLiteral(2), IntegerLiteral(3)}},
// JSON literals:
// https://cloud.google.com/spanner/docs/reference/standard-sql/lexical#json_literals
{`JSON '{"a": 1}'`, JSONLiteral(`{"a": 1}`)},

// OR is lower precedence than AND.
{`A AND B OR C`, LogicalOp{LHS: LogicalOp{LHS: ID("A"), Op: And, RHS: ID("B")}, Op: Or, RHS: ID("C")}},
