STET

agent_patch

runs/2026-02-28__13-38-10__gpt-5-4/flux-pr-828/flux-pr-828.1-of-1.2026-02-28__13-38-10__gpt-5-4/agent-logs/agent.patch

13733 bytes

Back to adjudication
diff --git a/app/v2/pkg/engine/datasource/graphql_datasource/astminifier.go b/app/v2/pkg/engine/datasource/graphql_datasource/astminifier.go
new file mode 100644
index 0000000..2bcc51e
--- /dev/null
+++ b/app/v2/pkg/engine/datasource/graphql_datasource/astminifier.go
@@ -0,0 +1,287 @@
+package graphql_datasource
+
+import (
+	"bytes"
+	"fmt"
+	"slices"
+
+	"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
+	"github.com/wundergraph/graphql-go-tools/v2/pkg/astparser"
+	"github.com/wundergraph/graphql-go-tools/v2/pkg/astprinter"
+	"github.com/wundergraph/graphql-go-tools/v2/pkg/astvalidation"
+	"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
+	"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
+)
+
+type selectionSetInfo struct {
+	ref      int
+	typeName string
+	depth    int
+}
+
+type selectionSetGroup struct {
+	typeName string
+	refs     []int
+	depth    int
+}
+
+type selectionSetCollector struct {
+	walker *astvisitor.Walker
+	def    *ast.Document
+	sets   []selectionSetInfo
+}
+
+func (c *selectionSetCollector) EnterSelectionSet(ref int) {
+	if len(c.walker.Ancestors) != 0 && c.walker.Ancestors[len(c.walker.Ancestors)-1].Kind == ast.NodeKindFragmentDefinition {
+		return
+	}
+
+	typeName := c.walker.EnclosingTypeDefinition.NameString(c.def)
+	if typeName == "" {
+		return
+	}
+
+	c.sets = append(c.sets, selectionSetInfo{
+		ref:      ref,
+		typeName: typeName,
+		depth:    len(c.walker.Ancestors),
+	})
+}
+
+func (c *selectionSetCollector) LeaveSelectionSet(_ int) {}
+
+func minifySubgraphOperationIfSmaller(definition *ast.Document, original []byte) []byte {
+	operation := ast.NewSmallDocument()
+	operation.Input.ResetInputBytes(original)
+
+	report := &operationreport.Report{}
+	parser := astparser.NewParser()
+	parser.Parse(operation, report)
+	if report.HasErrors() {
+		return original
+	}
+
+	if !minifySubgraphOperation(operation, definition) {
+		return original
+	}
+
+	report.Reset()
+	validator := astvalidation.DefaultOperationValidator()
+	validator.Validate(operation, definition, report)
+	if report.HasErrors() {
+		return original
+	}
+
+	buf := &bytes.Buffer{}
+	if err := astprinter.Print(operation, definition, buf); err != nil {
+		return original
+	}
+
+	if buf.Len() >= len(original) {
+		return original
+	}
+
+	return buf.Bytes()
+}
+
+func minifySubgraphOperation(operation, definition *ast.Document) bool {
+	groups := collectSelectionSetGroups(operation, definition)
+	if len(groups) == 0 {
+		return false
+	}
+
+	slices.SortFunc(groups, func(left, right selectionSetGroup) int {
+		switch {
+		case left.depth > right.depth:
+			return -1
+		case left.depth < right.depth:
+			return 1
+		default:
+			return 0
+		}
+	})
+
+	minified := false
+	for i := range groups {
+		if len(groups[i].refs) < 2 {
+			continue
+		}
+
+		fragmentName := nextMinifiedFragmentName(operation)
+		nameRef := operation.Input.AppendInputBytes(fragmentName)
+		selectionSetRef := operation.CopySelectionSet(groups[i].refs[0])
+		typeRef := operation.AddNamedType([]byte(groups[i].typeName))
+
+		fragmentRef := len(operation.FragmentDefinitions)
+		operation.FragmentDefinitions = append(operation.FragmentDefinitions, ast.FragmentDefinition{
+			Name: nameRef,
+			TypeCondition: ast.TypeCondition{
+				Type: typeRef,
+			},
+			SelectionSet:  selectionSetRef,
+			HasSelections: true,
+		})
+		operation.AddRootNode(ast.Node{
+			Kind: ast.NodeKindFragmentDefinition,
+			Ref:  fragmentRef,
+		})
+
+		for _, ref := range groups[i].refs {
+			operation.EmptySelectionSet(ref)
+			spreadRef := operation.AddFragmentSpread(ast.FragmentSpread{
+				FragmentName: nameRef,
+			})
+			operation.AddSelection(ref, ast.Selection{
+				Kind: ast.SelectionKindFragmentSpread,
+				Ref:  spreadRef,
+			})
+		}
+
+		minified = true
+	}
+
+	return minified
+}
+
+func collectSelectionSetGroups(operation, definition *ast.Document) []selectionSetGroup {
+	walker := astvisitor.NewWalker(48)
+	collector := &selectionSetCollector{walker: &walker, def: definition}
+	walker.RegisterSelectionSetVisitor(collector)
+	walker.Walk(operation, definition, nil)
+
+	if len(collector.sets) < 2 {
+		return nil
+	}
+
+	used := make([]bool, len(collector.sets))
+	groups := make([]selectionSetGroup, 0)
+
+	for i := range collector.sets {
+		if used[i] {
+			continue
+		}
+
+		group := selectionSetGroup{
+			typeName: collector.sets[i].typeName,
+			refs:     []int{collector.sets[i].ref},
+			depth:    collector.sets[i].depth,
+		}
+
+		for j := i + 1; j < len(collector.sets); j++ {
+			if used[j] || collector.sets[i].typeName != collector.sets[j].typeName {
+				continue
+			}
+			if !selectionSetsAreEqual(operation, collector.sets[i].ref, collector.sets[j].ref) {
+				continue
+			}
+
+			group.refs = append(group.refs, collector.sets[j].ref)
+			used[j] = true
+		}
+
+		if len(group.refs) > 1 {
+			groups = append(groups, group)
+		}
+	}
+
+	return groups
+}
+
+func selectionSetsAreEqual(document *ast.Document, left, right int) bool {
+	leftSelections := document.SelectionSets[left].SelectionRefs
+	rightSelections := document.SelectionSets[right].SelectionRefs
+	if len(leftSelections) != len(rightSelections) {
+		return false
+	}
+
+	for i := range leftSelections {
+		leftSelection := document.Selections[leftSelections[i]]
+		rightSelection := document.Selections[rightSelections[i]]
+		if leftSelection.Kind != rightSelection.Kind {
+			return false
+		}
+
+		switch leftSelection.Kind {
+		case ast.SelectionKindField:
+			if !fieldsAreEqual(document, leftSelection.Ref, rightSelection.Ref) {
+				return false
+			}
+		case ast.SelectionKindInlineFragment:
+			if !inlineFragmentsAreEqual(document, leftSelection.Ref, rightSelection.Ref) {
+				return false
+			}
+		case ast.SelectionKindFragmentSpread:
+			if !fragmentSpreadsAreEqual(document, leftSelection.Ref, rightSelection.Ref) {
+				return false
+			}
+		default:
+			return false
+		}
+	}
+
+	return true
+}
+
+func fieldsAreEqual(document *ast.Document, left, right int) bool {
+	if document.FieldHasSelections(left) != document.FieldHasSelections(right) {
+		return false
+	}
+
+	if !document.FieldsAreEqualFlat(left, right, true) {
+		return false
+	}
+
+	if !document.FieldHasSelections(left) {
+		return true
+	}
+
+	return selectionSetsAreEqual(document, document.Fields[left].SelectionSet, document.Fields[right].SelectionSet)
+}
+
+func inlineFragmentsAreEqual(document *ast.Document, left, right int) bool {
+	leftType := document.InlineFragmentTypeConditionName(left)
+	rightType := document.InlineFragmentTypeConditionName(right)
+	if !bytes.Equal(leftType, rightType) {
+		return false
+	}
+
+	if document.InlineFragmentHasDirectives(left) != document.InlineFragmentHasDirectives(right) {
+		return false
+	}
+	if document.InlineFragmentHasDirectives(left) && !document.DirectiveSetsAreEqual(document.InlineFragmentDirectives(left), document.InlineFragmentDirectives(right)) {
+		return false
+	}
+
+	if document.InlineFragments[left].HasSelections != document.InlineFragments[right].HasSelections {
+		return false
+	}
+	if !document.InlineFragments[left].HasSelections {
+		return true
+	}
+
+	return selectionSetsAreEqual(document, document.InlineFragments[left].SelectionSet, document.InlineFragments[right].SelectionSet)
+}
+
+func fragmentSpreadsAreEqual(document *ast.Document, left, right int) bool {
+	if !bytes.Equal(document.FragmentSpreadNameBytes(left), document.FragmentSpreadNameBytes(right)) {
+		return false
+	}
+
+	if document.FragmentSpreadHasDirectives(left) != document.FragmentSpreadHasDirectives(right) {
+		return false
+	}
+	if !document.FragmentSpreadHasDirectives(left) {
+		return true
+	}
+
+	return document.DirectiveSetsAreEqual(document.FragmentSpreads[left].Directives.Refs, document.FragmentSpreads[right].Directives.Refs)
+}
+
+func nextMinifiedFragmentName(operation *ast.Document) []byte {
+	for i := 0; ; i++ {
+		name := []byte(fmt.Sprintf("__wgf%d", i))
+		if _, exists := operation.FragmentDefinitionRef(name); !exists {
+			return name
+		}
+	}
+}
diff --git a/app/v2/pkg/engine/datasource/graphql_datasource/astminifier_test.go b/app/v2/pkg/engine/datasource/graphql_datasource/astminifier_test.go
new file mode 100644
index 0000000..9e379f4
--- /dev/null
+++ b/app/v2/pkg/engine/datasource/graphql_datasource/astminifier_test.go
@@ -0,0 +1,113 @@
+package graphql_datasource
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/wundergraph/graphql-go-tools/v2/pkg/internal/unsafeparser"
+	"github.com/wundergraph/graphql-go-tools/v2/pkg/internal/unsafeprinter"
+)
+
+func TestMinifySubgraphOperation(t *testing.T) {
+	definition := unsafeparser.ParseGraphqlDocumentStringWithBaseSchema(`
+		type Query {
+			a: User!
+			b: User!
+			c: User!
+			d: User!
+			e: User!
+		}
+
+		type User {
+			id: ID!
+			name: String!
+			username: String!
+			profile: Profile!
+		}
+
+		type Profile {
+			avatar: String!
+			bio: String!
+			location: String!
+			website: String!
+		}
+	`)
+
+	operation := unsafeparser.ParseGraphqlDocumentString(`query{a{id name username profile{avatar bio location website}} b{id name username profile{avatar bio location website}} c{id name username profile{avatar bio location website}} d{id name username profile{avatar bio location website}} e{id name username profile{avatar bio location website}}}`)
+	require.True(t, minifySubgraphOperation(&operation, &definition))
+
+	require.Equal(
+		t,
+		unsafeprinter.Prettify(`
+			query {
+				a {
+					id
+					name
+					username
+					profile {
+						...__wgf0
+					}
+				}
+				b {
+					id
+					name
+					username
+					profile {
+						...__wgf0
+					}
+				}
+				c {
+					id
+					name
+					username
+					profile {
+						...__wgf0
+					}
+				}
+				d {
+					id
+					name
+					username
+					profile {
+						...__wgf0
+					}
+				}
+				e {
+					id
+					name
+					username
+					profile {
+						...__wgf0
+					}
+				}
+			}
+
+			fragment __wgf0 on Profile {
+				avatar
+				bio
+				location
+				website
+			}
+		`),
+		unsafeprinter.PrettyPrint(&operation, &definition),
+	)
+}
+
+func TestMinifySubgraphOperationIfSmaller_NoShrink(t *testing.T) {
+	definition := unsafeparser.ParseGraphqlDocumentStringWithBaseSchema(`
+		type Query {
+			a: User!
+			b: User!
+		}
+
+		type User {
+			id: ID!
+		}
+	`)
+
+	original := []byte(`query{a{id} b{id}}`)
+	minified := minifySubgraphOperationIfSmaller(&definition, original)
+
+	require.Equal(t, string(original), string(minified))
+}
diff --git a/app/v2/pkg/engine/datasource/graphql_datasource/configuration.go b/app/v2/pkg/engine/datasource/graphql_datasource/configuration.go
index a0cb6ab..4e64e26 100644
--- a/app/v2/pkg/engine/datasource/graphql_datasource/configuration.go
+++ b/app/v2/pkg/engine/datasource/graphql_datasource/configuration.go
@@ -14,22 +14,25 @@ import (
 )
 
 type ConfigurationInput struct {
-	Fetch                  *FetchConfiguration
-	Subscription           *SubscriptionConfiguration
-	SchemaConfiguration    *SchemaConfiguration
-	CustomScalarTypeFields []SingleTypeField
+	Fetch                   *FetchConfiguration
+	Subscription            *SubscriptionConfiguration
+	SchemaConfiguration     *SchemaConfiguration
+	CustomScalarTypeFields  []SingleTypeField
+	EnableOperationMinifier bool
 }
 
 type Configuration struct {
-	fetch                  *FetchConfiguration
-	subscription           *SubscriptionConfiguration
-	schemaConfiguration    SchemaConfiguration
-	customScalarTypeFields []SingleTypeField
+	fetch                   *FetchConfiguration
+	subscription            *SubscriptionConfiguration
+	schemaConfiguration     SchemaConfiguration
+	customScalarTypeFields  []SingleTypeField
+	enableOperationMinifier bool
 }
 
 func NewConfiguration(input ConfigurationInput) (Configuration, error) {
 	cfg := Configuration{
-		customScalarTypeFields: input.CustomScalarTypeFields,
+		customScalarTypeFields:  input.CustomScalarTypeFields,
+		enableOperationMinifier: input.EnableOperationMinifier,
 	}
 
 	if input.SchemaConfiguration == nil {
@@ -70,6 +73,10 @@ func NewConfiguration(input ConfigurationInput) (Configuration, error) {
 	return cfg, nil
 }
 
+func (c *Configuration) OperationMinifierEnabled() bool {
+	return c.enableOperationMinifier
+}
+
 func (c *Configuration) UpstreamSchema() (*ast.Document, error) {
 	if c.schemaConfiguration.upstreamSchemaAst == nil {
 		return nil, errors.New("upstream schema is not parsed")
diff --git a/app/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource.go b/app/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource.go
index d9ea5f5..975304e 100644
--- a/app/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource.go
+++ b/app/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource.go
@@ -1342,8 +1342,6 @@ func (p *Planner[T]) printOperation() []byte {
 	}
 
 	// p.printQueryPlan(p.upstreamOperation) // uncomment to print upstream operation before normalization
-	p.printQueryPlan(operation)
-
 	buf.Reset()
 
 	// print upstream operation
@@ -1353,7 +1351,14 @@ func (p *Planner[T]) printOperation() []byte {
 		return nil
 	}
 
-	return buf.Bytes()
+	printedOperation := buf.Bytes()
+	if p.config.OperationMinifierEnabled() {
+		printedOperation = minifySubgraphOperationIfSmaller(definition, printedOperation)
+	}
+
+	p.printQueryPlan(operation)
+
+	return printedOperation
 }
 
 func (p *Planner[T]) stopWithError(msg string, args ...interface{}) {