2015-10-28 02:33:17 +02:00
|
|
|
|
package graphql
|
2015-07-11 09:27:14 +02:00
|
|
|
|
|
|
|
|
|
|
import (
|
2017-07-16 23:02:02 +08:00
|
|
|
|
"context"
|
2015-11-03 22:02:13 -08:00
|
|
|
|
"errors"
|
2015-08-15 00:43:24 -05:00
|
|
|
|
"fmt"
|
2015-09-14 09:41:13 +08:00
|
|
|
|
"reflect"
|
2019-07-03 12:45:55 -07:00
|
|
|
|
"sort"
|
2015-09-19 21:51:32 +08:00
|
|
|
|
"strings"
|
2015-10-30 00:49:49 +02:00
|
|
|
|
|
2015-11-05 10:56:35 +08:00
|
|
|
|
"github.com/graphql-go/graphql/gqlerrors"
|
|
|
|
|
|
"github.com/graphql-go/graphql/language/ast"
|
2015-07-11 09:27:14 +02:00
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
// ExecuteParams contains everything Execute needs to run one GraphQL request.
type ExecuteParams struct {
	// Schema is the type system definition the request is executed against.
	Schema Schema
	// Root is the top-level source value handed to root-field resolvers.
	Root interface{}
	// AST is the parsed GraphQL document to execute.
	AST *ast.Document
	// OperationName selects which operation in AST to run; required when the
	// document contains more than one operation.
	OperationName string
	// Args holds the raw variable values supplied for the operation
	// (coerced later via getVariableValues).
	Args map[string]interface{}

	// Context may be provided to pass application-specific per-request
	// information to resolve functions.
	Context context.Context
}
|
|
|
|
|
|
|
2015-11-03 22:02:13 -08:00
|
|
|
|
func Execute(p ExecuteParams) (result *Result) {
|
2017-02-08 17:10:51 +13:00
|
|
|
|
// Use background context if no context was provided
|
|
|
|
|
|
ctx := p.Context
|
|
|
|
|
|
if ctx == nil {
|
|
|
|
|
|
ctx = context.Background()
|
|
|
|
|
|
}
|
2019-03-07 17:16:28 +01:00
|
|
|
|
// run executionDidStart functions from extensions
|
|
|
|
|
|
extErrs, executionFinishFn := handleExtensionsExecutionDidStart(&p)
|
|
|
|
|
|
if len(extErrs) != 0 {
|
|
|
|
|
|
return &Result{
|
|
|
|
|
|
Errors: extErrs,
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
defer func() {
|
|
|
|
|
|
extErrs = executionFinishFn(result)
|
|
|
|
|
|
if len(extErrs) != 0 {
|
|
|
|
|
|
result.Errors = append(result.Errors, extErrs...)
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
addExtensionResults(&p, result)
|
|
|
|
|
|
}()
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
2019-04-02 12:30:06 +03:00
|
|
|
|
resultChannel := make(chan *Result, 2)
|
|
|
|
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
|
|
result := &Result{}
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
2018-04-27 20:54:56 +08:00
|
|
|
|
defer func() {
|
|
|
|
|
|
if err := recover(); err != nil {
|
2019-04-02 12:30:06 +03:00
|
|
|
|
result.Errors = append(result.Errors, gqlerrors.FormatError(err.(error)))
|
2018-04-27 20:54:56 +08:00
|
|
|
|
}
|
2019-04-02 12:30:06 +03:00
|
|
|
|
resultChannel <- result
|
2018-04-27 20:54:56 +08:00
|
|
|
|
}()
|
2019-04-02 12:30:06 +03:00
|
|
|
|
|
2018-01-05 17:08:39 +01:00
|
|
|
|
exeContext, err := buildExecutionContext(buildExecutionCtxParams{
|
2017-02-08 17:10:51 +13:00
|
|
|
|
Schema: p.Schema,
|
|
|
|
|
|
Root: p.Root,
|
|
|
|
|
|
AST: p.AST,
|
|
|
|
|
|
OperationName: p.OperationName,
|
|
|
|
|
|
Args: p.Args,
|
|
|
|
|
|
Result: result,
|
|
|
|
|
|
Context: p.Context,
|
|
|
|
|
|
})
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
2017-02-08 17:10:51 +13:00
|
|
|
|
if err != nil {
|
2019-04-02 12:30:06 +03:00
|
|
|
|
result.Errors = append(result.Errors, gqlerrors.FormatError(err.(error)))
|
|
|
|
|
|
resultChannel <- result
|
2017-02-08 17:10:51 +13:00
|
|
|
|
return
|
2015-09-19 18:26:46 +08:00
|
|
|
|
}
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
2019-04-02 12:30:06 +03:00
|
|
|
|
resultChannel <- executeOperation(executeOperationParams{
|
2017-02-08 17:10:51 +13:00
|
|
|
|
ExecutionContext: exeContext,
|
|
|
|
|
|
Root: p.Root,
|
|
|
|
|
|
Operation: exeContext.Operation,
|
|
|
|
|
|
})
|
2019-04-02 12:30:06 +03:00
|
|
|
|
}()
|
2017-02-08 17:10:51 +13:00
|
|
|
|
|
|
|
|
|
|
select {
|
|
|
|
|
|
case <-ctx.Done():
|
2019-04-02 12:30:06 +03:00
|
|
|
|
result := &Result{}
|
|
|
|
|
|
result.Errors = append(result.Errors, gqlerrors.FormatError(ctx.Err()))
|
|
|
|
|
|
return result
|
2017-02-08 17:10:51 +13:00
|
|
|
|
case r := <-resultChannel:
|
2019-04-02 12:30:06 +03:00
|
|
|
|
return r
|
2017-02-08 17:10:51 +13:00
|
|
|
|
}
|
2015-07-11 09:27:14 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
2018-01-05 17:08:39 +01:00
|
|
|
|
// buildExecutionCtxParams groups the inputs to buildExecutionContext.
type buildExecutionCtxParams struct {
	// Schema is the type system the document is executed against.
	Schema Schema
	// Root is the top-level source value for root resolvers.
	Root interface{}
	// AST is the parsed document whose definitions are scanned for the
	// requested operation and for fragment definitions.
	AST *ast.Document
	// OperationName selects the operation when AST holds more than one.
	OperationName string
	// Args supplies the raw variable values for the operation.
	Args map[string]interface{}
	// Result is carried along with the request; NOTE(review): it is not
	// read by buildExecutionContext itself in this file — confirm callers
	// rely on it being threaded through.
	Result *Result
	// Context is copied into the executionContext for resolvers.
	Context context.Context
}
|
2018-01-05 17:08:39 +01:00
|
|
|
|
|
|
|
|
|
|
// executionContext carries the per-request state threaded through
// field collection and resolution.
type executionContext struct {
	// Schema is the type system in effect for this request.
	Schema Schema
	// Fragments maps fragment name -> definition, gathered from the document.
	Fragments map[string]ast.Definition
	// Root is the top-level source value.
	Root interface{}
	// Operation is the operation definition being executed.
	Operation ast.Definition
	// VariableValues holds the coerced variable values for the operation.
	VariableValues map[string]interface{}
	// Errors accumulates formatted errors reported during execution; it is
	// copied into the Result by executeFields/executeFieldsSerially.
	Errors []gqlerrors.FormattedError
	// Context is the caller-supplied per-request context.
	Context context.Context
}
|
|
|
|
|
|
|
2018-01-05 17:08:39 +01:00
|
|
|
|
func buildExecutionContext(p buildExecutionCtxParams) (*executionContext, error) {
|
|
|
|
|
|
eCtx := &executionContext{}
|
2016-03-08 10:26:35 +08:00
|
|
|
|
var operation *ast.OperationDefinition
|
2015-10-30 00:49:49 +02:00
|
|
|
|
fragments := map[string]ast.Definition{}
|
2016-03-08 10:26:35 +08:00
|
|
|
|
|
|
|
|
|
|
for _, definition := range p.AST.Definitions {
|
|
|
|
|
|
switch definition := definition.(type) {
|
2015-10-30 00:49:49 +02:00
|
|
|
|
case *ast.OperationDefinition:
|
2016-03-08 10:26:35 +08:00
|
|
|
|
if (p.OperationName == "") && operation != nil {
|
|
|
|
|
|
return nil, errors.New("Must provide operation name if query contains multiple operations.")
|
|
|
|
|
|
}
|
|
|
|
|
|
if p.OperationName == "" || definition.GetName() != nil && definition.GetName().Value == p.OperationName {
|
|
|
|
|
|
operation = definition
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2015-10-30 00:49:49 +02:00
|
|
|
|
case *ast.FragmentDefinition:
|
2015-09-16 11:42:48 +08:00
|
|
|
|
key := ""
|
2016-03-08 10:26:35 +08:00
|
|
|
|
if definition.GetName() != nil && definition.GetName().Value != "" {
|
|
|
|
|
|
key = definition.GetName().Value
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2016-03-08 10:26:35 +08:00
|
|
|
|
fragments[key] = definition
|
2015-09-16 11:42:48 +08:00
|
|
|
|
default:
|
2016-03-08 10:26:35 +08:00
|
|
|
|
return nil, fmt.Errorf("GraphQL cannot execute a request containing a %v", definition.GetKind())
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
|
|
|
|
|
}
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
2016-03-08 10:26:35 +08:00
|
|
|
|
if operation == nil {
|
2016-05-30 12:07:33 +08:00
|
|
|
|
if p.OperationName != "" {
|
2016-03-08 10:26:35 +08:00
|
|
|
|
return nil, fmt.Errorf(`Unknown operation named "%v".`, p.OperationName)
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2016-05-30 17:21:35 +08:00
|
|
|
|
return nil, fmt.Errorf(`Must provide an operation.`)
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
2015-09-16 11:42:48 +08:00
|
|
|
|
variableValues, err := getVariableValues(p.Schema, operation.GetVariableDefinitions(), p.Args)
|
|
|
|
|
|
if err != nil {
|
2015-11-03 22:02:13 -08:00
|
|
|
|
return nil, err
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
eCtx.Schema = p.Schema
|
|
|
|
|
|
eCtx.Fragments = fragments
|
|
|
|
|
|
eCtx.Root = p.Root
|
|
|
|
|
|
eCtx.Operation = operation
|
|
|
|
|
|
eCtx.VariableValues = variableValues
|
Add context parameter that passes through the API to resolvers.
This adds a net/context.Context parameter that is threaded through from
the calling API to any resolver functions. This allows an application
to provide custom, per-request handling when resolving queries.
For example, when working on App Engine, all interactions with the
datastore require a per-request context. Other examples include
authentication, logging, or auditing of graphql operations.
An alternative that was considered was to use an arbitrary, application-
provided interface{} value -- that is, the application could stick
anything in that field and it would be up to the app to handle it. This
is fairly reasonable, however using context.Context has a few other
advantages:
- It provides a clean way for the graphql execution system to handle
parallelizing and deadlining/cancelling requests. Doing so would
provide a consistent API to developers to also hook into such
operations.
- It fits with a potentially upcoming trend of using context.Context
for most HTTP handlers.
Going with an arbitrary interface{} now, but later using context.Context
for its other uses as well would result in redundant mechanisms to provide
external (application) metadata to requests.
Another potential alternative is to specifically provide just the
*http.Request pointer. Many libraries do this and use a global,
synchronized map[*http.Request]metadata lookup table. This would satisfy
the AppEngine requirements and provide a minimal mechanism to provide
additional metadata, but the global LUT is clumsy and, again, if
context.Context were later used to manage subprocessing it would provide
a redundant metadata mechanism.
2015-11-30 23:02:58 -08:00
|
|
|
|
eCtx.Context = p.Context
|
2015-11-03 22:02:13 -08:00
|
|
|
|
return eCtx, nil
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
2018-01-05 17:08:39 +01:00
|
|
|
|
// executeOperationParams groups the arguments to executeOperation.
type executeOperationParams struct {
	// ExecutionContext is the prepared per-request execution state.
	ExecutionContext *executionContext
	// Root is the top-level source value for root-field resolvers.
	Root interface{}
	// Operation is the operation definition to execute.
	Operation ast.Definition
}
|
|
|
|
|
|
|
2018-01-05 17:08:39 +01:00
|
|
|
|
func executeOperation(p executeOperationParams) *Result {
|
2015-11-03 22:02:13 -08:00
|
|
|
|
operationType, err := getOperationRootType(p.ExecutionContext.Schema, p.Operation)
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
|
return &Result{Errors: gqlerrors.FormatErrors(err)}
|
|
|
|
|
|
}
|
2015-09-12 11:09:33 +08:00
|
|
|
|
|
2018-01-05 17:08:39 +01:00
|
|
|
|
fields := collectFields(collectFieldsParams{
|
2016-04-06 17:23:31 +08:00
|
|
|
|
ExeContext: p.ExecutionContext,
|
|
|
|
|
|
RuntimeType: operationType,
|
|
|
|
|
|
SelectionSet: p.Operation.GetSelectionSet(),
|
2015-11-03 22:02:13 -08:00
|
|
|
|
})
|
|
|
|
|
|
|
2018-01-05 17:08:39 +01:00
|
|
|
|
executeFieldsParams := executeFieldsParams{
|
2015-07-16 13:25:46 +02:00
|
|
|
|
ExecutionContext: p.ExecutionContext,
|
|
|
|
|
|
ParentType: operationType,
|
|
|
|
|
|
Source: p.Root,
|
|
|
|
|
|
Fields: fields,
|
2015-07-12 00:11:14 +02:00
|
|
|
|
}
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
2016-05-30 23:50:59 +08:00
|
|
|
|
if p.Operation.GetOperation() == ast.OperationTypeMutation {
|
2015-11-03 22:02:13 -08:00
|
|
|
|
return executeFieldsSerially(executeFieldsParams)
|
2015-09-15 12:05:34 +08:00
|
|
|
|
}
|
2016-04-15 18:02:57 +08:00
|
|
|
|
return executeFields(executeFieldsParams)
|
|
|
|
|
|
|
2015-07-11 17:59:28 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
2015-09-12 00:22:22 +08:00
|
|
|
|
// Extracts the root type of the operation from the schema.
|
2015-11-03 22:02:13 -08:00
|
|
|
|
func getOperationRootType(schema Schema, operation ast.Definition) (*Object, error) {
|
2015-09-14 09:41:13 +08:00
|
|
|
|
if operation == nil {
|
2019-01-03 11:20:31 +03:00
|
|
|
|
return nil, errors.New("Can only execute queries, mutations and subscription")
|
2015-09-14 09:41:13 +08:00
|
|
|
|
}
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
2015-08-15 00:43:24 -05:00
|
|
|
|
switch operation.GetOperation() {
|
2016-05-30 23:50:59 +08:00
|
|
|
|
case ast.OperationTypeQuery:
|
2015-11-07 16:01:14 -08:00
|
|
|
|
return schema.QueryType(), nil
|
2016-05-30 23:50:59 +08:00
|
|
|
|
case ast.OperationTypeMutation:
|
2015-11-07 16:01:14 -08:00
|
|
|
|
mutationType := schema.MutationType()
|
2019-01-03 11:20:31 +03:00
|
|
|
|
if mutationType == nil || mutationType.PrivateName == "" {
|
2016-03-11 13:29:52 +08:00
|
|
|
|
return nil, gqlerrors.NewError(
|
|
|
|
|
|
"Schema is not configured for mutations",
|
|
|
|
|
|
[]ast.Node{operation},
|
|
|
|
|
|
"",
|
|
|
|
|
|
nil,
|
|
|
|
|
|
[]int{},
|
|
|
|
|
|
nil,
|
|
|
|
|
|
)
|
2015-08-15 00:43:24 -05:00
|
|
|
|
}
|
2015-11-03 22:02:13 -08:00
|
|
|
|
return mutationType, nil
|
2016-05-30 23:50:59 +08:00
|
|
|
|
case ast.OperationTypeSubscription:
|
2016-03-11 13:29:52 +08:00
|
|
|
|
subscriptionType := schema.SubscriptionType()
|
2019-01-03 11:20:31 +03:00
|
|
|
|
if subscriptionType == nil || subscriptionType.PrivateName == "" {
|
2016-03-11 13:29:52 +08:00
|
|
|
|
return nil, gqlerrors.NewError(
|
|
|
|
|
|
"Schema is not configured for subscriptions",
|
|
|
|
|
|
[]ast.Node{operation},
|
|
|
|
|
|
"",
|
|
|
|
|
|
nil,
|
|
|
|
|
|
[]int{},
|
|
|
|
|
|
nil,
|
|
|
|
|
|
)
|
|
|
|
|
|
}
|
|
|
|
|
|
return subscriptionType, nil
|
2015-08-15 00:43:24 -05:00
|
|
|
|
default:
|
2016-03-11 13:29:52 +08:00
|
|
|
|
return nil, gqlerrors.NewError(
|
|
|
|
|
|
"Can only execute queries, mutations and subscription",
|
|
|
|
|
|
[]ast.Node{operation},
|
|
|
|
|
|
"",
|
|
|
|
|
|
nil,
|
|
|
|
|
|
[]int{},
|
|
|
|
|
|
nil,
|
|
|
|
|
|
)
|
2015-08-15 00:43:24 -05:00
|
|
|
|
}
|
2015-07-12 00:11:14 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
2018-01-05 17:08:39 +01:00
|
|
|
|
// executeFieldsParams groups the arguments shared by executeFields,
// executeFieldsSerially, and executeSubFields.
type executeFieldsParams struct {
	// ExecutionContext carries the per-request state (schema, fragments,
	// variables, accumulated errors).
	ExecutionContext *executionContext
	// ParentType is the object type whose fields are being resolved.
	ParentType *Object
	// Source is the value resolvers run against; defaulted to an empty map
	// when nil.
	Source interface{}
	// Fields maps response name -> collected field ASTs for that name.
	Fields map[string][]*ast.Field
	// Path is the response path so far; extended per field via WithKey.
	Path *ResponsePath
}
|
|
|
|
|
|
|
|
|
|
|
|
// Implements the "Evaluating selection sets" section of the spec for "write" mode.
|
2018-01-05 17:08:39 +01:00
|
|
|
|
func executeFieldsSerially(p executeFieldsParams) *Result {
|
2015-09-16 11:42:48 +08:00
|
|
|
|
if p.Source == nil {
|
|
|
|
|
|
p.Source = map[string]interface{}{}
|
|
|
|
|
|
}
|
|
|
|
|
|
if p.Fields == nil {
|
2015-10-30 00:49:49 +02:00
|
|
|
|
p.Fields = map[string][]*ast.Field{}
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
2018-04-09 16:08:18 -07:00
|
|
|
|
finalResults := make(map[string]interface{}, len(p.Fields))
|
2019-07-03 12:45:55 -07:00
|
|
|
|
for _, orderedField := range orderedFields(p.Fields) {
|
|
|
|
|
|
responseName := orderedField.responseName
|
|
|
|
|
|
fieldASTs := orderedField.fieldASTs
|
2018-07-18 22:55:20 -05:00
|
|
|
|
fieldPath := p.Path.WithKey(responseName)
|
|
|
|
|
|
resolved, state := resolveField(p.ExecutionContext, p.ParentType, p.Source, fieldASTs, fieldPath)
|
2015-09-19 18:26:46 +08:00
|
|
|
|
if state.hasNoFieldDefs {
|
2015-09-17 18:32:16 +08:00
|
|
|
|
continue
|
|
|
|
|
|
}
|
|
|
|
|
|
finalResults[responseName] = resolved
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2018-09-02 18:01:00 +00:00
|
|
|
|
dethunkMapDepthFirst(finalResults)
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
|
|
|
|
|
return &Result{
|
|
|
|
|
|
Data: finalResults,
|
|
|
|
|
|
Errors: p.ExecutionContext.Errors,
|
|
|
|
|
|
}
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Implements the "Evaluating selection sets" section of the spec for "read" mode.
|
2018-01-05 17:08:39 +01:00
|
|
|
|
func executeFields(p executeFieldsParams) *Result {
|
2018-09-02 01:44:09 +00:00
|
|
|
|
finalResults := executeSubFields(p)
|
|
|
|
|
|
|
2018-09-02 18:01:00 +00:00
|
|
|
|
dethunkMapWithBreadthFirstTraversal(finalResults)
|
2018-09-02 01:44:09 +00:00
|
|
|
|
|
|
|
|
|
|
return &Result{
|
|
|
|
|
|
Data: finalResults,
|
|
|
|
|
|
Errors: p.ExecutionContext.Errors,
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
func executeSubFields(p executeFieldsParams) map[string]interface{} {
|
2019-02-21 15:01:51 +01:00
|
|
|
|
|
2015-09-16 11:42:48 +08:00
|
|
|
|
if p.Source == nil {
|
|
|
|
|
|
p.Source = map[string]interface{}{}
|
|
|
|
|
|
}
|
|
|
|
|
|
if p.Fields == nil {
|
2015-10-30 00:49:49 +02:00
|
|
|
|
p.Fields = map[string][]*ast.Field{}
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
2018-04-09 16:08:18 -07:00
|
|
|
|
finalResults := make(map[string]interface{}, len(p.Fields))
|
2015-09-16 11:42:48 +08:00
|
|
|
|
for responseName, fieldASTs := range p.Fields {
|
2018-07-18 22:55:20 -05:00
|
|
|
|
fieldPath := p.Path.WithKey(responseName)
|
|
|
|
|
|
resolved, state := resolveField(p.ExecutionContext, p.ParentType, p.Source, fieldASTs, fieldPath)
|
2015-09-19 18:26:46 +08:00
|
|
|
|
if state.hasNoFieldDefs {
|
2015-09-17 18:32:16 +08:00
|
|
|
|
continue
|
|
|
|
|
|
}
|
|
|
|
|
|
finalResults[responseName] = resolved
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2015-11-03 22:02:13 -08:00
|
|
|
|
|
2018-09-02 01:44:09 +00:00
|
|
|
|
return finalResults
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2018-09-02 03:59:25 +00:00
|
|
|
|
// dethunkQueue is a structure that allows us to execute a classic breadth-first traversal.
type dethunkQueue struct {
	DethunkFuncs []func()
}

// push appends a pending dethunk step to the back of the queue.
func (d *dethunkQueue) push(f func()) {
	d.DethunkFuncs = append(d.DethunkFuncs, f)
}

// shift removes and returns the dethunk step at the front of the queue.
func (d *dethunkQueue) shift() func() {
	front := d.DethunkFuncs[0]
	d.DethunkFuncs = d.DethunkFuncs[1:]
	return front
}

// dethunkMapWithBreadthFirstTraversal performs a breadth-first descent of the map, calling any thunks
// in the map values and replacing each thunk with that thunk's return value. This parallels
// the reference graphql-js implementation, which calls Promise.all on thunks at each depth (which
// is an implicit parallel descent).
func dethunkMapWithBreadthFirstTraversal(finalResults map[string]interface{}) {
	queue := &dethunkQueue{DethunkFuncs: []func(){}}
	dethunkMapBreadthFirst(finalResults, queue)
	// Drain the queue; each step may enqueue further (deeper) steps.
	for len(queue.DethunkFuncs) > 0 {
		step := queue.shift()
		step()
	}
}

// dethunkMapBreadthFirst forces every thunk at this level of m in place,
// then enqueues descents into any nested maps or lists.
func dethunkMapBreadthFirst(m map[string]interface{}, dethunkQueue *dethunkQueue) {
	for key := range m {
		if thunk, ok := m[key].(func() interface{}); ok {
			m[key] = thunk()
		}
		switch child := m[key].(type) {
		case map[string]interface{}:
			dethunkQueue.push(func() { dethunkMapBreadthFirst(child, dethunkQueue) })
		case []interface{}:
			dethunkQueue.push(func() { dethunkListBreadthFirst(child, dethunkQueue) })
		}
	}
}

// dethunkListBreadthFirst is the slice counterpart of dethunkMapBreadthFirst.
func dethunkListBreadthFirst(list []interface{}, dethunkQueue *dethunkQueue) {
	for i := range list {
		if thunk, ok := list[i].(func() interface{}); ok {
			list[i] = thunk()
		}
		switch child := list[i].(type) {
		case map[string]interface{}:
			dethunkQueue.push(func() { dethunkMapBreadthFirst(child, dethunkQueue) })
		case []interface{}:
			dethunkQueue.push(func() { dethunkListBreadthFirst(child, dethunkQueue) })
		}
	}
}
|
|
|
|
|
|
|
|
|
|
|
|
// dethunkMapDepthFirst performs a serial descent of the map, calling any thunks
// in the map values and replacing each thunk with that thunk's return value. This is needed
// to conform to the graphql-js reference implementation, which requires serial (depth-first)
// implementations for mutation selects.
func dethunkMapDepthFirst(m map[string]interface{}) {
	for key := range m {
		if thunk, ok := m[key].(func() interface{}); ok {
			m[key] = thunk()
		}
		switch child := m[key].(type) {
		case map[string]interface{}:
			dethunkMapDepthFirst(child)
		case []interface{}:
			dethunkListDepthFirst(child)
		}
	}
}

// dethunkListDepthFirst is the slice counterpart of dethunkMapDepthFirst:
// it forces each element's thunk, then recurses into nested containers.
func dethunkListDepthFirst(list []interface{}) {
	for i := range list {
		if thunk, ok := list[i].(func() interface{}); ok {
			list[i] = thunk()
		}
		switch child := list[i].(type) {
		case map[string]interface{}:
			dethunkMapDepthFirst(child)
		case []interface{}:
			dethunkListDepthFirst(child)
		}
	}
}
|
|
|
|
|
|
|
2018-01-05 17:08:39 +01:00
|
|
|
|
// collectFieldsParams groups the arguments to collectFields.
type collectFieldsParams struct {
	// ExeContext carries the per-request state (fragments, variables).
	ExeContext  *executionContext
	RuntimeType *Object // previously known as OperationType
	// SelectionSet is the selection set whose fields are being gathered.
	SelectionSet *ast.SelectionSet
	// Fields is the accumulator (response name -> field ASTs); may be nil,
	// in which case collectFields allocates it.
	Fields map[string][]*ast.Field
	// VisitedFragmentNames prevents re-expanding the same fragment spread.
	VisitedFragmentNames map[string]bool
}
|
|
|
|
|
|
|
2015-09-12 00:22:22 +08:00
|
|
|
|
// Given a selectionSet, adds all of the fields in that selection to
// the passed in map of fields, and returns it at the end.
// CollectFields requires the "runtime type" of an object. For a field which
// returns an Interface or Union type, the "runtime type" will be the actual
// Object type returned by that field.
func collectFields(p collectFieldsParams) (fields map[string][]*ast.Field) {
	// overlying SelectionSet & Fields to fields
	if p.SelectionSet == nil {
		return p.Fields
	}
	fields = p.Fields
	if fields == nil {
		fields = map[string][]*ast.Field{}
	}
	if p.VisitedFragmentNames == nil {
		p.VisitedFragmentNames = map[string]bool{}
	}
	for _, iSelection := range p.SelectionSet.Selections {
		switch selection := iSelection.(type) {
		case *ast.Field:
			// Plain field: honor @skip/@include, then group by response key
			// (alias or field name, via getFieldEntryKey).
			if !shouldIncludeNode(p.ExeContext, selection.Directives) {
				continue
			}
			name := getFieldEntryKey(selection)
			if _, ok := fields[name]; !ok {
				fields[name] = []*ast.Field{}
			}
			fields[name] = append(fields[name], selection)
		case *ast.InlineFragment:
			// Inline fragment: include only if directives allow it and its
			// type condition matches the runtime type, then recurse into it,
			// accumulating into the same fields map.

			if !shouldIncludeNode(p.ExeContext, selection.Directives) ||
				!doesFragmentConditionMatch(p.ExeContext, selection, p.RuntimeType) {
				continue
			}
			innerParams := collectFieldsParams{
				ExeContext:           p.ExeContext,
				RuntimeType:          p.RuntimeType,
				SelectionSet:         selection.SelectionSet,
				Fields:               fields,
				VisitedFragmentNames: p.VisitedFragmentNames,
			}
			collectFields(innerParams)
		case *ast.FragmentSpread:
			// Named fragment spread: expand at most once per fragment name.
			fragName := ""
			if selection.Name != nil {
				fragName = selection.Name.Value
			}
			if visited, ok := p.VisitedFragmentNames[fragName]; (ok && visited) ||
				!shouldIncludeNode(p.ExeContext, selection.Directives) {
				continue
			}
			// Mark visited before lookup so a missing fragment is also not
			// retried on a later spread of the same name.
			p.VisitedFragmentNames[fragName] = true
			fragment, hasFragment := p.ExeContext.Fragments[fragName]
			if !hasFragment {
				continue
			}

			if fragment, ok := fragment.(*ast.FragmentDefinition); ok {
				if !doesFragmentConditionMatch(p.ExeContext, fragment, p.RuntimeType) {
					continue
				}
				innerParams := collectFieldsParams{
					ExeContext:           p.ExeContext,
					RuntimeType:          p.RuntimeType,
					SelectionSet:         fragment.GetSelectionSet(),
					Fields:               fields,
					VisitedFragmentNames: p.VisitedFragmentNames,
				}
				collectFields(innerParams)
			}
		}
	}
	return fields
}
|
|
|
|
|
|
|
2015-09-12 00:22:22 +08:00
|
|
|
|
// Determines if a field should be included based on the @include and @skip
|
|
|
|
|
|
// directives, where @skip has higher precedence than @include.
|
2018-01-05 17:08:39 +01:00
|
|
|
|
func shouldIncludeNode(eCtx *executionContext, directives []*ast.Directive) bool {
|
2018-04-28 14:57:58 +08:00
|
|
|
|
var (
|
|
|
|
|
|
skipAST, includeAST *ast.Directive
|
|
|
|
|
|
argValues map[string]interface{}
|
|
|
|
|
|
)
|
2015-09-14 15:48:30 +08:00
|
|
|
|
for _, directive := range directives {
|
|
|
|
|
|
if directive == nil || directive.Name == nil {
|
|
|
|
|
|
continue
|
|
|
|
|
|
}
|
2018-04-28 14:57:58 +08:00
|
|
|
|
switch directive.Name.Value {
|
|
|
|
|
|
case SkipDirective.Name:
|
2015-09-14 15:48:30 +08:00
|
|
|
|
skipAST = directive
|
2018-04-28 14:57:58 +08:00
|
|
|
|
case IncludeDirective.Name:
|
|
|
|
|
|
includeAST = directive
|
2015-09-14 15:48:30 +08:00
|
|
|
|
}
|
|
|
|
|
|
}
|
2018-04-28 14:57:58 +08:00
|
|
|
|
// precedence: skipAST > includeAST
|
2015-09-14 15:48:30 +08:00
|
|
|
|
if skipAST != nil {
|
2018-04-28 14:57:58 +08:00
|
|
|
|
argValues = getArgumentValues(SkipDirective.Args, skipAST.Arguments, eCtx.VariableValues)
|
|
|
|
|
|
if skipIf, ok := argValues["if"].(bool); ok && skipIf {
|
|
|
|
|
|
return false // excluded selectionSet's fields
|
2015-09-14 15:48:30 +08:00
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
if includeAST != nil {
|
2018-04-28 14:57:58 +08:00
|
|
|
|
argValues = getArgumentValues(IncludeDirective.Args, includeAST.Arguments, eCtx.VariableValues)
|
|
|
|
|
|
if includeIf, ok := argValues["if"].(bool); ok && !includeIf {
|
|
|
|
|
|
return false // excluded selectionSet's fields
|
2015-09-14 15:48:30 +08:00
|
|
|
|
}
|
|
|
|
|
|
}
|
2018-04-28 14:57:58 +08:00
|
|
|
|
return true
|
2015-09-12 00:22:22 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Determines if a fragment is applicable to the given type.
|
2018-01-05 17:08:39 +01:00
|
|
|
|
func doesFragmentConditionMatch(eCtx *executionContext, fragment ast.Node, ttype *Object) bool {
|
2015-09-12 11:09:33 +08:00
|
|
|
|
|
2015-09-14 15:48:30 +08:00
|
|
|
|
switch fragment := fragment.(type) {
|
2015-10-30 00:49:49 +02:00
|
|
|
|
case *ast.FragmentDefinition:
|
2016-03-08 09:11:31 +08:00
|
|
|
|
typeConditionAST := fragment.TypeCondition
|
|
|
|
|
|
if typeConditionAST == nil {
|
|
|
|
|
|
return true
|
|
|
|
|
|
}
|
|
|
|
|
|
conditionalType, err := typeFromAST(eCtx.Schema, typeConditionAST)
|
2015-09-14 15:48:30 +08:00
|
|
|
|
if err != nil {
|
|
|
|
|
|
return false
|
|
|
|
|
|
}
|
|
|
|
|
|
if conditionalType == ttype {
|
|
|
|
|
|
return true
|
|
|
|
|
|
}
|
2016-03-07 16:40:37 +08:00
|
|
|
|
if conditionalType.Name() == ttype.Name() {
|
2015-12-29 15:42:33 +05:30
|
|
|
|
return true
|
|
|
|
|
|
}
|
2016-05-31 19:00:29 +08:00
|
|
|
|
if conditionalType, ok := conditionalType.(*Interface); ok {
|
|
|
|
|
|
return eCtx.Schema.IsPossibleType(conditionalType, ttype)
|
|
|
|
|
|
}
|
|
|
|
|
|
if conditionalType, ok := conditionalType.(*Union); ok {
|
|
|
|
|
|
return eCtx.Schema.IsPossibleType(conditionalType, ttype)
|
2015-09-14 15:48:30 +08:00
|
|
|
|
}
|
2015-10-30 00:49:49 +02:00
|
|
|
|
case *ast.InlineFragment:
|
2016-03-08 09:11:31 +08:00
|
|
|
|
typeConditionAST := fragment.TypeCondition
|
|
|
|
|
|
if typeConditionAST == nil {
|
|
|
|
|
|
return true
|
|
|
|
|
|
}
|
|
|
|
|
|
conditionalType, err := typeFromAST(eCtx.Schema, typeConditionAST)
|
2015-09-14 15:48:30 +08:00
|
|
|
|
if err != nil {
|
|
|
|
|
|
return false
|
|
|
|
|
|
}
|
|
|
|
|
|
if conditionalType == ttype {
|
|
|
|
|
|
return true
|
|
|
|
|
|
}
|
2016-03-08 09:11:31 +08:00
|
|
|
|
if conditionalType.Name() == ttype.Name() {
|
|
|
|
|
|
return true
|
|
|
|
|
|
}
|
2016-05-31 19:00:29 +08:00
|
|
|
|
if conditionalType, ok := conditionalType.(*Interface); ok {
|
|
|
|
|
|
return eCtx.Schema.IsPossibleType(conditionalType, ttype)
|
|
|
|
|
|
}
|
|
|
|
|
|
if conditionalType, ok := conditionalType.(*Union); ok {
|
|
|
|
|
|
return eCtx.Schema.IsPossibleType(conditionalType, ttype)
|
2015-09-14 15:48:30 +08:00
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
return false
|
|
|
|
|
|
}
|
2015-09-14 16:03:08 +08:00
|
|
|
|
|
2015-09-16 11:42:48 +08:00
|
|
|
|
// Implements the logic to compute the key of a given field’s entry
|
2015-10-30 00:49:49 +02:00
|
|
|
|
func getFieldEntryKey(node *ast.Field) string {
|
2015-09-16 11:42:48 +08:00
|
|
|
|
|
|
|
|
|
|
if node.Alias != nil && node.Alias.Value != "" {
|
|
|
|
|
|
return node.Alias.Value
|
|
|
|
|
|
}
|
|
|
|
|
|
if node.Name != nil && node.Name.Value != "" {
|
|
|
|
|
|
return node.Name.Value
|
|
|
|
|
|
}
|
|
|
|
|
|
return ""
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2015-09-19 18:26:46 +08:00
|
|
|
|
// Internal resolveField state
type resolveFieldResultState struct {
	// hasNoFieldDefs is set when the requested field has no definition on
	// the parent type, so the caller should omit the entry rather than
	// record a nil value.
	hasNoFieldDefs bool
}
|
|
|
|
|
|
|
2018-10-02 06:48:16 -07:00
|
|
|
|
func handleFieldError(r interface{}, fieldNodes []ast.Node, path *ResponsePath, returnType Output, eCtx *executionContext) {
|
2018-07-18 22:55:20 -05:00
|
|
|
|
err := NewLocatedErrorWithPath(r, fieldNodes, path.AsArray())
|
|
|
|
|
|
// send panic upstream
|
|
|
|
|
|
if _, ok := returnType.(*NonNull); ok {
|
|
|
|
|
|
panic(err)
|
|
|
|
|
|
}
|
|
|
|
|
|
eCtx.Errors = append(eCtx.Errors, gqlerrors.FormatError(err))
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2016-04-15 18:02:57 +08:00
|
|
|
|
// Resolves the field on the given source object. In particular, this
// figures out the value that the field returns by calling its resolve function,
// then calls completeValue to complete promises, serialize scalars, or execute
// the sub-selection-set for objects.
//
// Returns the completed value plus a resultState whose hasNoFieldDefs flag
// is set when fieldName has no definition on parentType, so the caller can
// omit the entry entirely rather than record nil.
func resolveField(eCtx *executionContext, parentType *Object, source interface{}, fieldASTs []*ast.Field, path *ResponsePath) (result interface{}, resultState resolveFieldResultState) {
	// catch panic from resolveFn
	var returnType Output
	defer func() (interface{}, resolveFieldResultState) {
		if r := recover(); r != nil {
			// Convert the panic into a located field error; for NonNull
			// return types handleFieldError re-panics so the error keeps
			// propagating to the nearest nullable ancestor.
			handleFieldError(r, FieldASTsToNodeASTs(fieldASTs), path, returnType, eCtx)
			return result, resultState
		}
		return result, resultState
	}()

	// The first AST supplies the field name and arguments for this entry.
	fieldAST := fieldASTs[0]
	fieldName := ""
	if fieldAST.Name != nil {
		fieldName = fieldAST.Name.Value
	}

	fieldDef := getFieldDef(eCtx.Schema, parentType, fieldName)
	if fieldDef == nil {
		resultState.hasNoFieldDefs = true
		return nil, resultState
	}
	returnType = fieldDef.Type
	resolveFn := fieldDef.Resolve
	if resolveFn == nil {
		resolveFn = DefaultResolveFn
	}

	// Build a map of arguments from the field.arguments AST, using the
	// variables scope to fulfill any variable references.
	// TODO: find a way to memoize, in case this field is within a List type.
	args := getArgumentValues(fieldDef.Args, fieldAST.Arguments, eCtx.VariableValues)

	// Execution-state snapshot handed to the resolver and to completion.
	info := ResolveInfo{
		FieldName:      fieldName,
		FieldASTs:      fieldASTs,
		Path:           path,
		ReturnType:     returnType,
		ParentType:     parentType,
		Schema:         eCtx.Schema,
		Fragments:      eCtx.Fragments,
		RootValue:      eCtx.Root,
		Operation:      eCtx.Operation,
		VariableValues: eCtx.VariableValues,
	}

	var resolveFnError error

	// Notify extensions around the resolver call; extension errors are
	// recorded on the context but do not abort field resolution.
	extErrs, resolveFieldFinishFn := handleExtensionsResolveFieldDidStart(eCtx.Schema.extensions, eCtx, &info)
	if len(extErrs) != 0 {
		eCtx.Errors = append(eCtx.Errors, extErrs...)
	}

	result, resolveFnError = resolveFn(ResolveParams{
		Source:  source,
		Args:    args,
		Info:    info,
		Context: eCtx.Context,
	})

	extErrs = resolveFieldFinishFn(result, resolveFnError)
	if len(extErrs) != 0 {
		eCtx.Errors = append(eCtx.Errors, extErrs...)
	}

	// A resolver error is surfaced via panic so the deferred handler above
	// turns it into a located field error (or propagates it for NonNull).
	if resolveFnError != nil {
		panic(resolveFnError)
	}

	completed := completeValueCatchingError(eCtx, returnType, fieldASTs, info, path, result)
	return completed, resultState
}
|
|
|
|
|
|
|
2018-10-02 06:48:16 -07:00
|
|
|
|
func completeValueCatchingError(eCtx *executionContext, returnType Type, fieldASTs []*ast.Field, info ResolveInfo, path *ResponsePath, result interface{}) (completed interface{}) {
|
2015-09-14 22:48:41 +08:00
|
|
|
|
// catch panic
|
2015-09-19 18:26:46 +08:00
|
|
|
|
defer func() interface{} {
|
2015-09-14 22:48:41 +08:00
|
|
|
|
if r := recover(); r != nil {
|
2018-07-18 22:55:20 -05:00
|
|
|
|
handleFieldError(r, FieldASTsToNodeASTs(fieldASTs), path, returnType, eCtx)
|
2015-09-19 18:26:46 +08:00
|
|
|
|
return completed
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
2015-09-19 18:26:46 +08:00
|
|
|
|
return completed
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}()
|
|
|
|
|
|
|
2015-10-28 02:33:17 +02:00
|
|
|
|
if returnType, ok := returnType.(*NonNull); ok {
|
2018-07-18 22:55:20 -05:00
|
|
|
|
completed := completeValue(eCtx, returnType, fieldASTs, info, path, result)
|
2015-09-19 18:26:46 +08:00
|
|
|
|
return completed
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
2018-07-18 22:55:20 -05:00
|
|
|
|
completed = completeValue(eCtx, returnType, fieldASTs, info, path, result)
|
2015-09-19 18:26:46 +08:00
|
|
|
|
return completed
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
2018-10-02 06:48:16 -07:00
|
|
|
|
func completeValue(eCtx *executionContext, returnType Type, fieldASTs []*ast.Field, info ResolveInfo, path *ResponsePath, result interface{}) interface{} {
|
2015-09-14 22:48:41 +08:00
|
|
|
|
|
|
|
|
|
|
resultVal := reflect.ValueOf(result)
|
2018-09-02 01:44:09 +00:00
|
|
|
|
if resultVal.IsValid() && resultVal.Kind() == reflect.Func {
|
|
|
|
|
|
return func() interface{} {
|
|
|
|
|
|
return completeThunkValueCatchingError(eCtx, returnType, fieldASTs, info, path, result)
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2016-05-31 00:02:29 +08:00
|
|
|
|
// If field type is NonNull, complete for inner type, and throw field error
|
|
|
|
|
|
// if result is null.
|
2015-10-28 02:33:17 +02:00
|
|
|
|
if returnType, ok := returnType.(*NonNull); ok {
|
2018-07-18 22:55:20 -05:00
|
|
|
|
completed := completeValue(eCtx, returnType.OfType, fieldASTs, info, path, result)
|
2015-09-14 22:48:41 +08:00
|
|
|
|
if completed == nil {
|
2018-07-18 22:55:20 -05:00
|
|
|
|
err := NewLocatedErrorWithPath(
|
2015-09-17 18:32:16 +08:00
|
|
|
|
fmt.Sprintf("Cannot return null for non-nullable field %v.%v.", info.ParentType, info.FieldName),
|
2015-10-28 02:33:17 +02:00
|
|
|
|
FieldASTsToNodeASTs(fieldASTs),
|
2018-07-18 22:55:20 -05:00
|
|
|
|
path.AsArray(),
|
2015-09-17 18:32:16 +08:00
|
|
|
|
)
|
2015-10-30 00:49:49 +02:00
|
|
|
|
panic(gqlerrors.FormatError(err))
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
2015-09-19 18:26:46 +08:00
|
|
|
|
return completed
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
2016-05-31 00:02:29 +08:00
|
|
|
|
// If result value is null-ish (null, undefined, or NaN) then return null.
|
2015-09-17 18:32:16 +08:00
|
|
|
|
if isNullish(result) {
|
2015-09-19 18:26:46 +08:00
|
|
|
|
return nil
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// If field type is List, complete each item in the list with the inner type
|
2015-10-28 02:33:17 +02:00
|
|
|
|
if returnType, ok := returnType.(*List); ok {
|
2018-07-18 22:55:20 -05:00
|
|
|
|
return completeListValue(eCtx, returnType, fieldASTs, info, path, result)
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
2016-05-31 00:02:29 +08:00
|
|
|
|
// If field type is a leaf type, Scalar or Enum, serialize to a valid value,
|
|
|
|
|
|
// returning null if serialization is not possible.
|
2015-10-28 02:33:17 +02:00
|
|
|
|
if returnType, ok := returnType.(*Scalar); ok {
|
2016-05-31 09:41:14 +08:00
|
|
|
|
return completeLeafValue(returnType, result)
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
2015-10-28 02:33:17 +02:00
|
|
|
|
if returnType, ok := returnType.(*Enum); ok {
|
2016-05-31 00:04:05 +08:00
|
|
|
|
return completeLeafValue(returnType, result)
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
2016-05-31 00:02:29 +08:00
|
|
|
|
// If field type is an abstract type, Interface or Union, determine the
|
|
|
|
|
|
// runtime Object type and complete for that type.
|
2016-05-31 19:00:29 +08:00
|
|
|
|
if returnType, ok := returnType.(*Union); ok {
|
2018-07-18 22:55:20 -05:00
|
|
|
|
return completeAbstractValue(eCtx, returnType, fieldASTs, info, path, result)
|
2016-05-31 19:00:29 +08:00
|
|
|
|
}
|
|
|
|
|
|
if returnType, ok := returnType.(*Interface); ok {
|
2018-07-18 22:55:20 -05:00
|
|
|
|
return completeAbstractValue(eCtx, returnType, fieldASTs, info, path, result)
|
2016-05-30 12:02:30 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
2016-05-31 00:02:29 +08:00
|
|
|
|
// If field type is Object, execute and complete all sub-selections.
|
|
|
|
|
|
if returnType, ok := returnType.(*Object); ok {
|
2018-07-18 22:55:20 -05:00
|
|
|
|
return completeObjectValue(eCtx, returnType, fieldASTs, info, path, result)
|
2016-05-31 00:02:29 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Not reachable. All possible output types have been considered.
|
2018-01-05 17:33:45 +01:00
|
|
|
|
err := invariantf(false,
|
|
|
|
|
|
`Cannot complete value of unexpected type "%v."`, returnType)
|
|
|
|
|
|
|
2016-05-30 12:06:06 +08:00
|
|
|
|
if err != nil {
|
|
|
|
|
|
panic(gqlerrors.FormatError(err))
|
|
|
|
|
|
}
|
2016-05-30 12:02:30 +08:00
|
|
|
|
return nil
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2018-10-02 06:48:16 -07:00
|
|
|
|
func completeThunkValueCatchingError(eCtx *executionContext, returnType Type, fieldASTs []*ast.Field, info ResolveInfo, path *ResponsePath, result interface{}) (completed interface{}) {
|
2018-09-02 01:44:09 +00:00
|
|
|
|
|
|
|
|
|
|
// catch any panic invoked from the propertyFn (thunk)
|
|
|
|
|
|
defer func() {
|
|
|
|
|
|
if r := recover(); r != nil {
|
|
|
|
|
|
handleFieldError(r, FieldASTsToNodeASTs(fieldASTs), path, returnType, eCtx)
|
|
|
|
|
|
}
|
|
|
|
|
|
}()
|
|
|
|
|
|
|
2018-09-10 10:40:22 -05:00
|
|
|
|
propertyFn, ok := result.(func() (interface{}, error))
|
2018-09-02 01:44:09 +00:00
|
|
|
|
if !ok {
|
2018-09-10 10:40:22 -05:00
|
|
|
|
err := gqlerrors.NewFormattedError("Error resolving func. Expected `func() (interface{}, error)` signature")
|
|
|
|
|
|
panic(gqlerrors.FormatError(err))
|
|
|
|
|
|
}
|
|
|
|
|
|
fnResult, err := propertyFn()
|
|
|
|
|
|
if err != nil {
|
2018-09-02 01:44:09 +00:00
|
|
|
|
panic(gqlerrors.FormatError(err))
|
|
|
|
|
|
}
|
2018-09-10 10:40:22 -05:00
|
|
|
|
|
|
|
|
|
|
result = fnResult
|
2018-09-02 01:44:09 +00:00
|
|
|
|
|
|
|
|
|
|
if returnType, ok := returnType.(*NonNull); ok {
|
|
|
|
|
|
completed := completeValue(eCtx, returnType, fieldASTs, info, path, result)
|
|
|
|
|
|
return completed
|
|
|
|
|
|
}
|
|
|
|
|
|
completed = completeValue(eCtx, returnType, fieldASTs, info, path, result)
|
2018-09-10 10:40:22 -05:00
|
|
|
|
|
2018-09-02 01:44:09 +00:00
|
|
|
|
return completed
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2016-05-31 00:02:29 +08:00
|
|
|
|
// completeAbstractValue completes value of an Abstract type (Union / Interface) by determining the runtime type
|
2016-05-30 12:02:30 +08:00
|
|
|
|
// of that value, then completing based on that type.
|
2018-10-02 06:48:16 -07:00
|
|
|
|
func completeAbstractValue(eCtx *executionContext, returnType Abstract, fieldASTs []*ast.Field, info ResolveInfo, path *ResponsePath, result interface{}) interface{} {
|
2016-05-30 12:02:30 +08:00
|
|
|
|
|
2016-04-06 17:23:31 +08:00
|
|
|
|
var runtimeType *Object
|
2016-05-30 11:56:34 +08:00
|
|
|
|
|
2016-05-31 22:17:08 +08:00
|
|
|
|
resolveTypeParams := ResolveTypeParams{
|
|
|
|
|
|
Value: result,
|
|
|
|
|
|
Info: info,
|
|
|
|
|
|
Context: eCtx.Context,
|
|
|
|
|
|
}
|
2016-05-31 09:44:26 +08:00
|
|
|
|
if unionReturnType, ok := returnType.(*Union); ok && unionReturnType.ResolveType != nil {
|
2016-05-31 22:17:08 +08:00
|
|
|
|
runtimeType = unionReturnType.ResolveType(resolveTypeParams)
|
2016-05-31 09:44:26 +08:00
|
|
|
|
} else if interfaceReturnType, ok := returnType.(*Interface); ok && interfaceReturnType.ResolveType != nil {
|
2016-05-31 22:17:08 +08:00
|
|
|
|
runtimeType = interfaceReturnType.ResolveType(resolveTypeParams)
|
2016-05-31 09:44:26 +08:00
|
|
|
|
} else {
|
2016-05-31 22:17:08 +08:00
|
|
|
|
runtimeType = defaultResolveTypeFn(resolveTypeParams, returnType)
|
2016-05-31 09:44:26 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
2020-12-22 21:46:38 +08:00
|
|
|
|
err := invariantf(runtimeType != nil, `Abstract type %v must resolve to an Object type at runtime `+
|
|
|
|
|
|
`for field %v.%v with value "%v", received "%v".`, returnType, info.ParentType, info.FieldName, result, runtimeType,
|
2016-05-31 22:34:07 +08:00
|
|
|
|
)
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
|
panic(err)
|
2016-05-31 12:16:48 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
2016-05-31 22:34:07 +08:00
|
|
|
|
if !eCtx.Schema.IsPossibleType(returnType, runtimeType) {
|
2016-05-31 09:44:26 +08:00
|
|
|
|
panic(gqlerrors.NewFormattedError(
|
|
|
|
|
|
fmt.Sprintf(`Runtime Object type "%v" is not a possible type `+
|
|
|
|
|
|
`for "%v".`, runtimeType, returnType),
|
|
|
|
|
|
))
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
2016-05-30 11:56:34 +08:00
|
|
|
|
|
2018-07-18 22:55:20 -05:00
|
|
|
|
return completeObjectValue(eCtx, runtimeType, fieldASTs, info, path, result)
|
2016-05-30 11:56:34 +08:00
|
|
|
|
}
|
2016-05-31 00:02:29 +08:00
|
|
|
|
|
|
|
|
|
|
// completeObjectValue complete an Object value by executing all sub-selections.
|
2018-10-02 06:48:16 -07:00
|
|
|
|
func completeObjectValue(eCtx *executionContext, returnType *Object, fieldASTs []*ast.Field, info ResolveInfo, path *ResponsePath, result interface{}) interface{} {
|
2016-05-30 11:56:34 +08:00
|
|
|
|
|
2015-09-14 22:48:41 +08:00
|
|
|
|
// If there is an isTypeOf predicate function, call it with the
|
|
|
|
|
|
// current result. If isTypeOf returns false, then raise an error rather
|
|
|
|
|
|
// than continuing execution.
|
2016-05-31 22:17:08 +08:00
|
|
|
|
if returnType.IsTypeOf != nil {
|
|
|
|
|
|
p := IsTypeOfParams{
|
|
|
|
|
|
Value: result,
|
|
|
|
|
|
Info: info,
|
|
|
|
|
|
Context: eCtx.Context,
|
|
|
|
|
|
}
|
|
|
|
|
|
if !returnType.IsTypeOf(p) {
|
|
|
|
|
|
panic(gqlerrors.NewFormattedError(
|
|
|
|
|
|
fmt.Sprintf(`Expected value of type "%v" but got: %T.`, returnType, result),
|
|
|
|
|
|
))
|
|
|
|
|
|
}
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Collect sub-fields to execute to complete this value.
|
2015-10-30 00:49:49 +02:00
|
|
|
|
subFieldASTs := map[string][]*ast.Field{}
|
2015-09-14 22:48:41 +08:00
|
|
|
|
visitedFragmentNames := map[string]bool{}
|
|
|
|
|
|
for _, fieldAST := range fieldASTs {
|
|
|
|
|
|
if fieldAST == nil {
|
|
|
|
|
|
continue
|
|
|
|
|
|
}
|
|
|
|
|
|
selectionSet := fieldAST.SelectionSet
|
|
|
|
|
|
if selectionSet != nil {
|
2018-01-05 17:08:39 +01:00
|
|
|
|
innerParams := collectFieldsParams{
|
2015-09-14 22:48:41 +08:00
|
|
|
|
ExeContext: eCtx,
|
2016-05-30 11:56:34 +08:00
|
|
|
|
RuntimeType: returnType,
|
2015-09-14 22:48:41 +08:00
|
|
|
|
SelectionSet: selectionSet,
|
|
|
|
|
|
Fields: subFieldASTs,
|
|
|
|
|
|
VisitedFragmentNames: visitedFragmentNames,
|
|
|
|
|
|
}
|
|
|
|
|
|
subFieldASTs = collectFields(innerParams)
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
2018-01-05 17:08:39 +01:00
|
|
|
|
executeFieldsParams := executeFieldsParams{
|
2015-09-14 22:48:41 +08:00
|
|
|
|
ExecutionContext: eCtx,
|
2016-05-30 11:56:34 +08:00
|
|
|
|
ParentType: returnType,
|
2015-09-16 01:50:58 +08:00
|
|
|
|
Source: result,
|
2015-09-14 22:48:41 +08:00
|
|
|
|
Fields: subFieldASTs,
|
2018-07-18 22:55:20 -05:00
|
|
|
|
Path: path,
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
2018-09-02 01:44:09 +00:00
|
|
|
|
return executeSubFields(executeFieldsParams)
|
2015-09-14 22:48:41 +08:00
|
|
|
|
}
|
2016-05-30 11:42:00 +08:00
|
|
|
|
|
|
|
|
|
|
// completeLeafValue complete a leaf value (Scalar / Enum) by serializing to a valid value, returning nil if serialization is not possible.
|
2016-05-31 00:04:05 +08:00
|
|
|
|
func completeLeafValue(returnType Leaf, result interface{}) interface{} {
|
2016-05-30 11:35:16 +08:00
|
|
|
|
serializedResult := returnType.Serialize(result)
|
|
|
|
|
|
if isNullish(serializedResult) {
|
|
|
|
|
|
return nil
|
|
|
|
|
|
}
|
|
|
|
|
|
return serializedResult
|
|
|
|
|
|
}
|
2015-09-16 11:42:48 +08:00
|
|
|
|
|
2016-05-30 11:42:00 +08:00
|
|
|
|
// completeListValue complete a list value by completing each item in the list with the inner type
|
2018-10-02 06:48:16 -07:00
|
|
|
|
func completeListValue(eCtx *executionContext, returnType *List, fieldASTs []*ast.Field, info ResolveInfo, path *ResponsePath, result interface{}) interface{} {
|
2016-05-30 11:20:07 +08:00
|
|
|
|
resultVal := reflect.ValueOf(result)
|
2018-08-01 16:29:02 -04:00
|
|
|
|
if resultVal.Kind() == reflect.Ptr {
|
|
|
|
|
|
resultVal = resultVal.Elem()
|
|
|
|
|
|
}
|
2016-05-30 11:20:07 +08:00
|
|
|
|
parentTypeName := ""
|
|
|
|
|
|
if info.ParentType != nil {
|
|
|
|
|
|
parentTypeName = info.ParentType.Name()
|
|
|
|
|
|
}
|
2018-01-05 17:33:45 +01:00
|
|
|
|
err := invariantf(
|
2018-05-25 11:43:41 -07:00
|
|
|
|
resultVal.IsValid() && isIterable(result),
|
2018-01-05 17:33:45 +01:00
|
|
|
|
"User Error: expected iterable, but did not find one "+
|
|
|
|
|
|
"for field %v.%v.", parentTypeName, info.FieldName)
|
|
|
|
|
|
|
2016-05-30 11:20:07 +08:00
|
|
|
|
if err != nil {
|
|
|
|
|
|
panic(gqlerrors.FormatError(err))
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
itemType := returnType.OfType
|
2018-04-09 16:08:18 -07:00
|
|
|
|
completedResults := make([]interface{}, 0, resultVal.Len())
|
2016-05-30 11:20:07 +08:00
|
|
|
|
for i := 0; i < resultVal.Len(); i++ {
|
|
|
|
|
|
val := resultVal.Index(i).Interface()
|
2018-07-18 22:55:20 -05:00
|
|
|
|
fieldPath := path.WithKey(i)
|
|
|
|
|
|
completedItem := completeValueCatchingError(eCtx, itemType, fieldASTs, info, fieldPath, val)
|
2016-05-30 11:20:07 +08:00
|
|
|
|
completedResults = append(completedResults, completedItem)
|
|
|
|
|
|
}
|
|
|
|
|
|
return completedResults
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2016-05-31 09:44:26 +08:00
|
|
|
|
// defaultResolveTypeFn If a resolveType function is not given, then a default resolve behavior is
|
|
|
|
|
|
// used which tests each possible type for the abstract type by calling
|
|
|
|
|
|
// isTypeOf for the object being coerced, returning the first type that matches.
|
2016-05-31 22:17:08 +08:00
|
|
|
|
func defaultResolveTypeFn(p ResolveTypeParams, abstractType Abstract) *Object {
|
|
|
|
|
|
possibleTypes := p.Info.Schema.PossibleTypes(abstractType)
|
2016-05-31 09:44:26 +08:00
|
|
|
|
for _, possibleType := range possibleTypes {
|
|
|
|
|
|
if possibleType.IsTypeOf == nil {
|
|
|
|
|
|
continue
|
|
|
|
|
|
}
|
2016-05-31 22:17:08 +08:00
|
|
|
|
isTypeOfParams := IsTypeOfParams{
|
|
|
|
|
|
Value: p.Value,
|
|
|
|
|
|
Info: p.Info,
|
|
|
|
|
|
Context: p.Context,
|
|
|
|
|
|
}
|
|
|
|
|
|
if res := possibleType.IsTypeOf(isTypeOfParams); res {
|
2016-05-31 09:44:26 +08:00
|
|
|
|
return possibleType
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
return nil
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2018-03-01 10:26:15 +01:00
|
|
|
|
// FieldResolver is used in DefaultResolveFn when the the source value implements this interface.
|
|
|
|
|
|
type FieldResolver interface {
|
|
|
|
|
|
// Resolve resolves the value for the given ResolveParams. It has the same semantics as FieldResolveFn.
|
|
|
|
|
|
Resolve(p ResolveParams) (interface{}, error)
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2019-03-12 07:01:08 +01:00
|
|
|
|
// DefaultResolveFn If a resolve function is not given, then a default resolve behavior is used
|
2016-05-31 09:44:26 +08:00
|
|
|
|
// which takes the property of the source object of the same name as the field
|
|
|
|
|
|
// and returns it as the result, or if it's a function, returns the result
|
|
|
|
|
|
// of calling that function.
|
2016-02-01 10:40:40 +01:00
|
|
|
|
func DefaultResolveFn(p ResolveParams) (interface{}, error) {
|
2015-09-19 21:51:32 +08:00
|
|
|
|
sourceVal := reflect.ValueOf(p.Source)
|
2018-03-24 05:02:28 +00:00
|
|
|
|
// Check if value implements 'Resolver' interface
|
|
|
|
|
|
if resolver, ok := sourceVal.Interface().(FieldResolver); ok {
|
|
|
|
|
|
return resolver.Resolve(p)
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// try to resolve p.Source as a struct
|
2015-09-30 17:03:31 +08:00
|
|
|
|
if sourceVal.IsValid() && sourceVal.Type().Kind() == reflect.Ptr {
|
2015-09-19 21:51:32 +08:00
|
|
|
|
sourceVal = sourceVal.Elem()
|
|
|
|
|
|
}
|
2015-10-01 00:21:14 +08:00
|
|
|
|
if !sourceVal.IsValid() {
|
2015-11-25 18:23:57 -05:00
|
|
|
|
return nil, nil
|
2015-10-01 00:21:14 +08:00
|
|
|
|
}
|
2018-03-01 10:26:15 +01:00
|
|
|
|
|
2015-10-01 00:21:14 +08:00
|
|
|
|
if sourceVal.Type().Kind() == reflect.Struct {
|
2015-09-19 21:51:32 +08:00
|
|
|
|
for i := 0; i < sourceVal.NumField(); i++ {
|
|
|
|
|
|
valueField := sourceVal.Field(i)
|
|
|
|
|
|
typeField := sourceVal.Type().Field(i)
|
2015-09-26 23:14:45 +08:00
|
|
|
|
// try matching the field name first
|
2018-03-25 04:36:59 +07:00
|
|
|
|
if strings.EqualFold(typeField.Name, p.Info.FieldName) {
|
2015-11-25 18:23:57 -05:00
|
|
|
|
return valueField.Interface(), nil
|
2015-09-26 23:14:45 +08:00
|
|
|
|
}
|
2015-09-19 21:51:32 +08:00
|
|
|
|
tag := typeField.Tag
|
2016-11-29 19:46:52 -05:00
|
|
|
|
checkTag := func(tagName string) bool {
|
|
|
|
|
|
t := tag.Get(tagName)
|
|
|
|
|
|
tOptions := strings.Split(t, ",")
|
|
|
|
|
|
if len(tOptions) == 0 {
|
|
|
|
|
|
return false
|
|
|
|
|
|
}
|
|
|
|
|
|
if tOptions[0] != p.Info.FieldName {
|
|
|
|
|
|
return false
|
|
|
|
|
|
}
|
|
|
|
|
|
return true
|
2015-09-19 21:51:32 +08:00
|
|
|
|
}
|
2016-11-29 19:46:52 -05:00
|
|
|
|
if checkTag("json") || checkTag("graphql") {
|
|
|
|
|
|
return valueField.Interface(), nil
|
|
|
|
|
|
} else {
|
2015-09-19 21:51:32 +08:00
|
|
|
|
continue
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
2015-11-25 18:23:57 -05:00
|
|
|
|
return nil, nil
|
2015-09-19 21:51:32 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// try p.Source as a map[string]interface
|
2015-09-16 11:42:48 +08:00
|
|
|
|
if sourceMap, ok := p.Source.(map[string]interface{}); ok {
|
|
|
|
|
|
property := sourceMap[p.Info.FieldName]
|
|
|
|
|
|
val := reflect.ValueOf(property)
|
|
|
|
|
|
if val.IsValid() && val.Type().Kind() == reflect.Func {
|
|
|
|
|
|
// try type casting the func to the most basic func signature
|
|
|
|
|
|
// for more complex signatures, user have to define ResolveFn
|
|
|
|
|
|
if propertyFn, ok := property.(func() interface{}); ok {
|
2015-11-25 18:23:57 -05:00
|
|
|
|
return propertyFn(), nil
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
|
|
|
|
|
}
|
2015-11-25 18:23:57 -05:00
|
|
|
|
return property, nil
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2015-09-19 21:51:32 +08:00
|
|
|
|
|
2019-02-04 13:26:47 -05:00
|
|
|
|
// Try accessing as map via reflection
|
|
|
|
|
|
if r := reflect.ValueOf(p.Source); r.Kind() == reflect.Map && r.Type().Key().Kind() == reflect.String {
|
|
|
|
|
|
val := r.MapIndex(reflect.ValueOf(p.Info.FieldName))
|
|
|
|
|
|
if val.IsValid() {
|
|
|
|
|
|
property := val.Interface()
|
|
|
|
|
|
if val.Type().Kind() == reflect.Func {
|
|
|
|
|
|
// try type casting the func to the most basic func signature
|
|
|
|
|
|
// for more complex signatures, user have to define ResolveFn
|
|
|
|
|
|
if propertyFn, ok := property.(func() interface{}); ok {
|
|
|
|
|
|
return propertyFn(), nil
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
return property, nil
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2015-09-26 23:14:45 +08:00
|
|
|
|
// last resort, return nil
|
2015-11-25 18:23:57 -05:00
|
|
|
|
return nil, nil
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
2016-06-01 01:40:58 +08:00
|
|
|
|
// This method looks up the field on the given type definition.
|
2016-04-15 18:02:57 +08:00
|
|
|
|
// It has special casing for the two introspection fields, __schema
|
|
|
|
|
|
// and __typename. __typename is special because it can always be
|
|
|
|
|
|
// queried as a field, even in situations where no other fields
|
|
|
|
|
|
// are allowed, like on a Union. __schema could get automatically
|
|
|
|
|
|
// added to the query type, but that would require mutating type
|
|
|
|
|
|
// definitions, which would cause issues.
|
2015-10-28 02:33:17 +02:00
|
|
|
|
func getFieldDef(schema Schema, parentType *Object, fieldName string) *FieldDefinition {
|
2015-09-16 11:42:48 +08:00
|
|
|
|
|
|
|
|
|
|
if parentType == nil {
|
|
|
|
|
|
return nil
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2015-10-28 02:33:17 +02:00
|
|
|
|
if fieldName == SchemaMetaFieldDef.Name &&
|
2015-11-07 16:01:14 -08:00
|
|
|
|
schema.QueryType() == parentType {
|
2015-10-28 02:33:17 +02:00
|
|
|
|
return SchemaMetaFieldDef
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2015-10-28 02:33:17 +02:00
|
|
|
|
if fieldName == TypeMetaFieldDef.Name &&
|
2015-11-07 16:01:14 -08:00
|
|
|
|
schema.QueryType() == parentType {
|
2015-10-28 02:33:17 +02:00
|
|
|
|
return TypeMetaFieldDef
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2015-10-28 02:33:17 +02:00
|
|
|
|
if fieldName == TypeNameMetaFieldDef.Name {
|
|
|
|
|
|
return TypeNameMetaFieldDef
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2015-11-07 16:01:14 -08:00
|
|
|
|
return parentType.Fields()[fieldName]
|
2015-09-16 11:42:48 +08:00
|
|
|
|
}
|
2019-07-03 12:45:55 -07:00
|
|
|
|
|
|
|
|
|
|
// contains field information that will be placed in an ordered slice
type orderedField struct {
	// responseName is the key this entry occupies in the response map.
	responseName string
	// fieldASTs are the field nodes collected under responseName.
	fieldASTs []*ast.Field
}
|
|
|
|
|
|
|
|
|
|
|
|
// orders fields from a fields map by location in the source
|
|
|
|
|
|
func orderedFields(fields map[string][]*ast.Field) []*orderedField {
|
|
|
|
|
|
orderedFields := []*orderedField{}
|
|
|
|
|
|
fieldMap := map[int]*orderedField{}
|
|
|
|
|
|
startLocs := []int{}
|
|
|
|
|
|
|
|
|
|
|
|
for responseName, fieldASTs := range fields {
|
|
|
|
|
|
// find the lowest location in the current fieldASTs
|
|
|
|
|
|
lowest := -1
|
|
|
|
|
|
for _, fieldAST := range fieldASTs {
|
|
|
|
|
|
loc := fieldAST.GetLoc().Start
|
|
|
|
|
|
if lowest == -1 || loc < lowest {
|
|
|
|
|
|
lowest = loc
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
startLocs = append(startLocs, lowest)
|
|
|
|
|
|
fieldMap[lowest] = &orderedField{
|
|
|
|
|
|
responseName: responseName,
|
|
|
|
|
|
fieldASTs: fieldASTs,
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
sort.Ints(startLocs)
|
|
|
|
|
|
for _, startLoc := range startLocs {
|
|
|
|
|
|
orderedFields = append(orderedFields, fieldMap[startLoc])
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
return orderedFields
|
|
|
|
|
|
}
|