diff --git a/base/bootstrap.go b/base/bootstrap.go index d1edb5829e..9e6069716a 100644 --- a/base/bootstrap.go +++ b/base/bootstrap.go @@ -44,7 +44,6 @@ type BootstrapConnection interface { // GetDocument retrieves the document with the specified key from the bucket's default collection. // Returns exists=false if key is not found, returns error for any other error. GetDocument(ctx context.Context, bucket, docID string, rv interface{}) (exists bool, err error) - // Close releases any long-lived connections Close() } diff --git a/base/bucket.go b/base/bucket.go index b075b74a07..649af70753 100644 --- a/base/bucket.go +++ b/base/bucket.go @@ -378,15 +378,19 @@ func GetServerUUID(ctx context.Context, store CouchbaseBucketStore) (uuid string return "", err } + return ParseClusterUUID(respBytes) +} + +func ParseClusterUUID(respBytes []byte) (string, error) { var responseJson struct { - ServerUUID string `json:"uuid"` + UUID string `json:"uuid"` } if err := JSONUnmarshal(respBytes, &responseJson); err != nil { return "", err } - return responseJson.ServerUUID, nil + return responseJson.UUID, nil } // Gets the metadata purge interval for the bucket. First checks for a bucket-specific value. 
If not diff --git a/base/collection.go b/base/collection.go index 33cb8cbbfa..f093265cbd 100644 --- a/base/collection.go +++ b/base/collection.go @@ -504,31 +504,17 @@ func (b *GocbV2Bucket) MgmtRequest(ctx context.Context, method, uri, contentType return nil, 0, err } - req, err := http.NewRequest(method, mgmtEp+uri, body) - if err != nil { - return nil, 0, err - } - - if contentType != "" { - req.Header.Add("Content-Type", contentType) - } - + var username, password string if b.Spec.Auth != nil { - username, password, _ := b.Spec.Auth.GetCredentials() - req.SetBasicAuth(username, password) - } - response, err := b.HttpClient(ctx).Do(req) - if err != nil { - return nil, response.StatusCode, err + username, password, _ = b.Spec.Auth.GetCredentials() } - defer func() { _ = response.Body.Close() }() - respBytes, err := io.ReadAll(response.Body) + respBytes, statusCode, err := MgmtRequest(b.HttpClient(ctx), mgmtEp, method, uri, contentType, username, password, body) if err != nil { - return nil, 0, err + return nil, statusCode, err } - return respBytes, response.StatusCode, nil + return respBytes, statusCode, nil } // This prevents Sync Gateway from overflowing gocb's pipeline diff --git a/base/gocb_utils.go b/base/gocb_utils.go index dd1b77604a..2f133aaf6f 100644 --- a/base/gocb_utils.go +++ b/base/gocb_utils.go @@ -13,6 +13,8 @@ import ( "crypto/tls" "crypto/x509" "errors" + "io" + "net/http" "os" "time" @@ -161,3 +163,31 @@ func getRootCAs(ctx context.Context, caCertPath string) (*x509.CertPool, error) } return rootCAs, nil } + +// MgmtRequest makes a request to the http couchbase management api. This function will read the entire contents of +// the response and return the output bytes, the status code, and an error. 
+func MgmtRequest(client *http.Client, mgmtEp, method, uri, contentType, username, password string, body io.Reader) ([]byte, int, error) { + req, err := http.NewRequest(method, mgmtEp+uri, body) + if err != nil { + return nil, 0, err + } + + if contentType != "" { + req.Header.Add("Content-Type", contentType) + } + + if username != "" { + req.SetBasicAuth(username, password) + } + response, err := client.Do(req) + if err != nil { + // response is nil when Do returns a (non-redirect) error - don't dereference it + return nil, 0, err + } + defer func() { _ = response.Body.Close() }() + + respBytes, err := io.ReadAll(response.Body) + if err != nil { + return nil, 0, err + } + return respBytes, response.StatusCode, nil +} diff --git a/rest/admin_api.go b/rest/admin_api.go index e3cba49679..a2173a1863 100644 --- a/rest/admin_api.go +++ b/rest/admin_api.go @@ -352,7 +352,13 @@ func (h *handler) handleGetDbConfig() error { type RunTimeServerConfigResponse struct { *StartupConfig - Databases map[string]*DbConfig `json:"databases"` + RuntimeInformation `json:"runtime_information"` + Databases map[string]*DbConfig `json:"databases"` +} + +// RuntimeInformation is a struct that holds runtime-only info without interfering or being lost inside the actual StartupConfig properties. 
+type RuntimeInformation struct { + ClusterUUID string `json:"cluster_uuid"` } // Get admin config info @@ -409,6 +415,13 @@ func (h *handler) handleGetConfig() error { } } + // grab cluster uuid for runtime config + clusterUUID, err := h.server.getClusterUUID(h.ctx()) + if err != nil { + base.InfofCtx(h.ctx(), base.KeyConfig, "Could not determine cluster UUID: %s", err) + } + cfg.ClusterUUID = clusterUUID + // because loggers can be changed at runtime, we need to work backwards to get the config that would've created the actually running instances cfg.Logging = *base.BuildLoggingConfigFromLoggers(h.server.Config.Logging) cfg.Databases = databaseMap diff --git a/rest/config_test.go b/rest/config_test.go index 4d22e2eea4..0c0fc22a47 100644 --- a/rest/config_test.go +++ b/rest/config_test.go @@ -3240,3 +3240,64 @@ func TestRoleUpdatedAtField(t *testing.T) { assert.Greater(t, newTime.UnixNano(), currTime.UnixNano()) assert.Equal(t, timeCreated.UnixNano(), newCreated.UnixNano()) } + +func TestServerUUIDRuntimeServerConfig(t *testing.T) { + testCases := []struct { + name string + persistentConfig bool + }{ + { + name: "Persistent config", + persistentConfig: true, + }, + { + name: "non persistent config", + persistentConfig: false, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + rt := NewRestTester(t, &RestTesterConfig{PersistentConfig: testCase.persistentConfig}) + defer rt.Close() + + // create a db and test code pathway when we have db defined + if testCase.persistentConfig { + dbConfig := rt.NewDbConfig() + RequireStatus(t, rt.CreateDatabase("db", dbConfig), http.StatusCreated) + } + + resp := rt.SendAdminRequest(http.MethodGet, "/_config?include_runtime=true", "") + RequireStatus(t, resp, http.StatusOK) + + var clusterUUID string + config := RunTimeServerConfigResponse{} + err := base.JSONUnmarshal(resp.Body.Bytes(), &config) + require.NoError(t, err) + if base.TestUseCouchbaseServer() { + require.Len(t, 
config.ClusterUUID, 32) + } else { + require.Empty(t, config.ClusterUUID) + } + clusterUUID = config.ClusterUUID + + // delete db and attempt to retrieve cluster UUID again to ensure we can still retrieve it + resp = rt.SendAdminRequest(http.MethodDelete, "/db/", "") + RequireStatus(t, resp, http.StatusOK) + + resp = rt.SendAdminRequest(http.MethodGet, "/_config?include_runtime=true", "") + RequireStatus(t, resp, http.StatusOK) + + config = RunTimeServerConfigResponse{} + err = base.JSONUnmarshal(resp.Body.Bytes(), &config) + require.NoError(t, err) + if base.TestUseCouchbaseServer() { + require.Len(t, config.ClusterUUID, 32) + } else { + require.Empty(t, config.ClusterUUID) + } + assert.Equal(t, clusterUUID, config.ClusterUUID) + }) + } + +} diff --git a/rest/server_context.go b/rest/server_context.go index 6ac6c3412b..b2f304eec4 100644 --- a/rest/server_context.go +++ b/rest/server_context.go @@ -16,7 +16,6 @@ import ( "encoding/json" "errors" "fmt" - "io" "net" "net/http" "os" @@ -1995,20 +1994,8 @@ func doHTTPAuthRequest(ctx context.Context, httpClient *http.Client, username, p retryCount := 0 worker := func() (shouldRetry bool, err error, value interface{}) { - var httpResponse *http.Response - endpointIdx := retryCount % len(endpoints) - req, err := http.NewRequest(method, endpoints[endpointIdx]+path, bytes.NewBuffer(requestBody)) - if err != nil { - return false, err, nil - } - - req.SetBasicAuth(username, password) - - httpResponse, err = httpClient.Do(req) // nolint:bodyclose // The body is closed outside of the worker loop - if err == nil { - return false, nil, httpResponse - } + responseBody, statusCode, err = base.MgmtRequest(httpClient, endpoints[endpointIdx], method, path, "", username, password, bytes.NewBuffer(requestBody)) if err, ok := err.(net.Error); ok && err.Timeout() { retryCount++ @@ -2018,27 +2005,12 @@ func doHTTPAuthRequest(ctx context.Context, httpClient *http.Client, username, p return false, err, nil } - err, result := 
base.RetryLoop(ctx, "doHTTPAuthRequest", worker, base.CreateSleeperFunc(10, 100)) + err, _ = base.RetryLoop(ctx, "doHTTPAuthRequest", worker, base.CreateSleeperFunc(10, 100)) if err != nil { return 0, nil, err } - httpResponse, ok := result.(*http.Response) - if !ok { - return 0, nil, fmt.Errorf("unexpected response type from doHTTPAuthRequest") - } - - bodyString, err := io.ReadAll(httpResponse.Body) - if err != nil { - return 0, nil, err - } - - err = httpResponse.Body.Close() - if err != nil { - return 0, nil, err - } - - return httpResponse.StatusCode, bodyString, nil + return statusCode, responseBody, nil } // For test use @@ -2159,3 +2131,32 @@ func getTotalMemory(ctx context.Context) uint64 { } return memory.Total } + +// getClusterUUID returns the cluster UUID. rosmar does not have a ClusterUUID, so this will return an empty cluster UUID and no error in this case. +func (sc *ServerContext) getClusterUUID(ctx context.Context) (string, error) { + allDbNames := sc.AllDatabaseNames() + // we can use db context to retrieve clusterUUID + if len(allDbNames) > 0 { + db, err := sc.GetDatabase(ctx, allDbNames[0]) + if err == nil { + return db.ServerUUID, nil + } + } + // no cluster uuid for rosmar cluster + if base.ServerIsWalrus(sc.Config.Bootstrap.Server) { + return "", nil + } + // request server for cluster uuid + eps, client, err := sc.ObtainManagementEndpointsAndHTTPClient() + if err != nil { + return "", err + } + statusCode, output, err := doHTTPAuthRequest(ctx, client, sc.Config.Bootstrap.Username, sc.Config.Bootstrap.Password, http.MethodGet, "/pools", eps, nil) + if err != nil { + return "", err + } + if statusCode != http.StatusOK { + return "", fmt.Errorf("unable to get cluster UUID from server: %s", output) + } + return base.ParseClusterUUID(output) +}