HashiCorp Vault - Database Secrets Engine Not Visible in UI

I created a new user in HashiCorp Vault so that I can avoid using the root token. The following policy was applied:
# Manage auth methods broadly across Vault
path "auth/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Create, update, and delete auth methods
path "sys/auth/*" {
capabilities = ["create", "update", "delete", "sudo"]
}
# List auth methods
path "sys/auth" {
capabilities = ["read"]
}
# Create and manage ACL policies
path "sys/policies/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# List policies
path "sys/policies/" {
capabilities = ["list"]
}
# List, create, update, and delete key/value secrets mounted under secret/
path "secret/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# List secret/
path "secret/" {
capabilities = ["list"]
}
# Prevent admin users from reading user secrets
# But allow them to create, update, delete, and list them
path "secret/users/*" {
capabilities = ["create", "update", "delete", "list"]
}
# List, create, update, and delete key/value secrets mounted under kv/
path "kv/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# List kv/
path "kv/" {
capabilities = ["list"]
}
# Prevent admin users from reading user secrets
# But allow them to create, update, delete, and list them
# Creating and updating are explicitly included here
# Deleting and listing are implied by capabilities given on kv/* which includes kv/delete/users/* and kv/metadata/users/* paths
path "kv/data/users/*" {
capabilities = ["create", "update"]
}
# Active Directory secrets engine
path "ad/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Alicloud secrets engine
path "alicloud/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# AWS secrets engine
path "aws/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Azure secrets engine
path "azure/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Google Cloud secrets engine
path "gcp/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Google Cloud KMS secrets engine
path "gcpkms/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Consul secrets engine
path "consul/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Cubbyhole secrets engine
path "cubbyhole/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Database secrets engine
path "database/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Identity secrets engine
path "identity/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Nomad secrets engine
path "nomad/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# PKI secrets engine
path "pki/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# RabbitMQ secrets engine
path "rabbitmq/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# SSH secrets engine
path "ssh/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# TOTP secrets engine
path "totp/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Transit secrets engine
path "transit/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Create and manage secrets engines broadly across Vault.
path "sys/mounts/*"
{
capabilities = ["create", "read", "update", "delete", "list"]
}
# List sys/mounts/
path "sys/mounts" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Check token capabilities
path "sys/capabilities" {
capabilities = ["create", "update"]
}
# Check token accessor capabilities
path "sys/capabilities-accessor" {
capabilities = ["create", "update"]
}
# Check token's own capabilities
path "sys/capabilities-self" {
capabilities = ["create", "update"]
}
# Audit hash
path "sys/audit-hash" {
capabilities = ["create", "update"]
}
# Health checks
path "sys/health" {
capabilities = ["read"]
}
# Host info
path "sys/host-info" {
capabilities = ["read"]
}
# Key Status
path "sys/key-status" {
capabilities = ["read"]
}
# Leader
path "sys/leader" {
capabilities = ["read"]
}
# Plugins catalog
path "sys/plugins/catalog/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# List sys/plugins/catalog
path "sys/plugins/catalog" {
capabilities = ["read"]
}
# Read system configuration state
path "sys/config/state/sanitized" {
capabilities = ["read"]
}
# Use system tools
path "sys/tools/*" {
capabilities = ["create", "update"]
}
# Generate OpenAPI docs
path "sys/internal/specs/openapi" {
capabilities = ["read"]
}
# Lookup leases
path "sys/leases/lookup" {
capabilities = ["create", "update"]
}
# Renew leases
path "sys/leases/renew" {
capabilities = ["create", "update"]
}
# Revoke leases
path "sys/leases/revoke" {
capabilities = ["create", "update"]
}
# Tidy leases
path "sys/leases/tidy" {
capabilities = ["create", "update"]
}
# Telemetry
path "sys/metrics" {
capabilities = ["read"]
}
# Seal Vault
path "sys/seal" {
capabilities = ["create", "update", "sudo"]
}
# Unseal Vault
path "sys/unseal" {
capabilities = ["create", "update", "sudo"]
}
# Step Down
path "sys/step-down" {
capabilities = ["create", "update", "sudo"]
}
# Wrapping
path "sys/wrapping/*" {
capabilities = ["create", "update"]
}
## Enterprise Features
# Manage license
path "sys/license" {
capabilities = ["create", "read", "update"]
}
# Use control groups
path "sys/control-group/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# MFA
path "sys/mfa/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# List MFA
path "sys/mfa/" {
capabilities = ["list"]
}
# Namespaces
path "sys/namespaces/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# List sys/namespaces
path "sys/namespaces/" {
capabilities = ["list"]
}
# Replication
path "sys/replication/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Seal Wrap
path "sys/sealwrap/rewrap" {
capabilities = ["create", "read", "update"]
}
# KMIP secrets engine
path "kmip/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
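For context, a sketch of how a policy like this is typically applied and attached to a non-root login (the policy name, file name, and the userpass method below are assumptions, not taken from the question):
# write the policy and attach it to a userpass user (illustrative names)
vault policy write admin admin-policy.hcl
vault auth enable userpass
vault write auth/userpass/users/admin password="..." policies="admin"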
Using the token of that user, I was able to successfully create a database secrets engine.
curl --header "X-Vault-Token: xxxx" --request POST --data '{"type":"database"}' {VAULT_URL}/v1/sys/mounts/xxxx
However, when I log in to the UI with that same user, the engine does not appear in the list, whereas logging in with the root token shows it.
Is there something I'm missing here? Listing the mounts via the API also shows the created secrets engine.
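(For reference, the mount listing mentioned above is a plain read of sys/mounts, along the lines of:
curl --header "X-Vault-Token: xxxx" {VAULT_URL}/v1/sys/mounts
The new database mount shows up in that response; it just never appears in the UI for this user.)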

Besides granting the read and list capabilities on /sys/internal/ui/mounts to the identity logging in to the UI, you need to set "listing_visibility" to unauth on every secrets engine you want to show up in the web UI.
So, granting the mentioned read+list capabilities:
path "/sys/internal/ui/mounts"
{
capabilities = ["read","list"]
}
And then:
$ vault write /sys/mounts/database/tune listing_visibility=unauth
Success! Data written to: sys/mounts/database/tune
$ vault read /sys/mounts/database/tune
Key Value
--- -----
default_lease_ttl 768h
description n/a
force_no_cache false
listing_visibility unauth
max_lease_ttl 768h
$
I had to repeat this for every secret engine enabled (vault secrets list) before my secrets engines finally showed up in the web UI.
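A rough sketch of that repetition, assuming the vault CLI and jq are available (built-in mounts such as sys/, identity/ and cubbyhole/ are skipped, since they may reject tuning):
for mount in $(vault secrets list -format=json | jq -r 'keys[]'); do
  case "$mount" in sys/|identity/|cubbyhole/) continue ;; esac
  # strip the trailing slash and tune listing_visibility for this mount
  vault write "sys/mounts/${mount%/}/tune" listing_visibility=unauth
done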
I went down the same path as you: I first enabled the secrets engine from the command line using the root token, and then decided to switch to a non-root user.
This behaviour is sparsely documented in the sys/mounts documentation.

Related

EFS policy with Identifier other than * not working in EKS

I have an EFS policy statement attached, as below:
policy_statements = [
{
sid = "${var.cluster_name}"
actions = ["elasticfilesystem:ClientMount",
"elasticfilesystem:ClientRootAccess",
"elasticfilesystem:ClientWrite"]
principals = [
{
type = "AWS"
identifiers = ["arn:aws:iam::xxxx:role/efs-test-role"]
}
]
}
]
If I use "*" for Identifier it works.
I tried to create the role arn:aws:iam::xxxx:role/efs-test-role with oidc Federation,
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::xxxx:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/xxxx"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"oidc.eks.us-west-2.amazonaws.com/id/xxxx:aud": "sts.amazonaws.com"
}
}
}
]
}
I created a ServiceAccount annotated with this role:
eks.amazonaws.com/role-arn: arn:aws:iam::xxxx:role/efs-test-role
When I created pods with an EFS volume mount using a PersistentVolume, I got this error:
Output: Could not start amazon-efs-mount-watchdog, unrecognized init system "aws-efs-csi-dri"
b'mount.nfs4: access denied by server while mounting 127.0.0.1:/'
I even tried to follow https://docs.amazonaws.cn/en_us/efs/latest/ug/manage-efs-utils-with-aws-sys-manager.html#configure-sys-mgr-iam-instance-profile to add AmazonElasticFileSystemsUtils to the EKS worker nodes' instance profiles, but it still threw the same error.
I am not sure what I am missing. Please advise.
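A debugging step that may help narrow this down (not part of the original setup, and assuming the AWS CLI is present in the pod image) is to check which identity the pod actually assumes:
# the pod name is a placeholder; run from a pod using the annotated service account
kubectl exec -it <pod-name> -- aws sts get-caller-identity
If the ARN returned is the node instance role rather than arn:aws:iam::xxxx:role/efs-test-role, the service-account/OIDC wiring is the first place to look.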

external-dns in Kubernetes cluster not authorized to list hosted zones error

I have installed external-dns using the Bitnami Helm chart, version 6.10.2.
I have created an IAM policy to grant permission to list Route 53 hosted zones.
IAM policy
{
"Statement": [
{
"Action": [
"route53:ListHostedZones",
"route53:ListResourceRecordSets"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
}
The IAM policy is also mapped to the service account:
eksctl get iamserviceaccount --cluster=testcluster --namespace add-ons
NAMESPACE NAME ROLE ARN
add-ons external-dns arn:aws:iam::442355839237:role/eksctl-testcluster-addon-iamserviceaccount-ad-Role1-6AV1JQ2NPCMO
OIDC is already enabled
eksctl utils associate-iam-oidc-provider --cluster=testcluster
2022-10-17 14:31:13 [ℹ] IAM Open ID Connect provider is already associated with cluster "testcluster" in "us-west-2"
However, I still see an error in the external-dns pod saying that it is unable to list the hosted zones.
7T21:24:03Z" level=error msg="records retrieval failed: failed to list hosted zones: AccessDenied: User: arn:aws:sts::442355839237:assumed-role/workers-eks-node-group-20221013233723853600000005/i-0a1b8a914bcff1436 is not authorized to perform: route53:ListHostedZones because no identity-based policy allows the route53:ListHostedZones action\n\tstatus code: 403, request id: a8f86d66-d7af-4bd0-975c-6f99d1134d50"
Strangely, it shows the node group role ("User: arn:aws:sts::442355839237:assumed-role/workers-eks-node-group-20221013233723853600000005/i-0a1b8a914bcff1436") instead of the role arn:aws:iam::442355839237:role/eksctl-testcluster-addon-iamserviceaccount-ad-Role1-6AV1JQ2NPCMO that maps the service account to the Route 53 permissions.
Any pointers on why it is failing?
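A quick sanity check for this symptom (a sketch; the label selector below is an assumption and may differ for your chart) is to confirm the service account annotation and the IRSA environment actually reached the pod:
kubectl -n add-ons get sa external-dns -o yaml | grep role-arn
kubectl -n add-ons get pod -l app.kubernetes.io/name=external-dns -o yaml | grep -E 'AWS_ROLE_ARN|serviceAccountName'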
I have fixed this.
route53:ChangeResourceRecordSets needs to be scoped to the ARNs of the specific hosted zones instead of "*".
https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md#iam-policy
data "aws_iam_policy_document" "external_dns" {
count = var.create_role && var.attach_external_dns_policy ? 1 : 0
statement {
actions = ["route53:ChangeResourceRecordSets"]
resources = var.external_dns_hosted_zone_arns
}
statement {
actions = [
"route53:ListHostedZones",
"route53:ListResourceRecordSets",
]
resources = ["*"]
}
}
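For reference, the JSON this Terraform renders to looks roughly like the following (the hosted zone ARN here is a hypothetical placeholder for var.external_dns_hosted_zone_arns):
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "route53:ChangeResourceRecordSets",
      "Resource": "arn:aws:route53:::hostedzone/Z0123456789EXAMPLE"
    },
    {
      "Effect": "Allow",
      "Action": [
        "route53:ListHostedZones",
        "route53:ListResourceRecordSets"
      ],
      "Resource": "*"
    }
  ]
}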

I have an RBAC problem, but everything I test seems ok?

This is a continuation of the problem described here (How do I fix a role-based problem when my role appears to have the correct permissions?)
I have done much more testing and still do not understand the error
Error from server (Forbidden): pods is forbidden: User "dma" cannot list resource "pods" in API group "" at the cluster scope
UPDATE: Here is another hint from the API server
watch chan error: etcdserver: mvcc: required revision has been compacted
I found this thread, but I am working with a current version of Kubernetes:
How fix this error "watch chan error: etcdserver: mvcc: required revision has been compacted"?
My user exists
NAME AGE SIGNERNAME REQUESTOR REQUESTEDDURATION CONDITION
dma 77m kubernetes.io/kube-apiserver-client kubernetes-admin <none> Approved,Issued
The clusterrole exists
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"annotations":{},"name":"kubelet-runtime"},"rules":[{"apiGroups":["","extensions","apps","argoproj.io","workflows.argoproj.io","events.argoproj.io","coordination.k8s.io"],"resources":["*"],"verbs":["*"]},{"apiGroups":["batch"],"resources":["jobs","cronjobs"],"verbs":["*"]}]}
creationTimestamp: "2021-12-16T00:24:56Z"
name: kubelet-runtime
resourceVersion: "296716"
uid: a4697d6e-c786-4ec9-bf3e-88e3dbfdb6d9
rules:
- apiGroups:
- ""
- extensions
- apps
- argoproj.io
- workflows.argoproj.io
- events.argoproj.io
- coordination.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- '*'
The sandbox namespace exists
NAME STATUS AGE
sandbox Active 6d6h
My user has authority to operate in the kubelet cluster and the namespace "sandbox"
{
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "ClusterRoleBinding",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"rbac.authorization.k8s.io/v1\",\"kind\":\"ClusterRoleBinding\",\"metadata\":{\"annotations\":{},\"name\":\"dma-kubelet-binding\"},\"roleRef\":{\"apiGroup\":\"rbac.authorization.k8s.io\",\"kind\":\"ClusterRole\",\"name\":\"kubelet-runtime\"},\"subjects\":[{\"kind\":\"ServiceAccount\",\"name\":\"dma\",\"namespace\":\"argo\"},{\"kind\":\"ServiceAccount\",\"name\":\"dma\",\"namespace\":\"argo-events\"},{\"kind\":\"ServiceAccount\",\"name\":\"dma\",\"namespace\":\"sandbox\"}]}\n"
},
"creationTimestamp": "2021-12-16T00:25:42Z",
"name": "dma-kubelet-binding",
"resourceVersion": "371397",
"uid": "a2fb6d5b-8dba-4320-af74-71caac7bdc39"
},
"roleRef": {
"apiGroup": "rbac.authorization.k8s.io",
"kind": "ClusterRole",
"name": "kubelet-runtime"
},
"subjects": [
{
"kind": "ServiceAccount",
"name": "dma",
"namespace": "argo"
},
{
"kind": "ServiceAccount",
"name": "dma",
"namespace": "argo-events"
},
{
"kind": "ServiceAccount",
"name": "dma",
"namespace": "sandbox"
}
]
}
My user has the correct permissions
{
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "Role",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"rbac.authorization.k8s.io/v1\",\"kind\":\"Role\",\"metadata\":{\"annotations\":{},\"name\":\"dma\",\"namespace\":\"sandbox\"},\"rules\":[{\"apiGroups\":[\"\",\"apps\",\"autoscaling\",\"batch\",\"extensions\",\"policy\",\"rbac.authorization.k8s.io\",\"argoproj.io\",\"workflows.argoproj.io\"],\"resources\":[\"pods\",\"configmaps\",\"deployments\",\"events\",\"pods\",\"persistentvolumes\",\"persistentvolumeclaims\",\"services\",\"workflows\"],\"verbs\":[\"get\",\"list\",\"watch\",\"create\",\"update\",\"patch\",\"delete\"]}]}\n"
},
"creationTimestamp": "2021-12-21T19:41:38Z",
"name": "dma",
"namespace": "sandbox",
"resourceVersion": "1058387",
"uid": "94191881-895d-4457-9764-5db9b54cdb3f"
},
"rules": [
{
"apiGroups": [
"",
"apps",
"autoscaling",
"batch",
"extensions",
"policy",
"rbac.authorization.k8s.io",
"argoproj.io",
"workflows.argoproj.io"
],
"resources": [
"pods",
"configmaps",
"deployments",
"events",
"pods",
"persistentvolumes",
"persistentvolumeclaims",
"services",
"workflows"
],
"verbs": [
"get",
"list",
"watch",
"create",
"update",
"patch",
"delete"
]
}
]
}
My user is configured correctly on all nodes
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: DATA+OMITTED
server: https://206.81.25.186:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: dma
name: dma#kubernetes
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin#kubernetes
current-context: kubernetes-admin#kubernetes
kind: Config
preferences: {}
users:
- name: dma
user:
client-certificate-data: REDACTED
client-key-data: REDACTED
- name: kubernetes-admin
user:
client-certificate-data: REDACTED
client-key-data: REDACTED
Based on this website, I have been searching for a watch event.
I think I have rebuilt everything above the control plane, but the problem persists.
The next step would be to rebuild the entire cluster, but it would be so much more satisfying to find the actual problem.
Please help.
FIX:
So the policy for the sandbox namespace was wrong. I fixed that and the problem is gone!
I think I finally understand RBAC (policies and all). Thank you very much to the members of the Kubernetes Slack channel. These policies have passed the first set of tests for a development environment ("sandbox") for Argo workflows. Still testing.
policies.yaml file:
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: dev
namespace: sandbox
rules:
- apiGroups:
- "*"
attributeRestrictions: null
resources: ["*"]
verbs:
- get
- watch
- list
- apiGroups: ["argoproj.io", "workflows.argoproj.io", "events.argoprpj.io"]
attributeRestrictions: null
resources:
- pods
- configmaps
- deployments
- events
- pods
- persistentvolumes
- persistentvolumeclaims
- services
- workflows
- eventbus
- eventsource
- sensor
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: dma-dev
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: dev
subjects:
- kind: User
name: dma
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: dma-admin
subjects:
- kind: User
name: dma
namespace: sandbox
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: access-nginx
namespace: sandbox
spec:
podSelector:
matchLabels:
app: nginx
ingress:
- from:
- podSelector:
matchLabels:
run: access
...
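A quick verification sketch (not in the original post) using kubectl impersonation; both checks should return "yes" given the cluster-admin binding above:
kubectl auth can-i list pods --namespace sandbox --as dma
kubectl auth can-i list pods --all-namespaces --as dma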

Mongo Aggregate Nested Array, Lookup

I am creating a web application using MongoDB and Go, which includes role-based access control. I am storing the information for this in two collections, permissions and roles.
This is what the two collections look like.
Permissions
{
"operation": "create",
"resource": "project"
}
{
"operation": "read",
"resource": "project"
}
{
"operation": "update",
"resource": "project"
}
{
"operation": "delete",
"resource": "project"
}
{
"operation": "create",
"resource": "user"
}
A resource is something on which an operation is performed. If some operation cannot be performed on some resource, I don't need to store it. For example, a user can only be created, so only the create operation needs to be stored for the user resource.
Currently there are only four operations (create, read, update, delete) in the scope of the application, but more could be added later, for example upload.
Roles
{
"role": "admin",
"display_name": "Administrator",
"permissions": [
{
"operation": "create",
"resource": "project"
},
{
"operation": "read",
"resource": "project"
},
{
"operation": "update",
"resource": "project"
},
{
"operation": "delete",
"resource": "project"
}
]
}
Each role document contains role (the internal name), the display name to show in the UI, and the set of permissions that role has.
I need to send this information to the UI through a REST API in a specific format, which describes whether a given role can perform an operation on a resource (the checked flag) and whether a given operation on a resource is editable (the isEditable flag).
For example, the permissions collection doesn't contain a delete operation on the user resource, so it should not be editable and the flag is set to false. A user can be created, so for create it is set to true.
{
display_name: "System Administrator",
role: "admin",
permissions: [
{
resource: "user",
privilages: {
create: { checked: false, isEditable: true },
delete: { checked: false, isEditable: false },
update: { checked: false, isEditable: false },
read: { checked: false, isEditable: false }
}
},
{
resource: "project",
privilages: {
create: { checked: true, isEditable: true },
delete: { checked: true, isEditable: true },
update: { checked: true, isEditable: true },
read: { checked: true, isEditable: true }
}
}
]
}
Is it possible to do this using Mongo aggregations? Or do I need to modify my schema, and if so, what modifications should I make?
I was able to solve the problem in 3 steps:
Include all the permissions for every role and add a flag called checked. This increases data redundancy, but that wasn't a big issue.
Do a group-by on the resource field in the roles collection.
Populate the missing privileges for every resource with isEditable set to false on the server side.
I had to traverse the data on the server side, but this was the most efficient way I could think of.
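A rough sketch of the group-by step, run from the shell with mongosh (the database name myapp is an assumption; the collections are as shown above):
mongosh myapp --eval '
  db.roles.aggregate([
    { $unwind: "$permissions" },
    { $group: {
        _id: { role: "$role", resource: "$permissions.resource" },
        display_name: { $first: "$display_name" },
        operations: { $push: "$permissions.operation" }
    } }
  ]).forEach(printjson)
'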

Forbidden to access Kubernetes API Server

I have defined a ClusterRole for Prometheus:
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: prometheus
labels:
k8s-app: prometheus
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- namespaces
- endpoints
- services
- nodes
- pods
verbs:
- get
- watch
- list
- nonResourceURLs:
- /metrics
- /api/*
verbs:
- get
Prometheus is able to access the API server's /metrics route:
https://10.0.1.104:443/metrics
https://10.0.2.112:443/metrics
But I get "server returned HTTP status 403 Forbidden" on
https://kubernetes.default.svc:443/api/v1/nodes/ip-10-0-0-219.eu-west-1.compute.internal/proxy/metrics
and
https://kubernetes.default.svc:443/api/v1/nodes/ip-10-0-0-219.eu-west-1.compute.internal/proxy/metrics/cadvisor
I thought I had this covered by
- nonResourceURLs:
- /api/*
What am I missing?
I tried this myself, and yes, nodes/proxy is missing (it works for me after adding it):
rules:
- apiGroups: [""]
resources:
- namespaces
- endpoints
- services
- nodes
- nodes/proxy <===
- pods
# From my K8s master
$ curl -k -H 'Authorization: Bearer <redacted>' https://localhost:6443/api/v1/nodes/ip-x-x-x-x.us-west-2.compute.internal/proxy/stats/summary
{
"node": {
"nodeName": "ip-x-x-x-x.us-west-2.compute.internal",
"systemContainers": [
{
"name": "kubelet",
"startTime": "2018-10-19T21:02:19Z",
"cpu": {
"time": "2018-11-09T23:51:15Z",
"usageNanoCores": 30779949,
"usageCoreNanoSeconds": 59446529195638
},
....
Removing it:
$ curl -k -H 'Authorization: Bearer <redacted>' https://localhost:6443/api/v1/nodes/ip-x-x-x-x.us-west-2.compute.internal/proxy/stats/summary
{
"kind": "Status",
"apiVersion": "v1",
"metadata": {
},
"status": "Failure",
"message": "nodes \"ip-x-x-x-x.us-west-2.compute.internal\" is forbidden: User \"system:serviceaccount:default:prometheus-k8s\" cannot get resource \"nodes/proxy\" in API group \"\" at the cluster scope",
"reason": "Forbidden",
"details": {
"name": "ip-x-x-x-x.us-west-2.compute.internal",
"kind": "nodes"
},
"code": 403
}
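The same check can be made without curl, by impersonating the service account from the error message (a sketch):
kubectl auth can-i get nodes --subresource=proxy --as=system:serviceaccount:default:prometheus-k8s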
For those two endpoints, the rules may be missing nodes/metrics and nodes/proxy for (sub)resources, and possibly the proxy verb.
If it is acceptable from a security standpoint, it is much easier to assign the cluster-reader role to the Prometheus service account.
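If you go that route, a minimal sketch of the binding (cluster-reader is provided by OpenShift; on vanilla Kubernetes substitute an equivalent read-only ClusterRole; the service account name is taken from the error message above):
kubectl create clusterrolebinding prometheus-cluster-reader \
  --clusterrole=cluster-reader \
  --serviceaccount=default:prometheus-k8s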