fix: fix pushing logs to s3 and opensearch #181

Open
wants to merge 1 commit into base: main
10 changes: 9 additions & 1 deletion dependencies/code_builder/buildspec.yml
@@ -5,7 +5,7 @@ phases:
commands:
- aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin https://$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com
- |
PULL_FROM_DOCKER=("grafana/grafana" "grafana/loki" "grafana/promtail" "grafana/fluent-bit-plugin-loki" "nginx" "juspaydotin/hyperswitch-router" "juspaydotin/hyperswitch-producer" "juspaydotin/hyperswitch-consumer" "juspaydotin/hyperswitch-control-center" "juspaydotin/hyperswitch-web" "bitnami/metrics-server" "istio/proxyv2" "istio/pilot")
PULL_FROM_DOCKER=("grafana/grafana" "grafana/loki" "grafana/promtail" "grafana/fluent-bit-plugin-loki" "nginx" "juspaydotin/hyperswitch-router" "juspaydotin/hyperswitch-producer" "juspaydotin/hyperswitch-consumer" "juspaydotin/hyperswitch-control-center" "juspaydotin/hyperswitch-web" "bitnami/metrics-server" "istio/proxyv2" "istio/pilot" "fluent/fluentd-kubernetes-daemonset")
PULL_FROM_AWS=("eks/aws-load-balancer-controller" "ebs-csi-driver/aws-ebs-csi-driver" "eks-distro/kubernetes-csi/external-provisioner" "eks-distro/kubernetes-csi/external-attacher" "eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter" "eks-distro/kubernetes-csi/livenessprobe" "eks-distro/kubernetes-csi/external-resizer" "eks-distro/kubernetes-csi/node-driver-registrar" "ebs-csi-driver/volume-modifier-for-k8s")

repository_exists() {
@@ -48,6 +48,14 @@ phases:
docker pull "$IMAGE:1.21.2" && \
docker tag "$IMAGE:1.21.2" "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE:1.21.2"
docker push "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE:1.21.2"
elif [[ $IMAGE == "fluent/fluentd-kubernetes-daemonset" ]]; then
# mirror both fluentd tags: the S3 output image and the OpenSearch output image
docker pull "$IMAGE:v1.16-debian-s3-1" && \
docker tag "$IMAGE:v1.16-debian-s3-1" "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE:v1.16-debian-s3-1"
docker push "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE:v1.16-debian-s3-1"
docker pull "$IMAGE:v1.16-debian-opensearch-2" && \
docker tag "$IMAGE:v1.16-debian-opensearch-2" "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE:v1.16-debian-opensearch-2"
docker push "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE:v1.16-debian-opensearch-2"
else
docker pull "$IMAGE" && \
docker tag "$IMAGE" "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE:latest" && \
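
Once this buildspec change runs in CodeBuild, both fluentd tags should end up in the account's private ECR registry. A quick verification sketch (assumes the AWS CLI is configured for the same account and region the buildspec pushes to):

# List the tags mirrored for the fluentd image; after a successful build this
# should include both v1.16-debian-s3-1 and v1.16-debian-opensearch-2.
aws ecr describe-images \
  --repository-name fluent/fluentd-kubernetes-daemonset \
  --query 'imageDetails[].imageTags[]' \
  --output text
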
29 changes: 22 additions & 7 deletions lib/aws/eks.ts
@@ -33,6 +33,9 @@ export class EksStack {
lokiChart: eks.HelmChart;
sdkBucket: s3.Bucket;
sdkDistribution: cloudfront.CloudFrontWebDistribution;
ext_app_lb_sg: ec2.SecurityGroup;
outbound_proxy: string;
privateEcrRepository: string;
constructor(
scope: Construct,
config: Config,
@@ -45,6 +48,7 @@

const ecrTransfer = new DockerImagesToEcr(scope, vpc);
const privateEcrRepository = `${process.env.CDK_DEFAULT_ACCOUNT}.dkr.ecr.${process.env.CDK_DEFAULT_REGION}.amazonaws.com`
this.privateEcrRepository = privateEcrRepository;
let vpn_ips: string[] = (scope.node.tryGetContext("vpn_ips") || "0.0.0.0").split(",");
vpn_ips = vpn_ips.map((ip: string) => {
if (ip === "0.0.0.0") {
@@ -68,10 +72,19 @@
eks.ClusterLoggingTypes.SCHEDULER,
]
});

const lbSecurityGroup = new ec2.SecurityGroup(scope, "HSLBSecurityGroup", {
vpc: cluster.vpc,
allowAllOutbound: false,
securityGroupName: "hs-loadbalancer-sg",
});

this.sg = cluster.clusterSecurityGroup;
this.ext_app_lb_sg = lbSecurityGroup;

let push_logs = scope.node.tryGetContext('open_search_service') || 'n';
if (`${push_logs}` == "y"){
const logsStack = new LogsStack(scope, cluster, "app-logs-s3-service-account");
const logsStack = new LogsStack(scope, cluster, this, "app-logs-s3-service-account");
}

cluster.node.addDependency(ecrTransfer.codebuildTrigger);
@@ -516,13 +529,13 @@ export class EksStack {
// );

// Create a security group for the load balancer
const lbSecurityGroup = new ec2.SecurityGroup(scope, "HSLBSecurityGroup", {
vpc: cluster.vpc,
allowAllOutbound: false,
securityGroupName: "hs-loadbalancer-sg",
});
// const lbSecurityGroup = new ec2.SecurityGroup(scope, "HSLBSecurityGroup", {
// vpc: cluster.vpc,
// allowAllOutbound: false,
// securityGroupName: "hs-loadbalancer-sg",
// });

this.sg = cluster.clusterSecurityGroup;
// this.sg = cluster.clusterSecurityGroup;

// Add inbound rule for all traffic
lbSecurityGroup.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.allTraffic());
@@ -1155,6 +1168,8 @@ export class EksStack {
}
});

this.outbound_proxy = squidLoadBalncer.loadBalancerDnsName;

const logsBucket = new s3.Bucket(scope, "hyperswitch-outgoing-proxy-logs-bucket", {
bucketName: `outgoing-proxy-logs-bucket-${process.env.CDK_DEFAULT_ACCOUNT}-${process.env.CDK_DEFAULT_REGION}`,
blockPublicAccess: new s3.BlockPublicAccess({
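
For reference, the S3/OpenSearch log-forwarding path added above is gated on the open_search_service context value, and the OpenSearch credentials come from the open_search_master_user_name and open_search_master_password context keys read further down in lib/aws/log_stack.ts. A minimal deploy sketch, assuming the stock cdk CLI and placeholder credentials (the code falls back to admin / Password@123 when the keys are omitted):

# Enable log pushing to S3 and OpenSearch at deploy time via CDK context values.
# The password below is a placeholder; supply a strong value of your own.
cdk deploy \
  -c open_search_service=y \
  -c open_search_master_user_name=admin \
  -c open_search_master_password='<strong-password>'
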
63 changes: 38 additions & 25 deletions lib/aws/log_stack.ts
@@ -6,12 +6,14 @@ import * as iam from "aws-cdk-lib/aws-iam";
import * as eks from "aws-cdk-lib/aws-eks";
import * as opensearch from 'aws-cdk-lib/aws-opensearchservice';
import { Domain, EngineVersion, IpAddressType } from 'aws-cdk-lib/aws-opensearchservice';
import { EksStack } from "./eks";


export class LogsStack {
bucket: s3.Bucket;
domain: Domain;
constructor(scope: Construct, cluster: eks.Cluster, serviceAccountName?: string) {
els_sg: ec2.SecurityGroup;
constructor(scope: Construct, cluster: eks.Cluster, eksStack: EksStack, serviceAccountName?: string) {
this.bucket = new s3.Bucket(scope, "LogsBucket", {
removalPolicy: cdk.RemovalPolicy.DESTROY,
bucketName: `logs-bucket-${process.env.CDK_DEFAULT_ACCOUNT}-${process.env.CDK_DEFAULT_REGION}`,
@@ -67,7 +69,7 @@ export class LogsStack {
type: "ClusterIP",
},
image: {
repository: "fluent/fluentd-kubernetes-daemonset",
repository: `${eksStack.privateEcrRepository}/fluent/fluentd-kubernetes-daemonset`,
pullPolicy: "IfNotPresent",
tag: "v1.16-debian-s3-1"
},
@@ -134,42 +136,51 @@ export class LogsStack {
});

fluentdChart.node.addDependency(sa);
let open_search_master_user_name = scope.node.tryGetContext('open_search_master_user_name') || "admin";
let open_search_master_password = scope.node.tryGetContext('open_search_master_password') || "Password@123";

const opensearch_sg = new ec2.SecurityGroup(scope, 'opensearch-sg', {
vpc: cluster.vpc,
allowAllOutbound: true,
securityGroupName: "els-sg",
});
// expose the security group on the class so the declared els_sg field is initialized
this.els_sg = opensearch_sg;

this.domain = new opensearch.Domain(scope, 'OpenSearch', {
domainName: "hyperswitch-logs-oopensearch",
version: opensearch.EngineVersion.OPENSEARCH_2_11,
enableVersionUpgrade: false,
ebs: {
volumeSize: 50,
volumeType: ec2.EbsDeviceVolumeType.GP3,
throughput: 125,
iops: 3000,
volumeSize: 50,
volumeType: ec2.EbsDeviceVolumeType.GP3,
throughput: 125,
iops: 3000,
},
fineGrainedAccessControl: {
masterUserName: "admin",
masterUserPassword: cdk.SecretValue.unsafePlainText("Pluentd@123"),
masterUserName: open_search_master_user_name,
masterUserPassword: cdk.SecretValue.unsafePlainText(`${open_search_master_password}`),
},
nodeToNodeEncryption: true,
encryptionAtRest: {
enabled: true,
enabled: true,
},
removalPolicy: cdk.RemovalPolicy.DESTROY,
enforceHttps: true,
zoneAwareness:{
enabled: true,
availabilityZoneCount: 2
zoneAwareness: {
enabled: true,
availabilityZoneCount: 2
},
capacity: {
dataNodes: 2,
dataNodeInstanceType: "r6g.large.search",
multiAzWithStandbyEnabled: false
dataNodes: 2,
dataNodeInstanceType: "r6g.large.search",
multiAzWithStandbyEnabled: false
}
});
// this.domain.grantReadWrite(new iam.AnyPrincipal());
const policy = new iam.PolicyStatement({
effect: iam.Effect.ALLOW,
principals: [new iam.AnyPrincipal()],
actions: ["es:*"],
resources: [`${this.domain.domainArn}/*`],
effect: iam.Effect.ALLOW,
principals: [new iam.AnyPrincipal()],
actions: ["es:*"],
resources: [`${this.domain.domainArn}/*`],
});
this.domain.addAccessPolicies(policy);

@@ -182,8 +193,6 @@ export class LogsStack {
});

kAnalyticsNS.node.addDependency(this.domain);
let open_search_master_user_name = scope.node.tryGetContext('open_search_master_user_name') || "admin";
let open_search_master_password = scope.node.tryGetContext('open_search_master_password') || "Password@123";

const openSearchFluentdChart = cluster.addHelmChart("fluentd-opensearch", {
chart: "fluentd",
@@ -221,7 +230,7 @@ export class LogsStack {
type: "ClusterIP",
},
image: {
repository: "fluent/fluentd-kubernetes-daemonset",
repository: `${eksStack.privateEcrRepository}/fluent/fluentd-kubernetes-daemonset`,
pullPolicy: "IfNotPresent",
tag: "v1.16-debian-opensearch-2"
},
@@ -249,6 +258,10 @@ export class LogsStack {
{
name: "FLUENT_OPENSEARCH_SCHEME",
value: "https",
},
{
name: "HTTP_PROXY",
value: `http://${eksStack.outbound_proxy}:80`,
}

],
@@ -316,7 +329,7 @@ export class LogsStack {
expression /^(?<time>.+) (?<stream>stdout|stderr)( (?<logtag>.))? (?<log>.*)$/
</parse>
</source>`,

"02_filters.conf": `
# Parse JSON Logs
<filter hyperswitch.**>
@@ -342,7 +355,7 @@ export class LogsStack {
<filter hyperswitch.**>
@type kubernetes_metadata
</filter>`,

"03_dispatch.conf": "",

"04_outputs.conf": `
@@ -387,7 +400,7 @@ export class LogsStack {
</store>
</match>`
},
ingress:{
ingress: {
enabled: false,
}
}
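
Because the OpenSearch output of fluentd is routed through the outbound squid proxy (the HTTP_PROXY value built from outbound_proxy above), a quick connectivity sketch can be run from any host that can reach the proxy, assuming the proxy allows CONNECT to the domain; <proxy-dns>, <domain-endpoint>, and the credentials are placeholders for the deployed values:

# Verify the OpenSearch domain answers through the outbound proxy using the
# fine-grained-access master user configured in log_stack.ts.
curl -x http://<proxy-dns>:80 \
  -u 'admin:<master-password>' \
  https://<domain-endpoint>/_cluster/health
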