1. Updated AWS Lambda code 2. Added additional environment variables 3. Updated README.md
This commit is contained in:
Parent: a8edc4823e
Commit: 7ddc395a80
Binary file not shown.
Before: Width: | Height: | Size: 60 KiB | After: Width: | Height: | Size: 70 KiB
@@ -33,6 +33,8 @@ $secretValue = ConvertFrom-Json (Get-SECSecretValue -SecretId $secretName -Error
$workspaceId = $secretValue.LAWID
$workspaceKey = $secretValue.LAWKEY
$LATableName = $env:LogAnalyticsTableName
$IsCoreFieldsAllTable = $env:CoreFieldsAllTable
$IsSplitAWSResourceTypes = $env:SplitAWSResourceTypeTables
$ResourceID = ''

# The $eventobjectlist holds the JSON parameter field names that form the core of the JSON message that we want in the _All table in Log Analytics
@@ -130,6 +132,98 @@ Function Invoke-LogAnalyticsData {
}
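# Ingests the core-field records into a single "<LATableName>_All" table,
# splitting any payload larger than 28 MB into roughly 20 MB chunks before sending.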
Function Ingest-Core-Fields-Single-Table {
    Param(
        $coreEvents)

    $coreJson = ConvertTo-Json $coreEvents -Depth 5 -Compress
    $Table = "$LATableName" + "_All"
    if (($coreJson.Length) -gt 28MB) {
        Write-Host "Log length is greater than 28 MB, splitting and sending to Log Analytics"
        $bits = [math]::Round(($coreJson.Length) / 20MB) + 1
        $TotalRecords = $coreEvents.Count
        $RecSetSize = [math]::Round($TotalRecords / $bits) + 1
        $start = 0
        For ($x = 0; $x -lt $bits; $x++) {
            if (($start + $RecSetSize) -gt $TotalRecords) {
                $finish = $TotalRecords
            }
            else {
                $finish = $start + $RecSetSize
            }
            $body = ConvertTo-Json ($coreEvents[$start..$finish]) -Depth 5 -Compress
            $result = Invoke-LogAnalyticsData -CustomerId $workspaceId -SharedKey $workspaceKey -Body $body -LogTable $Table -TimeStampField 'eventTime' -ResourceId $ResourceID
            if ($result -eq 200) {
                Write-Host "CloudTrail Logs successfully ingested to LogAnalytics Workspace under Custom Logs --> Table: $Table"
            }
            $start = $finish + 1
        }
        $null = Remove-Variable -Name body
    }
    else {
        $result = Invoke-LogAnalyticsData -CustomerId $workspaceId -SharedKey $workspaceKey -Body $coreJson -LogTable $Table -TimeStampField 'eventTime' -ResourceId $ResourceID
        if ($result -eq 200) {
            Write-Host "CloudTrail Logs successfully ingested to LogAnalytics Workspace under Custom Logs --> Table: $Table"
        }
    }

    $null = Remove-Variable -Name coreEvents
    $null = Remove-Variable -Name coreJson
}
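# Ingests events grouped by AWS event source into per-resource-type tables
# ("<LATableName>_<ResourceType>"), reusing the same 28 MB split logic.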
Function Ingest-AWS-ResourceType-Multi-Tables {
    Param(
        $eventSources,
        $groupEvents)

    $RecCount = 0
    foreach ($d in $eventSources) {
        $eventsJson = ConvertTo-Json $groupEvents[$d] -Depth 5 -Compress
        $Table = $LATableName + '_' + $d
        $TotalRecords = $groupEvents[$d].Count
        $RecCount += $TotalRecords
        if (($eventsJson.Length) -gt 28MB) {
            $bits = [math]::Round(($eventsJson.Length) / 20MB) + 1
            $TotalRecords = $groupEvents[$d].Count
            $RecSetSize = [math]::Round($TotalRecords / $bits) + 1
            $start = 0
            For ($x = 0; $x -lt $bits; $x++) {
                if (($start + $RecSetSize) -gt $TotalRecords) {
                    $finish = $TotalRecords
                }
                else {
                    $finish = $start + $RecSetSize
                }
                $body = ConvertTo-Json ($groupEvents[$d][$start..$finish]) -Depth 5 -Compress
                $result = Invoke-LogAnalyticsData -CustomerId $workspaceId -SharedKey $workspaceKey -Body $body -LogTable $Table -TimeStampField 'eventTime' -ResourceId $ResourceID
                if ($result -eq 200) {
                    Write-Host "CloudTrail Logs successfully ingested to LogAnalytics Workspace under Custom Logs --> Table: $Table"
                }
                $start = $finish + 1
            }
            $null = Remove-Variable -Name body
        }
        else {
            $result = Invoke-LogAnalyticsData -CustomerId $workspaceId -SharedKey $workspaceKey -Body $eventsJson -LogTable $Table -TimeStampField 'eventTime' -ResourceId $ResourceID
            if ($result -eq 200) {
                Write-Host "CloudTrail Logs successfully ingested to LogAnalytics Workspace under Custom Logs --> Table: $Table"
            }
        }
    }
}
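# Handler loop: process each SNS record delivered to the Lambda invocation.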
foreach ($snsRecord in $LambdaInput.Records)
{
    $snsMessage = ConvertFrom-Json -InputObject $snsRecord.Sns.Message
@@ -222,86 +316,22 @@ foreach ($snsRecord in $LambdaInput.Records)
}

$coreJson = convertto-json $coreevents -depth 5 -Compress
$Table = "$LATableName" + "_All"
IF (($corejson.Length) -gt 28MB) {
Write-Host "Log length is greater than 28 MB, splitting and sending to Log Analytics"
$bits = [math]::Round(($corejson.length) / 20MB) + 1
$TotalRecords = $coreEvents.Count
$RecSetSize = [math]::Round($TotalRecords / $bits) + 1
$start = 0
For ($x = 0; $x -lt $bits; $X++) {
IF ( ($start + $recsetsize) -gt $TotalRecords) {
$finish = $totalRecords
}
ELSE {
$finish = $start + $RecSetSize
}
$body = Convertto-Json ($coreEvents[$start..$finish]) -Depth 5 -Compress
$result = Invoke-LogAnalyticsData -CustomerId $workspaceId -SharedKey $workspaceKey -Body $body -LogTable $Table -TimeStampField 'eventTime' -ResourceId $ResourceID
if ($result -eq 200)
{
Write-Host "CloudTrail Logs successfully ingested to LogAnalytics Workspace under Custom Logs --> Table: $Table"
}
$start = $Finish + 1
}
$null = Remove-variable -name body

IF ($IsCoreFieldsAllTable -eq "true" -and $IsSplitAWSResourceTypes -eq "true") {
Ingest-Core-Fields-Single-Table -CoreEvents $coreEvents
Ingest-AWS-ResourceType-Multi-Tables -EventSources $eventSources -GroupEvents $groupevents
}
Else {
#$logEvents = Convertto-Json $events -depth 20 -compress
$result = Invoke-LogAnalyticsData -CustomerId $workspaceId -SharedKey $workspaceKey -Body $coreJson -LogTable $Table -TimeStampField 'eventTime' -ResourceId $ResourceID
if ($result -eq 200)
{
Write-Host "CloudTrail Logs successfully ingested to LogAnalytics Workspace under Custom Logs --> Table: $Table"
}
ELSEIF ($IsCoreFieldsAllTable -eq "true" -and $IsSplitAWSResourceTypes -eq "false"){
Ingest-Core-Fields-Single-Table -CoreEvents $coreEvents
}
ELSEIF ($IsCoreFieldsAllTable -eq "false" -and $IsSplitAWSResourceTypes -eq "true"){
Ingest-AWS-ResourceType-Multi-Tables -EventSources $eventSources -GroupEvents $groupevents
}
ELSE {
Write-Host "Make sure you have correct values supplied in Environment Variables for CoreFieldsAllTable and SplitAWSResourceTypeTables"
}

$null = remove-variable -name coreEvents
$null = remove-variable -name coreJson

$RecCount = 0
foreach ($d in $eventSources) {
$eventsJson = ConvertTo-Json $groupevents[$d] -depth 5 -Compress
$Table = $LATableName + '_' + $d
$TotalRecords = $groupevents[$d].Count
$recCount += $TotalRecords
IF (($eventsjson.Length) -gt 28MB) {
$bits = [math]::Round(($eventsjson.length) / 20MB) + 1
$TotalRecords = $groupevents[$d].Count
$RecSetSize = [math]::Round($TotalRecords / $bits) + 1
$start = 0
For ($x = 0; $x -lt $bits; $X++) {
IF ( ($start + $recsetsize) -gt $TotalRecords) {
$finish = $totalRecords
}
ELSE {
$finish = $start + $RecSetSize
}
$body = Convertto-Json ($groupevents[$d][$start..$finish]) -Depth 5 -Compress
$result = Invoke-LogAnalyticsData -CustomerId $workspaceId -SharedKey $workspaceKey -Body $body -LogTable $Table -TimeStampField 'eventTime' -ResourceId $ResourceID
if ($result -eq 200)
{
Write-Host "CloudTrail Logs successfully ingested to LogAnalytics Workspace under Custom Logs --> Table: $Table"
}
$start = $Finish + 1
}
$null = Remove-variable -name body
}
Else {
$result = Invoke-LogAnalyticsData -CustomerId $workspaceId -SharedKey $workspaceKey -Body $eventsJson -LogTable $Table -TimeStampField 'eventTime' -ResourceId $ResourceID
if ($result -eq 200)
{
Write-Host "CloudTrail Logs successfully ingested to LogAnalytics Workspace under Custom Logs --> Table: $Table"
}
}
}

$null = Remove-Variable -Name groupevents
$null = Remove-Variable -Name LogEvents
$null = Remove-Variable -Name LogEvents
}
}
}
Binary file not shown.
@@ -4,16 +4,25 @@ This Lambda function is designed to ingest AWS CloudTrail Events and send them t
AWS CloudTrail logs are audit-type events from all/any AWS resources in a tenancy. Each AWS resource has a unique set of Request and Response parameters. Azure Log Analytics has a limit of 500 columns per table (plus some system columns); the aggregate of AWS parameter fields will exceed this quickly, leading to potential loss of event records.
The code does the following things with the logs it processes:
1. Takes the core fields of the record, i.e. all fields except for the Request- and Response-associated fields, and puts them in LogAnalyticsTableName_All, providing a single table with the core event information for all records.
2. Looks at each event and puts it into a table with an <AWSResourceType> extension, e.g. LogAnalyticsTableName_S3 (see the sketch after this list).
3. The exception to 2 above is EC2 events: the volume of fields for EC2 Request and Response parameters exceeds 500 columns, so EC2 data is split into 3 tables: Header, Request and Response.
   Ex: LogAnalyticsTableName_EC2_Header
4. In future, if other AWS data types exceed 500 columns, a similar split may be required for them as well.
5. The processing of data as described in 3 will lead to some data being ingested into 2 or more different tables and will increase the log ingestion metrics/billing. Customers who decide they don't want the _All table can remove that duplicate data storage volume.
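A rough sketch of the grouping idea behind 2 and 3 (the `$groupEvents`/`$eventSources` names match the function code in this commit; the sample records and the table-suffix derivation are illustrative assumptions, since that part of the handler is not shown in this diff):

```
# Bucket CloudTrail events by AWS resource type so each bucket can be sent
# to its own Log Analytics table. $records is hypothetical sample data.
$records = @(
    [pscustomobject]@{ eventSource = 's3.amazonaws.com';  eventName = 'PutObject' },
    [pscustomobject]@{ eventSource = 'ec2.amazonaws.com'; eventName = 'RunInstances' }
)
$groupEvents  = @{}
$eventSources = @()
foreach ($event in $records) {
    # 's3.amazonaws.com' --> 'S3' table suffix (illustrative derivation)
    $d = (($event.eventSource -split '\.')[0]).ToUpper()
    if (-not $groupEvents.ContainsKey($d)) {
        $groupEvents[$d] = @()
        $eventSources += $d
    }
    $groupEvents[$d] += $event
}
# Each bucket would then land in LogAnalyticsTableName_<ResourceType>
foreach ($d in $eventSources) { Write-Host "$d --> $($groupEvents[$d].Count) event(s)" }
```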
Special thanks to [Chris Abberley](https://github.com/cabberley) for the above logic.
**Note**
To avoid additional billing and duplication:
1. You can turn off LogAnalyticsTableName_All by setting the additional environment variable **CoreFieldsAllTable** to **false**
2. You can turn off the LogAnalyticsTableName_<AWSResourceType> tables by setting the additional environment variable **SplitAWSResourceTypeTables** to **false**

**Either CoreFieldsAllTable or SplitAWSResourceTypeTables must be true, or both can be true**
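Condensed from the Lambda code in this commit, the two variables drive ingestion like this (both arrive from the environment as strings):

```
$IsCoreFieldsAllTable    = $env:CoreFieldsAllTable
$IsSplitAWSResourceTypes = $env:SplitAWSResourceTypeTables

if ($IsCoreFieldsAllTable -eq "true" -and $IsSplitAWSResourceTypes -eq "true") {
    Ingest-Core-Fields-Single-Table -CoreEvents $coreEvents
    Ingest-AWS-ResourceType-Multi-Tables -EventSources $eventSources -GroupEvents $groupEvents
}
elseif ($IsCoreFieldsAllTable -eq "true" -and $IsSplitAWSResourceTypes -eq "false") {
    Ingest-Core-Fields-Single-Table -CoreEvents $coreEvents
}
elseif ($IsCoreFieldsAllTable -eq "false" -and $IsSplitAWSResourceTypes -eq "true") {
    Ingest-AWS-ResourceType-Multi-Tables -EventSources $eventSources -GroupEvents $groupEvents
}
else {
    Write-Host "Make sure you have correct values supplied in Environment Variables for CoreFieldsAllTable and SplitAWSResourceTypeTables"
}
```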
## **Function Flow process**
**CloudTrail Logs --> AWS S3 --> AWS SNS Topic --> AWS Lambda --> Azure Log Analytics**
![Picture9](./Graphics/Picture9.png)
## Installation / Setup Guide
@@ -64,15 +73,17 @@ You might need -ProfileName if your configuration of .aws/credentials file doe
1. Once created, log in to the AWS console. In Find services, search for Lambda. Click on Lambda.
![Picture1](./Graphics/Picture1.png)
2. Click on the Lambda function name you used with the cmdlet. Click Environment Variables and add the following:
```
SecretName
LogAnalyticsTableName
CoreFieldsAllTable --> Boolean
SplitAWSResourceTypeTables --> Boolean
```
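If you prefer to script this step, the same variables can be set with the AWS Tools for PowerShell (a sketch; the function name and values below are placeholders, and the two Booleans are read by the Lambda as the strings "true"/"false"):

```
# Sketch: set the Lambda environment variables from PowerShell.
# 'MyCloudTrailIngestLambda' and the values are placeholders.
Update-LMFunctionConfiguration -FunctionName 'MyCloudTrailIngestLambda' `
    -Environment_Variable @{
        SecretName                 = 'MySecretName'
        LogAnalyticsTableName      = 'AwsCloudTrail'
        CoreFieldsAllTable         = 'true'
        SplitAWSResourceTypeTables = 'true'
    }
```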
![Picture4](./Graphics/Picture4.png)
3. Click on the Lambda function name you used with the cmdlet. Click Add Trigger.
![Picture2](./Graphics/Picture2.png)
4. Select SNS. Select the SNS Name. Click Add.
![Picture3](./Graphics/Picture3.png)
5. Create AWS Role: The Lambda function will need an execution role defined that grants access to the S3 bucket and CloudWatch Logs. To create an execution role:
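As a scripted alternative to the console steps, a minimal sketch using the AWS Tools for PowerShell might look like this (the role name is a placeholder, and the managed policies shown are one way to grant the S3 and CloudWatch Logs access the function needs):

```
# Trust policy letting Lambda assume the role.
$trust = @'
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": { "Service": "lambda.amazonaws.com" },
    "Action": "sts:AssumeRole"
  }]
}
'@
$role = New-IAMRole -RoleName 'CloudTrailToSentinelLambdaRole' -AssumeRolePolicyDocument $trust
# S3 read access for the CloudTrail bucket and basic CloudWatch Logs access.
Register-IAMRolePolicy -RoleName $role.RoleName -PolicyArn 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
Register-IAMRolePolicy -RoleName $role.RoleName -PolicyArn 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole'
```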