Mirror of https://github.com/Azure/ipam.git
Fixed a bug in the visualization, handled AccessDenied errors properly, and prepared for a unified Dockerfile for Debian and RHEL
Parent: 8dbd017506
Commit: 1df04fd501
16  default.conf
16  default.conf
@@ -1,18 +1,30 @@
# This is a default site configuration which will simply return 404, preventing
# chance access to any other virtualhost.

upstream ui {
    server ipam-ui;
    server ipam-ui:8080 backup;
}

upstream engine {
    server ipam-engine;
    server ipam-engine:8080 backup;
}

server {
    listen 80 default_server;
    listen [::]:80 default_server;

    proxy_next_upstream error timeout http_502;

    # Frontend
    location / {
        proxy_pass http://ipam-ui;
        proxy_pass http://ui;
    }

    # Backend
    location /api {
        proxy_pass http://ipam-engine;
        proxy_pass http://engine;
    }

    # You may need this to prevent return 404 recursion.
@@ -1,18 +1,30 @@
# This is a default site configuration which will simply return 404, preventing
# chance access to any other virtualhost.

upstream ui {
    server ipam-ui;
    server ipam-ui:8080 backup;
}

upstream engine {
    server ipam-engine;
    server ipam-engine:8080 backup;
}

server {
    listen 80 default_server;
    listen [::]:80 default_server;

    proxy_next_upstream error timeout http_502;

    # Frontend
    location / {
        proxy_pass http://ipam-ui-dev;
        proxy_pass http://ui;
    }

    # WebSockets
    location /ws {
        proxy_pass http://ipam-ui-dev;
        proxy_pass http://ui;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";

@@ -21,7 +33,7 @@ server {

    # Backend
    location /api {
        proxy_pass http://ipam-engine-dev;
        proxy_pass http://engine;
    }

    # You may need this to prevent return 404 recursion.
@@ -1,16 +1,14 @@
ARG BASE_IMAGE=python:3.9-slim
FROM $BASE_IMAGE

# FROM python:3.9-slim
ARG PORT=80

WORKDIR /code
ADD ./requirements.txt $HOME

COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r ./requirements.txt

RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
ADD ./app $HOME/app

COPY ./app /code/app
EXPOSE $PORT

EXPOSE 80

CMD uvicorn "app.main:app" --reload --host "0.0.0.0" --port 8080
CMD uvicorn "app.main:app" --reload --host "0.0.0.0" --port $PORT
@@ -2,12 +2,14 @@ from fastapi import HTTPException
from fastapi.responses import JSONResponse

from azure.identity.aio import OnBehalfOfCredential, ClientSecretCredential

from azure.core import MatchConditions
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ServiceRequestError

from azure.mgmt.resourcegraph.aio import ResourceGraphClient
from azure.mgmt.resourcegraph.models import *
from azure.mgmt.managementgroups.aio import ManagementGroupsAPI

from azure.core import MatchConditions
from azure.cosmos.aio import CosmosClient
import azure.cosmos.exceptions as exceptions

@@ -304,6 +306,9 @@ async def arg_query_helper(credentials, query):
    except ServiceRequestError as e:
        print(e)
        raise HTTPException(status_code=500, detail="Error communicating with Azure.")
    except HttpResponseError as e:
        print(e)
        raise HTTPException(status_code=403, detail="Access denied.")
    finally:
        await resource_graph_client.close()
@@ -1,17 +1,7 @@
ARG BASE_IMAGE=node:16-slim
FROM $BASE_IMAGE

# FROM node:16-slim

# Set Working Directory
# WORKDIR /app

# Add `/app/node_modules/.bin` to $PATH
# ENV PATH /app/node_modules/.bin:$PATH

# Copy Node Package Files
# COPY package.json ./
# COPY package-lock.json ./
ARG PORT=80

# Add application sources
ADD . $HOME

@@ -19,14 +9,11 @@ ADD . $HOME
# Install Dependencies
RUN npm install

# Copy Application Code
# COPY . ./

# Build Application
RUN npm run build

# Expose Port
EXPOSE 80
EXPOSE $PORT

# Inject ENV Variables & Start Server
CMD npx --yes react-inject-env set && npx --yes http-server -a 0.0.0.0 -P http://localhost? -p 8080 build
CMD npx --yes react-inject-env set && npx --yes http-server -a 0.0.0.0 -P http://localhost? -p $PORT build
@@ -335,11 +335,11 @@ function parseTree(spaces, vnets, endpoints) {
size: block.size,
used: block.used
},
children: block.vnets.map((vnet) => {
children: block.vnets.reduce((results, vnet) => {
const target = vnets.find((x) => x.id === vnet.id);

if(target) {
return {
results.push({
name: target.name,
value: {
type: 'vnet',

@@ -382,9 +382,11 @@ function parseTree(spaces, vnets, endpoints) {
}),
};
}),
};
});
}
}),

return results;
}, []),
};
}),
};
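The parseTree change above replaces Array.map with Array.reduce so that block entries whose vnet has no match in the vnets list are skipped instead of becoming undefined children in the chart data. A minimal sketch of that pattern, using hypothetical sample data rather than this repo's objects:

// map keeps one slot per input, so an unmatched id yields undefined;
// reduce only pushes entries that actually resolved.
const vnets = [{ id: 'a', name: 'vnet-a' }];
const blockVnets = [{ id: 'a' }, { id: 'b' }];

const withMap = blockVnets.map((vnet) => {
  const target = vnets.find((x) => x.id === vnet.id);
  if (target) {
    return { name: target.name };
  }
}); // [ { name: 'vnet-a' }, undefined ]

const withReduce = blockVnets.reduce((results, vnet) => {
  const target = vnets.find((x) => x.id === vnet.id);
  if (target) {
    results.push({ name: target.name });
  }
  return results;
}, []); // [ { name: 'vnet-a' } ]

console.log(withMap, withReduce);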
@@ -279,7 +279,7 @@ export function refreshAll(token) {
(async () => await fetchEndpoints(token))()
];

return Promise.all(stack);
return Promise.allSettled(stack);
}

export function fetchTreeView(token) {
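The refreshAll change swaps Promise.all for Promise.allSettled, so one rejected request (for example a 403 "Access denied." response from the engine API) no longer throws away the results of the requests that succeeded. A minimal sketch with hypothetical stand-ins for the fetch helpers:

// Hypothetical stand-ins for fetchSpaces/fetchVNets/fetchEndpoints.
const ok = async () => ['space-1'];
const denied = async () => { throw new Error('Access denied.'); };

async function refreshAllSketch() {
  const stack = [ok(), denied(), ok()];

  // Promise.all(stack) would reject as soon as one promise rejects.
  // Promise.allSettled resolves with one descriptor per promise:
  // { status: 'fulfilled', value } or { status: 'rejected', reason }.
  const results = await Promise.allSettled(stack);

  return results.map((r) => (r.status === 'fulfilled' ? r.value : []));
}

refreshAllSketch().then(console.log); // [ [ 'space-1' ], [], [ 'space-1' ] ]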
@@ -1,4 +1,5 @@
import { createAsyncThunk, createSlice } from '@reduxjs/toolkit';
import { values } from 'lodash';
import {
fetchSpaces,
fetchVNets,

@@ -145,7 +146,7 @@ export const ipamSlice = createSlice({
.addCase(refreshAllAsync.fulfilled, (state, action) => {
state.refreshing = false;

const spaces = action.payload[0].map((space) => {
const spaces = action.payload[0].value.map((space) => {
if('size' in space && 'used' in space) {
space.available = (space.size - space.used);
space.utilization = Math.round((space.used / space.size) * 100) || 0;

@@ -156,7 +157,7 @@ export const ipamSlice = createSlice({

state.spaces = spaces;

state.blocks = action.payload[0].map((space) => {
state.blocks = action.payload[0].value.map((space) => {
space.blocks.forEach((block) => {
block.parentSpace = space.name;
block.available = (block.size - block.used);

@@ -167,45 +168,54 @@ export const ipamSlice = createSlice({
return space.blocks;
}).flat();

const vnets = action.payload[1].map((vnet) => {
vnet.available = (vnet.size - vnet.used);
vnet.utilization = Math.round((vnet.used / vnet.size) * 100);
vnet.prefixes = vnet.prefixes.join(", ");
if(action.payload[1].status === 'fulfilled') {
const vnets = action.payload[1].value.map((vnet) => {
vnet.available = (vnet.size - vnet.used);
vnet.utilization = Math.round((vnet.used / vnet.size) * 100);
vnet.prefixes = vnet.prefixes.join(", ");

return vnet;
});

state.vNets = vnets;

const subnets = action.payload[1].map((vnet) => {
var subnetArray = [];

vnet.subnets.forEach((subnet) => {
const subnetDetails = {
name: subnet.name,
id: `${vnet.id}/subnets/${subnet.name}`,
prefix: subnet.prefix,
resource_group: vnet.resource_group,
subscription_id: vnet.subscription_id,
tenant_id: vnet.tenant_id,
vnet_name: vnet.name,
vnet_id: vnet.id,
used: subnet.used,
size: subnet.size,
available: (subnet.size - subnet.used),
utilization: Math.round((subnet.used / subnet.size) * 100),
type: subnetMap[subnet.type]
};

subnetArray.push(subnetDetails);
return vnet;
});

return subnetArray;
}).flat();
state.vNets = vnets;

state.subnets = subnets;
const subnets = action.payload[1].value.map((vnet) => {
var subnetArray = [];

vnet.subnets.forEach((subnet) => {
const subnetDetails = {
name: subnet.name,
id: `${vnet.id}/subnets/${subnet.name}`,
prefix: subnet.prefix,
resource_group: vnet.resource_group,
subscription_id: vnet.subscription_id,
tenant_id: vnet.tenant_id,
vnet_name: vnet.name,
vnet_id: vnet.id,
used: subnet.used,
size: subnet.size,
available: (subnet.size - subnet.used),
utilization: Math.round((subnet.used / subnet.size) * 100),
type: subnetMap[subnet.type]
};

state.endpoints = action.payload[2];
subnetArray.push(subnetDetails);
});

return subnetArray;
}).flat();

state.subnets = subnets;
} else {
state.vNets = [];
state.subnets = [];
}

if(action.payload[2].status === 'fulfilled') {
state.endpoints = action.payload[2].value;
} else {
state.endpoints = [];
}
})
.addCase(refreshAllAsync.pending, (state) => {
state.refreshing = true;
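With refreshAll now returning Promise.allSettled results, the reducer above reads action.payload[n].status and .value and falls back to empty arrays when a request was rejected. A standalone sketch of that unwrapping, with a hypothetical unwrap helper and sample payload rather than this repo's code:

// Use .value only when the request was fulfilled; otherwise fall back.
function unwrap(result, fallback = []) {
  return result.status === 'fulfilled' ? result.value : fallback;
}

const payload = [
  { status: 'fulfilled', value: [{ name: 'space-1', size: 256, used: 64 }] },
  { status: 'rejected', reason: new Error('Access denied.') },
  { status: 'fulfilled', value: [] }
];

const spaces = unwrap(payload[0]);
const vNets = unwrap(payload[1]);     // [] because the vnet fetch was rejected
const endpoints = unwrap(payload[2]);

console.log(spaces.length, vNets.length, endpoints.length); // 1 0 0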