59 Commits

Author SHA1 Message Date
reynisson
43036b4312 Merge remote-tracking branch 'origin/dev' into dev 2021-11-21 22:37:26 +01:00
reynisson
0f5adff167 Updated ADRs 2021-11-21 22:37:17 +01:00
df0e91fb23 Merge pull request #83 from SCS-ASSE-FS21-Group1/db_roster
added database to roster
2021-11-21 21:19:10 +01:00
d70ea75fb7 Merge pull request #84 from SCS-ASSE-FS21-Group1/Persistence-mongodb
Persistence mongodb
2021-11-21 21:18:14 +01:00
reynisson
c1f27d51f7 Updated ADRs 2021-11-21 19:48:37 +01:00
reynisson
54f959ac23 Updated ADRs 2021-11-21 19:45:16 +01:00
julius.lautz
ae9e3acbd2 added database to roster 2021-11-21 19:40:32 +01:00
rahimiankeanu
2aeaf86e59 Persistence layer in executor pool
(RemoveExecutorFrom... under Service folder still has an error)
2021-11-21 18:07:07 +01:00
reynisson
5968184b08 Merge remote-tracking branch 'origin/dev' into dev 2021-11-21 17:16:13 +01:00
reynisson
af820f23d9 Path the task output data instead of a hard coded 0 2021-11-21 17:16:01 +01:00
bc5c654154 Merge remote-tracking branch 'upstream/main' into dev 2021-11-21 12:14:26 +01:00
Ronny Seiger
b027a83aad Merge pull request #11 from SCS-ASSE-FS21-Tutors/test
Test
2021-11-21 11:32:13 +01:00
reynisson
1bd387413e fixed Content-type header in external patch 2021-11-20 23:44:47 +01:00
reynisson
53d815b93d smallest fixes 2021-11-20 23:26:02 +01:00
reynisson
3141f97f41 smaller fixes 2021-11-20 23:08:22 +01:00
reynisson
ad27697bc7 Merge pull request #78 from SCS-ASSE-FS21-Group1/dev
using https for auction house link
2021-11-20 22:54:41 +01:00
reynisson
332f035622 small fixes 2021-11-20 22:53:52 +01:00
reynisson
3b205d80a5 using the bidders auction house uri 2021-11-20 22:39:20 +01:00
reynisson
e3768280ac using https for auction house link 2021-11-20 22:09:07 +01:00
ronsei
f6c0495c04 ArchUnit test for Assignment 8 2021-11-18 18:13:49 +01:00
ronsei
c126c34972 First set tests for Assignment 8 2021-11-18 16:02:28 +01:00
reynisson
ef044f1cf1 Merge pull request #76 from SCS-ASSE-FS21-Group1/dev
bugfixes + env variables
2021-11-17 21:37:26 +01:00
6fe4b4dfbe bugfixes + env variables 2021-11-17 21:35:38 +01:00
reynisson
ee818c8149 Merge pull request #75 from SCS-ASSE-FS21-Group1/dev
deployment script & tasklist & executor fixes
2021-11-17 19:55:36 +01:00
9d75a87dd6 deployment script & tasklist & executor fixes 2021-11-17 19:54:04 +01:00
reynisson
47956d1ac6 Merge pull request #73 from SCS-ASSE-FS21-Group1/dev
fixed deployment script
2021-11-17 19:03:13 +01:00
6b8f5bf013 fixed deployment script 2021-11-17 19:00:52 +01:00
reynisson
1e917ec3e8 Merge pull request #72 from SCS-ASSE-FS21-Group1/dev
Bugfixes + Security fixes
2021-11-17 18:29:37 +01:00
reynisson
9d8e6cf216 Logging bids received 2021-11-17 18:29:08 +01:00
reynisson
96b0e82113 Merge remote-tracking branch 'origin/dev' into dev 2021-11-17 18:26:02 +01:00
reynisson
01b5056671 Testing out getting Environment variables from main 2021-11-17 18:25:49 +01:00
Andrei Ciortea
1d2c7103e4 Merge branch 'main' of github.com:SCS-ASSE-FS21-Tutors/tapas into main 2021-11-16 22:41:45 +01:00
Andrei Ciortea
292d30d1bd Add example for reading config properties in the main method for TapasAuctionHouse 2021-11-16 22:41:22 +01:00
add257fd5e updated dependencies to remove security issues 2021-11-16 21:20:26 +01:00
b6488fa36f added environment variables for uri's 2021-11-16 21:17:10 +01:00
reynisson
778535a437 Merge pull request #71 from SCS-ASSE-FS21-Group1/dev
fixes
2021-11-16 19:47:09 +01:00
084270bbb8 env changes 2021-11-16 19:43:46 +01:00
reynisson
1bc6ec6813 Small URI fixes 2021-11-16 19:41:56 +01:00
98c38b4fcf Merge pull request #65 from SCS-ASSE-FS21-Group1/dev
Dev
2021-11-16 19:13:59 +01:00
1d13b8e746 Merge pull request #69 from SCS-ASSE-FS21-Group1/change_mqtt_broker_in_roster
Changed mqtt broker address in roster
2021-11-16 19:11:49 +01:00
861cf82f67 Merge pull request #68 from SCS-ASSE-FS21-Group1/auction_house_external_communication
Implemented auctioning of tasks workflow in auction house
2021-11-16 19:11:16 +01:00
reynisson
7a0488df74 Merge pull request #70 from SCS-ASSE-FS21-Group1/improvments-for-plugfest
Fixes
2021-11-16 19:10:58 +01:00
44cc0929bd fixes 2021-11-16 19:09:38 +01:00
reynisson
5a2cc7a131 Changed mqtt broker address in roster 2021-11-16 18:53:06 +01:00
reynisson
e869fb9699 Bidding workflow 2021-11-16 18:51:39 +01:00
reynisson
33ab37c661 Merge pull request #67 from SCS-ASSE-FS21-Group1/renaming-executors
Renaming executors
2021-11-16 17:46:28 +01:00
reynisson
8fba9136b2 Implemented auctioning of tasks workflow in auction house 2021-11-16 17:42:14 +01:00
Peter Guhl
df53236853 Compiling and running tapas on a local docker installation (#10)
* First version building tasks and mongodb

* Creating container for local tests

* Initial version

* Added mongodb to correct network

* Added auction house

* Acknowledgments, Dockerfile for app

* Added app to build

* Dockerfile for app

* Mapped app port to localhost:8080

Co-authored-by: Ubuntu <ubuntu@docker-tests.novalocal>
2021-11-16 16:12:15 +01:00
bce3619638 renaming 2021-11-16 15:48:46 +01:00
247768b5d6 Merge pull request #66 from SCS-ASSE-FS21-Group1/v2bugfixes
fixed multiple bugs & updated cicd workflows
2021-11-16 14:32:23 +01:00
4c5da8eed6 fix naming 2021-11-15 12:03:25 +01:00
1c4da28480 fixed multiple bugs & updated cicd workflows 2021-11-15 11:59:27 +01:00
ronsei
343d33270a Extending the README.md for tapas-tasks with details on setting up MongoDB as DB for repositories. 2021-11-12 10:21:05 +01:00
Andrei Ciortea
430055a6ff Merge branch 'main' of github.com:SCS-ASSE-FS21-Tutors/tapas into main 2021-11-11 17:17:12 +01:00
Andrei Ciortea
3f4f2f4a1b Update BROKERS.md 2021-11-11 17:16:09 +01:00
Ronny Seiger
b4efa1ee54 Minor change in dbadmin URL 2021-11-11 11:47:28 +01:00
Ronny Seiger
f97188cdad Merge pull request #8 from SCS-ASSE-FS21-Tutors/db
Adding a MongoDB repository and extending the use cases for adding ne…
2021-11-11 11:37:41 +01:00
ronsei
32ecd6a5d4 Adding a MongoDB repository and extending the use cases for adding new tasks and retrieval of a new task 2021-11-11 11:32:10 +01:00
Ronny Seiger
fdb7d2bf64 Update build-and-deploy.yml
Removing old containers during deploy
2021-11-11 11:03:53 +01:00
163 changed files with 2753 additions and 426 deletions

View File

@@ -32,6 +32,8 @@ services:
     restart: unless-stopped
     volumes:
       - ./:/data/
+    environment:
+      roster.uri: http://roster:8082
     labels:
       - "traefik.enable=true"
       - "traefik.http.routers.tapas-tasks.rule=Host(`tapas-tasks.${PUB_IP}.nip.io`)"
@@ -47,6 +49,8 @@ services:
     restart: unless-stopped
     volumes:
       - ./:/data/
+    environment:
+      mqtt.broker.uri: tcp://broker.hivemq.com:1883
     labels:
       - "traefik.enable=true"
       - "traefik.http.routers.tapas-auction-house.rule=Host(`tapas-auction-house.${PUB_IP}.nip.io`)"
@@ -56,20 +60,25 @@ services:
       - "traefik.http.routers.tapas-auction-house.entryPoints=web,websecure"
       - "traefik.http.routers.tapas-auction-house.tls.certresolver=le"

-  assignment:
+  roster:
     image: openjdk
-    command: "java -jar /data/assignment-0.0.1-SNAPSHOT.jar"
+    command: "java -jar /data/roster-0.0.1-SNAPSHOT.jar"
     restart: unless-stopped
     volumes:
       - ./:/data/
+    environment:
+      task-list.uri: http://tapas-tasks:8081
+      executor-robot.uri: http://executor-robot:8084
+      executor-computation.uri: http://executor-computation:8085
+      mqtt.broker.uri: tcp://broker.hivemq.com:1883
     labels:
       - "traefik.enable=true"
-      - "traefik.http.routers.assignment.rule=Host(`assignment.${PUB_IP}.nip.io`)"
-      - "traefik.http.routers.assignment.service=assignment"
-      - "traefik.http.services.assignment.loadbalancer.server.port=8082"
-      - "traefik.http.routers.assignment.tls=true"
-      - "traefik.http.routers.assignment.entryPoints=web,websecure"
-      - "traefik.http.routers.assignment.tls.certresolver=le"
+      - "traefik.http.routers.roster.rule=Host(`roster.${PUB_IP}.nip.io`)"
+      - "traefik.http.routers.roster.service=roster"
+      - "traefik.http.services.roster.loadbalancer.server.port=8082"
+      - "traefik.http.routers.roster.tls=true"
+      - "traefik.http.routers.roster.entryPoints=web,websecure"
+      - "traefik.http.routers.roster.tls.certresolver=le"

   executor-pool:
     image: openjdk
@@ -77,6 +86,8 @@ services:
     restart: unless-stopped
     volumes:
       - ./:/data/
+    environment:
+      mqtt.broker.uri: tcp://broker.hivemq.com:1883
     labels:
       - "traefik.enable=true"
       - "traefik.http.routers.executor-pool.rule=Host(`executor-pool.${PUB_IP}.nip.io`)"
@@ -86,38 +97,43 @@ services:
       - "traefik.http.routers.executor-pool.entryPoints=web,websecure"
       - "traefik.http.routers.executor-pool.tls.certresolver=le"

-  executor1:
+  executor-computation:
     image: openjdk
-    command: "java -jar /data/executor1-0.0.1-SNAPSHOT.jar"
+    command: "java -jar /data/executor-computation-0.0.1-SNAPSHOT.jar"
     restart: unless-stopped
     depends_on:
       - executor-pool
-      - assignment
+      - roster
     volumes:
       - ./:/data/
+    environment:
+      executor_pool_uri: http://executor-pool:8083
+      roster_uri: http://roster:8082
     labels:
       - "traefik.enable=true"
-      - "traefik.http.routers.executor1.rule=Host(`executor1.${PUB_IP}.nip.io`)"
-      - "traefik.http.routers.executor1.service=executor1"
-      - "traefik.http.services.executor1.loadbalancer.server.port=8084"
-      - "traefik.http.routers.executor1.tls=true"
-      - "traefik.http.routers.executor1.entryPoints=web,websecure"
-      - "traefik.http.routers.executor1.tls.certresolver=le"
+      - "traefik.http.routers.executor-computation.rule=Host(`executor-computation.${PUB_IP}.nip.io`)"
+      - "traefik.http.routers.executor-computation.service=executor-computation"
+      - "traefik.http.services.executor-computation.loadbalancer.server.port=8084"
+      - "traefik.http.routers.executor-computation.tls=true"
+      - "traefik.http.routers.executor-computation.entryPoints=web,websecure"
+      - "traefik.http.routers.executor-computation.tls.certresolver=le"

-  executor2:
+  executor-robot:
     image: openjdk
-    command: "java -jar /data/executor2-0.0.1-SNAPSHOT.jar"
+    command: "java -jar /data/executor-robot-0.0.1-SNAPSHOT.jar"
     restart: unless-stopped
    depends_on:
       - executor-pool
-      - assignment
+      - roster
     volumes:
       - ./:/data/
+    environment:
+      executor_pool_uri: http://executor-pool:8083
+      roster_uri: http://roster:8082
     labels:
       - "traefik.enable=true"
-      - "traefik.http.routers.executor2.rule=Host(`executor2.${PUB_IP}.nip.io`)"
-      - "traefik.http.routers.executor2.service=executor2"
-      - "traefik.http.services.executor2.loadbalancer.server.port=8085"
-      - "traefik.http.routers.executor2.tls=true"
-      - "traefik.http.routers.executor2.entryPoints=web,websecure"
-      - "traefik.http.routers.executor2.tls.certresolver=le"
+      - "traefik.http.routers.executor-robot.rule=Host(`executor-robot.${PUB_IP}.nip.io`)"
+      - "traefik.http.routers.executor-robot.service=executor-robot"
+      - "traefik.http.services.executor-robot.loadbalancer.server.port=8085"
+      - "traefik.http.routers.executor-robot.tls=true"
+      - "traefik.http.routers.executor-robot.entryPoints=web,websecure"
+      - "traefik.http.routers.executor-robot.tls.certresolver=le"

View File

@@ -33,30 +33,33 @@ jobs:
          key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
          restore-keys: ${{ runner.os }}-m2
-      - name: Build with Maven
-        run: mvn -f assignment/pom.xml --batch-mode --update-snapshots verify
-      - run: cp ./assignment/target/assignment-0.0.1-SNAPSHOT.jar ./target
-      - name: Build with Maven
+      - name: Build common library
+        run: mvn -f common/pom.xml --batch-mode --update-snapshots install
+      - name: Build roster service
+        run: mvn -f roster/pom.xml --batch-mode --update-snapshots verify
+      - run: cp ./roster/target/roster-0.0.1-SNAPSHOT.jar ./target
+      - name: Build executor-pool service
        run: mvn -f executor-pool/pom.xml --batch-mode --update-snapshots verify
      - run: cp ./executor-pool/target/executor-pool-0.0.1-SNAPSHOT.jar ./target
-      - name: Build with Maven
+      - name: Build executor-base library
        run: mvn -f executor-base/pom.xml --batch-mode --update-snapshots install
-      - name: Build with Maven
-        run: mvn -f executor1/pom.xml --batch-mode --update-snapshots verify
-      - run: cp ./executor1/target/executor1-0.0.1-SNAPSHOT.jar ./target
-      - name: Build with Maven
-        run: mvn -f executor2/pom.xml --batch-mode --update-snapshots verify
-      - run: cp ./executor2/target/executor2-0.0.1-SNAPSHOT.jar ./target
-      - name: Build with Maven
+      - name: Build executor-computation service
+        run: mvn -f executor-computation/pom.xml --batch-mode --update-snapshots verify
+      - run: cp ./executor-computation/target/executor-computation-0.0.1-SNAPSHOT.jar ./target
+      - name: Build executor-robot service
+        run: mvn -f executor-robot/pom.xml --batch-mode --update-snapshots verify
+      - run: cp ./executor-robot/target/executor-robot-0.0.1-SNAPSHOT.jar ./target
+      - name: Build tapas-task service
        run: mvn -f tapas-tasks/pom.xml --batch-mode --update-snapshots verify
      - run: cp ./tapas-tasks/target/tapas-tasks-0.0.1-SNAPSHOT.jar ./target
-      - name: Build with Maven
+      - name: Build tapas-auction-house service
        run: mvn -f tapas-auction-house/pom.xml --batch-mode --update-snapshots verify
      - run: cp ./tapas-auction-house/target/tapas-auction-house-0.0.1-SNAPSHOT.jar ./target

View File

@@ -1,41 +0,0 @@
name: CI Assignment
on:
push:
branches: [main, dev]
paths:
- "assignment/**"
pull_request:
branches: [main, dev]
paths:
- "assignment/**"
workflow_dispatch:
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up JDK 11
uses: actions/setup-java@v1
with:
java-version: 11
- name: Cache SonarCloud packages
uses: actions/cache@v1
with:
path: ~/.sonar/cache
key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar
- name: Cache Maven packages
uses: actions/cache@v1
with:
path: ~/.m2
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
restore-keys: ${{ runner.os }}-m2
- name: Build and analyze
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
run: mvn -f assignment/pom.xml -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.projectKey=scs-asse-fs21-group1_tapas-assignment

View File

@@ -1,15 +1,17 @@
-name: CI Executor 2
+name: CI executor-computation
 on:
   push:
     branches: [main, dev]
     paths:
       - "executor-base/**"
-      - "executor2/**"
+      - "executor-computation/**"
+      - "common/**"
   pull_request:
     branches: [main, dev]
     paths:
       - "executor-base/**"
-      - "executor2/**"
+      - "executor-computation/**"
+      - "common/**"
   workflow_dispatch:
 jobs:
@@ -36,10 +38,12 @@ jobs:
          path: ~/.m2
          key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
          restore-keys: ${{ runner.os }}-m2
+      - name: Build common
+        run: mvn -f common/pom.xml -B install
       - name: Build executorBase
         run: mvn -f executor-base/pom.xml -B install
       - name: Build and analyze
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
-        run: mvn -f executor2/pom.xml -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.projectKey=scs-asse-fs21-group1_tapas-executor2
+        run: mvn -f executor-computation/pom.xml -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.projectKey=scs-asse-fs21-group1_tapas-executor2

View File

@@ -1,15 +1,17 @@
-name: CI Executor 1
+name: CI executor-robot
 on:
   push:
     branches: [main, dev]
     paths:
       - "executor-base/**"
-      - "executor1/**"
+      - "executor-robot/**"
+      - "common/**"
   pull_request:
     branches: [main, dev]
     paths:
       - "executor-base/**"
-      - "executor1/**"
+      - "executor-robot/**"
+      - "common/**"
   workflow_dispatch:
 jobs:
@@ -36,10 +38,12 @@ jobs:
          path: ~/.m2
          key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
          restore-keys: ${{ runner.os }}-m2
+      - name: Build executorBase
+        run: mvn -f common/pom.xml -B install
       - name: Build executorBase
         run: mvn -f executor-base/pom.xml -B install
       - name: Build and analyze
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
-        run: mvn -f executor1/pom.xml -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.projectKey=scs-asse-fs21-group1_tapas-executor1
+        run: mvn -f executor-robot/pom.xml -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.projectKey=scs-asse-fs21-group1_tapas-executor1

.github/workflows/ci.roster.yml (new file, +45 lines)
View File

@@ -0,0 +1,45 @@
name: CI Roster
on:
push:
branches: [main, dev]
paths:
- "roster/**"
- "common/**"
pull_request:
branches: [main, dev]
paths:
- "roster/**"
- "common/**"
workflow_dispatch:
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up JDK 11
uses: actions/setup-java@v1
with:
java-version: 11
- name: Cache SonarCloud packages
uses: actions/cache@v1
with:
path: ~/.sonar/cache
key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar
- name: Cache Maven packages
uses: actions/cache@v1
with:
path: ~/.m2
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
restore-keys: ${{ runner.os }}-m2
- name: Build common package
run: mvn -f common/pom.xml -B install
- name: Build and analyze
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
run: mvn -f roster/pom.xml -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.projectKey=scs-asse-fs21-group1_tapas-assignment

app/Dockerfile (new file, +13 lines)
View File

@@ -0,0 +1,13 @@
# Dockerfile/Docker-Compose file based on an initial version authored by Alexander Lontke (ASSE, Fall Semester 2021)
FROM maven as build
COPY . /app
RUN mvn -f app/pom.xml --batch-mode --update-snapshots verify
FROM openjdk
COPY --from=build /app/target/app-0.1.0.jar ./app-0.1.0.jar
CMD java -jar app-0.1.0.jar

View File

@@ -0,0 +1,23 @@
# 2. Separate service for Executors and Executor Pool
Date: 2021-11-21
## Status
Accepted
## Context
The executor pool keeps a complete list of all executors and knows whether they are available; executors can execute tasks that match their type. The executors could therefore be part of the executor pool service, or each executor could be a standalone service alongside a separate executor pool service.
## Decision
We will use a separate microservice for each executor and one service for the executor pool.
Having the executor pool and the executors as separate services would increase fault tolerance. If the executor pool goes down, the executors would stay online and execute their tasks without being affected by the executor pool's outage. Likewise, if an executor goes down it does not impact other executors or the executor pool.
Different executors can have different execution times and a different load. This means the executors scale differently. Thus, we need a separate service for each executor.
Executors of different kinds will also scale differently than the executor pool and new executors of new types might be added at some point, further increasing the need for separate services to guarantee scalability and evolvability.
New executors will be added/removed at runtime. Therefore, we need high extensibility.
## Consequences
Executors will be added/removed quite frequently, making the deployment of the system easier and less risk-prone if each executor is a separate service, also separated from the executor pool, which just keeps track of the executors and their status. However, with these separate services the complexity might increase, and the testability of the system will decrease.

View File

@@ -1,21 +0,0 @@
# 2. Separate service for Executors
Date: 2021-10-18
## Status
Accepted
## Context
The users need to be able to add new executors to the executor pool. The functionality of the executor is currently unknown.
## Decision
We will use a separate microservice for each executor.
New executors will be added/removed at runtime. Therefore, we need high extensibility.
Different executors can have different execution times and a different load. This means the executors scale differently.
## Consequences
Having each executor as its own service, we can deploy new executors independently, easily add new executors at runtime, and guarantee high scalability as well as evolvability.

View File

@@ -1,21 +0,0 @@
# 3. Separate service for the assignment domain
Date: 2021-10-18
## Status
Accepted
## Context
The Assignment Service handles the assignment of a task to a corresponding and available executor. It keeps track of all the connections between tasks and executors.
## Decision
The assignment domain will be its own service.
The assignment service will be a central point in our application. It will have most of the business logic in it and will communicate with all the different services. Therefore, other services can be kind of “dumb” and only need to focus on their simple tasks.
The code of the assignment service will change more often than the code of the other services; splitting the assignment service from the others thus makes it more deployable.
## Consequences
Having this domain as its own service reduces fault tolerance because the assignment service can become a single point of failure. We can mitigate this risk by implementing (server) replication and/or event-driven communication with persisted messages. All other services can then run independently, and the assignment service can recover from a crash.

View File

@@ -0,0 +1,21 @@
# 3. Separate service for the Roster
Date: 2021-11-21
## Status
Accepted
## Context
The roster acts as an orchestrator for the system. It communicates directly with the task list, the executors, the executor pool, and the auction house. It handles the assignment of a task to a corresponding and available executor, keeps track of all the connections between tasks and executors, and communicates the status of tasks and executors to other services.
## Decision
The Roster domain will be its own service.
The Roster service will be a central point in our application. It will have most of the workflow logic in it and will communicate with all the different services. Therefore, other services can focus on their business logic and be largely ignorant of the overall workflow.
The code of the roster will change more often than the code of the other services; splitting the roster service from the others thus makes it more deployable.
## Consequences
Having this domain as its own service will reduce fault tolerance because the roster service can become a single point of failure. We can mitigate this risk by implementing (server) replication and/or event-driven communication with persisted messages. All other services can then run independently, and the roster service can recover from a crash. Additionally, we need to ensure a high level of interoperability, since the roster has to communicate with all other parts of the system.

View File

@@ -1,21 +0,0 @@
# 4. Separate service for executor pool
Date: 2021-10-18
## Status
Accepted
## Context
The Executor pool keeps track of the connected executors and their purpose and status.
## Decision
We will have a separate service for the executor pool.
There are no other domains which share the same or similar functionality.
The executor pool also scales differently than other services.
## Consequences
Having the executor pool as a separate service will help with the deployability of this service but will make the overall structure more complex and reduce testability.

View File

@@ -0,0 +1,20 @@
# 4. Separate service for the Task List
Date: 2021-11-21
## Status
Accepted
## Context
Tasks are created in the task list, and the status of each task (created, assigned, executing, executed) is tracked there as well. The task list mainly communicates with the roster so that tasks can get assigned, and the roster gives the task list feedback about each task's status.
## Decision
The task list will be its own service.
The task list needs to scale based on the number of active users and the intensity of their activity at any time while the scaling of other parts of the system can be constrained by other factors.
## Consequences
Although having the task list as its own service might slightly increase the complexity of the system and decrease testability, it also makes the system easier to deploy and keeps its data well encapsulated. However, to ensure that this data is always available and does not get lost, the task list needs to be able to recover all its data (the entire history of all tasks) in case it goes down.

View File

@@ -4,7 +4,7 @@ Date: 2021-10-18
 ## Status
-Accepted
+Superseded by [8. Switch to an event-driven microservices architecture](0008-switch-to-an-event-driven-microservices-architecture.md)
 ## Context

View File

@@ -0,0 +1,22 @@
# 7. Separate service for Auction House
Date: 2021-11-21
## Status
Accepted
## Context
The auction house is the service that can connect to other groups' auction houses. If there is a task whose type does not match that of any of our executors, the auction house can start an auction where other groups can bid on doing the task for us. Moreover, it can also bid on other groups' auctions.
## Decision
The auction house will be its own service.
The auction house is the only part of our system that communicates externally; therefore, it makes sense to have it as its own service, also to guarantee better deployability.
The auction house does not scale directly with the number of tasks, but only with the proportion of tasks that need external executors. Moreover, there could be limits on the number of auctions that can be started. Therefore, the auction house scales differently from other services.
Moreover, having the auction house as its own service also improves the fault tolerance of our system.
## Consequences
Since the auction house will be a standalone service, we have to make sure that if it goes down, it can recover its data in some way (which auctions it has launched, which auctions it has placed bids on or even won, etc.). Even though the testability and latency of our system might worsen by having a separate service for the auction house, we can implement different kinds of communication for internal and external communication in a much easier way.

View File

@@ -0,0 +1,26 @@
# 8. Switch to an event-driven microservices architecture
Date: 2021-11-21
## Status
Proposed
Supersedes [5. Event driven communication](0005-event-driven-communication.md)
## Context
Our Tapas App is currently implemented based on a microservice architecture, where the services communicate synchronously via request-response. Each service encapsulates a different bounded context with different functional and non-functional requirements. Internal communication could also be done using asynchronous or event-driven communication.
## Decision
Pros:
Scalability: Different services within the Tapas app are not always able to scale at the same rate. For example, we could have thousands of users adding printing tasks at the same time, but maybe we only have one printer. In this scenario we might want to scale the task-list service up to handle the creation load, but scaling up the printing executor operates on a different time scale (i.e. adding a printer takes time). Moreover, we could have a lot of new tasks coming in, most of which can be executed internally. In this case we want to be able to scale up the task list but might not need to scale up the auction house. Event-driven communication would decrease the coupling of services. Consequently, the scalability of individual services would be enhanced as they no longer depend on the scalability of other services. This improves the app's overall scalability. Since scalability is one of the system's top three -ilities, this seems quite important.
Fault tolerance: Another of the system's top three -ilities is fault tolerance. We could have highly unstable IoT executors that fail often. This should not disrupt the system's overall operation. The decoupling facilitated by event-driven, asynchronous communication ensures that when individual services go down, the impact on other services is limited, and once they go back up they can recover the system's state from persisted messages.
Cons:
Error handling, workflow control, and event timing:
These topics outline the drawbacks of an event-driven architecture. They can be mitigated by using an orchestrator (as we currently do with the roster) to orchestrate the assignment of tasks, the auctioning off of tasks, and error handling when executors fail. More research is needed.
## Consequences
The consequences are to be determined but would relate to the three concerns mentioned as cons.
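To make the event-driven option more concrete, here is a minimal sketch of how an internal event could be published over MQTT using the Eclipse Paho client the project already depends on; the broker URI matches the one configured in the docker-compose changes earlier in this diff, while the topic name, class name, and JSON payload are hypothetical illustrations rather than the project's actual API.

import org.eclipse.paho.client.mqttv3.MqttClient;
import org.eclipse.paho.client.mqttv3.MqttException;
import org.eclipse.paho.client.mqttv3.MqttMessage;

// Hypothetical publisher illustrating event-driven communication with Paho MQTT.
public class ExecutionFinishedEventPublisher {

    // Broker URI as configured in docker-compose; the topic is an assumed example.
    private static final String BROKER_URI = "tcp://broker.hivemq.com:1883";
    private static final String TOPIC = "tapas/group1/executions/finished";

    public void publish(String taskId, String outputData) throws MqttException {
        MqttClient client = new MqttClient(BROKER_URI, MqttClient.generateClientId());
        client.connect();
        String payload = "{\"taskID\":\"" + taskId + "\",\"outputData\":\"" + outputData + "\"}";
        // Interested services (e.g. the roster or the task list) would subscribe to this topic
        // instead of being called synchronously over HTTP.
        client.publish(TOPIC, new MqttMessage(payload.getBytes()));
        client.disconnect();
    }
}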

View File

@@ -0,0 +1,19 @@
# 9. Common library for shared code
Date: 2021-11-21
## Status
Accepted
## Context
The numerous services that make up the Tapas app all have common, non-domain-specific functionality that can either be shared via a common library or replicated in each service.
## Decision
We will use a common library for shared code that does not change frequently but, if changed, would need to be changed everywhere.
## Consequences
Changes in the common code will most likely require multiple services to be redeployed. However, those services would most likely have had to be changed individually and redeployed anyway. Another consequence is that versioning becomes more complicated.

View File

@@ -0,0 +1,20 @@
# 10. Executor-base library
Date: 2021-11-21
## Status
Accepted
## Context
Executors all use the same logic to communicate with other services. This means that their code bases are nearly identical except for the code that implements their specific task execution. The executors' shared logic can either live in a shared library or be replicated across all executors.
## Decision
Since all executors use the same logic to communicate with other services, any change to this logic would have to be made for every executor. We will therefore use a shared library for executors, called executor-base. The library includes all the shared logic which every executor needs.
Having this shared logic in a separate library makes it easy to change the common logic in one place. The code sharing happens at compile time, which reduces the chance of runtime errors compared to other code-sharing approaches. If other people need to create executors, they can just reference the executor-base library and implement the actual execution part (see the sketch after this ADR). Therefore, they don't need to worry about the connection implementations to the Tapas system.
## Consequences
It becomes easier to change the way the executors communicate with the rest of the system. Moreover, changes are less risky as they only need to be implemented once. It also becomes easier for other teams within the organisation to create executors, as they can use the executor-base to implement the shared logic. However, using a shared library will increase the complexity of the executors. There also needs to be a clear approach to versioning. Lastly, by using this library we might be making assumptions about future executors that might not hold. For example, if we want to create more lightweight executors for IoT devices, we might need to create a separate base package (if the current one becomes too fat) so that those executors can stay lightweight and don't implement unused code.
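As referenced above, a minimal sketch of how a new executor could be built on top of executor-base, assuming the ExecutorBase and ExecutorType API visible in the diffs further below; the PRINT executor type and the package name are hypothetical.

package ch.unisg.executorprinter.executor.domain;

import ch.unisg.executorbase.executor.domain.ExecutorBase;
import ch.unisg.executorbase.executor.domain.ExecutorType;

// Hypothetical printer executor: only the task-specific execution logic is written here,
// while registration with the executor pool and fetching assignments are inherited from ExecutorBase.
public class PrinterExecutor extends ExecutorBase {

    // Assumes ExecutorType.PRINT exists; the repository currently only shows ADDITION.
    private static final PrinterExecutor executor = new PrinterExecutor(ExecutorType.PRINT);

    public static PrinterExecutor getExecutor() {
        return executor;
    }

    private PrinterExecutor(ExecutorType executorType) {
        super(executorType);
    }

    @Override
    protected String execution(String inputData) {
        // The actual task-specific work would go here.
        return "printed: " + inputData;
    }
}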

View File

@@ -0,0 +1,19 @@
# 11. Separation of the common and executor-base libraries
Date: 2021-11-21
## Status
Accepted
## Context
We have two code-sharing libraries, the executor-base and the common library. The executor-base implements shared logic that all executors need but other services don't. The common library has much more wide-reaching implementations, such as the SelfValidating class (a sketch follows after this ADR). These could form a single common library or two separate libraries.
## Decision
There will be separate libraries for common and executor-base. The libraries share different types of code and have different reasons to change. It would not make sense to pull the executors' shared code into every other service that only needs access to the other shared code. Services that use the code in the common library should not need to depend in any way on the executor-base.
## Consequences
Changes impact fewer services. However, this decision will increase the number of service dependencies and therefore increase complexity in managing those dependencies.
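For illustration, a sketch of what the SelfValidating class mentioned above could look like, following the standard Bean Validation pattern; the actual implementation in the common library may differ, and a Bean Validation provider (e.g. Hibernate Validator) is assumed to be on the classpath.

package ch.unisg.common.validation;

import java.util.Set;
import javax.validation.ConstraintViolation;
import javax.validation.ConstraintViolationException;
import javax.validation.Validation;
import javax.validation.Validator;

// Sketch of a self-validating base class: command objects extend it and call
// validateSelf() in their constructors to fail fast on invalid input.
public abstract class SelfValidating<T> {

    private final Validator validator =
        Validation.buildDefaultValidatorFactory().getValidator();

    @SuppressWarnings("unchecked")
    protected void validateSelf() {
        Set<ConstraintViolation<T>> violations = validator.validate((T) this);
        if (!violations.isEmpty()) {
            throw new ConstraintViolationException(violations);
        }
    }
}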

docker-compose-local.yml (new file, +60 lines)
View File

@@ -0,0 +1,60 @@
# Dockerfile/Docker-Compose file based on an initial version authored by Alexander Lontke (ASSE, Fall Semester 2021)
version: "3.7"
services:
app:
build:
context: ./app
dockerfile: Dockerfile
# Use environment variables instead of application.properties
environment:
- KEY=VALUE
ports: #Just needed when testing from outside the docker network
- "8080:8080"
networks:
- tapas-network
tapas-tasks:
build:
context: ./tapas-tasks
dockerfile: Dockerfile
# Use environment variables instead of application.properties
environment:
- KEY=VALUE
ports: #Just needed when testing from outside
- "8081:8081"
networks:
- tapas-network
tapas-auction-house:
build:
context: ./tapas-auction-house
dockerfile: Dockerfile
# Use environment variables instead of application.properties
environment:
- KEY=VALUE
ports: #Just needed when testing from outside
- "8082:8082"
networks:
- tapas-network
mongodb:
image: mongo
container_name: mongodb
restart: unless-stopped
environment:
MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: 8nP7s0a # Can not be changed again later on
volumes:
- database:/data/db
networks:
- tapas-network
#Volume for mongodb. One per server.
volumes:
database:
networks:
tapas-network:
driver: bridge

View File

@@ -9,9 +9,9 @@
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>ch.unisg</groupId>
-   <artifactId>executorbase</artifactId>
+   <artifactId>executor-base</artifactId>
    <version>0.0.1-SNAPSHOT</version>
-   <name>executorbase</name>
+   <name>executor-base</name>
    <description>Demo project for Spring Boot</description>
    <properties>
        <java.version>11</java.version>

View File

@@ -1,5 +1,7 @@
 package ch.unisg.executorbase.executor.adapter.in.web;

+import java.util.logging.Logger;
+
 import org.springframework.http.HttpHeaders;
 import org.springframework.http.HttpStatus;
 import org.springframework.http.ResponseEntity;
@@ -19,6 +21,8 @@ public class TaskAvailableController {
         this.taskAvailableUseCase = taskAvailableUseCase;
     }

+    Logger logger = Logger.getLogger(TaskAvailableController.class.getName());
+
     /**
     * Controller for notification about new events.
     * @return 200 OK
@@ -26,6 +30,8 @@ public class TaskAvailableController {
     @GetMapping(path = "/newtask/{taskType}", consumes = { "application/json" })
     public ResponseEntity<String> retrieveTaskFromTaskList(@PathVariable("taskType") String taskType) {

+        logger.info("New " + taskType + " available");
+
         if (ExecutorType.contains(taskType.toUpperCase())) {
             TaskAvailableCommand command = new TaskAvailableCommand(
                 ExecutorType.valueOf(taskType.toUpperCase()));

View File

@@ -9,15 +9,15 @@ import java.util.logging.Level;
 import java.util.logging.Logger;

 import org.json.JSONObject;
-import org.springframework.beans.factory.annotation.Value;

 import ch.unisg.executorbase.executor.application.port.out.ExecutionFinishedEventPort;
 import ch.unisg.executorbase.executor.domain.ExecutionFinishedEvent;

 public class ExecutionFinishedEventAdapter implements ExecutionFinishedEventPort {

-    @Value("${roster.url}")
-    String server;
+    String server = System.getenv("roster_uri") == null ?
+        "http://localhost:8082" : System.getenv("roster_uri");

     Logger logger = Logger.getLogger(ExecutionFinishedEventAdapter.class.getName());
@@ -30,7 +30,7 @@ public class ExecutionFinishedEventAdapter implements ExecutionFinishedEventPort
         String body = new JSONObject()
             .put("taskID", event.getTaskID())
-            .put("result", event.getResult())
+            .put("outputData", event.getOutputData())
             .put("status", event.getStatus())
             .toString();
@@ -41,6 +41,7 @@ public class ExecutionFinishedEventAdapter implements ExecutionFinishedEventPort
             .POST(HttpRequest.BodyPublishers.ofString(body))
             .build();

         try {
             client.send(request, HttpResponse.BodyHandlers.ofString());
         } catch (InterruptedException e) {
@@ -50,7 +51,7 @@ public class ExecutionFinishedEventAdapter implements ExecutionFinishedEventPort
             logger.log(Level.SEVERE, e.getLocalizedMessage(), e);
         }

-        logger.log(Level.INFO, "Finish execution event sent with result: {}", event.getResult());
+        logger.log(Level.INFO, "Finish execution event sent with result: {0}", event.getOutputData());
     }

View File

@@ -23,8 +23,8 @@ import org.json.JSONObject;
 @Primary
 public class GetAssignmentAdapter implements GetAssignmentPort {

-    @Value("${roster.url}")
-    String server;
+    String server = System.getenv("roster_uri") == null ?
+        "http://localhost:8082" : System.getenv("roster_uri");

     Logger logger = Logger.getLogger(GetAssignmentAdapter.class.getName());
@@ -51,12 +51,14 @@ public class GetAssignmentAdapter implements GetAssignmentPort {
         try {
             logger.info("Sending getAssignment Request");
             HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
-            logger.log(Level.INFO, "getAssignment request result:\n {}", response.body());
+            logger.log(Level.INFO, "getAssignment request result:\n {0}", response.body());
             if (response.body().equals("")) {
                 return null;
             }
             JSONObject responseBody = new JSONObject(response.body());
-            return new Task(responseBody.getString("taskID"), responseBody.getString("input"));
+            String inputData = responseBody.getString("inputData");
+            return new Task(responseBody.getString("taskID"), inputData);
         } catch (InterruptedException e) {
             logger.log(Level.SEVERE, e.getLocalizedMessage(), e);

View File

@@ -22,8 +22,8 @@ import ch.unisg.executorbase.executor.domain.ExecutorType;
 @Primary
 public class NotifyExecutorPoolAdapter implements NotifyExecutorPoolPort {

-    @Value("${executor-pool.url}")
-    String server;
+    String server = System.getenv("executor_pool_uri") == null ?
+        "http://localhost:8083" : System.getenv("executor_pool_uri");

     Logger logger = Logger.getLogger(NotifyExecutorPoolAdapter.class.getName());
@@ -36,7 +36,7 @@ public class NotifyExecutorPoolAdapter implements NotifyExecutorPoolPort {
         String body = new JSONObject()
             .put("executorTaskType", executorType)
-            .put("executorURI", executorURI.getValue())
+            .put("executorUri", executorURI.getValue())
             .toString();

         HttpClient client = HttpClient.newHttpClient();

View File

@@ -8,14 +8,14 @@ public class ExecutionFinishedEvent {
     private String taskID;

     @Getter
-    private String result;
+    private String outputData;

     @Getter
     private String status;

-    public ExecutionFinishedEvent(String taskID, String result, String status) {
+    public ExecutionFinishedEvent(String taskID, String outputData, String status) {
         this.taskID = taskID;
-        this.result = result;
+        this.outputData = outputData;
         this.status = status;
     }
 }

View File

@@ -1,5 +1,6 @@
 package ch.unisg.executorbase.executor.domain;

+import java.util.logging.Level;
 import java.util.logging.Logger;

 import ch.unisg.common.valueobject.ExecutorURI;
@@ -23,9 +24,8 @@ public abstract class ExecutorBase {
     @Getter
     private ExecutorStatus status;

-    // TODO Violation of the Dependency Inversion Principle?, but we havn't really got a better solutions to send a http request / access a service from a domain model
-    // TODO I guess we can implement the execution as a service but there still is the problem with the startup request.
-    // TODO I guess we can somehow autowire this but I don't know why it's not working :D
+    // TODO Violation of the Dependency Inversion Principle?,
+    // TODO do this with only services
     private final NotifyExecutorPoolPort notifyExecutorPoolPort = new NotifyExecutorPoolAdapter();
     private final NotifyExecutorPoolService notifyExecutorPoolService = new NotifyExecutorPoolService(notifyExecutorPoolPort);
     private final GetAssignmentPort getAssignmentPort = new GetAssignmentAdapter();
@@ -38,12 +38,14 @@ public abstract class ExecutorBase {
         this.status = ExecutorStatus.STARTING_UP;
         this.executorType = executorType;
         // TODO set this automaticly
-        this.executorURI = new ExecutorURI("localhost:8084");
+        this.executorURI = new ExecutorURI("http://localhost:8084");
+        // TODO do this in main
         // Notify executor-pool about existence. If executor-pools response is successfull start with getting an assignment, else shut down executor.
         if(!notifyExecutorPoolService.notifyExecutorPool(this.executorURI, this.executorType)) {
+            logger.log(Level.WARNING, "Executor could not connect to executor pool! Shuting down!");
             System.exit(0);
         } else {
+            logger.info("Executor conntected to executor pool");
             this.status = ExecutorStatus.IDLING;
             getAssignment();
         }
@@ -56,8 +58,10 @@ public abstract class ExecutorBase {
     public void getAssignment() {
         Task newTask = getAssignmentPort.getAssignment(this.getExecutorType(), this.getExecutorURI());
         if (newTask != null) {
+            logger.info("Executor got a new task");
             this.executeTask(newTask);
         } else {
+            logger.info("Executor got no new task");
             this.status = ExecutorStatus.IDLING;
         }
     }
@@ -70,11 +74,11 @@ public abstract class ExecutorBase {
         logger.info("Starting execution");
         this.status = ExecutorStatus.EXECUTING;

-        task.setResult(execution(task.getInput()));
+        task.setOutputData(execution(task.getInputData()));

         // TODO implement logic if execution was not successful
         executionFinishedEventPort.publishExecutionFinishedEvent(
-            new ExecutionFinishedEvent(task.getTaskID(), task.getResult(), "SUCCESS"));
+            new ExecutionFinishedEvent(task.getTaskID(), task.getOutputData(), "SUCCESS"));

         logger.info("Finish execution");
         getAssignment();
@@ -84,6 +88,6 @@ public abstract class ExecutorBase {
     * Implementation of the actual execution method of an executor
     * @return the execution result
     **/
-    protected abstract String execution(String... input);
+    protected abstract String execution(String input);
 }

View File

@@ -10,14 +10,16 @@ public class Task {
     @Getter
     @Setter
-    private String result;
+    private String outputData;

+    // TODO maybe create a value object for inputData so we can make sure it is in the right
+    // format.
     @Getter
-    private String[] input;
+    private String inputData;

-    public Task(String taskID, String... input) {
+    public Task(String taskID, String inputData) {
         this.taskID = taskID;
-        this.input = input;
+        this.inputData = inputData;
     }
 }

View File

@@ -1,6 +1,3 @@
 server.port=8081
 roster.url=http://127.0.0.1:8082
-executor-pool.url=http://127.0.0.1:8083
-executor1.url=http://127.0.0.1:8084
-executor2.url=http://127.0.0.1:8085
-task-list.url=http://127.0.0.1:8081
+executor.pool.url=http://127.0.0.1:8083

View File

@@ -9,9 +9,9 @@
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>ch.unisg</groupId>
-   <artifactId>executorcomputation</artifactId>
+   <artifactId>executor-computation</artifactId>
    <version>0.0.1-SNAPSHOT</version>
-   <name>executorcomputation</name>
+   <name>executor-computation</name>
    <description>Demo project for Spring Boot</description>
    <properties>
        <java.version>11</java.version>
@@ -42,7 +42,7 @@
        </dependency>
        <dependency>
            <groupId>ch.unisg</groupId>
-           <artifactId>executorbase</artifactId>
+           <artifactId>executor-base</artifactId>
            <version>0.0.1-SNAPSHOT</version>
            <scope>compile</scope>
        </dependency>

View File

@@ -0,0 +1,63 @@
package ch.unisg.executorcomputation.executor.domain;
import java.util.concurrent.TimeUnit;
import ch.unisg.executorbase.executor.domain.ExecutorBase;
import ch.unisg.executorbase.executor.domain.ExecutorType;
public class Executor extends ExecutorBase {
private static final Executor executor = new Executor(ExecutorType.ADDITION);
public static Executor getExecutor() {
return executor;
}
private Executor(ExecutorType executorType) {
super(executorType);
}
@Override
protected String execution(String inputData) {
String operator = "";
if (inputData.contains("+")) {
operator = "+";
} else if (inputData.contains("-")) {
operator = "-";
} else if (inputData.contains("*")) {
operator = "*";
} else {
return "invalid data";
}
double result = Double.NaN;
// try {
// TimeUnit.SECONDS.sleep(5);
// } catch (InterruptedException e) {
// e.printStackTrace();
// }
if (operator.equalsIgnoreCase("+")) {
String[] parts = inputData.split("\\+");
double a = Double.parseDouble(parts[0]);
double b = Double.parseDouble(parts[1]);
result = a + b;
} else if (operator.equalsIgnoreCase("*")) {
String[] parts = inputData.split("\\*");
double a = Double.parseDouble(parts[0]);
double b = Double.parseDouble(parts[1]);
result = a * b;
} else if (operator.equalsIgnoreCase("-")) {
String[] parts = inputData.split("-");
double a = Double.parseDouble(parts[0]);
double b = Double.parseDouble(parts[1]);
result = a - b;
}
return Double.toString(result);
}
}

View File

@@ -68,6 +68,15 @@
            <artifactId>org.eclipse.paho.client.mqttv3</artifactId>
            <version>1.2.5</version>
            <scope>compile</scope>
+       </dependency>
+       <dependency>
+           <groupId>org.springframework.data</groupId>
+           <artifactId>spring-data-mongodb</artifactId>
+           <version>3.2.6</version>
+       </dependency>
+       <dependency>
+           <groupId>org.springframework.boot</groupId>
+           <artifactId>spring-boot-starter-data-mongodb</artifactId>
        </dependency>
    </dependencies>

View File

@@ -0,0 +1,31 @@
package ch.unisg.executorpool.adapter.out.persistence.mongodb;
import java.net.URI;
import java.net.URISyntaxException;
import org.springframework.stereotype.Component;
import ch.unisg.executorpool.domain.ExecutorClass;
@Component
public class ExecutorMapper {
ExecutorClass mapToDomainEntity(MongoExecutorDocument executorClass) {
try {
return new ExecutorClass(
new ExecutorClass.ExecutorUri(new URI(executorClass.executorUri)),
new ExecutorClass.ExecutorTaskType(executorClass.executorTaskType)
);
} catch (URISyntaxException e) {
// TODO Auto-generated catch block
e.printStackTrace();
return null;
}
}
MongoExecutorDocument mapToMongoDocument(ExecutorClass executorClass) {
return new MongoExecutorDocument(executorClass.getExecutorUri().getValue().toString(),
executorClass.getExecutorTaskType().getValue()
);
}
}

View File

@@ -0,0 +1,40 @@
package ch.unisg.executorpool.adapter.out.persistence.mongodb;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import ch.unisg.executorpool.application.port.out.AddExecutorPort;
import ch.unisg.executorpool.application.port.out.LoadExecutorPort;
import ch.unisg.executorpool.application.port.out.RemoveExecutorPort;
import ch.unisg.executorpool.domain.ExecutorClass;
import lombok.RequiredArgsConstructor;
@Component
@RequiredArgsConstructor
public class ExecutorPersistenceAdapter implements AddExecutorPort, RemoveExecutorPort, LoadExecutorPort {
@Autowired
private final ExecutorRepository executorRepository;
private final ExecutorMapper executorMapper;
@Override
public void addExecutor(ExecutorClass executorClass) {
MongoExecutorDocument mongoExecutorDocument = executorMapper.mapToMongoDocument(executorClass);
executorRepository.save(mongoExecutorDocument);
}
@Override
public void removeExecutor(ExecutorClass executorClass) {
MongoExecutorDocument mongoExecutorDocument = executorMapper.mapToMongoDocument(executorClass);
executorRepository.delete(mongoExecutorDocument);
}
@Override
public ExecutorClass loadExecutor(ExecutorClass.ExecutorUri executorUri, ExecutorClass.ExecutorTaskType executorTaskType) {
MongoExecutorDocument mongoExecutorDocument = executorRepository.findByExecutorUri(executorUri.getValue().toString(), executorTaskType.getValue());
ExecutorClass executorClass = executorMapper.mapToDomainEntity(mongoExecutorDocument);
return executorClass;
}
}

View File

@@ -0,0 +1,15 @@
package ch.unisg.executorpool.adapter.out.persistence.mongodb;
import java.util.List;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
@Repository
public interface ExecutorRepository extends MongoRepository<MongoExecutorDocument, String> {
public MongoExecutorDocument findByExecutorUri(String executorUri, String executorTaskType);
public List<MongoExecutorDocument> findByExecutorTaskType(String executorTaskType);
}

View File

@@ -0,0 +1,16 @@
package ch.unisg.executorpool.adapter.out.persistence.mongodb;
import lombok.Data;
@Data
public class MongoExecutorDocument {
public String executorUri;
public String executorTaskType;
public MongoExecutorDocument(String executorUri, String executorTaskType) {
this.executorUri = executorUri;
this.executorTaskType = executorTaskType;
}
}

View File

@@ -0,0 +1,9 @@
package ch.unisg.executorpool.application.port.out;
import ch.unisg.executorpool.domain.ExecutorClass;
public interface AddExecutorPort {
void addExecutor(ExecutorClass executorClass);
}

View File

@@ -1,7 +1,6 @@
 package ch.unisg.executorpool.application.port.out;

 import ch.unisg.executorpool.domain.ExecutorAddedEvent;
-import org.eclipse.paho.client.mqttv3.MqttException;

 public interface ExecutorAddedEventPort {
     void publishExecutorAddedEvent(ExecutorAddedEvent event);
View File

@@ -0,0 +1,9 @@
package ch.unisg.executorpool.application.port.out;
import ch.unisg.executorpool.domain.ExecutorClass;
public interface LoadExecutorPort {
ExecutorClass loadExecutor(ExecutorClass.ExecutorUri executorUri, ExecutorClass.ExecutorTaskType executorTaskType);
}

View File

@@ -0,0 +1,9 @@
package ch.unisg.executorpool.application.port.out;
import ch.unisg.executorpool.domain.ExecutorClass;
public interface RemoveExecutorPort {
void removeExecutor(ExecutorClass executorClass);
}

View File

@@ -2,11 +2,11 @@ package ch.unisg.executorpool.application.service;

 import ch.unisg.executorpool.application.port.in.AddNewExecutorToExecutorPoolUseCase;
 import ch.unisg.executorpool.application.port.in.AddNewExecutorToExecutorPoolCommand;
+import ch.unisg.executorpool.application.port.out.AddExecutorPort;
 import ch.unisg.executorpool.application.port.out.ExecutorAddedEventPort;
 import ch.unisg.executorpool.domain.ExecutorAddedEvent;
 import ch.unisg.executorpool.domain.ExecutorClass;
 import ch.unisg.executorpool.domain.ExecutorPool;
-import lombok.RequiredArgsConstructor;
 import org.springframework.stereotype.Component;

 import javax.transaction.Transactional;
@@ -16,9 +16,11 @@ import javax.transaction.Transactional;
 public class AddNewExecutorToExecutorPoolService implements AddNewExecutorToExecutorPoolUseCase {

     private final ExecutorAddedEventPort executorAddedEventPort;
+    private final AddExecutorPort addExecutorToRepositoryPort;

-    public AddNewExecutorToExecutorPoolService(ExecutorAddedEventPort executorAddedEventPort){
+    public AddNewExecutorToExecutorPoolService(ExecutorAddedEventPort executorAddedEventPort, AddExecutorPort addExecutorToRepositoryPort){
         this.executorAddedEventPort = executorAddedEventPort;
+        this.addExecutorToRepositoryPort = addExecutorToRepositoryPort;
     }

     @Override
@@ -29,6 +31,8 @@ public class AddNewExecutorToExecutorPoolService implements AddNewExecutorToExec
         var executorAddedEvent = new ExecutorAddedEvent(newExecutor);
         executorAddedEventPort.publishExecutorAddedEvent(executorAddedEvent);

+        addExecutorToRepositoryPort.addExecutor(newExecutor);
+
         return newExecutor;
     }
 }

View File

@@ -3,10 +3,10 @@ package ch.unisg.executorpool.application.service;
 import ch.unisg.executorpool.application.port.in.RemoveExecutorFromExecutorPoolCommand;
 import ch.unisg.executorpool.application.port.in.RemoveExecutorFromExecutorPoolUseCase;
 import ch.unisg.executorpool.application.port.out.ExecutorRemovedEventPort;
+import ch.unisg.executorpool.application.port.out.RemoveExecutorPort;
 import ch.unisg.executorpool.domain.ExecutorClass;
 import ch.unisg.executorpool.domain.ExecutorPool;
 import ch.unisg.executorpool.domain.ExecutorRemovedEvent;
-import lombok.RequiredArgsConstructor;
 import org.springframework.stereotype.Component;
 import javax.transaction.Transactional;
@@ -17,9 +17,11 @@ import java.util.Optional;
 public class RemoveExecutorFromExecutorPoolService implements RemoveExecutorFromExecutorPoolUseCase {
     private final ExecutorRemovedEventPort executorRemovedEventPort;
+    private final RemoveExecutorPort removeExecutorFromRepositoryPort;
-    public RemoveExecutorFromExecutorPoolService(ExecutorRemovedEventPort executorRemovedEventPort){
+    public RemoveExecutorFromExecutorPoolService(ExecutorRemovedEventPort executorRemovedEventPort, RemoveExecutorPort removeExecutorFromRepositoryPort){
         this.executorRemovedEventPort = executorRemovedEventPort;
+        this.removeExecutorFromRepositoryPort = removeExecutorFromRepositoryPort;
     }
     @Override
@@ -32,6 +34,8 @@ public class RemoveExecutorFromExecutorPoolService implements RemoveExecutorFrom
             executorRemovedEventPort.publishExecutorRemovedEvent(executorRemovedEvent);
         }
+        removeExecutorFromRepositoryPort.removeExecutor(removedExecutor);
         return removedExecutor;
     }
 }

View File

@@ -1,6 +1,5 @@
 package ch.unisg.executorpool.domain;
-import ch.unisg.executorpool.domain.ExecutorClass;
 import lombok.Getter;
 public class ExecutorRemovedEvent {

View File

@@ -9,9 +9,9 @@
         <relativePath/> <!-- lookup parent from repository -->
     </parent>
     <groupId>ch.unisg</groupId>
-    <artifactId>executorrobot</artifactId>
+    <artifactId>executor-robot</artifactId>
     <version>0.0.1-SNAPSHOT</version>
-    <name>executorrobot</name>
+    <name>executor-robot</name>
     <description>Demo project for Spring Boot</description>
     <properties>
         <java.version>11</java.version>
@@ -42,7 +42,7 @@
         </dependency>
         <dependency>
             <groupId>ch.unisg</groupId>
-            <artifactId>executorbase</artifactId>
+            <artifactId>executor-base</artifactId>
             <version>0.0.1-SNAPSHOT</version>
         </dependency>
     </dependencies>

View File

@@ -28,7 +28,7 @@ public class Executor extends ExecutorBase {
     @Override
     protected
-    String execution(String... input) {
+    String execution(String input) {
         String key = userToRobotPort.userToRobot();
         try {

View File

@@ -1,46 +0,0 @@
package ch.unisg.executorcomputation.executor.domain;
import java.util.concurrent.TimeUnit;
import ch.unisg.executorbase.executor.domain.ExecutorBase;
import ch.unisg.executorbase.executor.domain.ExecutorType;
public class Executor extends ExecutorBase {
private static final Executor executor = new Executor(ExecutorType.ADDITION);
public static Executor getExecutor() {
return executor;
}
private Executor(ExecutorType executorType) {
super(executorType);
}
@Override
protected
String execution(String... input) {
double result = Double.NaN;
int a = Integer.parseInt(input[0]);
int b = Integer.parseInt(input[2]);
String operation = input[1];
try {
TimeUnit.SECONDS.sleep(20);
} catch (InterruptedException e) {
e.printStackTrace();
}
if (operation == "+") {
result = a + b;
} else if (operation == "*") {
result = a * b;
} else if (operation == "-") {
result = a - b;
}
return Double.toString(result);
}
}
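
Incidentally, the removed implementation compares the operator with ==, which tests reference identity in Java rather than string value and only appears to work when the strings are interned. For reference, a value-equality version of the same computation:

    public class ComputationExample {
        // Same arithmetic as the removed execution(...) body, but using value equality.
        // Assumes input[0] and input[2] are integer operands and input[1] is "+", "*" or "-".
        static String compute(String... input) {
            int a = Integer.parseInt(input[0]);
            int b = Integer.parseInt(input[2]);
            switch (input[1]) {
                case "+": return Double.toString(a + b);
                case "*": return Double.toString(a * b);
                case "-": return Double.toString(a - b);
                default:  return Double.toString(Double.NaN);
            }
        }
        public static void main(String[] args) {
            System.out.println(compute("6", "*", "7")); // 42.0
        }
    }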

View File

@@ -73,9 +73,13 @@
             <artifactId>common</artifactId>
             <version>0.0.1-SNAPSHOT</version>
         </dependency>
+        <dependency>
+            <groupId>org.springframework.data</groupId>
+            <artifactId>spring-data-mongodb</artifactId>
+        </dependency>
     </dependencies>
     <build>
         <plugins>

View File

@@ -1,13 +1,60 @@
 package ch.unisg.roster;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import ch.unisg.roster.roster.adapter.out.persistence.mongodb.RosterRepository;
+import ch.unisg.roster.roster.application.port.in.LoadRosterItemPort;
+import ch.unisg.roster.roster.domain.Roster;
+import ch.unisg.roster.roster.domain.RosterItem;
+import org.eclipse.paho.client.mqttv3.MqttException;
 import org.springframework.boot.SpringApplication;
 import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.core.env.ConfigurableEnvironment;
+import ch.unisg.roster.roster.adapter.common.clients.TapasMqttClient;
+import ch.unisg.roster.roster.adapter.in.messaging.mqtt.ExecutorEventMqttListener;
+import ch.unisg.roster.roster.adapter.in.messaging.mqtt.ExecutorEventsMqttDispatcher;
+import org.springframework.data.mongodb.repository.config.EnableMongoRepositories;
 @SpringBootApplication
+@EnableMongoRepositories(basePackageClasses = RosterRepository.class)
 public class RosterApplication {
+    static Logger logger = Logger.getLogger(RosterApplication.class.getName());
+    private static ConfigurableEnvironment ENVIRONMENT;
+    private static final LoadRosterItemPort loadRosterItemPort;
     public static void main(String[] args) {
-        SpringApplication.run(RosterApplication.class, args);
+        SpringApplication rosterApp = new SpringApplication(RosterApplication.class);
+        ENVIRONMENT = rosterApp.run(args).getEnvironment();
+        bootstrapMarketplaceWithMqtt();
+        initialiseRoster();
     }
+    /**
+     * Connects to an MQTT broker, presumably the one used by all TAPAS groups to communicate with
+     * one another
+     */
+    private static void bootstrapMarketplaceWithMqtt() {
+        String broker = ENVIRONMENT.getProperty("mqtt.broker.uri");
+        try {
+            ExecutorEventsMqttDispatcher dispatcher = new ExecutorEventsMqttDispatcher();
+            TapasMqttClient client = TapasMqttClient.getInstance(broker, dispatcher);
+            client.startReceivingMessages();
+        } catch (MqttException e) {
+            logger.log(Level.SEVERE, e.getMessage(), e);
+        }
+    }
+    private static void initialiseRoster(){
+        List<RosterItem> rosterItemList = loadRosterItemPort.loadAllRosterItems();
+        Roster.getInstance().initialiseRoster(rosterItemList);
+    }
 }
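
As added here, the static final loadRosterItemPort field is declared but never assigned, so initialiseRoster() cannot work as shown. One possible shape, a sketch only that assumes the port is exposed as a Spring bean (for example by the RosterPersistenceAdapter later in this diff), is to resolve it from the application context after startup:

    public static void main(String[] args) {
        SpringApplication rosterApp = new SpringApplication(RosterApplication.class);
        // run(...) returns the application context; keep it to look up beans and the environment.
        org.springframework.context.ConfigurableApplicationContext context = rosterApp.run(args);
        ENVIRONMENT = context.getEnvironment();
        bootstrapMarketplaceWithMqtt();
        // Resolve the persistence port from the context instead of a never-assigned static field.
        LoadRosterItemPort loadRosterItemPort = context.getBean(LoadRosterItemPort.class);
        Roster.getInstance().initialiseRoster(loadRosterItemPort.loadAllRosterItems());
    }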

View File

@@ -17,6 +17,9 @@ public class ExecutorAddedEventListenerMqttAdapter extends ExecutorEventMqttList
     @Override
     public boolean handleEvent(MqttMessage message) {
+        System.out.println("New Executor added!");
         String payload = new String(message.getPayload());
         try {
@@ -25,7 +28,7 @@ public class ExecutorAddedEventListenerMqttAdapter extends ExecutorEventMqttList
             JsonNode data = new ObjectMapper().readTree(payload);
             String taskType = data.get("executorTaskType").asText();
-            String executorId = data.get("executorURI").asText();
+            String executorId = data.get("executorUri").asText();
             ExecutorAddedEvent executorAddedEvent = new ExecutorAddedEvent(
                 new ExecutorURI(executorId),
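
For reference, the listener above now reads the URI from an executorUri field (previously executorURI). A minimal, illustrative example of the payload shape it expects, using the same Jackson calls as the adapter (the values are made up):

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class ExecutorAddedPayloadExample {
        public static void main(String[] args) throws Exception {
            // Field names taken from the adapter; the values are purely illustrative.
            String payload = "{\"executorTaskType\":\"COMPUTATION\",\"executorUri\":\"https://tapas.example.org/executors/1\"}";
            JsonNode data = new ObjectMapper().readTree(payload);
            System.out.println(data.get("executorTaskType").asText()); // COMPUTATION
            System.out.println(data.get("executorUri").asText());      // https://tapas.example.org/executors/1
        }
    }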

View File

@@ -16,8 +16,8 @@ public class ExecutorEventsMqttDispatcher {
     // TODO: Register here your topics and event listener adapters
     private void initRouter() {
-        router.put("ch/unisg/tapas-group-tutors/executors/added", new ExecutorAddedEventListenerMqttAdapter());
-        router.put("ch/unisg/tapas-group-tutors/executors/removed", new ExecutorRemovedEventListenerMqttAdapter());
+        router.put("ch/unisg/tapas/executors/added", new ExecutorAddedEventListenerMqttAdapter());
+        router.put("ch/unisg/tapas/executors/removed", new ExecutorRemovedEventListenerMqttAdapter());
     }
     /**

View File

@@ -23,7 +23,7 @@ public class ExecutorRemovedEventListenerMqttAdapter extends ExecutorEventMqttLi
             // representation that makes sense in the context of your application.
             JsonNode data = new ObjectMapper().readTree(payload);
-            String executorId = data.get("executorURI").asText();
+            String executorId = data.get("executorUri").asText();
             ExecutorRemovedEvent executorRemovedEvent = new ExecutorRemovedEvent(
                 new ExecutorURI(executorId));

View File

@@ -1,5 +1,7 @@
 package ch.unisg.roster.roster.adapter.in.web;
+import java.util.logging.Logger;
 import org.springframework.http.HttpStatus;
 import org.springframework.http.ResponseEntity;
 import org.springframework.web.bind.annotation.PostMapping;
@@ -18,6 +20,8 @@ public class NewTaskController {
         this.newTaskUseCase = newTaskUseCase;
     }
+    Logger logger = Logger.getLogger(NewTaskController.class.getName());
     /**
      * Controller which handles the new task event from the tasklist
      * @return 201 Create or 409 Conflict
@@ -25,10 +29,15 @@ public class NewTaskController {
     @PostMapping(path = "/task", consumes = {"application/task+json"})
     public ResponseEntity<Void> newTaskController(@RequestBody Task task) {
-        NewTaskCommand command = new NewTaskCommand(task.getTaskID(), task.getTaskType());
+        logger.info("New task with id:" + task.getTaskID());
+        NewTaskCommand command = new NewTaskCommand(task.getTaskID(), task.getTaskType(),
+            task.getInputData());
         boolean success = newTaskUseCase.addNewTaskToQueue(command);
+        logger.info("Could create task: " + success);
         if (success) {
             return new ResponseEntity<>(HttpStatus.CREATED);
         }
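
Since the controller above only accepts the custom media type application/task+json, a request must set that Content-Type explicitly. A short, illustrative client using the same java.net.http API that appears elsewhere in this diff; the host, port, and JSON field names are assumptions, only the path and media type come from the controller:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class NewTaskRequestExample {
        public static void main(String[] args) throws Exception {
            // Illustrative payload; the exact JSON property names of Task are not shown in this diff.
            String body = "{\"taskID\":\"task-1\",\"taskType\":\"COMPUTATION\",\"inputData\":\"1 + 2\"}";
            HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8082/task")) // host and port are assumptions
                .header("Content-Type", "application/task+json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
            HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode()); // 201 or 409 per the Javadoc above
        }
    }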

View File

@@ -25,9 +25,8 @@ public class TaskCompletedController {
      **/
     @PostMapping(path = "/task/completed", consumes = {"application/json"})
     public ResponseEntity<Void> addNewTaskTaskToTaskList(@RequestBody Task task) {
         TaskCompletedCommand command = new TaskCompletedCommand(task.getTaskID(),
-            task.getStatus(), task.getResult());
+            task.getStatus(), task.getOutputData());
         taskCompletedUseCase.taskCompleted(command);

View File

@@ -0,0 +1,21 @@
package ch.unisg.roster.roster.adapter.out.persistence.mongodb;
import lombok.Data;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
@Data
@Document(collection="roster")
public class MongoRosterDocument {
@Id
public String taskId;
public String taskType;
public String executorURI;
public MongoRosterDocument(String taskId, String taskType, String executorURI){
this.taskId = taskId;
this.taskType = taskType;
this.executorURI = executorURI;
}
}

View File

@@ -0,0 +1,23 @@
package ch.unisg.roster.roster.adapter.out.persistence.mongodb;
import ch.unisg.common.valueobject.ExecutorURI;
import ch.unisg.roster.roster.domain.RosterItem;
import org.springframework.stereotype.Component;
@Component
public class RosterMapper {
RosterItem maptoDomainEntity(MongoRosterDocument rosterItem) {
return new RosterItem(rosterItem.taskId, rosterItem.taskType,
new ExecutorURI(rosterItem.executorURI));
}
MongoRosterDocument mapToMongoDocument(RosterItem rosterItem){
return new MongoRosterDocument(
rosterItem.getTaskID(),
rosterItem.getTaskType(),
rosterItem.getExecutorURI().getValue().toString());
}
}

View File

@@ -0,0 +1,50 @@
package ch.unisg.roster.roster.adapter.out.persistence.mongodb;
import ch.unisg.roster.roster.application.port.in.AddRosterItemPort;
import ch.unisg.roster.roster.application.port.in.DeleteRosterItem;
import ch.unisg.roster.roster.application.port.in.LoadRosterItemPort;
import ch.unisg.roster.roster.domain.RosterItem;
import lombok.RequiredArgsConstructor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
@Component
@RequiredArgsConstructor
public class RosterPersistenceAdapter implements AddRosterItemPort, LoadRosterItemPort, DeleteRosterItem {
@Autowired
private final RosterRepository rosterRepository;
private final RosterMapper rosterMapper;
@Override
public void addRosterItem(RosterItem rosterItem){
MongoRosterDocument mongoRosterDocument = rosterMapper.mapToMongoDocument(rosterItem);
rosterRepository.save(mongoRosterDocument);
}
@Override
public RosterItem loadRosterItem(String taskId){
MongoRosterDocument mongoRosterDocument = rosterRepository.findByTaskId(taskId);
RosterItem rosterItem = rosterMapper.maptoDomainEntity(mongoRosterDocument);
return rosterItem;
}
@Override
public List<RosterItem> loadAllRosterItems(){
List<MongoRosterDocument> rosterList = rosterRepository.findAll();
List<RosterItem> rosterItemList = new ArrayList<>();
for(MongoRosterDocument rosterDocument : rosterList){
rosterItemList.add(rosterMapper.maptoDomainEntity(rosterDocument));
}
return rosterItemList;
}
@Override
public void deleteRosterItem(String taskId){
rosterRepository.deleteById(taskId);
}
}
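
A small note on the wiring above: with Lombok's @RequiredArgsConstructor the two final fields are already injected through the generated constructor, so the extra @Autowired on the field should not be needed. A slightly leaner, equivalent shape might look like this (sketch only, method bodies as in the diff above):

    @Component
    @RequiredArgsConstructor
    public class RosterPersistenceAdapter implements AddRosterItemPort, LoadRosterItemPort, DeleteRosterItem {
        // Both collaborators are injected via the Lombok-generated constructor.
        private final RosterRepository rosterRepository;
        private final RosterMapper rosterMapper;
        // ... method bodies unchanged from the diff above ...
    }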

View File

@@ -0,0 +1,9 @@
package ch.unisg.roster.roster.adapter.out.persistence.mongodb;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
@Repository
public interface RosterRepository extends MongoRepository<MongoRosterDocument,String>{
public MongoRosterDocument findByTaskId(String taskId);
}

View File

@@ -1,63 +0,0 @@
package ch.unisg.roster.roster.adapter.out.web;
import java.io.IOException;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.json.JSONArray;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Primary;
import org.springframework.http.HttpStatus;
import org.springframework.stereotype.Component;
import ch.unisg.roster.roster.application.port.out.GetAllExecutorInExecutorPoolByTypePort;
import ch.unisg.roster.roster.domain.valueobject.ExecutorType;
@Component
@Primary
public class GetAllExecutorInExecutorPoolByTypeAdapter implements GetAllExecutorInExecutorPoolByTypePort {
@Value("${executor-pool.url}")
private String server;
/**
* Requests all executors of the given type from the executor-pool and checks if one
* of this type is available.
* @return Whether an executor exist or not
**/
@Override
public boolean doesExecutorTypeExist(ExecutorType type) {
Logger logger = Logger.getLogger(PublishNewTaskEventAdapter.class.getName());
HttpClient client = HttpClient.newHttpClient();
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(server + "/executor-pool/GetAllExecutorInExecutorPoolByType/" + type.getValue()))
.header("Content-Type", "application/json")
.GET()
.build();
try {
HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
if (response.statusCode() == HttpStatus.OK.value()) {
JSONArray jsonArray = new JSONArray(response.body());
if (jsonArray.length() > 0) {
return true;
}
}
} catch (InterruptedException e) {
logger.log(Level.SEVERE, e.getLocalizedMessage(), e);
Thread.currentThread().interrupt();
} catch (IOException e) {
logger.log(Level.SEVERE, e.getLocalizedMessage(), e);
}
return false;
}
}

View File

@@ -19,10 +19,10 @@ import ch.unisg.roster.roster.domain.event.NewTaskEvent;
 @Primary
 public class PublishNewTaskEventAdapter implements NewTaskEventPort {
-    @Value("${executor1.url}")
+    @Value("${executor-robot.uri}")
     private String server;
-    @Value("${executor2.url}")
+    @Value("${executor-computation.uri}")
     private String server2;
     Logger logger = Logger.getLogger(PublishNewTaskEventAdapter.class.getName());
@@ -34,21 +34,21 @@ public class PublishNewTaskEventAdapter implements NewTaskEventPort {
     @Override
     public void publishNewTaskEvent(NewTaskEvent event) {
-        HttpClient client = HttpClient.newHttpClient();
-        HttpRequest request = HttpRequest.newBuilder()
-            .uri(URI.create(server + "/newtask/" + event.taskType.getValue()))
-            .GET()
-            .build();
-        try {
-            client.send(request, HttpResponse.BodyHandlers.ofString());
-        } catch (InterruptedException e) {
-            logger.log(Level.SEVERE, e.getLocalizedMessage(), e);
-            Thread.currentThread().interrupt();
-        } catch (IOException e) {
-            logger.log(Level.SEVERE, e.getLocalizedMessage(), e);
-        }
+        // HttpClient client = HttpClient.newHttpClient();
+        // HttpRequest request = HttpRequest.newBuilder()
+        //     .uri(URI.create(server + "/newtask/" + event.taskType.getValue()))
+        //     .GET()
+        //     .build();
+        // try {
+        //     client.send(request, HttpResponse.BodyHandlers.ofString());
+        // } catch (InterruptedException e) {
+        //     logger.log(Level.SEVERE, e.getLocalizedMessage(), e);
+        //     Thread.currentThread().interrupt();
+        // } catch (IOException e) {
+        //     logger.log(Level.SEVERE, e.getLocalizedMessage(), e);
+        // }
         HttpClient client2 = HttpClient.newHttpClient();
         HttpRequest request2 = HttpRequest.newBuilder()

View File

@@ -20,7 +20,7 @@ import ch.unisg.roster.roster.domain.event.TaskAssignedEvent;
 @Primary
 public class PublishTaskAssignedEventAdapter implements TaskAssignedEventPort {
-    @Value("${task-list.url}")
+    @Value("${task-list.uri}")
     private String server;
     Logger logger = Logger.getLogger(PublishTaskAssignedEventAdapter.class.getName());
@@ -39,7 +39,7 @@ public class PublishTaskAssignedEventAdapter implements TaskAssignedEventPort {
         HttpClient client = HttpClient.newHttpClient();
         HttpRequest request = HttpRequest.newBuilder()
             .uri(URI.create(server + "/tasks/assignTask"))
-            .header("Content-Type", "application/json")
+            .header("Content-Type", "application/task+json")
             .POST(HttpRequest.BodyPublishers.ofString(body))
             .build();

View File

@@ -20,7 +20,7 @@ import ch.unisg.roster.roster.domain.event.TaskCompletedEvent;
 @Primary
 public class PublishTaskCompletedEventAdapter implements TaskCompletedEventPort {
-    @Value("${task-list.url}")
+    @Value("${task-list.uri}")
     private String server;
     Logger logger = Logger.getLogger(PublishTaskCompletedEventAdapter.class.getName());
@@ -35,13 +35,13 @@ public class PublishTaskCompletedEventAdapter implements TaskCompletedEventPort
         String body = new JSONObject()
             .put("taskId", event.taskID)
             .put("status", event.status)
-            .put("taskResult", event.result)
+            .put("outputData", event.result)
             .toString();
         HttpClient client = HttpClient.newHttpClient();
         HttpRequest request = HttpRequest.newBuilder()
-            .uri(URI.create(server + "/tasks/completeTask"))
-            .header("Content-Type", "application/json")
+            .uri(URI.create(server + "/tasks/completeTask/"))
+            .header("Content-Type", "application/task+json")
             .POST(HttpRequest.BodyPublishers.ofString(body))
             .build();

View File

@@ -0,0 +1,9 @@
package ch.unisg.roster.roster.application.port.in;
import ch.unisg.roster.roster.domain.RosterItem;
public interface AddRosterItemPort {
void addRosterItem(RosterItem rosterItem);
}

View File

@@ -0,0 +1,8 @@
package ch.unisg.roster.roster.application.port.in;
import ch.unisg.roster.roster.domain.RosterItem;
public interface DeleteRosterItem {
void deleteRosterItem(String taskId);
}

View File

@@ -0,0 +1,12 @@
package ch.unisg.roster.roster.application.port.in;
import ch.unisg.roster.roster.domain.RosterItem;
import java.util.List;
public interface LoadRosterItemPort {
RosterItem loadRosterItem(String taskId);
List<RosterItem> loadAllRosterItems();
}

View File

@@ -17,9 +17,13 @@ public class NewTaskCommand extends SelfValidating<NewTaskCommand> {
     @NotNull
     private final ExecutorType taskType;
-    public NewTaskCommand(String taskID, ExecutorType taskType) {
+    @NotNull
+    private final String inputData;
+    public NewTaskCommand(String taskID, ExecutorType taskType, String inputData) {
         this.taskID = taskID;
         this.taskType = taskType;
+        this.inputData = inputData;
         this.validateSelf();
     }
 }

View File

@@ -1,13 +0,0 @@
package ch.unisg.roster.roster.application.port.out;
import ch.unisg.roster.roster.domain.valueobject.ExecutorType;
public interface GetAllExecutorInExecutorPoolByTypePort {
/**
* Checks if an executor with the given type exists in our executor pool
* @return boolean
**/
boolean doesExecutorTypeExist(ExecutorType type);
}

View File

@@ -2,6 +2,8 @@ package ch.unisg.roster.roster.application.service;
 import javax.transaction.Transactional;
+import ch.unisg.roster.roster.application.port.in.AddRosterItemPort;
+import ch.unisg.roster.roster.domain.RosterItem;
 import org.springframework.stereotype.Component;
 import ch.unisg.roster.roster.application.port.in.ApplyForTaskCommand;
@@ -18,6 +20,7 @@ import lombok.RequiredArgsConstructor;
 public class ApplyForTaskService implements ApplyForTaskUseCase {
     private final TaskAssignedEventPort taskAssignedEventPort;
+    private final AddRosterItemPort addRosterItemPort;
     /**
      * Checks if a task is available and assigns it to the executor. If task got assigned a task
@@ -31,6 +34,7 @@ public class ApplyForTaskService implements ApplyForTaskUseCase {
         if (task != null) {
             taskAssignedEventPort.publishTaskAssignedEvent(new TaskAssignedEvent(task.getTaskID()));
+            addRosterItemPort.addRosterItem(new RosterItem(task.getTaskID(), task.getTaskType().getValue(), command.getExecutorURI()));
         }
         return task;

View File

@@ -29,11 +29,12 @@ public class NewTaskService implements NewTaskUseCase {
     public boolean addNewTaskToQueue(NewTaskCommand command) {
         ExecutorRegistry executorRegistry = ExecutorRegistry.getInstance();
         if (!executorRegistry.containsTaskType(command.getTaskType())) {
             return false;
         }
-        Task task = new Task(command.getTaskID(), command.getTaskType());
+        Task task = new Task(command.getTaskID(), command.getTaskType(), command.getInputData());
         Roster.getInstance().addTaskToQueue(task);

View File

@@ -2,6 +2,7 @@ package ch.unisg.roster.roster.application.service;
 import javax.transaction.Transactional;
+import ch.unisg.roster.roster.application.port.in.DeleteRosterItem;
 import org.springframework.stereotype.Component;
 import ch.unisg.roster.roster.application.port.in.TaskCompletedCommand;
@@ -17,6 +18,7 @@ import lombok.RequiredArgsConstructor;
 public class TaskCompletedService implements TaskCompletedUseCase {
     private final TaskCompletedEventPort taskCompletedEventPort;
+    private final DeleteRosterItem deleteRosterItem;
     /**
      * Completes the task in the roster and publishes a task completed event.
@@ -26,6 +28,7 @@ public class TaskCompletedService implements TaskCompletedUseCase {
     public void taskCompleted(TaskCompletedCommand command) {
         Roster.getInstance().taskCompleted(command.getTaskID());
+        deleteRosterItem.deleteRosterItem(command.getTaskID());
         taskCompletedEventPort.publishTaskCompleted(new TaskCompletedEvent(command.getTaskID(),
             command.getTaskStatus(), command.getTaskResult()));

View File

@@ -3,6 +3,7 @@ package ch.unisg.roster.roster.domain;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.logging.Level;
 import java.util.logging.Logger;
@@ -83,4 +84,10 @@ public class Roster {
         return queues.get(taskType.getValue()).removeIf(task -> task.getTaskID().equalsIgnoreCase(taskID));
     }
+    public void initialiseRoster(List<RosterItem> rosterItemList){
+        for(RosterItem rosterItem : rosterItemList){
+            rosterMap.put(rosterItem.getTaskID(), rosterItem);
+        }
+    }
 }

Some files were not shown because too many files have changed in this diff.