diff --git a/.github/workflows/ci-run-tests.yml b/.github/workflows/ci-run-tests.yml
index e8e9fe324..7339c7299 100644
--- a/.github/workflows/ci-run-tests.yml
+++ b/.github/workflows/ci-run-tests.yml
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- 'main'
+ - 'feat/rfc'
paths:
- 'client/**'
- 'ietf/**'
diff --git a/.pnp.cjs b/.pnp.cjs
index 0269c96db..5fbe6b1a4 100644
--- a/.pnp.cjs
+++ b/.pnp.cjs
@@ -65,7 +65,7 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) {
["eslint-plugin-promise", "virtual:dc3fc578bfa5e06182a4d2be39ede0bc5b74940b1ffe0d70c26892ab140a4699787750fba175dc306292e80b4aa2c8c5f68c2a821e69b2c37e360c0dff36ff66#npm:6.1.1"],\
["eslint-plugin-vue", "virtual:dc3fc578bfa5e06182a4d2be39ede0bc5b74940b1ffe0d70c26892ab140a4699787750fba175dc306292e80b4aa2c8c5f68c2a821e69b2c37e360c0dff36ff66#npm:9.14.1"],\
["file-saver", "npm:2.0.5"],\
- ["highcharts", "npm:11.0.1"],\
+ ["highcharts", "npm:11.1.0"],\
["html-validate", "virtual:dc3fc578bfa5e06182a4d2be39ede0bc5b74940b1ffe0d70c26892ab140a4699787750fba175dc306292e80b4aa2c8c5f68c2a821e69b2c37e360c0dff36ff66#npm:7.18.1"],\
["ical.js", "npm:1.5.0"],\
["jquery", "npm:3.7.0"],\
@@ -84,7 +84,7 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) {
["pinia", "virtual:dc3fc578bfa5e06182a4d2be39ede0bc5b74940b1ffe0d70c26892ab140a4699787750fba175dc306292e80b4aa2c8c5f68c2a821e69b2c37e360c0dff36ff66#npm:2.1.3"],\
["pinia-plugin-persist", "virtual:dc3fc578bfa5e06182a4d2be39ede0bc5b74940b1ffe0d70c26892ab140a4699787750fba175dc306292e80b4aa2c8c5f68c2a821e69b2c37e360c0dff36ff66#npm:1.0.0"],\
["pug", "npm:3.0.2"],\
- ["sass", "npm:1.62.1"],\
+ ["sass", "npm:1.63.4"],\
["seedrandom", "npm:3.0.5"],\
["select2", "npm:4.1.0-rc.0"],\
["select2-bootstrap-5-theme", "npm:1.3.0"],\
@@ -5487,10 +5487,10 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) {
}]\
]],\
["highcharts", [\
- ["npm:11.0.1", {\
- "packageLocation": "./.yarn/cache/highcharts-npm-11.0.1-05a14e3887-773a7b8765.zip/node_modules/highcharts/",\
+ ["npm:11.1.0", {\
+ "packageLocation": "./.yarn/cache/highcharts-npm-11.1.0-0d42a04430-f9b8cdc38b.zip/node_modules/highcharts/",\
"packageDependencies": [\
- ["highcharts", "npm:11.0.1"]\
+ ["highcharts", "npm:11.1.0"]\
],\
"linkType": "HARD"\
}]\
@@ -7895,7 +7895,7 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) {
["eslint-plugin-promise", "virtual:dc3fc578bfa5e06182a4d2be39ede0bc5b74940b1ffe0d70c26892ab140a4699787750fba175dc306292e80b4aa2c8c5f68c2a821e69b2c37e360c0dff36ff66#npm:6.1.1"],\
["eslint-plugin-vue", "virtual:dc3fc578bfa5e06182a4d2be39ede0bc5b74940b1ffe0d70c26892ab140a4699787750fba175dc306292e80b4aa2c8c5f68c2a821e69b2c37e360c0dff36ff66#npm:9.14.1"],\
["file-saver", "npm:2.0.5"],\
- ["highcharts", "npm:11.0.1"],\
+ ["highcharts", "npm:11.1.0"],\
["html-validate", "virtual:dc3fc578bfa5e06182a4d2be39ede0bc5b74940b1ffe0d70c26892ab140a4699787750fba175dc306292e80b4aa2c8c5f68c2a821e69b2c37e360c0dff36ff66#npm:7.18.1"],\
["ical.js", "npm:1.5.0"],\
["jquery", "npm:3.7.0"],\
@@ -7914,7 +7914,7 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) {
["pinia", "virtual:dc3fc578bfa5e06182a4d2be39ede0bc5b74940b1ffe0d70c26892ab140a4699787750fba175dc306292e80b4aa2c8c5f68c2a821e69b2c37e360c0dff36ff66#npm:2.1.3"],\
["pinia-plugin-persist", "virtual:dc3fc578bfa5e06182a4d2be39ede0bc5b74940b1ffe0d70c26892ab140a4699787750fba175dc306292e80b4aa2c8c5f68c2a821e69b2c37e360c0dff36ff66#npm:1.0.0"],\
["pug", "npm:3.0.2"],\
- ["sass", "npm:1.62.1"],\
+ ["sass", "npm:1.63.4"],\
["seedrandom", "npm:3.0.5"],\
["select2", "npm:4.1.0-rc.0"],\
["select2-bootstrap-5-theme", "npm:1.3.0"],\
@@ -7998,10 +7998,10 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) {
],\
"linkType": "HARD"\
}],\
- ["npm:1.62.1", {\
- "packageLocation": "./.yarn/cache/sass-npm-1.62.1-c16d65fd28-1b1b3584b3.zip/node_modules/sass/",\
+ ["npm:1.63.4", {\
+ "packageLocation": "./.yarn/cache/sass-npm-1.63.4-bf5f3496c2-12bde5beff.zip/node_modules/sass/",\
"packageDependencies": [\
- ["sass", "npm:1.62.1"],\
+ ["sass", "npm:1.63.4"],\
["chokidar", "npm:3.5.3"],\
["immutable", "npm:4.0.0"],\
["source-map-js", "npm:1.0.2"]\
@@ -8716,7 +8716,7 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) {
["less", null],\
["postcss", "npm:8.4.23"],\
["rollup", "npm:3.21.6"],\
- ["sass", "npm:1.62.1"],\
+ ["sass", "npm:1.63.4"],\
["stylus", null],\
["sugarss", null],\
["terser", null]\
diff --git a/.yarn/cache/highcharts-npm-11.0.1-05a14e3887-773a7b8765.zip b/.yarn/cache/highcharts-npm-11.1.0-0d42a04430-f9b8cdc38b.zip
similarity index 70%
rename from .yarn/cache/highcharts-npm-11.0.1-05a14e3887-773a7b8765.zip
rename to .yarn/cache/highcharts-npm-11.1.0-0d42a04430-f9b8cdc38b.zip
index c953a6456..ccf9aece9 100644
Binary files a/.yarn/cache/highcharts-npm-11.0.1-05a14e3887-773a7b8765.zip and b/.yarn/cache/highcharts-npm-11.1.0-0d42a04430-f9b8cdc38b.zip differ
diff --git a/.yarn/cache/sass-npm-1.62.1-c16d65fd28-1b1b3584b3.zip b/.yarn/cache/sass-npm-1.62.1-c16d65fd28-1b1b3584b3.zip
deleted file mode 100644
index 745da7a17..000000000
Binary files a/.yarn/cache/sass-npm-1.62.1-c16d65fd28-1b1b3584b3.zip and /dev/null differ
diff --git a/.yarn/cache/sass-npm-1.63.4-bf5f3496c2-12bde5beff.zip b/.yarn/cache/sass-npm-1.63.4-bf5f3496c2-12bde5beff.zip
new file mode 100644
index 000000000..adadea953
Binary files /dev/null and b/.yarn/cache/sass-npm-1.63.4-bf5f3496c2-12bde5beff.zip differ
diff --git a/README.md b/README.md
index f4e4df7f9..25af704be 100644
--- a/README.md
+++ b/README.md
@@ -140,10 +140,10 @@ This will create packages under `ietf/static/dist-neue`, which are then served b
#### Parcel *(Legacy/jQuery)*
-The Datatracker includes these packages from the various Javascript and CSS files in `ietf/static/js` and `ietf/static/css`, respectively.
+The Datatracker includes these packages from the various Javascript and CSS files in `ietf/static/js` and `ietf/static/css`, respectively, bundled using Parcel.
Static images are likewise in `ietf/static/images`.
-Whenever changes are made to the files under `ietf/static`, you must re-run `parcel` to package them:
+Whenever changes are made to the files under `ietf/static`, you must re-run the build command to package them:
``` shell
yarn legacy:build
diff --git a/dev/coverage-action/package-lock.json b/dev/coverage-action/package-lock.json
index 4b9633e11..77373cc97 100644
--- a/dev/coverage-action/package-lock.json
+++ b/dev/coverage-action/package-lock.json
@@ -17,8 +17,8 @@
"luxon": "3.3.0"
},
"devDependencies": {
- "eslint": "8.41.0",
- "eslint-config-standard": "17.0.0",
+ "eslint": "8.42.0",
+ "eslint-config-standard": "17.1.0",
"eslint-plugin-import": "2.27.5",
"eslint-plugin-node": "11.1.0",
"eslint-plugin-promise": "6.1.1",
@@ -111,9 +111,9 @@
}
},
"node_modules/@eslint/js": {
- "version": "8.41.0",
- "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.41.0.tgz",
- "integrity": "sha512-LxcyMGxwmTh2lY9FwHPGWOHmYFCZvbrFCBZL4FzSSsxsRPuhrYUg/49/0KDfW8tnIEaEHtfmn6+NPN+1DqaNmA==",
+ "version": "8.42.0",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.42.0.tgz",
+ "integrity": "sha512-6SWlXpWU5AvId8Ac7zjzmIOqMOba/JWY8XZ4A7q7Gn1Vlfg/SFFIlrtHXt9nPn4op9ZPAkl91Jao+QQv3r/ukw==",
"dev": true,
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
@@ -126,9 +126,9 @@
"dev": true
},
"node_modules/@humanwhocodes/config-array": {
- "version": "0.11.8",
- "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz",
- "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==",
+ "version": "0.11.10",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.10.tgz",
+ "integrity": "sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ==",
"dev": true,
"dependencies": {
"@humanwhocodes/object-schema": "^1.2.1",
@@ -1718,16 +1718,16 @@
}
},
"node_modules/eslint": {
- "version": "8.41.0",
- "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.41.0.tgz",
- "integrity": "sha512-WQDQpzGBOP5IrXPo4Hc0814r4/v2rrIsB0rhT7jtunIalgg6gYXWhRMOejVO8yH21T/FGaxjmFjBMNqcIlmH1Q==",
+ "version": "8.42.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.42.0.tgz",
+ "integrity": "sha512-ulg9Ms6E1WPf67PHaEY4/6E2tEn5/f7FXGzr3t9cBMugOmf1INYvuUwwh1aXQN4MfJ6a5K2iNwP3w4AColvI9A==",
"dev": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.4.0",
"@eslint/eslintrc": "^2.0.3",
- "@eslint/js": "8.41.0",
- "@humanwhocodes/config-array": "^0.11.8",
+ "@eslint/js": "8.42.0",
+ "@humanwhocodes/config-array": "^0.11.10",
"@humanwhocodes/module-importer": "^1.0.1",
"@nodelib/fs.walk": "^1.2.8",
"ajv": "^6.10.0",
@@ -1774,9 +1774,9 @@
}
},
"node_modules/eslint-config-standard": {
- "version": "17.0.0",
- "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-17.0.0.tgz",
- "integrity": "sha512-/2ks1GKyqSOkH7JFvXJicu0iMpoojkwB+f5Du/1SC0PtBL+s8v30k9njRZ21pm2drKYm2342jFnGWzttxPmZVg==",
+ "version": "17.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-17.1.0.tgz",
+ "integrity": "sha512-IwHwmaBNtDK4zDHQukFDW5u/aTb8+meQWZvNFWkiGmbWjD6bqyuSSBxxXKkCftCUzc1zwCH2m/baCNDLGmuO5Q==",
"dev": true,
"funding": [
{
@@ -1792,10 +1792,13 @@
"url": "https://feross.org/support"
}
],
+ "engines": {
+ "node": ">=12.0.0"
+ },
"peerDependencies": {
"eslint": "^8.0.1",
"eslint-plugin-import": "^2.25.2",
- "eslint-plugin-n": "^15.0.0",
+ "eslint-plugin-n": "^15.0.0 || ^16.0.0 ",
"eslint-plugin-promise": "^6.0.0"
}
},
@@ -6326,9 +6329,9 @@
}
},
"@eslint/js": {
- "version": "8.41.0",
- "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.41.0.tgz",
- "integrity": "sha512-LxcyMGxwmTh2lY9FwHPGWOHmYFCZvbrFCBZL4FzSSsxsRPuhrYUg/49/0KDfW8tnIEaEHtfmn6+NPN+1DqaNmA==",
+ "version": "8.42.0",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.42.0.tgz",
+ "integrity": "sha512-6SWlXpWU5AvId8Ac7zjzmIOqMOba/JWY8XZ4A7q7Gn1Vlfg/SFFIlrtHXt9nPn4op9ZPAkl91Jao+QQv3r/ukw==",
"dev": true
},
"@gar/promisify": {
@@ -6338,9 +6341,9 @@
"dev": true
},
"@humanwhocodes/config-array": {
- "version": "0.11.8",
- "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz",
- "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==",
+ "version": "0.11.10",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.10.tgz",
+ "integrity": "sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ==",
"dev": true,
"requires": {
"@humanwhocodes/object-schema": "^1.2.1",
@@ -7531,16 +7534,16 @@
"dev": true
},
"eslint": {
- "version": "8.41.0",
- "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.41.0.tgz",
- "integrity": "sha512-WQDQpzGBOP5IrXPo4Hc0814r4/v2rrIsB0rhT7jtunIalgg6gYXWhRMOejVO8yH21T/FGaxjmFjBMNqcIlmH1Q==",
+ "version": "8.42.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.42.0.tgz",
+ "integrity": "sha512-ulg9Ms6E1WPf67PHaEY4/6E2tEn5/f7FXGzr3t9cBMugOmf1INYvuUwwh1aXQN4MfJ6a5K2iNwP3w4AColvI9A==",
"dev": true,
"requires": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.4.0",
"@eslint/eslintrc": "^2.0.3",
- "@eslint/js": "8.41.0",
- "@humanwhocodes/config-array": "^0.11.8",
+ "@eslint/js": "8.42.0",
+ "@humanwhocodes/config-array": "^0.11.10",
"@humanwhocodes/module-importer": "^1.0.1",
"@nodelib/fs.walk": "^1.2.8",
"ajv": "^6.10.0",
@@ -7589,9 +7592,9 @@
}
},
"eslint-config-standard": {
- "version": "17.0.0",
- "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-17.0.0.tgz",
- "integrity": "sha512-/2ks1GKyqSOkH7JFvXJicu0iMpoojkwB+f5Du/1SC0PtBL+s8v30k9njRZ21pm2drKYm2342jFnGWzttxPmZVg==",
+ "version": "17.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-17.1.0.tgz",
+ "integrity": "sha512-IwHwmaBNtDK4zDHQukFDW5u/aTb8+meQWZvNFWkiGmbWjD6bqyuSSBxxXKkCftCUzc1zwCH2m/baCNDLGmuO5Q==",
"dev": true,
"requires": {}
},
diff --git a/dev/coverage-action/package.json b/dev/coverage-action/package.json
index b05f079a6..37d84bf1c 100644
--- a/dev/coverage-action/package.json
+++ b/dev/coverage-action/package.json
@@ -14,8 +14,8 @@
"luxon": "3.3.0"
},
"devDependencies": {
- "eslint": "8.41.0",
- "eslint-config-standard": "17.0.0",
+ "eslint": "8.42.0",
+ "eslint-config-standard": "17.1.0",
"eslint-plugin-import": "2.27.5",
"eslint-plugin-node": "11.1.0",
"eslint-plugin-promise": "6.1.1",
diff --git a/dev/del-old-packages/package-lock.json b/dev/del-old-packages/package-lock.json
index 682d088ff..68268cbaa 100644
--- a/dev/del-old-packages/package-lock.json
+++ b/dev/del-old-packages/package-lock.json
@@ -9,7 +9,7 @@
"version": "1.0.0",
"license": "ISC",
"dependencies": {
- "@octokit/core": "^4.2.1",
+ "@octokit/core": "^4.2.4",
"luxon": "^3.3.0"
}
},
@@ -25,9 +25,9 @@
}
},
"node_modules/@octokit/core": {
- "version": "4.2.1",
- "resolved": "https://registry.npmjs.org/@octokit/core/-/core-4.2.1.tgz",
- "integrity": "sha512-tEDxFx8E38zF3gT7sSMDrT1tGumDgsw5yPG6BBh/X+5ClIQfMH/Yqocxz1PnHx6CHyF6pxmovUTOfZAUvQ0Lvw==",
+ "version": "4.2.4",
+ "resolved": "https://registry.npmjs.org/@octokit/core/-/core-4.2.4.tgz",
+ "integrity": "sha512-rYKilwgzQ7/imScn3M9/pFfUf4I1AZEH3KhyJmtPdE2zfaXAn2mFfUy4FbKewzc2We5y/LlKLj36fWJLKC2SIQ==",
"dependencies": {
"@octokit/auth-token": "^3.0.0",
"@octokit/graphql": "^5.0.0",
@@ -215,9 +215,9 @@
}
},
"@octokit/core": {
- "version": "4.2.1",
- "resolved": "https://registry.npmjs.org/@octokit/core/-/core-4.2.1.tgz",
- "integrity": "sha512-tEDxFx8E38zF3gT7sSMDrT1tGumDgsw5yPG6BBh/X+5ClIQfMH/Yqocxz1PnHx6CHyF6pxmovUTOfZAUvQ0Lvw==",
+ "version": "4.2.4",
+ "resolved": "https://registry.npmjs.org/@octokit/core/-/core-4.2.4.tgz",
+ "integrity": "sha512-rYKilwgzQ7/imScn3M9/pFfUf4I1AZEH3KhyJmtPdE2zfaXAn2mFfUy4FbKewzc2We5y/LlKLj36fWJLKC2SIQ==",
"requires": {
"@octokit/auth-token": "^3.0.0",
"@octokit/graphql": "^5.0.0",
diff --git a/dev/del-old-packages/package.json b/dev/del-old-packages/package.json
index 068b56961..97b0e02f6 100644
--- a/dev/del-old-packages/package.json
+++ b/dev/del-old-packages/package.json
@@ -10,7 +10,7 @@
"author": "",
"license": "ISC",
"dependencies": {
- "@octokit/core": "^4.2.1",
+ "@octokit/core": "^4.2.4",
"luxon": "^3.3.0"
}
}
diff --git a/docker/configs/nginx-proxy.conf b/docker/configs/nginx-proxy.conf
index d5681fb23..3068cc71d 100644
--- a/docker/configs/nginx-proxy.conf
+++ b/docker/configs/nginx-proxy.conf
@@ -1,6 +1,9 @@
server {
listen 8000 default_server;
listen [::]:8000 default_server;
+
+ proxy_read_timeout 1d;
+ proxy_send_timeout 1d;
root /var/www/html;
index index.html index.htm index.nginx-debian.html;
diff --git a/docker/scripts/app-init.sh b/docker/scripts/app-init.sh
index 73469ae20..e15aed38b 100755
--- a/docker/scripts/app-init.sh
+++ b/docker/scripts/app-init.sh
@@ -22,7 +22,6 @@ echo "Fix chromedriver /dev/shm permissions..."
sudo chmod 1777 /dev/shm
# Run nginx
-
echo "Starting nginx..."
sudo nginx
@@ -30,6 +29,9 @@ sudo nginx
echo "Compiling native node packages..."
yarn rebuild
+# Silence Browserslist warnings
+export BROWSERSLIST_IGNORE_OLD_DATA=1
+
# Generate static assets
echo "Building static assets... (this could take a minute or two)"
yarn build
diff --git a/ietf/api/__init__.py b/ietf/api/__init__.py
index b4c6203d9..54b4b7424 100644
--- a/ietf/api/__init__.py
+++ b/ietf/api/__init__.py
@@ -12,12 +12,11 @@ from django.core.exceptions import ObjectDoesNotExist
import debug # pyflakes:ignore
-import tastypie
import tastypie.resources
+import tastypie.serializers
from tastypie.api import Api
from tastypie.bundle import Bundle
from tastypie.exceptions import ApiFieldError
-from tastypie.serializers import Serializer # pyflakes:ignore (we're re-exporting this)
from tastypie.fields import ApiField
_api_list = []
@@ -152,3 +151,8 @@ class ToOneField(tastypie.fields.ToOneField):
dehydrated = self.dehydrate_related(fk_bundle, fk_resource, for_list=for_list)
fk_resource._meta.cache.set(cache_key, dehydrated)
return dehydrated
+
+
+class Serializer(tastypie.serializers.Serializer):
+ def format_datetime(self, data):
+ return data.astimezone(datetime.timezone.utc).replace(tzinfo=None).isoformat(timespec="seconds") + "Z"
diff --git a/ietf/api/tests.py b/ietf/api/tests.py
index ed635f69e..6f1b79532 100644
--- a/ietf/api/tests.py
+++ b/ietf/api/tests.py
@@ -964,11 +964,11 @@ class RfcdiffSupportTests(TestCase):
def do_rfc_test(self, draft_name):
draft = WgDraftFactory(name=draft_name, create_revisions=range(0,2))
- draft.docalias.create(name=f'rfc{self.next_rfc_number():04}')
+ rfc = WgRfcFactory(group=draft.group, rfc_number=self.next_rfc_number())
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc.docalias.first())
draft.set_state(State.objects.get(type_id='draft',slug='rfc'))
draft.set_state(State.objects.get(type_id='draft-iesg', slug='pub'))
- draft = reload_db_objects(draft)
- rfc = WgRfcFactory(group=draft.group) # todo link this with its pre-publication draft
+ draft, rfc = reload_db_objects(draft, rfc)
number = rfc.rfc_number
received = self.getJson(dict(name=number))
@@ -976,7 +976,7 @@ class RfcdiffSupportTests(TestCase):
received,
dict(
content_url=rfc.get_href(),
- name=rfc.canonical_name(),
+ name=rfc.name,
previous=f'{draft.name}-{draft.rev}',
previous_url= draft.history_set.get(rev=draft.rev).get_href(),
),
@@ -1016,11 +1016,11 @@ class RfcdiffSupportTests(TestCase):
def test_rfc_with_tombstone(self):
draft = WgDraftFactory(create_revisions=range(0,2))
- draft.docalias.create(name='rfc3261') # See views_doc.HAS_TOMBSTONE
+ rfc = WgRfcFactory(rfc_number=3261, group=draft.group)  # See views_doc.HAS_TOMBSTONE
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc.docalias.first())
draft.set_state(State.objects.get(type_id='draft',slug='rfc'))
draft.set_state(State.objects.get(type_id='draft-iesg', slug='pub'))
draft = reload_db_objects(draft)
- rfc = draft
# Some old rfcs had tombstones that shouldn't be used for comparisons
received = self.getJson(dict(name=rfc.canonical_name()))
@@ -1028,11 +1028,11 @@ class RfcdiffSupportTests(TestCase):
def do_rfc_with_broken_history_test(self, draft_name):
draft = WgDraftFactory(rev='10', name=draft_name)
- draft.docalias.create(name=f'rfc{self.next_rfc_number():04}')
+ rfc = WgRfcFactory(group=draft.group, rfc_number=self.next_rfc_number())
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc.docalias.first())
draft.set_state(State.objects.get(type_id='draft',slug='rfc'))
draft.set_state(State.objects.get(type_id='draft-iesg', slug='pub'))
draft = reload_db_objects(draft)
- rfc = draft
received = self.getJson(dict(name=draft.name))
self.assertEqual(
diff --git a/ietf/api/views.py b/ietf/api/views.py
index 477b662f0..1122ff9db 100644
--- a/ietf/api/views.py
+++ b/ietf/api/views.py
@@ -317,12 +317,9 @@ def get_previous_url(name, rev=None):
previous_url = ''
if condition in ('historic version', 'current version'):
doc = history if history else document
- if found_rev:
- doc.is_rfc = lambda: False
previous_url = doc.get_href()
elif condition == 'version dochistory not found':
document.rev = found_rev
- document.is_rfc = lambda: False
previous_url = document.get_href()
return previous_url
@@ -330,22 +327,32 @@ def get_previous_url(name, rev=None):
def rfcdiff_latest_json(request, name, rev=None):
response = dict()
condition, document, history, found_rev = find_doc_for_rfcdiff(name, rev)
-
+ if document.type_id == "rfc":
+ draft_alias = next(iter(document.related_that('became_rfc')), None)
if condition == 'no such document':
raise Http404
elif condition in ('historic version', 'current version'):
doc = history if history else document
- if not found_rev and doc.is_rfc():
- response['content_url'] = doc.get_href()
- response['name']=doc.canonical_name()
- if doc.name != doc.canonical_name():
+ if doc.type_id == "rfc":
+ response['content_url'] = doc.get_href()
+ response['name']=doc.name
+ if draft_alias:
+ draft = draft_alias.document
+ prev_rev = draft.rev
+ if doc.rfc_number in HAS_TOMBSTONE and prev_rev != '00':
+ prev_rev = f'{(int(draft.rev)-1):02d}'
+ response['previous'] = f'{draft.name}-{prev_rev}'
+ response['previous_url'] = get_previous_url(draft.name, prev_rev)
+ elif doc.type_id == "draft" and not found_rev and doc.relateddocument_set.filter(relationship_id="became_rfc").exists():
+ rfc = doc.related_that_doc("became_rfc")[0].document
+ response['content_url'] = rfc.get_href()
+ response['name']=rfc.name
prev_rev = doc.rev
- if doc.rfc_number in HAS_TOMBSTONE and prev_rev != '00':
+ if rfc.rfc_number in HAS_TOMBSTONE and prev_rev != '00':
prev_rev = f'{(int(doc.rev)-1):02d}'
response['previous'] = f'{doc.name}-{prev_rev}'
response['previous_url'] = get_previous_url(doc.name, prev_rev)
else:
- doc.is_rfc = lambda: False
response['content_url'] = doc.get_href()
response['rev'] = doc.rev
response['name'] = doc.name
@@ -371,7 +378,6 @@ def rfcdiff_latest_json(request, name, rev=None):
response['name'] = document.name
response['rev'] = found_rev
document.rev = found_rev
- document.is_rfc = lambda: False
response['content_url'] = document.get_href()
# not sure what to do if non-numeric values come back, so at least log it
log.assertion('found_rev.isdigit()')
diff --git a/ietf/doc/factories.py b/ietf/doc/factories.py
index 8c4f061f2..111fdbe93 100644
--- a/ietf/doc/factories.py
+++ b/ietf/doc/factories.py
@@ -116,7 +116,7 @@ class DocumentFactory(BaseDocumentFactory):
class RfcFactory(BaseDocumentFactory):
type_id = "rfc"
rfc_number = factory.Sequence(lambda n: n + 1000)
- name = factory.LazyAttribute(lambda o: f"rfc{o.rfc_number:04d}")
+ name = factory.LazyAttribute(lambda o: f"rfc{o.rfc_number:d}")
expires = None
@factory.post_generation
diff --git a/ietf/doc/feeds.py b/ietf/doc/feeds.py
index 92871efc3..4f49aec66 100644
--- a/ietf/doc/feeds.py
+++ b/ietf/doc/feeds.py
@@ -1,6 +1,7 @@
# Copyright The IETF Trust 2007-2020, All Rights Reserved
# -*- coding: utf-8 -*-
+import debug # pyflakes:ignore
import datetime
import unicodedata
@@ -8,8 +9,12 @@ import unicodedata
from django.contrib.syndication.views import Feed, FeedDoesNotExist
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
from django.urls import reverse as urlreverse
-from django.template.defaultfilters import truncatewords, truncatewords_html, date as datefilter
-from django.template.defaultfilters import linebreaks # type: ignore
+from django.template.defaultfilters import (
+ truncatewords,
+ truncatewords_html,
+ date as datefilter,
+)
+from django.template.defaultfilters import linebreaks # type: ignore
from django.utils import timezone
from django.utils.html import strip_tags
@@ -21,12 +26,12 @@ from ietf.utils.timezone import RPC_TZINFO
def strip_control_characters(s):
"""Remove Unicode control / non-printing characters from a string"""
- replacement_char = unicodedata.lookup('REPLACEMENT CHARACTER')
- return ''.join(
- replacement_char if unicodedata.category(c)[0] == 'C' else c
- for c in s
+ replacement_char = unicodedata.lookup("REPLACEMENT CHARACTER")
+ return "".join(
+ replacement_char if unicodedata.category(c)[0] == "C" else c for c in s
)
+
class DocumentChangesFeed(Feed):
feed_type = Atom1Feed
@@ -39,25 +44,37 @@ class DocumentChangesFeed(Feed):
def link(self, obj):
if obj is None:
raise FeedDoesNotExist
- return urlreverse('ietf.doc.views_doc.document_history', kwargs=dict(name=obj.canonical_name()))
+ return urlreverse(
+ "ietf.doc.views_doc.document_history",
+ kwargs=dict(name=obj.canonical_name()),
+ )
def subtitle(self, obj):
return "History of change entries for %s." % obj.display_name()
def items(self, obj):
- events = obj.docevent_set.all().order_by("-time","-id").select_related("by", "newrevisiondocevent", "submissiondocevent")
+ events = (
+ obj.docevent_set.all()
+ .order_by("-time", "-id")
+ .select_related("by", "newrevisiondocevent", "submissiondocevent")
+ )
augment_events_with_revision(obj, events)
return events
def item_title(self, item):
- return strip_control_characters("[%s] %s [rev. %s]" % (
- item.by,
- truncatewords(strip_tags(item.desc), 15),
- item.rev,
- ))
+ return strip_control_characters(
+ "[%s] %s [rev. %s]"
+ % (
+ item.by,
+ truncatewords(strip_tags(item.desc), 15),
+ item.rev,
+ )
+ )
def item_description(self, item):
- return strip_control_characters(truncatewords_html(format_textarea(item.desc), 20))
+ return strip_control_characters(
+ truncatewords_html(format_textarea(item.desc), 20)
+ )
def item_pubdate(self, item):
return item.time
@@ -66,17 +83,28 @@ class DocumentChangesFeed(Feed):
return str(item.by)
def item_link(self, item):
- return urlreverse('ietf.doc.views_doc.document_history', kwargs=dict(name=item.doc.canonical_name())) + "#history-%s" % item.pk
+ return (
+ urlreverse(
+ "ietf.doc.views_doc.document_history",
+ kwargs=dict(name=item.doc.canonical_name()),
+ )
+ + "#history-%s" % item.pk
+ )
+
class InLastCallFeed(Feed):
title = "Documents in Last Call"
subtitle = "Announcements for documents in last call."
feed_type = Atom1Feed
- author_name = 'IESG Secretary'
+ author_name = "IESG Secretary"
link = "/doc/iesg/last-call/"
def items(self):
- docs = list(Document.objects.filter(type="draft", states=State.objects.get(type="draft-iesg", slug="lc")))
+ docs = list(
+ Document.objects.filter(
+ type="draft", states=State.objects.get(type="draft-iesg", slug="lc")
+ )
+ )
for d in docs:
d.lc_event = d.latest_event(LastCallDocEvent, type="sent_last_call")
@@ -86,9 +114,11 @@ class InLastCallFeed(Feed):
return docs
def item_title(self, item):
- return "%s (%s - %s)" % (item.name,
- datefilter(item.lc_event.time, "F j"),
- datefilter(item.lc_event.expires, "F j, Y"))
+ return "%s (%s - %s)" % (
+ item.name,
+ datefilter(item.lc_event.time, "F j"),
+ datefilter(item.lc_event.expires, "F j, Y"),
+ )
def item_description(self, item):
return strip_control_characters(linebreaks(item.lc_event.desc))
@@ -96,33 +126,55 @@ class InLastCallFeed(Feed):
def item_pubdate(self, item):
return item.lc_event.time
+
class Rss201WithNamespacesFeed(Rss201rev2Feed):
def root_attributes(self):
attrs = super(Rss201WithNamespacesFeed, self).root_attributes()
- attrs['xmlns:dcterms'] = 'http://purl.org/dc/terms/'
- attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'
- attrs['xmlns:xsi'] = 'http://www.w3.org/2001/XMLSchema-instance'
+ attrs["xmlns:dcterms"] = "http://purl.org/dc/terms/"
+ attrs["xmlns:media"] = "http://search.yahoo.com/mrss/"
+ attrs["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
return attrs
def add_item_elements(self, handler, item):
super(Rss201WithNamespacesFeed, self).add_item_elements(handler, item)
- for element_name in ['abstract','accessRights', 'format', 'publisher',]:
- dc_item_name = 'dcterms_%s' % element_name
- dc_element_name = 'dcterms:%s' % element_name
- attrs= {'xsi:type':'dcterms:local'} if element_name == 'publisher' else {}
+ for element_name in [
+ "abstract",
+ "accessRights",
+ "format",
+ "publisher",
+ ]:
+ dc_item_name = "dcterms_%s" % element_name
+ dc_element_name = "dcterms:%s" % element_name
+ attrs = {"xsi:type": "dcterms:local"} if element_name == "publisher" else {}
if dc_item_name in item and item[dc_item_name] is not None:
- handler.addQuickElement(dc_element_name,item[dc_item_name],attrs)
+ handler.addQuickElement(dc_element_name, item[dc_item_name], attrs)
- if 'doi' in item and item['doi'] is not None:
- handler.addQuickElement('dcterms:identifier',item['doi'],{'xsi:type':'dcterms:doi'})
- if 'doiuri' in item and item['doiuri'] is not None:
- handler.addQuickElement('dcterms:identifier',item['doiuri'],{'xsi:type':'dcterms:uri'})
+ if "doi" in item and item["doi"] is not None:
+ handler.addQuickElement(
+ "dcterms:identifier", item["doi"], {"xsi:type": "dcterms:doi"}
+ )
+ if "doiuri" in item and item["doiuri"] is not None:
+ handler.addQuickElement(
+ "dcterms:identifier", item["doiuri"], {"xsi:type": "dcterms:uri"}
+ )
+
+ # TODO: consider using media:group
+ if "media_contents" in item and item["media_contents"] is not None:
+ for media_content in item["media_contents"]:
+ handler.startElement(
+ "media:content",
+ {
+ "url": media_content["url"],
+ "type": media_content["media_type"],
+ },
+ )
+ if "is_format_of" in media_content:
+ handler.addQuickElement(
+ "dcterms:isFormatOf", media_content["is_format_of"]
+ )
+ handler.endElement("media:content")
- if 'media_content' in item and item['media_content'] is not None:
- handler.startElement('media:content',{'url':item['media_content']['url'],'type':'text/plain'})
- handler.addQuickElement('dcterms:isFormatOf',item['media_content']['link_url'])
- handler.endElement('media:content')
class RfcFeed(Feed):
feed_type = Rss201WithNamespacesFeed
@@ -130,55 +182,96 @@ class RfcFeed(Feed):
author_name = "RFC Editor"
link = "https://www.rfc-editor.org/rfc-index2.html"
- def get_object(self,request,year=None):
+ def get_object(self, request, year=None):
self.year = year
-
+
def items(self):
if self.year:
# Find published RFCs based on their official publication year
start_of_year = datetime.datetime(int(self.year), 1, 1, tzinfo=RPC_TZINFO)
- start_of_next_year = datetime.datetime(int(self.year) + 1, 1, 1, tzinfo=RPC_TZINFO)
+ start_of_next_year = datetime.datetime(
+ int(self.year) + 1, 1, 1, tzinfo=RPC_TZINFO
+ )
rfc_events = DocEvent.objects.filter(
- type='published_rfc',
+ type="published_rfc",
time__gte=start_of_year,
time__lt=start_of_next_year,
- ).order_by('-time')
+ ).order_by("-time")
else:
cutoff = timezone.now() - datetime.timedelta(days=8)
- rfc_events = DocEvent.objects.filter(type='published_rfc',time__gte=cutoff).order_by('-time')
+ rfc_events = DocEvent.objects.filter(
+ type="published_rfc", time__gte=cutoff
+ ).order_by("-time")
results = [(e.doc, e.time) for e in rfc_events]
- for doc,time in results:
+ for doc, time in results:
doc.publication_time = time
- return [doc for doc,time in results]
-
+ return [doc for doc, time in results]
+
def item_title(self, item):
- return "%s : %s" % (item.canonical_name(),item.title)
+ return "%s : %s" % (item.canonical_name(), item.title)
def item_description(self, item):
return item.abstract
def item_link(self, item):
- return "https://rfc-editor.org/info/%s"%item.canonical_name()
+ return "https://rfc-editor.org/info/%s" % item.canonical_name()
def item_pubdate(self, item):
return item.publication_time
def item_extra_kwargs(self, item):
extra = super(RfcFeed, self).item_extra_kwargs(item)
- extra.update({'dcterms_accessRights': 'gratis'})
- extra.update({'dcterms_format': 'text/html'})
- extra.update({'media_content': {'url': 'https://rfc-editor.org/rfc/%s.txt' % item.canonical_name(),
- 'link_url': self.item_link(item)
- }
- })
- extra.update({'doi':'10.17487/%s' % item.canonical_name().upper()})
- extra.update({'doiuri':'http://dx.doi.org/10.17487/%s' % item.canonical_name().upper()})
+ extra.update({"dcterms_accessRights": "gratis"})
+ extra.update({"dcterms_format": "text/html"})
+ media_contents = []
+ if item.rfc_number < 8650:
+ if item.rfc_number not in [8, 9, 51, 418, 500, 530, 589]:
+ for fmt, media_type in [("txt", "text/plain"), ("html", "text/html")]:
+ media_contents.append(
+ {
+ "url": f"https://rfc-editor.org/rfc/{item.canonical_name()}.{fmt}",
+ "media_type": media_type,
+ "is_format_of": self.item_link(item),
+ }
+ )
+ if item.rfc_number not in [571, 587]:
+ media_contents.append(
+ {
+ "url": f"https://www.rfc-editor.org/rfc/pdfrfc/{item.canonical_name()}.txt.pdf",
+ "media_type": "application/pdf",
+ "is_format_of": self.item_link(item),
+ }
+ )
+ else:
+ media_contents.append(
+ {
+ "url": f"https://www.rfc-editor.org/rfc/{item.canonical_name()}.xml",
+ "media_type": "application/rfc+xml",
+ }
+ )
+ for fmt, media_type in [
+ ("txt", "text/plain"),
+ ("html", "text/html"),
+ ("pdf", "application/pdf"),
+ ]:
+ media_contents.append(
+ {
+ "url": f"https://rfc-editor.org/rfc/{item.canonical_name()}.{fmt}",
+ "media_type": media_type,
+ "is_format_of": f"https://www.rfc-editor.org/rfc/{item.canonical_name()}.xml",
+ }
+ )
+ extra.update({"media_contents": media_contents})
+
+ extra.update({"doi": "10.17487/%s" % item.canonical_name().upper()})
+ extra.update(
+ {"doiuri": "http://dx.doi.org/10.17487/%s" % item.canonical_name().upper()}
+ )
- #TODO
# R104 Publisher (Mandatory - but we need a string from them first)
- extra.update({'dcterms_publisher':'rfc-editor.org'})
+ extra.update({"dcterms_publisher": "rfc-editor.org"})
- #TODO MAYBE (Optional stuff)
+ # TODO MAYBE (Optional stuff)
# R108 License
# R115 Creator/Contributor (which would we use?)
# F305 Checksum (do they use it?) (or should we put the our digital signature in here somewhere?)
@@ -188,4 +281,3 @@ class RfcFeed(Feed):
# R118 Keyword
return extra
-
diff --git a/ietf/doc/forms.py b/ietf/doc/forms.py
index 7d04ffa81..b91e38531 100644
--- a/ietf/doc/forms.py
+++ b/ietf/doc/forms.py
@@ -148,7 +148,7 @@ class AddDownrefForm(forms.Form):
raise forms.ValidationError("Please provide a referenced RFC and a referencing Internet-Draft")
rfc = self.cleaned_data['rfc']
- if not rfc.is_rfc():
+ if rfc.document.type_id != "rfc":
raise forms.ValidationError("Cannot find the RFC: " + rfc.name)
return rfc
diff --git a/ietf/doc/management/commands/generate_draft_aliases.py b/ietf/doc/management/commands/generate_draft_aliases.py
index 88f4aa98c..9d62cf527 100755
--- a/ietf/doc/management/commands/generate_draft_aliases.py
+++ b/ietf/doc/management/commands/generate_draft_aliases.py
@@ -24,6 +24,7 @@ from ietf.doc.models import Document
from ietf.group.utils import get_group_role_emails, get_group_ad_emails
from ietf.utils.aliases import dump_sublist
from utils.mail import parseaddr
+from ietf.utils import log
DEFAULT_YEARS = 2
@@ -120,16 +121,19 @@ class Command(BaseCommand):
vfile.write("%s anything\n" % settings.DRAFT_VIRTUAL_DOMAIN)
# Internet-Drafts with active status or expired within DEFAULT_YEARS
- drafts = Document.objects.filter(name__startswith='draft-')
+ drafts = Document.objects.filter(type_id="draft")
active_drafts = drafts.filter(states__slug='active')
inactive_recent_drafts = drafts.exclude(states__slug='active').filter(expires__gte=show_since)
interesting_drafts = active_drafts | inactive_recent_drafts
alias_domains = ['ietf.org', ]
for draft in interesting_drafts.distinct().iterator():
- # Omit RFCs, unless they were published in the last DEFAULT_YEARS
- if draft.docalias.filter(name__startswith='rfc'):
- if draft.latest_event(type='published_rfc').time < show_since:
+ # Omit drafts that became RFCs, unless they were published in the last DEFAULT_YEARS
+ if draft.get_state_slug()=="rfc":
+ rfc_alias = next(iter(draft.related_that_doc("became_rfc")), None)
+ log.assertion("rfc_alias is not None")
+ rfc = rfc_alias.document
+ if rfc.latest_event(type='published_rfc').time < show_since:
continue
alias = draft.name
diff --git a/ietf/doc/migrations/0007_create_rfc_documents.py b/ietf/doc/migrations/0007_create_rfc_documents.py
index 1faee3169..c11dcc291 100644
--- a/ietf/doc/migrations/0007_create_rfc_documents.py
+++ b/ietf/doc/migrations/0007_create_rfc_documents.py
@@ -24,7 +24,7 @@ def forward(apps, schema_editor):
assert set(found_by_name) == set(found_by_state), "mismatch between rfcs identified by state and docalias"
# As of 2023-06-15, there is one Document with two rfc aliases: rfc6312 and rfc6342 are the same Document. This
- # was due to a publication error. We'll handle that specially.
+ # was due to a publication error. Because we go alias-by-alias, no special handling is needed in this migration.
for rfc_alias in rfc_docaliases.order_by("name"):
assert rfc_alias.docs.count() == 1, f"DocAlias {rfc_alias} is linked to more than 1 Document"
@@ -40,11 +40,15 @@ def forward(apps, schema_editor):
type=rfc_doctype,
name=rfc_alias.name,
rfc_number=int(rfc_alias.name[3:]),
+ time=draft.time,
title=draft.title,
+ stream=draft.stream,
+ group=draft.group,
abstract=draft.abstract,
pages=draft.pages,
words=draft.words,
std_level=draft.std_level,
+ ad=draft.ad,
external_url=draft.external_url,
uploaded_filename=draft.uploaded_filename,
note=draft.note,
diff --git a/ietf/doc/migrations/0009_move_rfc_docaliases.py b/ietf/doc/migrations/0009_move_rfc_docaliases.py
deleted file mode 100644
index 824c9d80f..000000000
--- a/ietf/doc/migrations/0009_move_rfc_docaliases.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Generated by Django 4.2.2 on 2023-06-20 18:36
-
-from django.db import migrations
-
-
-def forward(apps, schema_editor):
- """Point "rfc..." DocAliases at the rfc-type Document"""
- DocAlias = apps.get_model("doc", "DocAlias")
- Document = apps.get_model("doc", "Document")
- for rfc_alias in DocAlias.objects.filter(name__startswith="rfc"):
- rfc = Document.objects.get(name=rfc_alias.name)
- rfc_alias.docs.set([rfc])
-
-
-class Migration(migrations.Migration):
- dependencies = [
- ("doc", "0008_move_rfc_docevents"),
- ]
-
- operations = [
- migrations.RunPython(forward),
- ]
diff --git a/ietf/doc/migrations/0009_rfc_relateddocuments.py b/ietf/doc/migrations/0009_rfc_relateddocuments.py
new file mode 100644
index 000000000..a00de56d6
--- /dev/null
+++ b/ietf/doc/migrations/0009_rfc_relateddocuments.py
@@ -0,0 +1,45 @@
+# Generated by Django 4.2.3 on 2023-07-05 22:40
+
+from django.db import migrations
+
+
+def forward(apps, schema_editor):
+ DocAlias = apps.get_model("doc", "DocAlias")
+ Document = apps.get_model("doc", "Document")
+ RelatedDocument = apps.get_model("doc", "RelatedDocument")
+ for rfc_alias in DocAlias.objects.filter(name__startswith="rfc").exclude(
+ docs__type__slug="rfc"
+ ):
+ # Move these over to the RFC
+ RelatedDocument.objects.filter(
+ relationship__slug__in=(
+ "tobcp",
+ "toexp",
+ "tohist",
+ "toinf",
+ "tois",
+ "tops",
+ "obs",
+ "updates",
+ ),
+ source__docalias=rfc_alias,
+ ).update(source=Document.objects.get(name=rfc_alias.name))
+ # Duplicate references on the RFC but keep the ones on the draft as well
+ originals = list(
+ RelatedDocument.objects.filter(
+ relationship__slug__in=("refinfo", "refnorm", "refold", "refunk"),
+ source__docalias=rfc_alias,
+ )
+ )
+ for o in originals:
+ o.pk = None
+ o.source = Document.objects.get(name=rfc_alias.name)
+ RelatedDocument.objects.bulk_create(originals)
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("doc", "0008_move_rfc_docevents"),
+ ]
+
+ operations = [migrations.RunPython(forward)]
diff --git a/ietf/doc/migrations/0010_move_rfc_docaliases.py b/ietf/doc/migrations/0010_move_rfc_docaliases.py
new file mode 100644
index 000000000..af12c26a1
--- /dev/null
+++ b/ietf/doc/migrations/0010_move_rfc_docaliases.py
@@ -0,0 +1,38 @@
+# Generated by Django 4.2.2 on 2023-06-20 18:36
+
+from django.db import migrations
+
+
+def forward(apps, schema_editor):
+ """Point "rfc..." DocAliases at the rfc-type Document
+
+ Creates a became_rfc RelatedDocument to preserve the connection between the draft and the rfc.
+ """
+ DocAlias = apps.get_model("doc", "DocAlias")
+ Document = apps.get_model("doc", "Document")
+ RelatedDocument = apps.get_model("doc", "RelatedDocument")
+
+ for rfc_alias in DocAlias.objects.filter(name__startswith="rfc"):
+ rfc = Document.objects.get(name=rfc_alias.name)
+ aliased_doc = rfc_alias.docs.get() # implicitly confirms only one value in rfc_alias.docs
+ if aliased_doc != rfc:
+ # If the DocAlias was not already pointing at the rfc, it was pointing at the draft
+ # it came from. Create the relationship between draft and rfc Documents.
+ assert aliased_doc.type_id == "draft", f"Alias for {rfc.name} should be pointing at a draft"
+ RelatedDocument.objects.create(
+ source=aliased_doc,
+ target=rfc_alias,
+ relationship_id="became_rfc",
+ )
+ # Now move the alias from the draft to the rfc
+ rfc_alias.docs.set([rfc])
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("doc", "0009_rfc_relateddocuments"),
+ ]
+
+ operations = [
+ migrations.RunPython(forward),
+ ]
diff --git a/ietf/doc/models.py b/ietf/doc/models.py
index 0772fb014..cf666bfb3 100644
--- a/ietf/doc/models.py
+++ b/ietf/doc/models.py
@@ -137,18 +137,17 @@ class DocumentInfo(models.Model):
def get_file_path(self):
if not hasattr(self, '_cached_file_path'):
- if self.type_id == "draft":
+ if self.type_id == "rfc":
+ self._cached_file_path = settings.RFC_PATH
+ elif self.type_id == "draft":
if self.is_dochistory():
self._cached_file_path = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR
else:
- if self.get_state_slug() == "rfc":
- self._cached_file_path = settings.RFC_PATH
+ draft_state = self.get_state('draft')
+ if draft_state and draft_state.slug == 'active':
+ self._cached_file_path = settings.INTERNET_DRAFT_PATH
else:
- draft_state = self.get_state('draft')
- if draft_state and draft_state.slug == 'active':
- self._cached_file_path = settings.INTERNET_DRAFT_PATH
- else:
- self._cached_file_path = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR
+ self._cached_file_path = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR
elif self.meeting_related() and self.type_id in (
"agenda", "minutes", "slides", "bluesheets", "procmaterials", "chatlog", "polls"
):
@@ -173,14 +172,13 @@ class DocumentInfo(models.Model):
if not hasattr(self, '_cached_base_name'):
if self.uploaded_filename:
self._cached_base_name = self.uploaded_filename
+ elif self.type_id == 'rfc':
+ self._cached_base_name = "%s.txt" % self.canonical_name()
elif self.type_id == 'draft':
if self.is_dochistory():
self._cached_base_name = "%s-%s.txt" % (self.doc.name, self.rev)
else:
- if self.get_state_slug() == 'rfc':
- self._cached_base_name = "%s.txt" % self.canonical_name()
- else:
- self._cached_base_name = "%s-%s.txt" % (self.name, self.rev)
+ self._cached_base_name = "%s-%s.txt" % (self.name, self.rev)
elif self.type_id in ["slides", "agenda", "minutes", "bluesheets", "procmaterials", ] and self.meeting_related():
ext = 'pdf' if self.type_id == 'procmaterials' else 'txt'
self._cached_base_name = f'{self.canonical_name()}-{self.rev}.{ext}'
@@ -245,7 +243,7 @@ class DocumentInfo(models.Model):
format = settings.DOC_HREFS[self.type_id]
elif self.type_id in settings.DOC_HREFS:
self.is_meeting_related = False
- if self.is_rfc():
+ if self.type_id == "rfc":
format = settings.DOC_HREFS['rfc']
else:
format = settings.DOC_HREFS[self.type_id]
@@ -348,10 +346,9 @@ class DocumentInfo(models.Model):
iesg_state_summary = iesg_state_summary + "::"+"::".join(tag.name for tag in iesg_substate)
if state.slug == "rfc":
- # todo check this once became-rfc relationships are actually created
- rfcs = self.related_that("became-rfc") # should be only one
+ rfcs = self.related_that_doc("became_rfc") # should be only one
if len(rfcs) > 0:
- rfc = rfcs[0]
+ rfc = rfcs[0].document
return f"Became RFC {rfc.rfc_number} ({rfc.std_level})"
else:
return "Became RFC"
@@ -384,9 +381,6 @@ class DocumentInfo(models.Model):
else:
return state.name
- def is_rfc(self):
- return self.type_id == "rfc"
-
def author_list(self):
best_addresses = []
for author in self.documentauthor_set.all():
@@ -646,10 +640,20 @@ class DocumentInfo(models.Model):
return self.relations_that_doc(('refnorm','refinfo','refunk','refold'))
def referenced_by(self):
- return self.relations_that(('refnorm','refinfo','refunk','refold')).filter(source__states__type__slug='draft',source__states__slug__in=['rfc','active'])
-
+ return self.relations_that(("refnorm", "refinfo", "refunk", "refold")).filter(
+ models.Q(
+ source__type__slug="draft",
+ source__states__type__slug="draft",
+ source__states__slug="active",
+ )
+ | models.Q(source__type__slug="rfc")
+ )
+
+
def referenced_by_rfcs(self):
- return self.relations_that(('refnorm','refinfo','refunk','refold')).filter(source__states__type__slug='draft',source__states__slug='rfc')
+ return self.relations_that(("refnorm", "refinfo", "refunk", "refold")).filter(
+ source__type__slug="rfc"
+ )
class Meta:
abstract = True
@@ -681,7 +685,7 @@ class RelatedDocument(models.Model):
if source_lvl not in ['bcp','ps','ds','std']:
return None
- if self.target.get_state().slug == 'rfc':
+ if self.target.type_id == 'rfc':
if not self.target.std_level:
target_lvl = 'unkn'
else:
@@ -704,8 +708,8 @@ class RelatedDocument(models.Model):
def is_approved_downref(self):
- if self.target.get_state().slug == 'rfc':
- if RelatedDocument.objects.filter(relationship_id='downref-approval', target=self.target):
+ if self.target.type_id == 'rfc':
+ if RelatedDocument.objects.filter(relationship_id='downref-approval', target=self.target).exists():
return "Approved Downref"
return False
@@ -1002,7 +1006,7 @@ class Document(DocumentInfo):
This is the rfc publication date for RFCs, and the new-revision date for other documents.
"""
- if self.get_state_slug() == "rfc":
+ if self.type_id == "rfc":
# As of Sept 2022, in ietf.sync.rfceditor.update_docs_from_rfc_index() `published_rfc` events are
# created with a timestamp whose date *in the PST8PDT timezone* is the official publication date
# assigned by the RFC editor.
diff --git a/ietf/doc/templatetags/ietf_filters.py b/ietf/doc/templatetags/ietf_filters.py
index c8257de00..e22912f95 100644
--- a/ietf/doc/templatetags/ietf_filters.py
+++ b/ietf/doc/templatetags/ietf_filters.py
@@ -556,7 +556,7 @@ def consensus(doc):
@register.filter
def std_level_to_label_format(doc):
"""Returns valid Bootstrap classes to label a status level badge."""
- if doc.is_rfc():
+ if doc.type_id == "rfc":
if doc.related_that("obs"):
return "obs"
else:
diff --git a/ietf/doc/tests.py b/ietf/doc/tests.py
index 38f34698b..112803f76 100644
--- a/ietf/doc/tests.py
+++ b/ietf/doc/tests.py
@@ -619,7 +619,6 @@ Man Expires September 22, 2015 [Page 3]
def test_document_draft(self):
draft = WgDraftFactory(name='draft-ietf-mars-test',rev='01', create_revisions=range(0,2))
-
HolderIprDisclosureFactory(docs=[draft])
# Docs for testing relationships. Does not test 'possibly-replaces'. The 'replaced_by' direction
@@ -635,7 +634,7 @@ Man Expires September 22, 2015 [Page 3]
updated_by = IndividualDraftFactory()
updated_by.relateddocument_set.create(relationship_id='updates',source=obsoleted_by,target=draft)
- external_resource = DocExtResourceFactory(doc=draft)
+ DocExtResourceFactory(doc=draft)
# these tests aren't testing all attributes yet, feel free to
# expand them
@@ -648,16 +647,6 @@ Man Expires September 22, 2015 [Page 3]
self.assertNotContains(r, "Deimos street")
self.assertContains(r, replaced.canonical_name())
self.assertContains(r, replaced.title)
- # obs/updates not included until draft is RFC
- self.assertNotContains(r, obsoleted.canonical_name())
- self.assertNotContains(r, obsoleted.title)
- self.assertNotContains(r, obsoleted_by.canonical_name())
- self.assertNotContains(r, obsoleted_by.title)
- self.assertNotContains(r, updated.canonical_name())
- self.assertNotContains(r, updated.title)
- self.assertNotContains(r, updated_by.canonical_name())
- self.assertNotContains(r, updated_by.title)
- self.assertContains(r, external_resource.value)
r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name)) + "?include_text=0")
self.assertEqual(r.status_code, 200)
@@ -666,15 +655,6 @@ Man Expires September 22, 2015 [Page 3]
self.assertNotContains(r, "Deimos street")
self.assertContains(r, replaced.canonical_name())
self.assertContains(r, replaced.title)
- # obs/updates not included until draft is RFC
- self.assertNotContains(r, obsoleted.canonical_name())
- self.assertNotContains(r, obsoleted.title)
- self.assertNotContains(r, obsoleted_by.canonical_name())
- self.assertNotContains(r, obsoleted_by.title)
- self.assertNotContains(r, updated.canonical_name())
- self.assertNotContains(r, updated.title)
- self.assertNotContains(r, updated_by.canonical_name())
- self.assertNotContains(r, updated_by.title)
r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name)) + "?include_text=foo")
self.assertEqual(r.status_code, 200)
@@ -683,15 +663,6 @@ Man Expires September 22, 2015 [Page 3]
self.assertContains(r, "Deimos street")
self.assertContains(r, replaced.canonical_name())
self.assertContains(r, replaced.title)
- # obs/updates not included until draft is RFC
- self.assertNotContains(r, obsoleted.canonical_name())
- self.assertNotContains(r, obsoleted.title)
- self.assertNotContains(r, obsoleted_by.canonical_name())
- self.assertNotContains(r, obsoleted_by.title)
- self.assertNotContains(r, updated.canonical_name())
- self.assertNotContains(r, updated.title)
- self.assertNotContains(r, updated_by.canonical_name())
- self.assertNotContains(r, updated_by.title)
r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name)) + "?include_text=1")
self.assertEqual(r.status_code, 200)
@@ -700,15 +671,6 @@ Man Expires September 22, 2015 [Page 3]
self.assertContains(r, "Deimos street")
self.assertContains(r, replaced.canonical_name())
self.assertContains(r, replaced.title)
- # obs/updates not included until draft is RFC
- self.assertNotContains(r, obsoleted.canonical_name())
- self.assertNotContains(r, obsoleted.title)
- self.assertNotContains(r, obsoleted_by.canonical_name())
- self.assertNotContains(r, obsoleted_by.title)
- self.assertNotContains(r, updated.canonical_name())
- self.assertNotContains(r, updated.title)
- self.assertNotContains(r, updated_by.canonical_name())
- self.assertNotContains(r, updated_by.title)
self.client.cookies = SimpleCookie({str('full_draft'): str('on')})
r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name)))
@@ -718,15 +680,6 @@ Man Expires September 22, 2015 [Page 3]
self.assertContains(r, "Deimos street")
self.assertContains(r, replaced.canonical_name())
self.assertContains(r, replaced.title)
- # obs/updates not included until draft is RFC
- self.assertNotContains(r, obsoleted.canonical_name())
- self.assertNotContains(r, obsoleted.title)
- self.assertNotContains(r, obsoleted_by.canonical_name())
- self.assertNotContains(r, obsoleted_by.title)
- self.assertNotContains(r, updated.canonical_name())
- self.assertNotContains(r, updated.title)
- self.assertNotContains(r, updated_by.canonical_name())
- self.assertNotContains(r, updated_by.title)
self.client.cookies = SimpleCookie({str('full_draft'): str('off')})
r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name)))
@@ -736,15 +689,6 @@ Man Expires September 22, 2015 [Page 3]
self.assertNotContains(r, "Deimos street")
self.assertContains(r, replaced.canonical_name())
self.assertContains(r, replaced.title)
- # obs/updates not included until draft is RFC
- self.assertNotContains(r, obsoleted.canonical_name())
- self.assertNotContains(r, obsoleted.title)
- self.assertNotContains(r, obsoleted_by.canonical_name())
- self.assertNotContains(r, obsoleted_by.title)
- self.assertNotContains(r, updated.canonical_name())
- self.assertNotContains(r, updated.title)
- self.assertNotContains(r, updated_by.canonical_name())
- self.assertNotContains(r, updated_by.title)
self.client.cookies = SimpleCookie({str('full_draft'): str('foo')})
r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name)))
@@ -755,15 +699,6 @@ Man Expires September 22, 2015 [Page 3]
self.assertNotContains(r, "Deimos street")
self.assertContains(r, replaced.canonical_name())
self.assertContains(r, replaced.title)
- # obs/updates not included until draft is RFC
- self.assertNotContains(r, obsoleted.canonical_name())
- self.assertNotContains(r, obsoleted.title)
- self.assertNotContains(r, obsoleted_by.canonical_name())
- self.assertNotContains(r, obsoleted_by.title)
- self.assertNotContains(r, updated.canonical_name())
- self.assertNotContains(r, updated.title)
- self.assertNotContains(r, updated_by.canonical_name())
- self.assertNotContains(r, updated_by.title)
r = self.client.get(urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=draft.name)))
self.assertEqual(r.status_code, 200)
@@ -831,26 +766,29 @@ Man Expires September 22, 2015 [Page 3]
# draft published as RFC
draft.set_state(State.objects.get(type="draft", slug="rfc"))
- draft.std_level_id = "bcp"
- draft.save_with_history([DocEvent.objects.create(doc=draft, rev=draft.rev, type="published_rfc", by=Person.objects.get(name="(System)"))])
+ draft.std_level_id = "ps"
+ rfc = WgRfcFactory(group=draft.group, name="rfc123456")
+ rfc.save_with_history([DocEvent.objects.create(doc=rfc, rev=None, type="published_rfc", by=Person.objects.get(name="(System)"))])
- rfc_alias = DocAlias.objects.create(name="rfc123456")
- rfc_alias.docs.add(draft)
- bcp_alias = DocAlias.objects.create(name="bcp123456")
- bcp_alias.docs.add(draft)
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc.docalias.first())
+
+ obsoleted = IndividualRfcFactory()
+ rfc.relateddocument_set.create(relationship_id='obs',target=obsoleted.docalias.first())
+ obsoleted_by = IndividualRfcFactory()
+ obsoleted_by.relateddocument_set.create(relationship_id='obs',target=rfc.docalias.first())
+ updated = IndividualRfcFactory()
+ rfc.relateddocument_set.create(relationship_id='updates',target=updated.docalias.first())
+ updated_by = IndividualRfcFactory()
+ updated_by.relateddocument_set.create(relationship_id='updates',target=rfc.docalias.first())
r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name)))
self.assertEqual(r.status_code, 302)
- r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=bcp_alias.name)))
- self.assertEqual(r.status_code, 302)
- r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc_alias.name)))
+ r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name)))
self.assertEqual(r.status_code, 200)
self.assertContains(r, "RFC 123456")
self.assertContains(r, draft.name)
- self.assertContains(r, replaced.canonical_name())
- self.assertContains(r, replaced.title)
# obs/updates included with RFC
self.assertContains(r, obsoleted.canonical_name())
self.assertContains(r, obsoleted.title)
@@ -1490,11 +1428,11 @@ Man Expires September 22, 2015 [Page 3]
self.assertEqual(r.status_code, 200)
self.assert_correct_wg_group_link(r, group)
- rfc = WgRfcFactory(name='draft-rfc-document-%s' % group_type_id, group=group)
+ rfc = WgRfcFactory(group=group)
+ draft = WgDraftFactory(group=group)
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc.docalias.first())
DocEventFactory.create(doc=rfc, type='published_rfc', time=event_datetime)
- # get the rfc name to avoid a redirect
- rfc_name = rfc.docalias.filter(name__startswith='rfc').first().name
- r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc_name)))
+ r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name)))
self.assertEqual(r.status_code, 200)
self.assert_correct_wg_group_link(r, group)
@@ -1505,11 +1443,11 @@ Man Expires September 22, 2015 [Page 3]
self.assertEqual(r.status_code, 200)
self.assert_correct_non_wg_group_link(r, group)
- rfc = WgRfcFactory(name='draft-rfc-document-%s' % group_type_id, group=group)
+ rfc = WgRfcFactory(group=group)
+ draft = WgDraftFactory(name='draft-rfc-document-%s' % group_type_id, group=group)
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc.docalias.first())
DocEventFactory.create(doc=rfc, type='published_rfc', time=event_datetime)
- # get the rfc name to avoid a redirect
- rfc_name = rfc.docalias.filter(name__startswith='rfc').first().name
- r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc_name)))
+ r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name)))
self.assertEqual(r.status_code, 200)
self.assert_correct_non_wg_group_link(r, group)
@@ -1611,7 +1549,7 @@ class DocTestCase(TestCase):
r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=statchg.name)))
self.assertEqual(r.status_code, 200)
r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=statchg.relateddocument_set.first().target)))
- self.assertEqual(r.status_code, 302)
+ self.assertEqual(r.status_code, 200)
def test_document_charter(self):
CharterFactory(name='charter-ietf-mars')
@@ -1853,15 +1791,14 @@ class DocTestCase(TestCase):
self.assertContains(r, e.desc)
def test_history_bis_00(self):
- rfcname='rfc9090'
- rfc = WgRfcFactory(alias2=rfcname)
- bis_draft = WgDraftFactory(name='draft-ietf-{}-{}bis'.format(rfc.group.acronym,rfcname))
+ rfc = WgRfcFactory(rfc_number=9090)
+ bis_draft = WgDraftFactory(name='draft-ietf-{}-{}bis'.format(rfc.group.acronym,rfc.name))
url = urlreverse('ietf.doc.views_doc.document_history', kwargs=dict(name=bis_draft.name))
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(unicontent(r))
- attr1='value="{}"'.format(rfcname)
+ attr1='value="{}"'.format(rfc.name)
self.assertEqual(len(q('option['+attr1+'][selected="selected"]')), 1)
@@ -1911,11 +1848,31 @@ class DocTestCase(TestCase):
self.assertContains(r, doc.name)
def test_rfc_feed(self):
- WgRfcFactory()
+ rfc = WgRfcFactory(rfc_number=9000)
+ DocEventFactory(doc=rfc, type="published_rfc")
r = self.client.get("/feed/rfc/")
self.assertTrue(r.status_code, 200)
+ q = PyQuery(r.content[39:]) # Strip off the xml declaration
+ self.assertEqual(len(q("item")), 1)
+ item = q("item")[0]
+ media_content = item.findall("{http://search.yahoo.com/mrss/}content")
+ self.assertEqual(len(media_content),4)
+ types = set([m.attrib["type"] for m in media_content])
+ self.assertEqual(types, set(["application/rfc+xml", "text/plain", "text/html", "application/pdf"]))
+ rfcs_2016 = WgRfcFactory.create_batch(3) # rfc numbers will be well below the v3 threshold, so no application/rfc+xml media entry
+ for rfc in rfcs_2016:
+ e = DocEventFactory(doc=rfc, type="published_rfc")
+ e.time = e.time.replace(year=2016)
+ e.save()
r = self.client.get("/feed/rfc/2016")
self.assertTrue(r.status_code, 200)
+ q = PyQuery(r.content[39:])
+ self.assertEqual(len(q("item")), 3)
+ item = q("item")[0]
+ media_content = item.findall("{http://search.yahoo.com/mrss/}content")
+ self.assertEqual(len(media_content), 3)
+ types = set([m.attrib["type"] for m in media_content])
+ self.assertEqual(types, set(["text/plain", "text/html", "application/pdf"]))
def test_state_help(self):
url = urlreverse('ietf.doc.views_help.state_help', kwargs=dict(type="draft-iesg"))
@@ -1965,7 +1922,7 @@ class DocTestCase(TestCase):
r = self.client.get(url)
entry = self._parse_bibtex_response(r)["rfc%s"%num]
self.assertEqual(entry['series'], 'Request for Comments')
- self.assertEqual(entry['number'], num)
+ self.assertEqual(int(entry['number']), num)
self.assertEqual(entry['doi'], '10.17487/RFC%s'%num)
self.assertEqual(entry['year'], '2010')
self.assertEqual(entry['month'].lower()[0:3], 'oct')
@@ -1979,7 +1936,7 @@ class DocTestCase(TestCase):
std_level_id = 'inf',
time = datetime.datetime(1990, 4, 1, tzinfo=ZoneInfo(settings.TIME_ZONE)),
)
- num = april1.rfc_number()
+ num = april1.rfc_number
DocEventFactory.create(
doc=april1,
type='published_rfc',
@@ -1991,7 +1948,7 @@ class DocTestCase(TestCase):
self.assertEqual(r.get('Content-Type'), 'text/plain; charset=utf-8')
entry = self._parse_bibtex_response(r)["rfc%s"%num]
self.assertEqual(entry['series'], 'Request for Comments')
- self.assertEqual(entry['number'], num)
+ self.assertEqual(int(entry['number']), num)
self.assertEqual(entry['doi'], '10.17487/RFC%s'%num)
self.assertEqual(entry['year'], '1990')
self.assertEqual(entry['month'].lower()[0:3], 'apr')
@@ -2100,124 +2057,168 @@ class ReferencesTest(TestCase):
self.assertContains(r, doc1.name)
class GenerateDraftAliasesTests(TestCase):
- def setUp(self):
- super().setUp()
- self.doc_aliases_file = NamedTemporaryFile(delete=False, mode='w+')
- self.doc_aliases_file.close()
- self.doc_virtual_file = NamedTemporaryFile(delete=False, mode='w+')
- self.doc_virtual_file.close()
- self.saved_draft_aliases_path = settings.DRAFT_ALIASES_PATH
- self.saved_draft_virtual_path = settings.DRAFT_VIRTUAL_PATH
- settings.DRAFT_ALIASES_PATH = self.doc_aliases_file.name
- settings.DRAFT_VIRTUAL_PATH = self.doc_virtual_file.name
+ def setUp(self):
+ super().setUp()
+ self.doc_aliases_file = NamedTemporaryFile(delete=False, mode="w+")
+ self.doc_aliases_file.close()
+ self.doc_virtual_file = NamedTemporaryFile(delete=False, mode="w+")
+ self.doc_virtual_file.close()
+ self.saved_draft_aliases_path = settings.DRAFT_ALIASES_PATH
+ self.saved_draft_virtual_path = settings.DRAFT_VIRTUAL_PATH
+ settings.DRAFT_ALIASES_PATH = self.doc_aliases_file.name
+ settings.DRAFT_VIRTUAL_PATH = self.doc_virtual_file.name
- def tearDown(self):
- settings.DRAFT_ALIASES_PATH = self.saved_draft_aliases_path
- settings.DRAFT_VIRTUAL_PATH = self.saved_draft_virtual_path
- os.unlink(self.doc_aliases_file.name)
- os.unlink(self.doc_virtual_file.name)
- super().tearDown()
+ def tearDown(self):
+ settings.DRAFT_ALIASES_PATH = self.saved_draft_aliases_path
+ settings.DRAFT_VIRTUAL_PATH = self.saved_draft_virtual_path
+ os.unlink(self.doc_aliases_file.name)
+ os.unlink(self.doc_virtual_file.name)
+ super().tearDown()
- def testManagementCommand(self):
- a_month_ago = (timezone.now() - datetime.timedelta(30)).astimezone(RPC_TZINFO)
- a_month_ago = a_month_ago.replace(hour=0, minute=0, second=0, microsecond=0)
- ad = RoleFactory(name_id='ad', group__type_id='area', group__state_id='active').person
- shepherd = PersonFactory()
- author1 = PersonFactory()
- author2 = PersonFactory()
- author3 = PersonFactory()
- author4 = PersonFactory()
- author5 = PersonFactory()
- author6 = PersonFactory()
- mars = GroupFactory(type_id='wg', acronym='mars')
- marschairman = PersonFactory(user__username='marschairman')
- mars.role_set.create(name_id='chair', person=marschairman, email=marschairman.email())
- doc1 = IndividualDraftFactory(authors=[author1], shepherd=shepherd.email(), ad=ad)
- doc2 = WgDraftFactory(name='draft-ietf-mars-test', group__acronym='mars', authors=[author2], ad=ad)
- doc3 = WgRfcFactory.create(name='draft-ietf-mars-finished', group__acronym='mars', authors=[author3], ad=ad, std_level_id='ps', states=[('draft','rfc'),('draft-iesg','pub')], time=a_month_ago)
- DocEventFactory.create(doc=doc3, type='published_rfc', time=a_month_ago)
- doc4 = WgRfcFactory.create(authors=[author4,author5], ad=ad, std_level_id='ps', states=[('draft','rfc'),('draft-iesg','pub')], time=datetime.datetime(2010,10,10, tzinfo=ZoneInfo(settings.TIME_ZONE)))
- DocEventFactory.create(doc=doc4, type='published_rfc', time=datetime.datetime(2010, 10, 10, tzinfo=RPC_TZINFO))
- doc5 = IndividualDraftFactory(authors=[author6])
+ def testManagementCommand(self):
+ a_month_ago = (timezone.now() - datetime.timedelta(30)).astimezone(RPC_TZINFO)
+ a_month_ago = a_month_ago.replace(hour=0, minute=0, second=0, microsecond=0)
+ ad = RoleFactory(
+ name_id="ad", group__type_id="area", group__state_id="active"
+ ).person
+ shepherd = PersonFactory()
+ author1 = PersonFactory()
+ author2 = PersonFactory()
+ author3 = PersonFactory()
+ author4 = PersonFactory()
+ author5 = PersonFactory()
+ author6 = PersonFactory()
+ mars = GroupFactory(type_id="wg", acronym="mars")
+ marschairman = PersonFactory(user__username="marschairman")
+ mars.role_set.create(
+ name_id="chair", person=marschairman, email=marschairman.email()
+ )
+ doc1 = IndividualDraftFactory(
+ authors=[author1], shepherd=shepherd.email(), ad=ad
+ )
+ doc2 = WgDraftFactory(
+ name="draft-ietf-mars-test", group__acronym="mars", authors=[author2], ad=ad
+ )
+ doc3 = WgDraftFactory.create(
+ name="draft-ietf-mars-finished",
+ group__acronym="mars",
+ authors=[author3],
+ ad=ad,
+ std_level_id="ps",
+ states=[("draft", "rfc"), ("draft-iesg", "pub")],
+ time=a_month_ago,
+ )
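+ # rfc3 is the rfc Document that doc3 became; doc3 is still expected to get draft aliases below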
+ rfc3 = WgRfcFactory()
+ DocEventFactory.create(doc=rfc3, type="published_rfc", time=a_month_ago)
+ doc3.relateddocument_set.create(
+ relationship_id="became_rfc", target=rfc3.docalias.first()
+ )
+ doc4 = WgDraftFactory.create(
+ authors=[author4, author5],
+ ad=ad,
+ std_level_id="ps",
+ states=[("draft", "rfc"), ("draft-iesg", "pub")],
+ time=datetime.datetime(2010, 10, 10, tzinfo=ZoneInfo(settings.TIME_ZONE)),
+ )
+ rfc4 = WgRfcFactory()
+ DocEventFactory.create(
+ doc=rfc4,
+ type="published_rfc",
+ time=datetime.datetime(2010, 10, 10, tzinfo=RPC_TZINFO),
+ )
+ doc4.relateddocument_set.create(
+ relationship_id="became_rfc", target=rfc4.docalias.first()
+ )
+ doc5 = IndividualDraftFactory(authors=[author6])
- args = [ ]
- kwargs = { }
- out = io.StringIO()
- call_command("generate_draft_aliases", *args, **kwargs, stdout=out, stderr=out)
- self.assertFalse(out.getvalue())
+ args = []
+ kwargs = {}
+ out = io.StringIO()
+ call_command("generate_draft_aliases", *args, **kwargs, stdout=out, stderr=out)
+ self.assertFalse(out.getvalue())
- with open(settings.DRAFT_ALIASES_PATH) as afile:
- acontent = afile.read()
- self.assertTrue(all([x in acontent for x in [
- 'xfilter-' + doc1.name,
- 'xfilter-' + doc1.name + '.ad',
- 'xfilter-' + doc1.name + '.authors',
- 'xfilter-' + doc1.name + '.shepherd',
- 'xfilter-' + doc1.name + '.all',
- 'xfilter-' + doc2.name,
- 'xfilter-' + doc2.name + '.ad',
- 'xfilter-' + doc2.name + '.authors',
- 'xfilter-' + doc2.name + '.chairs',
- 'xfilter-' + doc2.name + '.all',
- 'xfilter-' + doc3.name,
- 'xfilter-' + doc3.name + '.ad',
- 'xfilter-' + doc3.name + '.authors',
- 'xfilter-' + doc3.name + '.chairs',
- 'xfilter-' + doc5.name,
- 'xfilter-' + doc5.name + '.authors',
- 'xfilter-' + doc5.name + '.all',
- ]]))
- self.assertFalse(all([x in acontent for x in [
- 'xfilter-' + doc1.name + '.chairs',
- 'xfilter-' + doc2.name + '.shepherd',
- 'xfilter-' + doc3.name + '.shepherd',
- 'xfilter-' + doc4.name,
- 'xfilter-' + doc5.name + '.shepherd',
- 'xfilter-' + doc5.name + '.ad',
- ]]))
+ with open(settings.DRAFT_ALIASES_PATH) as afile:
+ acontent = afile.read()
+ for x in [
+ "xfilter-" + doc1.name,
+ "xfilter-" + doc1.name + ".ad",
+ "xfilter-" + doc1.name + ".authors",
+ "xfilter-" + doc1.name + ".shepherd",
+ "xfilter-" + doc1.name + ".all",
+ "xfilter-" + doc2.name,
+ "xfilter-" + doc2.name + ".ad",
+ "xfilter-" + doc2.name + ".authors",
+ "xfilter-" + doc2.name + ".chairs",
+ "xfilter-" + doc2.name + ".all",
+ "xfilter-" + doc3.name,
+ "xfilter-" + doc3.name + ".ad",
+ "xfilter-" + doc3.name + ".authors",
+ "xfilter-" + doc3.name + ".chairs",
+ "xfilter-" + doc5.name,
+ "xfilter-" + doc5.name + ".authors",
+ "xfilter-" + doc5.name + ".all",
+ ]:
+ self.assertIn(x, acontent)
- with open(settings.DRAFT_VIRTUAL_PATH) as vfile:
- vcontent = vfile.read()
- self.assertTrue(all([x in vcontent for x in [
- ad.email_address(),
- shepherd.email_address(),
- marschairman.email_address(),
- author1.email_address(),
- author2.email_address(),
- author3.email_address(),
- author6.email_address(),
- ]]))
- self.assertFalse(all([x in vcontent for x in [
- author4.email_address(),
- author5.email_address(),
- ]]))
- self.assertTrue(all([x in vcontent for x in [
- 'xfilter-' + doc1.name,
- 'xfilter-' + doc1.name + '.ad',
- 'xfilter-' + doc1.name + '.authors',
- 'xfilter-' + doc1.name + '.shepherd',
- 'xfilter-' + doc1.name + '.all',
- 'xfilter-' + doc2.name,
- 'xfilter-' + doc2.name + '.ad',
- 'xfilter-' + doc2.name + '.authors',
- 'xfilter-' + doc2.name + '.chairs',
- 'xfilter-' + doc2.name + '.all',
- 'xfilter-' + doc3.name,
- 'xfilter-' + doc3.name + '.ad',
- 'xfilter-' + doc3.name + '.authors',
- 'xfilter-' + doc3.name + '.chairs',
- 'xfilter-' + doc5.name,
- 'xfilter-' + doc5.name + '.authors',
- 'xfilter-' + doc5.name + '.all',
- ]]))
- self.assertFalse(all([x in vcontent for x in [
- 'xfilter-' + doc1.name + '.chairs',
- 'xfilter-' + doc2.name + '.shepherd',
- 'xfilter-' + doc3.name + '.shepherd',
- 'xfilter-' + doc4.name,
- 'xfilter-' + doc5.name + '.shepherd',
- 'xfilter-' + doc5.name + '.ad',
- ]]))
+ for x in [
+ "xfilter-" + doc1.name + ".chairs",
+ "xfilter-" + doc2.name + ".shepherd",
+ "xfilter-" + doc3.name + ".shepherd",
+ "xfilter-" + doc4.name,
+ "xfilter-" + doc5.name + ".shepherd",
+ "xfilter-" + doc5.name + ".ad",
+ ]:
+ self.assertNotIn(x, acontent)
+
+ with open(settings.DRAFT_VIRTUAL_PATH) as vfile:
+ vcontent = vfile.read()
+ for x in [
+ ad.email_address(),
+ shepherd.email_address(),
+ marschairman.email_address(),
+ author1.email_address(),
+ author2.email_address(),
+ author3.email_address(),
+ author6.email_address(),
+ ]:
+ self.assertIn(x, vcontent)
+
+ for x in [
+ author4.email_address(),
+ author5.email_address(),
+ ]:
+ self.assertNotIn(x, vcontent)
+
+ for x in [
+ "xfilter-" + doc1.name,
+ "xfilter-" + doc1.name + ".ad",
+ "xfilter-" + doc1.name + ".authors",
+ "xfilter-" + doc1.name + ".shepherd",
+ "xfilter-" + doc1.name + ".all",
+ "xfilter-" + doc2.name,
+ "xfilter-" + doc2.name + ".ad",
+ "xfilter-" + doc2.name + ".authors",
+ "xfilter-" + doc2.name + ".chairs",
+ "xfilter-" + doc2.name + ".all",
+ "xfilter-" + doc3.name,
+ "xfilter-" + doc3.name + ".ad",
+ "xfilter-" + doc3.name + ".authors",
+ "xfilter-" + doc3.name + ".chairs",
+ "xfilter-" + doc5.name,
+ "xfilter-" + doc5.name + ".authors",
+ "xfilter-" + doc5.name + ".all",
+ ]:
+ self.assertIn(x, vcontent)
+
+ for x in [
+ "xfilter-" + doc1.name + ".chairs",
+ "xfilter-" + doc2.name + ".shepherd",
+ "xfilter-" + doc3.name + ".shepherd",
+ "xfilter-" + doc4.name,
+ "xfilter-" + doc5.name + ".shepherd",
+ "xfilter-" + doc5.name + ".ad",
+ ]:
+ self.assertNotIn(x, vcontent)
class EmailAliasesTests(TestCase):
@@ -2650,10 +2651,10 @@ class Idnits2SupportTests(TestCase):
settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['DERIVED_DIR']
def test_obsoleted(self):
- rfc = WgRfcFactory(alias2__name='rfc1001')
- WgRfcFactory(alias2__name='rfc1003',relations=[('obs',rfc)])
- rfc = WgRfcFactory(alias2__name='rfc1005')
- WgRfcFactory(alias2__name='rfc1007',relations=[('obs',rfc)])
+ rfc = WgRfcFactory(rfc_number=1001)
+ WgRfcFactory(rfc_number=1003,relations=[('obs',rfc)])
+ rfc = WgRfcFactory(rfc_number=1005)
+ WgRfcFactory(rfc_number=1007,relations=[('obs',rfc)])
url = urlreverse('ietf.doc.views_doc.idnits2_rfcs_obsoleted')
r = self.client.get(url)
@@ -2678,6 +2679,8 @@ class Idnits2SupportTests(TestCase):
def test_idnits2_state(self):
rfc = WgRfcFactory()
+ draft = WgDraftFactory()
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc.docalias.first())
url = urlreverse('ietf.doc.views_doc.idnits2_state', kwargs=dict(name=rfc.canonical_name()))
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
@@ -2736,16 +2739,12 @@ class RawIdTests(TestCase):
self.should_succeed(dict(name=draft.name, rev='00',ext='txt'))
self.should_404(dict(name=draft.name, rev='00',ext='html'))
- def test_raw_id_rfc(self):
- rfc = WgRfcFactory()
- dir = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR
- (Path(dir) / f'{rfc.name}-{rfc.rev}.txt').touch()
- self.should_succeed(dict(name=rfc.name))
- self.should_404(dict(name=rfc.canonical_name()))
+ # test_raw_id_rfc intentionally removed:
+ # an RFC is no longer a pseudo-version of a draft.
def test_non_draft(self):
- charter = CharterFactory()
- self.should_404(dict(name=charter.name))
+ for doc in [CharterFactory(), WgRfcFactory()]:
+ self.should_404(dict(name=doc.name))
class PdfizedTests(TestCase):
@@ -2764,24 +2763,27 @@ class PdfizedTests(TestCase):
r = self.client.get(url)
self.assertEqual(r.status_code, 404)
+ # This takes a _long_ time (32s on a 2022 m1 macbook pro) - is it worth what it covers?
def test_pdfized(self):
- rfc = WgRfcFactory(create_revisions=range(0,2))
+ rfc = WgRfcFactory()
+ draft = WgDraftFactory(create_revisions=range(0,2))
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc.docalias.first())
dir = settings.RFC_PATH
- with (Path(dir) / f'{rfc.canonical_name()}.txt').open('w') as f:
+ with (Path(dir) / f'{rfc.name}.txt').open('w') as f:
f.write('text content')
dir = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR
for r in range(0,2):
- with (Path(dir) / f'{rfc.name}-{r:02d}.txt').open('w') as f:
+ with (Path(dir) / f'{draft.name}-{r:02d}.txt').open('w') as f:
f.write('text content')
- self.should_succeed(dict(name=rfc.canonical_name()))
self.should_succeed(dict(name=rfc.name))
+ self.should_succeed(dict(name=draft.name))
for r in range(0,2):
- self.should_succeed(dict(name=rfc.name,rev=f'{r:02d}'))
+ self.should_succeed(dict(name=draft.name,rev=f'{r:02d}'))
for ext in ('pdf','txt','html','anythingatall'):
- self.should_succeed(dict(name=rfc.name,rev=f'{r:02d}',ext=ext))
- self.should_404(dict(name=rfc.name,rev='02'))
+ self.should_succeed(dict(name=draft.name,rev=f'{r:02d}',ext=ext))
+ self.should_404(dict(name=draft.name,rev='02'))
class NotifyValidationTests(TestCase):
def test_notify_validation(self):
diff --git a/ietf/doc/tests_ballot.py b/ietf/doc/tests_ballot.py
index 6f587159c..9c9287dab 100644
--- a/ietf/doc/tests_ballot.py
+++ b/ietf/doc/tests_ballot.py
@@ -1121,8 +1121,8 @@ class RegenerateLastCallTestCase(TestCase):
self.assertFalse("contains these normative down" in lc_text)
rfc = IndividualRfcFactory.create(
+ rfc_number=6666,
stream_id='ise',
- name='rfc6666',
states=[('draft','rfc'),('draft-iesg','pub')],
std_level_id='inf',
)
diff --git a/ietf/doc/tests_downref.py b/ietf/doc/tests_downref.py
index acf078c86..6c8fb685f 100644
--- a/ietf/doc/tests_downref.py
+++ b/ietf/doc/tests_downref.py
@@ -22,7 +22,7 @@ class Downref(TestCase):
self.draftalias = self.draft.docalias.get(name='draft-ietf-mars-test')
self.doc = WgDraftFactory(name='draft-ietf-mars-approved-document',states=[('draft-iesg','rfcqueue')])
self.docalias = self.doc.docalias.get(name='draft-ietf-mars-approved-document')
- self.rfc = WgRfcFactory(name='rfc9998')
+ self.rfc = WgRfcFactory(rfc_number=9998)
self.rfcalias = self.rfc.docalias.get(name='rfc9998')
RelatedDocument.objects.create(source=self.doc, target=self.rfc, relationship_id='downref-approval')
@@ -100,7 +100,7 @@ class Downref(TestCase):
def test_downref_last_call(self):
draft = WgDraftFactory(name='draft-ietf-mars-ready-for-lc-document',intended_std_level_id='ps',states=[('draft-iesg','iesg-eva')])
WgDraftFactory(name='draft-ietf-mars-another-approved-document',states=[('draft-iesg','rfcqueue')])
- rfc9999 = WgRfcFactory(name='rfc9999', std_level_id=None)
+ rfc9999 = WgRfcFactory(alias2__name='rfc9999', std_level_id=None)
RelatedDocument.objects.create(source=draft, target=rfc9999, relationship_id='refnorm')
url = urlreverse('ietf.doc.views_ballot.lastcalltext', kwargs=dict(name=draft.name))
login_testing_unauthorized(self, "secretary", url)
diff --git a/ietf/doc/tests_review.py b/ietf/doc/tests_review.py
index 5347abf42..fb40803f4 100644
--- a/ietf/doc/tests_review.py
+++ b/ietf/doc/tests_review.py
@@ -137,10 +137,18 @@ class ReviewTests(TestCase):
url = urlreverse('ietf.doc.views_review.request_review', kwargs={ "name": doc.name })
login_testing_unauthorized(self, "ad", url)
- # get should fail
+ # get should fail - all non-draft types 404
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 404)
+
+ # Can only request reviews on active draft documents
+ doc = WgDraftFactory(states=[("draft","rfc")])
+ url = urlreverse('ietf.doc.views_review.request_review', kwargs={ "name": doc.name })
r = self.client.get(url)
self.assertEqual(r.status_code, 403)
+
+
def test_doc_page(self):
doc = WgDraftFactory(group__acronym='mars',rev='01')
diff --git a/ietf/doc/tests_status_change.py b/ietf/doc/tests_status_change.py
index bb332d6ca..26bee27e1 100644
--- a/ietf/doc/tests_status_change.py
+++ b/ietf/doc/tests_status_change.py
@@ -14,7 +14,7 @@ from textwrap import wrap
from django.conf import settings
from django.urls import reverse as urlreverse
-from ietf.doc.factories import DocumentFactory, IndividualRfcFactory, WgRfcFactory
+from ietf.doc.factories import DocumentFactory, IndividualRfcFactory, WgRfcFactory, WgDraftFactory
from ietf.doc.models import ( Document, State, DocEvent,
BallotPositionDocEvent, NewRevisionDocEvent, TelechatDocEvent, WriteupDocEvent )
from ietf.doc.utils import create_ballot_if_not_open
@@ -449,9 +449,16 @@ class StatusChangeTests(TestCase):
def setUp(self):
super().setUp()
- IndividualRfcFactory(name='rfc14',std_level_id='unkn')
- WgRfcFactory(name='rfc9999',std_level_id='ps')
- WgRfcFactory(name='rfc9998',std_level_id='inf')
+ IndividualRfcFactory(rfc_number=14,std_level_id='unkn') # draft was never issued
+
+ rfc = WgRfcFactory(rfc_number=9999,std_level_id='ps')
+ draft = WgDraftFactory(name='draft-ietf-random-thing')
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
+
+ rfc = WgRfcFactory(rfc_number=9998,std_level_id='inf')
+ draft = WgDraftFactory(name='draft-ietf-random-other-thing')
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
+
DocumentFactory(type_id='statchg',name='status-change-imaginary-mid-review',notify='notify@example.org')
class StatusChangeSubmitTests(TestCase):
diff --git a/ietf/doc/tests_utils.py b/ietf/doc/tests_utils.py
index b2147dcb6..dde4a1841 100644
--- a/ietf/doc/tests_utils.py
+++ b/ietf/doc/tests_utils.py
@@ -11,10 +11,10 @@ from django.utils import timezone
from ietf.group.factories import GroupFactory, RoleFactory
from ietf.name.models import DocTagName
from ietf.person.factories import PersonFactory
-from ietf.utils.test_utils import TestCase, name_of_file_containing
+from ietf.utils.test_utils import TestCase, name_of_file_containing, reload_db_objects
from ietf.person.models import Person
from ietf.doc.factories import DocumentFactory, WgRfcFactory, WgDraftFactory
-from ietf.doc.models import State, DocumentActionHolder, DocumentAuthor, Document
+from ietf.doc.models import State, DocumentActionHolder, DocumentAuthor
from ietf.doc.utils import (update_action_holders, add_state_change_event, update_documentauthors,
fuzzy_find_documents, rebuild_reference_relations, build_file_urls)
from ietf.utils.draft import Draft, PlaintextDraft
@@ -251,40 +251,42 @@ class MiscTests(TestCase):
self.assertEqual(docauth.country, '')
def do_fuzzy_find_documents_rfc_test(self, name):
- rfc = WgRfcFactory(name=name, create_revisions=(0, 1, 2))
- rfc = Document.objects.get(pk=rfc.pk) # clear out any cached values
+ draft = WgDraftFactory(name=name, create_revisions=(0, 1, 2))
+ rfc = WgRfcFactory()
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc.docalias.first())
+ draft, rfc = reload_db_objects(draft, rfc)
# by canonical name
- found = fuzzy_find_documents(rfc.canonical_name(), None)
- self.assertCountEqual(found.documents, [rfc])
- self.assertEqual(found.matched_rev, None)
- self.assertEqual(found.matched_name, rfc.canonical_name())
-
- # by draft name, no rev
found = fuzzy_find_documents(rfc.name, None)
self.assertCountEqual(found.documents, [rfc])
self.assertEqual(found.matched_rev, None)
self.assertEqual(found.matched_name, rfc.name)
+ # by draft name, no rev
+ found = fuzzy_find_documents(draft.name, None)
+ self.assertCountEqual(found.documents, [draft])
+ self.assertEqual(found.matched_rev, None)
+ self.assertEqual(found.matched_name, draft.name)
+
# by draft name, latest rev
- found = fuzzy_find_documents(rfc.name, '02')
- self.assertCountEqual(found.documents, [rfc])
+ found = fuzzy_find_documents(draft.name, '02')
+ self.assertCountEqual(found.documents, [draft])
self.assertEqual(found.matched_rev, '02')
- self.assertEqual(found.matched_name, rfc.name)
+ self.assertEqual(found.matched_name, draft.name)
# by draft name, earlier rev
- found = fuzzy_find_documents(rfc.name, '01')
- self.assertCountEqual(found.documents, [rfc])
+ found = fuzzy_find_documents(draft.name, '01')
+ self.assertCountEqual(found.documents, [draft])
self.assertEqual(found.matched_rev, '01')
- self.assertEqual(found.matched_name, rfc.name)
+ self.assertEqual(found.matched_name, draft.name)
# wrong name or revision
- found = fuzzy_find_documents(rfc.name + '-incorrect')
+ found = fuzzy_find_documents(draft.name + '-incorrect')
self.assertCountEqual(found.documents, [], 'Should not find document that does not match')
- found = fuzzy_find_documents(rfc.name + '-incorrect', '02')
+ found = fuzzy_find_documents(draft.name + '-incorrect', '02')
self.assertCountEqual(found.documents, [], 'Still should not find document, even with a version')
- found = fuzzy_find_documents(rfc.name, '22')
- self.assertCountEqual(found.documents, [rfc],
+ found = fuzzy_find_documents(draft.name, '22')
+ self.assertCountEqual(found.documents, [draft],
'Should find document even if rev does not exist')
diff --git a/ietf/doc/utils.py b/ietf/doc/utils.py
index cf192e446..d26e75065 100644
--- a/ietf/doc/utils.py
+++ b/ietf/doc/utils.py
@@ -998,14 +998,11 @@ def get_search_cache_key(params):
kwargs = dict([ (k,v) for (k,v) in list(params.items()) if k in fields ])
key = "doc:document:search:" + hashlib.sha512(json.dumps(kwargs, sort_keys=True).encode('utf-8')).hexdigest()
return key
-
-def build_file_urls(doc: Union[Document, DocHistory]):
- if doc.type_id != 'draft':
- return [], []
- if doc.is_rfc():
- name = doc.canonical_name()
- base_path = os.path.join(settings.RFC_PATH, name + ".")
+
+def build_file_urls(doc: Union[Document, DocHistory]):
+ if doc.type_id == "rfc":
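+ # rfc documents are served from RFC_PATH under their own name (e.g. rfc9999.txt)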
+ base_path = os.path.join(settings.RFC_PATH, doc.name + ".")
possible_types = settings.RFC_FILE_TYPES
found_types = [t for t in possible_types if os.path.exists(base_path + t)]
@@ -1014,17 +1011,17 @@ def build_file_urls(doc: Union[Document, DocHistory]):
file_urls = []
for t in found_types:
label = "plain text" if t == "txt" else t
- file_urls.append((label, base + name + "." + t))
+ file_urls.append((label, base + doc.name + "." + t))
if "pdf" not in found_types and "txt" in found_types:
- file_urls.append(("pdf", base + "pdfrfc/" + name + ".txt.pdf"))
+ file_urls.append(("pdf", base + "pdfrfc/" + doc.name + ".txt.pdf"))
if "txt" in found_types:
- file_urls.append(("htmlized", urlreverse('ietf.doc.views_doc.document_html', kwargs=dict(name=name))))
+ file_urls.append(("htmlized", urlreverse('ietf.doc.views_doc.document_html', kwargs=dict(name=doc.name))))
if doc.tags.filter(slug="verified-errata").exists():
file_urls.append(("with errata", settings.RFC_EDITOR_INLINE_ERRATA_URL.format(rfc_number=doc.rfc_number)))
- file_urls.append(("bibtex", urlreverse('ietf.doc.views_doc.document_bibtex',kwargs=dict(name=name))))
- elif doc.rev:
+ file_urls.append(("bibtex", urlreverse('ietf.doc.views_doc.document_bibtex',kwargs=dict(name=doc.name))))
+ elif doc.type_id == "draft" and doc.rev != "":
base_path = os.path.join(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR, doc.name + "-" + doc.rev + ".")
possible_types = settings.IDSUBMIT_FILE_TYPES
found_types = [t for t in possible_types if os.path.exists(base_path + t)]
@@ -1039,12 +1036,14 @@ def build_file_urls(doc: Union[Document, DocHistory]):
file_urls.append(("pdfized", urlreverse('ietf.doc.views_doc.document_pdfized', kwargs=dict(name=doc.name, rev=doc.rev))))
file_urls.append(("bibtex", urlreverse('ietf.doc.views_doc.document_bibtex',kwargs=dict(name=doc.name,rev=doc.rev))))
else:
- # As of 2022-12-14, there are 1463 Document and 3136 DocHistory records with type='draft' and rev=''.
- # All of these are in the rfc state and are covered by the above cases.
- log.unreachable('2022-12-14')
+ if doc.type_id == "draft":
+ # TODO: look at the state of the database post migration and update this comment, or remove the block
+ # As of 2022-12-14, there are 1463 Document and 3136 DocHistory records with type='draft' and rev=''.
+ # All of these are in the rfc state and are covered by the above cases.
+ log.unreachable('2022-12-14')
file_urls = []
found_types = []
-
+
return file_urls, found_types
def augment_docs_and_user_with_user_info(docs, user):
@@ -1111,7 +1110,7 @@ def generate_idnits2_rfc_status():
'unkn': 'U',
}
- rfcs = Document.objects.filter(type_id='rfc',states__slug='published',states__type='rfc')
+ rfcs = Document.objects.filter(type_id='rfc')
for rfc in rfcs:
offset = int(rfc.rfc_number)-1
blob[offset] = symbols[rfc.std_level_id]
@@ -1170,8 +1169,14 @@ def fuzzy_find_documents(name, rev=None):
if re.match("^[0-9]+$", name):
name = f'rfc{name}'
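+ # names that look like rfcs (including bare numbers normalized above) now resolve to rfc documents; other names are drafts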
+ if name.startswith("rfc"):
+ sought_type = "rfc"
+ log.assertion("rev is None")
+ else:
+ sought_type = "draft"
+
# see if we can find a document using this name
- docs = Document.objects.filter(docalias__name=name, type_id='draft')
+ docs = Document.objects.filter(docalias__name=name, type_id=sought_type)
if rev and not docs.exists():
# No document found, see if the name/rev split has been misidentified.
# Handles some special cases, like draft-ietf-tsvwg-ieee-802-11.
diff --git a/ietf/doc/utils_search.py b/ietf/doc/utils_search.py
index a4997c2a1..8e614b3e9 100644
--- a/ietf/doc/utils_search.py
+++ b/ietf/doc/utils_search.py
@@ -93,7 +93,7 @@ def fill_in_document_table_attributes(docs, have_telechat_date=False):
# emulate canonical name which is used by a lot of the utils
# d.canonical_name = wrap_value(rfc_aliases[d.pk] if d.pk in rfc_aliases else d.name)
- if d.is_rfc() and d.latest_event_cache["published_rfc"]:
+ if d.type_id == "rfc" and d.latest_event_cache["published_rfc"]:
d.latest_revision_date = d.latest_event_cache["published_rfc"].time
elif d.latest_event_cache["new_revision"]:
d.latest_revision_date = d.latest_event_cache["new_revision"].time
@@ -140,17 +140,35 @@ def fill_in_document_table_attributes(docs, have_telechat_date=False):
d.obsoleted_by_list = []
d.updated_by_list = []
- xed_by = RelatedDocument.objects.filter(target__name__in=list(rfc_aliases.values()),
- relationship__in=("obs", "updates")).select_related('target')
- rel_rfc_aliases = dict([ (a.document.id, re.sub(r"rfc(\d+)", r"RFC \1", a.name, flags=re.IGNORECASE)) for a in DocAlias.objects.filter(name__startswith="rfc", docs__id__in=[rel.source_id for rel in xed_by]) ])
+ # Revisit this block after RFCs become first-class Document objects
+ xed_by = list(
+ RelatedDocument.objects.filter(
+ target__name__in=list(rfc_aliases.values()),
+ relationship__in=("obs", "updates"),
+ ).select_related("target")
+ )
+ rel_rfc_aliases = {
+ a.document.id: re.sub(r"rfc(\d+)", r"RFC \1", a.name, flags=re.IGNORECASE)
+ for a in DocAlias.objects.filter(
+ name__startswith="rfc", docs__id__in=[rel.source_id for rel in xed_by]
+ )
+ }
+ xed_by.sort(
+ key=lambda rel: int(
+ re.sub(
+ r"rfc\s*(\d+)",
+ r"\1",
+ rel_rfc_aliases[rel.source_id],
+ flags=re.IGNORECASE,
+ )
+ )
+ )
for rel in xed_by:
d = doc_dict[rel.target.id]
if rel.relationship_id == "obs":
- l = d.obsoleted_by_list
+ d.obsoleted_by_list.append(rel_rfc_aliases[rel.source_id])
elif rel.relationship_id == "updates":
- l = d.updated_by_list
- l.append(rel_rfc_aliases[rel.source_id])
- l.sort()
+ d.updated_by_list.append(rel_rfc_aliases[rel.source_id])
def augment_docs_with_related_docs_info(docs):
"""Augment all documents with related documents information.
diff --git a/ietf/doc/views_ballot.py b/ietf/doc/views_ballot.py
index 7e9f54298..564148932 100644
--- a/ietf/doc/views_ballot.py
+++ b/ietf/doc/views_ballot.py
@@ -953,7 +953,13 @@ def approve_downrefs(request, name):
login = request.user.person
- downrefs_to_rfc = [rel for rel in doc.relateddocument_set.all() if rel.is_downref() and not rel.is_approved_downref() and rel.target.is_rfc()]
+ downrefs_to_rfc = [
+ rel
+ for rel in doc.relateddocument_set.all()
+ if rel.is_downref()
+ and not rel.is_approved_downref()
+ and rel.target.type_id == "rfc"
+ ]
downrefs_to_rfc_qs = RelatedDocument.objects.filter(pk__in=[r.pk for r in downrefs_to_rfc])
diff --git a/ietf/doc/views_doc.py b/ietf/doc/views_doc.py
index fe2013ddf..e8502e105 100644
--- a/ietf/doc/views_doc.py
+++ b/ietf/doc/views_doc.py
@@ -54,13 +54,13 @@ from django.contrib.staticfiles import finders
import debug # pyflakes:ignore
-from ietf.doc.models import ( Document, DocAlias, DocHistory, DocEvent, BallotDocEvent, BallotType,
+from ietf.doc.models import ( Document, DocHistory, DocEvent, BallotDocEvent, BallotType,
ConsensusDocEvent, NewRevisionDocEvent, TelechatDocEvent, WriteupDocEvent, IanaExpertDocEvent,
IESG_BALLOT_ACTIVE_STATES, STATUSCHANGE_RELATIONS, DocumentActionHolder, DocumentAuthor,
RelatedDocument, RelatedDocHistory)
from ietf.doc.utils import (augment_events_with_revision,
can_adopt_draft, can_unadopt_draft, get_chartering_type, get_tags_for_stream_id,
- needed_ballot_positions, nice_consensus, prettify_std_name, update_telechat, has_same_ballot,
+ needed_ballot_positions, nice_consensus, update_telechat, has_same_ballot,
get_initial_notify, make_notify_changed_event, make_rev_history, default_consensus,
add_events_message_info, get_unicode_document_content,
augment_docs_and_user_with_user_info, irsg_needed_ballot_positions, add_action_holder_change_event,
@@ -114,13 +114,46 @@ def render_document_top(request, doc, tab, name):
rsab_ballot,
None if rsab_ballot else "RSAB Evaluation Ballot has not been created yet"
))
- if doc.type_id in ("draft","conflrev", "statchg"):
- tabs.append(("IESG Evaluation Record", "ballot", urlreverse("ietf.doc.views_doc.document_ballot", kwargs=dict(name=name)), iesg_ballot, None if iesg_ballot else "IESG Evaluation Ballot has not been created yet"))
- elif doc.type_id == "charter" and doc.group.type_id == "wg":
- tabs.append(("IESG Review", "ballot", urlreverse("ietf.doc.views_doc.document_ballot", kwargs=dict(name=name)), iesg_ballot, None if iesg_ballot else "IESG Review Ballot has not been created yet"))
-
- if doc.type_id == "draft" or (doc.type_id == "charter" and doc.group.type_id == "wg"):
- tabs.append(("IESG Writeups", "writeup", urlreverse('ietf.doc.views_doc.document_writeup', kwargs=dict(name=name)), True, None))
+
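+ # Only offer the IESG ballot and writeup tabs when a ballot exists or the document belongs to a WG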
+ if iesg_ballot or (doc.group and doc.group.type_id == "wg"):
+ if doc.type_id in ("draft", "conflrev", "statchg"):
+ tabs.append(
+ (
+ "IESG Evaluation Record",
+ "ballot",
+ urlreverse(
+ "ietf.doc.views_doc.document_ballot", kwargs=dict(name=name)
+ ),
+ iesg_ballot,
+ None,
+ )
+ )
+ elif doc.type_id == "charter" and doc.group and doc.group.type_id == "wg":
+ tabs.append(
+ (
+ "IESG Review",
+ "ballot",
+ urlreverse(
+ "ietf.doc.views_doc.document_ballot", kwargs=dict(name=name)
+ ),
+ iesg_ballot,
+ None,
+ )
+ )
+ if doc.type_id == "draft" or (
+ doc.type_id == "charter" and doc.group and doc.group.type_id == "wg"
+ ):
+ tabs.append(
+ (
+ "IESG Writeups",
+ "writeup",
+ urlreverse(
+ "ietf.doc.views_doc.document_writeup", kwargs=dict(name=name)
+ ),
+ True,
+ None,
+ )
+ )
tabs.append(("Email expansions","email",urlreverse('ietf.doc.views_doc.document_email', kwargs=dict(name=name)), True, None))
tabs.append(("History", "history", urlreverse('ietf.doc.views_doc.document_history', kwargs=dict(name=name)), True, None))
@@ -147,9 +180,9 @@ def interesting_doc_relations(doc):
else:
raise TypeError("Expected this method to be called with a Document or DocHistory object")
- that_relationships = STATUSCHANGE_RELATIONS + ('conflrev', 'replaces', 'possibly_replaces', 'updates', 'obs')
+ that_relationships = STATUSCHANGE_RELATIONS + ('conflrev', 'replaces', 'possibly_replaces', 'updates', 'obs', 'became_rfc')
- that_doc_relationships = ('replaces', 'possibly_replaces', 'updates', 'obs')
+ that_doc_relationships = ('replaces', 'possibly_replaces', 'updates', 'obs', 'became_rfc')
interesting_relations_that = cls.objects.filter(target=target, relationship__in=that_relationships).select_related('source')
interesting_relations_that_doc = cls.objects.filter(source=doc, relationship__in=that_doc_relationships).prefetch_related('target')
@@ -160,11 +193,10 @@ def document_main(request, name, rev=None, document_html=False):
doc = get_object_or_404(Document.objects.select_related(), docalias__name=name)
# take care of possible redirections
- aliases = DocAlias.objects.filter(docs=doc).values_list("name", flat=True)
- if document_html is False and rev==None and doc.type_id == "draft" and not name.startswith("rfc"):
- for a in aliases:
- if a.startswith("rfc"):
- return redirect("ietf.doc.views_doc.document_main", name=a)
+ if document_html is False and rev is None:
+ became_rfc = next(iter(doc.related_that_doc("became_rfc")), None)
+ if became_rfc:
+ return redirect("ietf.doc.views_doc.document_main", name=became_rfc.name)
revisions = []
for h in doc.history_set.order_by("time", "id"):
@@ -206,7 +238,163 @@ def document_main(request, name, rev=None, document_html=False):
# specific document types
- if doc.type_id == "draft":
+ if doc.type_id == "rfc":
+ split_content = request.COOKIES.get("full_draft", settings.USER_PREFERENCE_DEFAULTS["full_draft"]) == "off"
+ if request.GET.get('include_text') == "0":
+ split_content = True
+ elif request.GET.get('include_text') == "1":
+ split_content = False
+ else:
+ pass
+
+ interesting_relations_that, interesting_relations_that_doc = interesting_doc_relations(doc)
+
+ can_edit = has_role(request.user, ("Area Director", "Secretariat"))
+ can_edit_authors = has_role(request.user, ("Secretariat"))
+
+ stream_slugs = StreamName.objects.values_list("slug", flat=True)
+ # For some reason, AnonymousUser has __iter__, but is not iterable,
+ # which causes problems in the filter() below. Work around this:
+ if request.user.is_authenticated:
+ roles = Role.objects.filter(group__acronym__in=stream_slugs, person__user=request.user)
+ roles = group_features_role_filter(roles, request.user.person, 'docman_roles')
+ else:
+ roles = []
+
+ can_change_stream = bool(can_edit or roles)
+
+ file_urls, found_types = build_file_urls(doc)
+ content = doc.text_or_error() # pyflakes:ignore
+ content = markup_txt.markup(maybe_split(content, split=split_content))
+
+ if not found_types:
+ content = "This RFC is not currently available online."
+ split_content = False
+ elif "txt" not in found_types:
+ content = "This RFC is not available in plain text format."
+ split_content = False
+
+ # mailing list search archive
+ search_archive = "www.ietf.org/mail-archive/web/"
+ if doc.stream_id == "ietf" and group.type_id == "wg" and group.list_archive:
+ search_archive = group.list_archive
+
+ search_archive = quote(search_archive, safe="~")
+
+ # status changes
+ status_changes = []
+ proposed_status_changes = []
+ for r in interesting_relations_that.filter(relationship__in=STATUSCHANGE_RELATIONS):
+ state_slug = r.source.get_state_slug()
+ if state_slug in ('appr-sent', 'appr-pend'):
+ status_changes.append(r)
+ elif state_slug in ('needshep','adrev','iesgeval','defer','appr-pr'):
+ proposed_status_changes.append(r)
+ else:
+ pass
+
+ presentations = doc.future_presentations()
+
+ augment_docs_and_user_with_user_info([doc], request.user)
+
+ exp_comment = doc.latest_event(IanaExpertDocEvent,type="comment")
+ iana_experts_comment = exp_comment and exp_comment.desc
+
+ # Do not show the Auth48 URL in the "Additional URLs" section
+ additional_urls = doc.documenturl_set.exclude(tag_id='auth48')
+
+ html = None
+ js = None
+ css = None
+ diff_revisions = None
+ simple_diff_revisions = None
+ if document_html:
+ diff_revisions=get_diff_revisions(request, name, doc if isinstance(doc,Document) else doc.doc)
+ simple_diff_revisions = [t[1] for t in diff_revisions if t[0] == doc.name]
+ simple_diff_revisions.reverse()
+ if rev and rev != doc.rev:
+ # No DocHistory was found matching rev - snapshot will be false
+ # and doc will be a Document object, not a DocHistory
+ snapshot = True
+ doc = doc.fake_history_obj(rev)
+ else:
+ html = doc.html_body()
+ if request.COOKIES.get("pagedeps") == "inline":
+ js = Path(finders.find("ietf/js/document_html.js")).read_text()
+ css = Path(finders.find("ietf/css/document_html_inline.css")).read_text()
+ if html:
+ css += Path(finders.find("ietf/css/document_html_txt.css")).read_text()
+ draft_that_became_rfc = None
+ became_rfc_alias = next(iter(doc.related_that("became_rfc")), None)
+ if became_rfc_alias:
+ draft_that_became_rfc = became_rfc_alias.document
+ # submission
+ submission = ""
+ if group is None:
+ submission = "unknown"
+ elif group.type_id == "individ":
+ submission = "individual"
+ elif group.type_id == "area" and doc.stream_id == "ietf":
+ submission = "individual in %s area" % group.acronym
+ else:
+ if group.features.acts_like_wg and not group.type_id == "edwg":
+ submission = "%s %s" % (group.acronym, group.type)
+ else:
+ submission = group.acronym
+ submission = '<a href="%s">%s</a>' % (group.about_url(), submission)
+ # Should be unreachable?
+ if (
+ draft_that_became_rfc
+ and draft_that_became_rfc.stream_id
+ and draft_that_became_rfc.get_state_slug(
+ "draft-stream-%s" % draft_that_became_rfc.stream_id
+ )
+ == "c-adopt"
+ ):
+ submission = "candidate for %s" % submission
+
+
+ # todo replace document_html?
+ return render(request, "doc/document_rfc.html" if document_html is False else "doc/document_html.html",
+ dict(doc=doc,
+ document_html=document_html,
+ css=css,
+ js=js,
+ html=html,
+ group=group,
+ top=top,
+ name=doc.name,
+ content=content,
+ split_content=split_content,
+ revisions=simple_diff_revisions if document_html else revisions,
+ snapshot=snapshot,
+ latest_rev=latest_rev,
+ can_edit=can_edit,
+ can_edit_authors=can_edit_authors,
+ can_change_stream=can_change_stream,
+ rfc_number=doc.rfc_number,
+ draft_name=draft_that_became_rfc and draft_that_became_rfc.name,
+ updates=interesting_relations_that_doc.filter(relationship="updates"),
+ updated_by=interesting_relations_that.filter(relationship="updates"),
+ obsoletes=interesting_relations_that_doc.filter(relationship="obs"),
+ obsoleted_by=interesting_relations_that.filter(relationship="obs"),
+ status_changes=status_changes,
+ proposed_status_changes=proposed_status_changes,
+ has_errata=doc.pk and doc.tags.filter(slug="errata"), # doc.pk == None if using a fake_history_obj
+ file_urls=file_urls,
+ additional_urls=additional_urls,
+ rfc_editor_state=doc.get_state("draft-rfceditor"),
+ iana_review_state=doc.get_state("draft-iana-review"),
+ iana_action_state=doc.get_state("draft-iana-action"),
+ iana_experts_state=doc.get_state("draft-iana-experts"),
+ iana_experts_comment=iana_experts_comment,
+ search_archive=search_archive,
+ presentations=presentations,
+ diff_revisions=diff_revisions,
+ submission=submission
+ ))
+
+ elif doc.type_id == "draft":
split_content = request.COOKIES.get("full_draft", settings.USER_PREFERENCE_DEFAULTS["full_draft"]) == "off"
if request.GET.get('include_text') == "0":
split_content = True
@@ -244,43 +432,13 @@ def document_main(request, name, rev=None, document_html=False):
is_author = request.user.is_authenticated and doc.documentauthor_set.filter(person__user=request.user).exists()
can_view_possibly_replaces = can_edit_replaces or is_author
- rfc_number = name[3:] if name.startswith("rfc") else None
- draft_name = None
- for a in aliases:
- if a.startswith("draft"):
- draft_name = a
-
- rfc_aliases = [prettify_std_name(a) for a in aliases
- if a.startswith("fyi") or a.startswith("std") or a.startswith("bcp")]
-
latest_revision = None
- # Workaround to allow displaying last rev of draft that became rfc as a draft
- # This should be unwound when RFCs become their own documents.
- if snapshot:
- doc.name = doc.doc.name
- name = doc.doc.name
- else:
- name = doc.name
-
file_urls, found_types = build_file_urls(doc)
- if not snapshot and doc.get_state_slug() == "rfc":
- # content
- content = doc.text_or_error() # pyflakes:ignore
- content = markup_txt.markup(maybe_split(content, split=split_content))
-
content = doc.text_or_error() # pyflakes:ignore
content = markup_txt.markup(maybe_split(content, split=split_content))
- if not snapshot and doc.get_state_slug() == "rfc":
- if not found_types:
- content = "This RFC is not currently available online."
- split_content = False
- elif "txt" not in found_types:
- content = "This RFC is not available in plain text format."
- split_content = False
- else:
- latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision")
+ latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision")
# ballot
iesg_ballot_summary = None
@@ -460,7 +618,7 @@ def document_main(request, name, rev=None, document_html=False):
augment_docs_and_user_with_user_info([doc], request.user)
- published = doc.latest_event(type="published_rfc")
+ published = doc.latest_event(type="published_rfc") # todo rethink this now that published_rfc is on rfc
started_iesg_process = doc.latest_event(type="started_iesg_process")
review_assignments = review_assignments_to_list_for_docs([doc]).get(doc.name, [])
@@ -478,12 +636,6 @@ def document_main(request, name, rev=None, document_html=False):
# Do not show the Auth48 URL in the "Additional URLs" section
additional_urls = doc.documenturl_set.exclude(tag_id='auth48')
- # Stream description passing test
- if doc.stream != None:
- stream_desc = doc.stream.desc
- else:
- stream_desc = "(None)"
-
html = None
js = None
css = None
@@ -514,12 +666,11 @@ def document_main(request, name, rev=None, document_html=False):
html=html,
group=group,
top=top,
- name=name,
+ name=doc.name,
content=content,
split_content=split_content,
revisions=simple_diff_revisions if document_html else revisions,
snapshot=snapshot,
- stream_desc=stream_desc,
latest_revision=latest_revision,
latest_rev=latest_rev,
can_edit=can_edit,
@@ -537,8 +688,7 @@ def document_main(request, name, rev=None, document_html=False):
can_request_review=can_request_review,
can_submit_unsolicited_review_for_teams=can_submit_unsolicited_review_for_teams,
- rfc_number=rfc_number,
- draft_name=draft_name,
+ draft_name=doc.name,
telechat=telechat,
iesg_ballot_summary=iesg_ballot_summary,
submission=submission,
@@ -555,7 +705,6 @@ def document_main(request, name, rev=None, document_html=False):
conflict_reviews=conflict_reviews,
status_changes=status_changes,
proposed_status_changes=proposed_status_changes,
- rfc_aliases=rfc_aliases,
has_errata=doc.pk and doc.tags.filter(slug="errata"), # doc.pk == None if using a fake_history_obj
published=published,
file_urls=file_urls,
@@ -584,7 +733,7 @@ def document_main(request, name, rev=None, document_html=False):
diff_revisions=diff_revisions
))
- if doc.type_id == "charter":
+ elif doc.type_id == "charter":
content = doc.text_or_error() # pyflakes:ignore
content = markup_txt.markup(content)
@@ -621,7 +770,7 @@ def document_main(request, name, rev=None, document_html=False):
can_manage=can_manage,
))
- if doc.type_id == "bofreq":
+ elif doc.type_id == "bofreq":
content = markdown.markdown(doc.text_or_error())
editors = bofreq_editors(doc)
responsible = bofreq_responsible(doc)
@@ -641,7 +790,7 @@ def document_main(request, name, rev=None, document_html=False):
editor_can_manage=editor_can_manage,
))
- if doc.type_id == "conflrev":
+ elif doc.type_id == "conflrev":
filename = "%s-%s.txt" % (doc.canonical_name(), doc.rev)
pathname = os.path.join(settings.CONFLICT_REVIEW_PATH,filename)
@@ -671,7 +820,7 @@ def document_main(request, name, rev=None, document_html=False):
approved_states=('appr-reqnopub-pend','appr-reqnopub-sent','appr-noprob-pend','appr-noprob-sent'),
))
- if doc.type_id == "statchg":
+ elif doc.type_id == "statchg":
filename = "%s-%s.txt" % (doc.canonical_name(), doc.rev)
pathname = os.path.join(settings.STATUS_CHANGE_PATH,filename)
@@ -705,7 +854,7 @@ def document_main(request, name, rev=None, document_html=False):
sorted_relations=sorted_relations,
))
- if doc.type_id in ("slides", "agenda", "minutes", "bluesheets", "procmaterials",):
+ elif doc.type_id in ("slides", "agenda", "minutes", "bluesheets", "procmaterials",):
can_manage_material = can_manage_materials(request.user, doc.group)
presentations = doc.future_presentations()
if doc.uploaded_filename:
@@ -761,7 +910,7 @@ def document_main(request, name, rev=None, document_html=False):
))
- if doc.type_id == "review":
+ elif doc.type_id == "review":
basename = "{}.txt".format(doc.name)
pathname = os.path.join(doc.get_file_path(), basename)
content = get_unicode_document_content(basename, pathname)
@@ -787,7 +936,7 @@ def document_main(request, name, rev=None, document_html=False):
assignments=assignments,
))
- if doc.type_id in ("chatlog", "polls"):
+ elif doc.type_id in ("chatlog", "polls"):
if isinstance(doc,DocHistory):
session = doc.doc.sessionpresentation_set.last().session
else:
@@ -862,7 +1011,7 @@ def document_html(request, name, rev=None):
doc = found.documents.get()
rev = found.matched_rev
- if not requested_rev and doc.is_rfc(): # Someone asked for /doc/html/8989
+ if not requested_rev and doc.type_id == "rfc": # Someone asked for /doc/html/8989
if not name.startswith('rfc'):
return redirect('ietf.doc.views_doc.document_html', name=doc.canonical_name())
@@ -872,7 +1021,12 @@ def document_html(request, name, rev=None):
if not os.path.exists(doc.get_file_name()):
raise Http404("File not found: %s" % doc.get_file_name())
- return document_main(request, name=doc.name if requested_rev else doc.canonical_name(), rev=doc.rev if requested_rev or not doc.is_rfc() else None, document_html=True)
+ return document_main(
+ request,
+ name=doc.name if requested_rev else doc.canonical_name(),
+ rev=doc.rev if requested_rev or doc.type_id != "rfc" else None,
+ document_html=True,
+ )
def document_pdfized(request, name, rev=None, ext=None):
@@ -1028,21 +1182,46 @@ def document_history(request, name):
add_events_message_info(events)
# figure out if the current user can add a comment to the history
- if doc.type_id == "draft" and doc.group != None:
- can_add_comment = bool(has_role(request.user, ("Area Director", "Secretariat", "IRTF Chair", "IANA", "RFC Editor")) or (
- request.user.is_authenticated and
- Role.objects.filter(name__in=("chair", "secr"),
- group__acronym=doc.group.acronym,
- person__user=request.user)))
+ if doc.type_id in ("draft", "rfc") and doc.group is not None:
+ can_add_comment = bool(
+ has_role(
+ request.user,
+ ("Area Director", "Secretariat", "IRTF Chair", "IANA", "RFC Editor"),
+ )
+ or (
+ request.user.is_authenticated
+ and Role.objects.filter(
+ name__in=("chair", "secr"),
+ group__acronym=doc.group.acronym,
+ person__user=request.user,
+ )
+ )
+ )
else:
- can_add_comment = has_role(request.user, ("Area Director", "Secretariat", "IRTF Chair"))
- return render(request, "doc/document_history.html",
- dict(doc=doc,
- top=top,
- diff_revisions=diff_revisions,
- events=events,
- can_add_comment=can_add_comment,
- ))
+ can_add_comment = has_role(
+ request.user, ("Area Director", "Secretariat", "IRTF Chair")
+ )
+
+ # Get related docs whose history should be linked
+ if doc.type_id == "draft":
+ related = doc.related_that_doc("became_rfc")
+ elif doc.type_id == "rfc":
+ related = doc.related_that("became_rfc")
+ else:
+ related = []
+
+ return render(
+ request,
+ "doc/document_history.html",
+ {
+ "doc": doc,
+ "top": top,
+ "diff_revisions": diff_revisions,
+ "events": events,
+ "related": related,
+ "can_add_comment": can_add_comment,
+ },
+ )
def document_bibtex(request, name, rev=None):
@@ -1058,31 +1237,35 @@ def document_bibtex(request, name, rev=None):
doc = get_object_or_404(Document, docalias__name=name)
- latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision")
- replaced_by = [d.name for d in doc.related_that("replaces")]
- published = doc.latest_event(type="published_rfc") is not None
- rfc = latest_revision.doc if latest_revision and latest_revision.doc.get_state_slug() == "rfc" else None
+ doi = None
+ draft_became_rfc = None
+ replaced_by = None
+ latest_revision = None
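+ # drafts and rfcs diverge here: a draft may have been replaced or have become an rfc; an rfc gets a DOI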
+ if doc.type_id == "draft":
+ latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision")
+ replaced_by = [d.name for d in doc.related_that("replaces")]
+ draft_became_rfc_alias = next(iter(doc.related_that_doc("became_rfc")), None)
- if rev != None and rev != doc.rev:
- # find the entry in the history
- for h in doc.history_set.order_by("-time"):
- if rev == h.rev:
- doc = h
- break
-
- if doc.is_rfc():
+ if rev != None and rev != doc.rev:
+ # find the entry in the history
+ for h in doc.history_set.order_by("-time"):
+ if rev == h.rev:
+ doc = h
+ break
+
+ if draft_became_rfc_alias:
+ draft_became_rfc = draft_became_rfc_alias.document
+
+ elif doc.type_id == "rfc":
# This needs to be replaced with a lookup, as the mapping may change
# over time. Probably by updating ietf/sync/rfceditor.py to add the
# as a DocAlias, and use a method on Document to retrieve it.
doi = f"10.17487/RFC{doc.rfc_number:04d}"
- else:
- doi = None
return render(request, "doc/document_bibtex.bib",
dict(doc=doc,
replaced_by=replaced_by,
- published=published,
- rfc=rfc,
+ published_as=draft_became_rfc,
latest_revision=latest_revision,
doi=doi,
),
@@ -1945,9 +2128,16 @@ def idnits2_rfc_status(request):
def idnits2_state(request, name, rev=None):
doc = get_object_or_404(Document, docalias__name=name)
- if doc.type_id!='draft':
+ if doc.type_id not in ["draft", "rfc"]:
raise Http404
- zero_revision = NewRevisionDocEvent.objects.filter(doc=doc,rev='00').first()
+ zero_revision = None
+ if doc.type_id == "rfc":
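+ # for an RFC, the "00" revision event lives on the draft it came from, if any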
+ draft_alias = next(iter(doc.related_that('became_rfc')), None)
+ if draft_alias:
+ draft = draft_alias.document
+ zero_revision = NewRevisionDocEvent.objects.filter(doc=draft,rev='00').first()
+ else:
+ zero_revision = NewRevisionDocEvent.objects.filter(doc=doc,rev='00').first()
if zero_revision:
doc.created = zero_revision.time
else:
diff --git a/ietf/doc/views_review.py b/ietf/doc/views_review.py
index fa6e3a7ff..2ded177f5 100644
--- a/ietf/doc/views_review.py
+++ b/ietf/doc/views_review.py
@@ -117,7 +117,7 @@ class RequestReviewForm(forms.ModelForm):
@login_required
def request_review(request, name):
- doc = get_object_or_404(Document, name=name)
+ doc = get_object_or_404(Document, type_id="draft", name=name)
if not can_request_review_of_doc(request.user, doc):
permission_denied(request, "You do not have permission to perform this action")
diff --git a/ietf/doc/views_search.py b/ietf/doc/views_search.py
index 7b144051c..6a9e55b79 100644
--- a/ietf/doc/views_search.py
+++ b/ietf/doc/views_search.py
@@ -409,7 +409,7 @@ def shorten_group_name(name):
def ad_dashboard_sort_key(doc):
- if doc.type.slug=='rfc' and doc.get_state_slug('rfc') == 'published':
+ if doc.type.slug=='rfc':
return "21%04d" % int(doc.rfc_number)
if doc.type.slug=='statchg' and doc.get_state_slug('statchg') == 'appr-sent':
return "22%d" % 0 # TODO - get the date of the transition into this state here
@@ -805,21 +805,20 @@ def recent_drafts(request, days=7):
})
-def index_all_drafts(request):
+def index_all_drafts(request): # Should we rename this?
# try to be efficient since this view returns a lot of data
categories = []
- for s in ("active", "rfc", "expired", "repl", "auth-rm", "ietf-rm"):
+ # Gather drafts
+ for s in ("active", "expired", "repl", "auth-rm", "ietf-rm"):
state = State.objects.get(type="draft", slug=s)
- if state.slug == "rfc":
- heading = "RFCs"
- elif state.slug in ("ietf-rm", "auth-rm"):
+ if state.slug in ("ietf-rm", "auth-rm"):
heading = "Internet-Drafts %s" % state.name
else:
heading = "%s Internet-Drafts" % state.name
- draft_names = DocAlias.objects.filter(docs__states=state).values_list("name", "docs__name")
+ draft_names = DocAlias.objects.filter(docs__type_id="draft", docs__states=state).values_list("name", "docs__name")
names = []
names_to_skip = set()
@@ -828,24 +827,52 @@ def index_all_drafts(request):
if name != doc:
if not name.startswith("rfc"):
name, doc = doc, name
- names_to_skip.add(doc)
-
- if name.startswith("rfc"):
- name = name.upper()
- sort_key = '%09d' % (100000000-int(name[3:]))
+ names_to_skip.add(doc) # this is filtering out subseries docaliases (which we will delete, so TODO clean this out after doing so)
names.append((name, sort_key))
names.sort(key=lambda t: t[1])
names = [f'<a href="/doc/{n}/">{n}</a>'
- for n, __ in names if n not in names_to_skip]
+ for n, __ in names if n not in names_to_skip]
categories.append((state,
heading,
len(names),
"
".join(names)
))
+
+ # gather RFCs
+ rfc_names = DocAlias.objects.filter(docs__type_id="rfc").values_list("name", "docs__name")
+ names = []
+ names_to_skip = set()
+ for name, doc in rfc_names:
+ sort_key = name
+ if name != doc: # There are some std docaliases that pointed to rfc names pre-migration.
+ if not name.startswith("rfc"):
+ name, doc = doc, name
+ names_to_skip.add(doc) # this is filtering out those std docaliases (which we will delete, so TODO clean this out after doing so)
+ name = name.upper()
+ sort_key = '%09d' % (100000000-int(name[3:]))
+
+ names.append((name, sort_key))
+
+ names.sort(key=lambda t: t[1])
+
+ names = [f'<a href="/doc/{n}/">{n}</a>'
+ for n, __ in names if n not in names_to_skip]
+
+ state = State.objects.get(type_id="rfc", slug="published")
+
+ categories.append((state,
+ "RFCs",
+ len(names),
+ "
".join(names)
+ ))
+
+ # Return to the previous section ordering
+ categories = categories[0:1]+categories[5:]+categories[1:5]
+
return render(request, 'doc/index_all_drafts.html', { "categories": categories })
def index_active_drafts(request):
diff --git a/ietf/group/views.py b/ietf/group/views.py
index dc31e1df1..cfc701246 100644
--- a/ietf/group/views.py
+++ b/ietf/group/views.py
@@ -534,7 +534,7 @@ def group_documents_txt(request, acronym, group_type=None):
rows = []
for d in itertools.chain(docs, docs_related):
- if d.is_rfc():
+ if d.type_id == "rfc":
name = str(d.rfc_number)
else:
name = "%s-%s" % (d.name, d.rev)
diff --git a/ietf/iesg/tests.py b/ietf/iesg/tests.py
index 53172e645..a5482f574 100644
--- a/ietf/iesg/tests.py
+++ b/ietf/iesg/tests.py
@@ -107,7 +107,7 @@ class IESGAgendaTests(TestCase):
super().setUp()
mars = GroupFactory(acronym='mars',parent=Group.objects.get(acronym='farfut'))
wgdraft = WgDraftFactory(name='draft-ietf-mars-test', group=mars, intended_std_level_id='ps')
- rfc = IndividualRfcFactory.create(stream_id='irtf', other_aliases=['rfc6666',], states=[('draft','rfc'),('draft-iesg','pub')], std_level_id='inf', )
+ rfc = IndividualRfcFactory.create(stream_id='irtf', rfc_number=6666, std_level_id='inf', )
wgdraft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'), relationship_id='refnorm')
ise_draft = IndividualDraftFactory(name='draft-imaginary-independent-submission')
ise_draft.stream = StreamName.objects.get(slug="ise")
diff --git a/ietf/iesg/views.py b/ietf/iesg/views.py
index 673c679e4..3038a2092 100644
--- a/ietf/iesg/views.py
+++ b/ietf/iesg/views.py
@@ -151,7 +151,7 @@ def agenda_json(request, date=None):
if doc.type_id == "draft":
docinfo['rev'] = doc.rev
docinfo['intended-std-level'] = str(doc.intended_std_level)
- if doc.is_rfc():
+ if doc.type_id == "rfc":
docinfo['rfc-number'] = doc.rfc_number
iana_state = doc.get_state("draft-iana-review")
diff --git a/ietf/name/fixtures/names.json b/ietf/name/fixtures/names.json
index af843512d..46cfd4c35 100644
--- a/ietf/name/fixtures/names.json
+++ b/ietf/name/fixtures/names.json
@@ -9919,7 +9919,7 @@
"used": true
},
"model": "name.docrelationshipname",
- "pk": "became-rfc"
+ "pk": "became_rfc"
},
{
"fields": {
diff --git a/ietf/name/migrations/0004_rfc_doctype_names.py b/ietf/name/migrations/0004_rfc_doctype_names.py
index 634a224c5..1e87feaf6 100644
--- a/ietf/name/migrations/0004_rfc_doctype_names.py
+++ b/ietf/name/migrations/0004_rfc_doctype_names.py
@@ -14,7 +14,7 @@ def forward(apps, schema_editor):
DocRelationshipName = apps.get_model("name", "DocRelationshipName")
DocRelationshipName.objects.get_or_create(
- slug="became-rfc",
+ slug="became_rfc",
name="became RFC",
used=True,
revname="came from draft",
diff --git a/ietf/nomcom/forms.py b/ietf/nomcom/forms.py
index 20bf508e8..ad7bc67c2 100644
--- a/ietf/nomcom/forms.py
+++ b/ietf/nomcom/forms.py
@@ -343,7 +343,7 @@ class NominateForm(forms.ModelForm):
'year': self.nomcom.year(),
}
path = nomcom_template_path + NOMINATION_RECEIPT_TEMPLATE
- send_mail(None, to_email, from_email, subject, path, context, cc=cc)
+ send_mail(None, to_email, from_email, subject, path, context, cc=cc, copy=False, save=False)
return nomination
@@ -458,7 +458,7 @@ class NominateNewPersonForm(forms.ModelForm):
'year': self.nomcom.year(),
}
path = nomcom_template_path + NOMINATION_RECEIPT_TEMPLATE
- send_mail(None, to_email, from_email, subject, path, context, cc=cc)
+ send_mail(None, to_email, from_email, subject, path, context, cc=cc, copy=False, save=False)
return nomination
@@ -551,7 +551,7 @@ class FeedbackForm(forms.ModelForm):
}
path = nomcom_template_path + FEEDBACK_RECEIPT_TEMPLATE
# TODO - make the thing above more generic
- send_mail(None, to_email, from_email, subject, path, context, cc=cc, copy=False)
+ send_mail(None, to_email, from_email, subject, path, context, cc=cc, copy=False, save=False)
class Meta:
model = Feedback
diff --git a/ietf/nomcom/tests.py b/ietf/nomcom/tests.py
index dff5fb950..216984776 100644
--- a/ietf/nomcom/tests.py
+++ b/ietf/nomcom/tests.py
@@ -122,7 +122,7 @@ class NomcomViewsTest(TestCase):
self.check_url_status(url, 200)
self.client.logout()
login_testing_unauthorized(self, MEMBER_USER, url)
- return self.check_url_status(url, 200)
+ self.check_url_status(url, 200)
def access_chair_url(self, url):
login_testing_unauthorized(self, COMMUNITY_USER, url)
@@ -134,7 +134,7 @@ class NomcomViewsTest(TestCase):
login_testing_unauthorized(self, COMMUNITY_USER, url)
login_testing_unauthorized(self, CHAIR_USER, url)
login_testing_unauthorized(self, SECRETARIAT_USER, url)
- return self.check_url_status(url, 200)
+ self.check_url_status(url, 200)
def test_private_index_view(self):
"""Verify private home view"""
@@ -599,6 +599,8 @@ class NomcomViewsTest(TestCase):
self.nominate_view(public=True,confirmation=True)
self.assertEqual(len(outbox), messages_before + 3)
+ self.assertEqual(Message.objects.count(), 2)
+ self.assertFalse(Message.objects.filter(subject="Nomination receipt").exists())
self.assertEqual('IETF Nomination Information', outbox[-3]['Subject'])
self.assertEqual(self.email_from, outbox[-3]['From'])
@@ -625,8 +627,7 @@ class NomcomViewsTest(TestCase):
def test_private_nominate(self):
self.access_member_url(self.private_nominate_url)
- return self.nominate_view(public=False)
- self.client.logout()
+ self.nominate_view(public=False)
def test_public_nominate_newperson(self):
login_testing_unauthorized(self, COMMUNITY_USER, self.public_nominate_url)
@@ -666,13 +667,13 @@ class NomcomViewsTest(TestCase):
def test_private_nominate_newperson(self):
self.access_member_url(self.private_nominate_url)
- return self.nominate_newperson_view(public=False)
- self.client.logout()
+ self.nominate_newperson_view(public=False, confirmation=True)
+ self.assertFalse(Message.objects.filter(subject="Nomination receipt").exists())
def test_private_nominate_newperson_who_already_exists(self):
EmailFactory(address='nominee@example.com')
self.access_member_url(self.private_nominate_newperson_url)
- return self.nominate_newperson_view(public=False)
+ self.nominate_newperson_view(public=False)
def test_public_nominate_with_automatic_questionnaire(self):
nomcom = get_nomcom_by_year(self.year)
@@ -844,8 +845,7 @@ class NomcomViewsTest(TestCase):
def test_add_questionnaire(self):
self.access_chair_url(self.add_questionnaire_url)
- return self.add_questionnaire()
- self.client.logout()
+ self.add_questionnaire()
def add_questionnaire(self, *args, **kwargs):
public = kwargs.pop('public', False)
@@ -906,6 +906,8 @@ class NomcomViewsTest(TestCase):
# We're interested in the confirmation receipt here
self.assertEqual(len(outbox),3)
self.assertEqual('NomCom comment confirmation', outbox[2]['Subject'])
+ self.assertEqual(Message.objects.count(), 2)
+ self.assertFalse(Message.objects.filter(subject="NomCom comment confirmation").exists())
email_body = get_payload_text(outbox[2])
self.assertIn(position, email_body)
self.assertNotIn('$', email_body)
@@ -920,7 +922,7 @@ class NomcomViewsTest(TestCase):
def test_private_feedback(self):
self.access_member_url(self.private_feedback_url)
- return self.feedback_view(public=False)
+ self.feedback_view(public=False)
def feedback_view(self, *args, **kwargs):
public = kwargs.pop('public', True)
diff --git a/ietf/nomcom/views.py b/ietf/nomcom/views.py
index c077afce4..d43f227c0 100644
--- a/ietf/nomcom/views.py
+++ b/ietf/nomcom/views.py
@@ -1106,6 +1106,41 @@ def edit_template(request, year, template_id):
def list_positions(request, year):
nomcom = get_nomcom_by_year(year)
positions = nomcom.position_set.order_by('-is_open')
+ if request.method == 'POST':
+ if nomcom.group.state_id != 'active':
+ messages.warning(request, "This nomcom is not active. Request administrative assistance if Position state needs to change.")
+ else:
+ action = request.POST.get('action')
+ positions_to_modify = request.POST.getlist('selected')
+ if positions_to_modify:
+ positions = positions.filter(id__in=positions_to_modify)
+ if action == "set_iesg":
+ positions.update(is_iesg_position=True)
+ messages.success(request,'The selected positions have been set as IESG Positions')
+ elif action == "unset_iesg":
+ positions.update(is_iesg_position=False)
+ messages.success(request,'The selected positions have been set as NOT IESG Positions')
+ elif action == "set_open":
+ positions.update(is_open=True)
+ messages.success(request,'The selected positions have been set as Open')
+ elif action == "unset_open":
+ positions.update(is_open=False)
+ messages.success(request,'The selected positions have been set as NOT Open')
+ elif action == "set_accept_nom":
+ positions.update(accepting_nominations=True)
+ messages.success(request,'The selected positions have been set as Accepting Nominations')
+ elif action == "unset_accept_nom":
+ positions.update(accepting_nominations=False)
+ messages.success(request,'The selected positions have been set as NOT Accepting Nominations')
+ elif action == "set_accept_fb":
+ positions.update(accepting_feedback=True)
+ messages.success(request,'The selected positions have been set as Accepting Feedback')
+ elif action == "unset_accept_fb":
+ positions.update(accepting_feedback=False)
+ messages.success(request,'The selected positions have been set as NOT Accepting Feedback')
+ positions = nomcom.position_set.order_by('-is_open')
+ else:
+ messages.warning(request, "Please select some positions to work with")
return render(request, 'nomcom/list_positions.html',
{'positions': positions,
diff --git a/ietf/review/utils.py b/ietf/review/utils.py
index 31b6b401f..979682ab6 100644
--- a/ietf/review/utils.py
+++ b/ietf/review/utils.py
@@ -50,6 +50,8 @@ def can_request_review_of_doc(user, doc):
if not user.is_authenticated:
return False
+    # This check is oddly placed, since it has nothing to do with the user,
+    # but this utility is used in too many places to move it quickly.
if doc.type_id == 'draft' and doc.get_state_slug() != 'active':
return False
diff --git a/ietf/secr/sreq/views.py b/ietf/secr/sreq/views.py
index f52513bc7..eb93168e1 100644
--- a/ietf/secr/sreq/views.py
+++ b/ietf/secr/sreq/views.py
@@ -730,6 +730,7 @@ def no_session(request, acronym):
requested_duration=datetime.timedelta(0),
type_id='regular',
purpose_id='regular',
+ has_onsite_tool=group.features.acts_like_wg,
)
SchedulingEvent.objects.create(
session=session,
diff --git a/ietf/secr/telechat/tests.py b/ietf/secr/telechat/tests.py
index e4661b767..f12226a4f 100644
--- a/ietf/secr/telechat/tests.py
+++ b/ietf/secr/telechat/tests.py
@@ -67,8 +67,7 @@ class SecrTelechatTestCase(TestCase):
def test_doc_detail_draft_with_downref(self):
ad = Person.objects.get(user__username="ad")
draft = WgDraftFactory(ad=ad, intended_std_level_id='ps', states=[('draft-iesg','pub-req'),])
- rfc = IndividualRfcFactory.create(stream_id='irtf', other_aliases=['rfc6666',],
- states=[('draft','rfc'),('draft-iesg','pub')], std_level_id='inf', )
+ rfc = IndividualRfcFactory.create(stream_id='irtf', rfc_number=6666, std_level_id='inf')
draft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'),
relationship_id='refnorm')
create_ballot_if_not_open(None, draft, ad, 'approve')
diff --git a/ietf/submit/forms.py b/ietf/submit/forms.py
index 89a92e91e..65c0db1c7 100644
--- a/ietf/submit/forms.py
+++ b/ietf/submit/forms.py
@@ -704,6 +704,11 @@ class SubmissionAutoUploadForm(SubmissionBaseUploadForm):
'replaces',
forms.ValidationError("An Internet-Draft can only replace another Internet-Draft"),
)
+ elif doc.get_state_slug() == "rfc":
+ self.add_error(
+ 'replaces',
+ forms.ValidationError("An Internet-Draft cannot replace another Internet-Draft that has become an RFC"),
+ )
elif doc.get_state_slug('draft-iesg') in ('approved', 'ann', 'rfcqueue'):
self.add_error(
'replaces',
diff --git a/ietf/submit/tests.py b/ietf/submit/tests.py
index 5c20a7fb4..2478bbd99 100644
--- a/ietf/submit/tests.py
+++ b/ietf/submit/tests.py
@@ -32,7 +32,7 @@ from ietf.submit.utils import (expirable_submissions, expire_submission, find_su
process_and_accept_uploaded_submission, SubmissionError, process_submission_text,
process_submission_xml, process_uploaded_submission,
process_and_validate_submission)
-from ietf.doc.factories import (DocumentFactory, WgDraftFactory, IndividualDraftFactory, IndividualRfcFactory,
+from ietf.doc.factories import (DocumentFactory, WgDraftFactory, IndividualDraftFactory,
ReviewFactory, WgRfcFactory)
from ietf.doc.models import ( Document, DocAlias, DocEvent, State,
BallotPositionDocEvent, DocumentAuthor, SubmissionDocEvent )
@@ -3090,13 +3090,15 @@ class SubmissionUploadFormTests(BaseSubmitTestCase):
# can't replace RFC
rfc = WgRfcFactory()
+ draft = WgDraftFactory(states=[("draft", "rfc")])
+ draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc.docalias.first())
form = SubmissionAutoUploadForm(
request_factory.get('/some/url'),
- data={'user': auth.user.username, 'replaces': rfc.name},
+ data={'user': auth.user.username, 'replaces': draft.name},
files=files_dict,
)
self.assertFalse(form.is_valid())
- self.assertIn('An Internet-Draft can only replace another Internet-Draft', form.errors['replaces'])
+        self.assertIn('An Internet-Draft cannot replace another Internet-Draft that has become an RFC', form.errors['replaces'])
# can't replace draft approved by iesg
existing_drafts[0].set_state(State.objects.get(type='draft-iesg', slug='approved'))
@@ -3688,25 +3690,9 @@ class RefsTests(BaseSubmitTestCase):
class PostSubmissionTests(BaseSubmitTestCase):
- @override_settings(RFC_FILE_TYPES=('txt', 'xml'), IDSUBMIT_FILE_TYPES=('pdf', 'md'))
- def test_find_submission_filenames_rfc(self):
- """Posting an RFC submission should use RFC_FILE_TYPES"""
- rfc = IndividualRfcFactory()
- path = Path(self.staging_dir)
- for ext in ['txt', 'xml', 'pdf', 'md']:
- (path / f'{rfc.name}-{rfc.rev}.{ext}').touch()
- files = find_submission_filenames(rfc)
- self.assertCountEqual(
- files,
- {
- 'txt': f'{path}/{rfc.name}-{rfc.rev}.txt',
- 'xml': f'{path}/{rfc.name}-{rfc.rev}.xml',
- # should NOT find the pdf or md
- }
- )
@override_settings(RFC_FILE_TYPES=('txt', 'xml'), IDSUBMIT_FILE_TYPES=('pdf', 'md'))
- def test_find_submission_filenames_draft(self):
+ def test_find_submission_filenames(self):
"""Posting an I-D submission should use IDSUBMIT_FILE_TYPES"""
draft = WgDraftFactory()
path = Path(self.staging_dir)
diff --git a/ietf/submit/utils.py b/ietf/submit/utils.py
index 471b9abb6..17127d474 100644
--- a/ietf/submit/utils.py
+++ b/ietf/submit/utils.py
@@ -287,7 +287,7 @@ def find_submission_filenames(draft):
"""
path = pathlib.Path(settings.IDSUBMIT_STAGING_PATH)
stem = f'{draft.name}-{draft.rev}'
- allowed_types = settings.RFC_FILE_TYPES if draft.get_state_slug() == 'rfc' else settings.IDSUBMIT_FILE_TYPES
+ allowed_types = settings.IDSUBMIT_FILE_TYPES
candidates = {ext: path / f'{stem}.{ext}' for ext in allowed_types}
return {ext: str(filename) for ext, filename in candidates.items() if filename.exists()}
diff --git a/ietf/templates/doc/document_bibtex.bib b/ietf/templates/doc/document_bibtex.bib
index 5dda4649e..2e4dc9c87 100644
--- a/ietf/templates/doc/document_bibtex.bib
+++ b/ietf/templates/doc/document_bibtex.bib
@@ -3,7 +3,7 @@
{% load ietf_filters %}
{% load textfilters %}
-{% if doc.get_state_slug == "rfc" %}
+{% if doc.type_id == "rfc" %}
{% if doc.stream|slugify == "legacy" %}
% Datatracker information for RFCs on the Legacy Stream is unfortunately often
% incorrect. Please correct the bibtex below based on the information in the
@@ -16,7 +16,7 @@
publisher = {RFC Editor},
doi = {% templatetag openbrace %}{{ doi }}{% templatetag closebrace %},
url = {% templatetag openbrace %}{{ doc.rfc_number|rfceditor_info_url }}{% templatetag closebrace %},{% else %}
-{% if published %}%% You should probably cite rfc{{ latest_revision.doc.rfc_number }} instead of this I-D.{% else %}{% if replaced_by %}%% You should probably cite {{replaced_by|join:" or "}} instead of this I-D.{% else %}
+{% if published_as %}%% You should probably cite rfc{{ published_as.rfc_number }} instead of this I-D.{% else %}{% if replaced_by %}%% You should probably cite {{replaced_by|join:" or "}} instead of this I-D.{% else %}
{% if doc.rev != latest_revision.rev %}%% You should probably cite {{latest_revision.doc.name}}-{{latest_revision.rev}} instead of this revision.{%endif%}{% endif %}{% endif %}
@techreport{% templatetag openbrace %}{{doc.name|slice:"6:"}}-{{doc.rev}},
number = {% templatetag openbrace %}{{doc.name}}-{{doc.rev}}{% templatetag closebrace %},
@@ -29,7 +29,7 @@
title = {% templatetag openbrace %}{% templatetag openbrace %}{{doc.title|texescape}}{% templatetag closebrace %}{% templatetag closebrace %},
pagetotal = {{ doc.pages }},
year = {{ doc.pub_date.year }},
- month = {{ doc.pub_date|date:"b" }},{% if not doc.rfc_number or doc.pub_date.day == 1 and doc.pub_date.month == 4 %}
+ month = {{ doc.pub_date|date:"b" }},{% if not doc.type_id == "rfc" or doc.pub_date.day == 1 and doc.pub_date.month == 4 %}
day = {{ doc.pub_date.day }},{% endif %}
abstract = {% templatetag openbrace %}{{ doc.abstract|clean_whitespace|texescape }}{% templatetag closebrace %},
{% templatetag closebrace %}
diff --git a/ietf/templates/doc/document_history.html b/ietf/templates/doc/document_history.html
index 9c76774b8..91d5d393c 100644
--- a/ietf/templates/doc/document_history.html
+++ b/ietf/templates/doc/document_history.html
@@ -11,7 +11,7 @@
-
+
{% endblock %}
{% block content %}
{% origin %}
@@ -20,7 +20,17 @@