import pandas as pd
# Data from Table S2
data = {
"Domain": [
"Q1. Functional unit defined",
"Q2. System boundaries specified",
"Q3. Stage exclusions justified",
"Q4. Data sources specified",
"Q5. Geographic relevance",
"Q6. Technology data up-to-date",
"Q7. Key assumptions listed",
"Q8. LCIA method identified",
"Q9. Sensitivity/uncertainty analysis",
"Q10. Conclusions supported",
"Q11. Funding/conflicts disclosed"
],
"Yes": [84.21, 100.00, 0.00, 94.74, 92.11, 81.58, 84.21, 39.47, 57.89, 92.11, 100.00],
"Partly": [15.79, 0.00, 2.63, 5.26, 5.26, 18.42, 10.53, 2.63, 10.53, 5.26, 0.00],
"No": [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 5.26, 57.89, 26.32, 2.63, 0.00],
"Not Reported": [0.00, 0.00, 97.37, 0.00, 2.63, 0.00, 0.00, 0.00, 5.26, 0.00, 0.00],
}
df = pd.DataFrame(data)
# Convert percentages to study counts (out of 38)
df["Yes_count"] = (df["Yes"] * 38 / 100).round().astype(int)
df["Partly_count"] = (df["Partly"] * 38 / 100).round().astype(int)
df["No_count"] = (df["No"] * 38 / 100).round().astype(int)
df["NR_count"] = (df["Not Reported"] * 38 / 100).round().astype(int)
# Calculate average score per question using scoring rule (Yes=1, Partly=0.5, No/NR=0)
df["Avg_score"] = (
(df["Yes_count"] * 1 + df["Partly_count"] * 0.5) / 38
).round(2)
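# Worked example (Q8): 39.47% "Yes" and 2.63% "Partly" of 38 studies round
# to 15 and 1, giving a domain score of (15*1 + 1*0.5) / 38 ≈ 0.41.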
# Overall quality score: sum of the 11 domain scores
# (maximum possible = 11, reached only if every study scored "Yes" on every domain)
total_avg_score = df["Avg_score"].sum().round(2)
# Classify the overall score into the review's three quality bands
# (>= 9 of 11: high; >= 6: moderate; otherwise: low)
if total_avg_score >= 9:
    quality = "High quality"
elif total_avg_score >= 6:
    quality = "Moderate quality"
else:
    quality = "Low quality"
import caas_jupyter_tools
caas_jupyter_tools.display_dataframe_to_user("Risk of Bias Scoring Table", df)
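# Note: caas_jupyter_tools is specific to this notebook sandbox; in a plain
# IPython/Jupyter session, display(df) or print(df.to_string(index=False))
# shows the same table.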
(total_avg_score, quality)
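# With the Table S2 percentages above, this should evaluate to about
# (8.64, 'Moderate quality'); the exact value shifts slightly if the
# per-domain rounding scheme is changed.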